repo_name (string, 6 classes) | pr_number (int64, 512 to 78.9k) | pr_title (string, 3 to 144 chars) | pr_description (string, 0 to 30.3k chars) | author (string, 2 to 21 chars) | date_created (timestamp[ns, tz=UTC]) | date_merged (timestamp[ns, tz=UTC]) | previous_commit (string, 40 chars) | pr_commit (string, 40 chars) | query (string, 17 to 30.4k chars) | filepath (string, 9 to 210 chars) | before_content (string, 0 to 112M chars) | after_content (string, 0 to 112M chars) | label (int64, -1 to 1)
---|---|---|---|---|---|---|---|---|---|---|---|---|---
dotnet/runtime | 66,018 | Some more clean up for RegexOptions.NonBacktracking | - Deleted some dead code
- Avoiding passing things around as IEnumerable when we could pass a stronger type
- Cleaned up BitVector and made it a struct
- Moved code out of the Algebras folder into the main folder | stephentoub | 2022-03-01T18:59:28Z | 2022-03-04T15:52:45Z | 47191c04d8aeca28adbb6fd1ce0f878a87655aa4 | 6dcefe002035fa19c3288d54d8d10f6533cb94fc | Some more clean up for RegexOptions.NonBacktracking. - Deleted some dead code
- Avoiding passing things around as IEnumerable when we could pass a stronger type
- Cleaned up BitVector and made it a struct
- Moved code out of the Algebras folder into the main folder | ./src/libraries/System.Reflection.Metadata/src/System/Reflection/Metadata/TypeSystem/DeclarativeSecurityAttribute.cs | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System.Diagnostics;
using System.Reflection.Metadata.Ecma335;
namespace System.Reflection.Metadata
{
public readonly struct DeclarativeSecurityAttribute
{
private readonly MetadataReader _reader;
// Workaround: JIT doesn't generate good code for nested structures, so use RowId.
private readonly int _rowId;
internal DeclarativeSecurityAttribute(MetadataReader reader, int rowId)
{
Debug.Assert(reader != null);
Debug.Assert(rowId != 0);
_reader = reader;
_rowId = rowId;
}
public DeclarativeSecurityAction Action
{
get
{
return _reader.DeclSecurityTable.GetAction(_rowId);
}
}
public EntityHandle Parent
{
get
{
return _reader.DeclSecurityTable.GetParent(_rowId);
}
}
public BlobHandle PermissionSet
{
get
{
return _reader.DeclSecurityTable.GetPermissionSet(_rowId);
}
}
}
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System.Diagnostics;
using System.Reflection.Metadata.Ecma335;
namespace System.Reflection.Metadata
{
public readonly struct DeclarativeSecurityAttribute
{
private readonly MetadataReader _reader;
// Workaround: JIT doesn't generate good code for nested structures, so use RowId.
private readonly int _rowId;
internal DeclarativeSecurityAttribute(MetadataReader reader, int rowId)
{
Debug.Assert(reader != null);
Debug.Assert(rowId != 0);
_reader = reader;
_rowId = rowId;
}
public DeclarativeSecurityAction Action
{
get
{
return _reader.DeclSecurityTable.GetAction(_rowId);
}
}
public EntityHandle Parent
{
get
{
return _reader.DeclSecurityTable.GetParent(_rowId);
}
}
public BlobHandle PermissionSet
{
get
{
return _reader.DeclSecurityTable.GetPermissionSet(_rowId);
}
}
}
}
| -1 |
dotnet/runtime | 66,018 | Some more clean up for RegexOptions.NonBacktracking | - Deleted some dead code
- Avoiding passing things around as IEnumerable when we could pass a stronger type
- Cleaned up BitVector and made it a struct
- Moved code out of the Algebras folder into the main folder | stephentoub | 2022-03-01T18:59:28Z | 2022-03-04T15:52:45Z | 47191c04d8aeca28adbb6fd1ce0f878a87655aa4 | 6dcefe002035fa19c3288d54d8d10f6533cb94fc | Some more clean up for RegexOptions.NonBacktracking. - Deleted some dead code
- Avoiding passing things around as IEnumerable when we could pass a stronger type
- Cleaned up BitVector and made it a struct
- Moved code out of the Algebras folder into the main folder | ./src/tests/Loader/classloader/TypeGeneratorTests/TypeGeneratorTest165/Generated165.ilproj | <Project Sdk="Microsoft.NET.Sdk.IL">
<PropertyGroup>
<CLRTestPriority>1</CLRTestPriority>
</PropertyGroup>
<ItemGroup>
<Compile Include="Generated165.il" />
</ItemGroup>
<ItemGroup>
<ProjectReference Include="..\TestFramework\TestFramework.csproj" />
</ItemGroup>
</Project>
| <Project Sdk="Microsoft.NET.Sdk.IL">
<PropertyGroup>
<CLRTestPriority>1</CLRTestPriority>
</PropertyGroup>
<ItemGroup>
<Compile Include="Generated165.il" />
</ItemGroup>
<ItemGroup>
<ProjectReference Include="..\TestFramework\TestFramework.csproj" />
</ItemGroup>
</Project>
| -1 |
dotnet/runtime | 66,018 | Some more clean up for RegexOptions.NonBacktracking | - Deleted some dead code
- Avoiding passing things around as IEnumerable when we could pass a stronger type
- Cleaned up BitVector and made it a struct
- Moved code out of the Algebras folder into the main folder | stephentoub | 2022-03-01T18:59:28Z | 2022-03-04T15:52:45Z | 47191c04d8aeca28adbb6fd1ce0f878a87655aa4 | 6dcefe002035fa19c3288d54d8d10f6533cb94fc | Some more clean up for RegexOptions.NonBacktracking. - Deleted some dead code
- Avoiding passing things around as IEnumerable when we could pass a stronger type
- Cleaned up BitVector and made it a struct
- Moved code out of the Algebras folder into the main folder | ./src/tests/JIT/opt/AssertionPropagation/NullCheckAssertion2.csproj | <Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<OutputType>Exe</OutputType>
<CLRTestPriority>1</CLRTestPriority>
</PropertyGroup>
<PropertyGroup>
<!-- Set to 'Full' if the Debug? column is marked in the spreadsheet. Leave blank otherwise. -->
<DebugType>None</DebugType>
<Optimize>True</Optimize>
<NoStandardLib>True</NoStandardLib>
<Noconfig>True</Noconfig>
</PropertyGroup>
<ItemGroup>
<Compile Include="NullCheckAssertion2.cs" />
</ItemGroup>
</Project>
| <Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<OutputType>Exe</OutputType>
<CLRTestPriority>1</CLRTestPriority>
</PropertyGroup>
<PropertyGroup>
<!-- Set to 'Full' if the Debug? column is marked in the spreadsheet. Leave blank otherwise. -->
<DebugType>None</DebugType>
<Optimize>True</Optimize>
<NoStandardLib>True</NoStandardLib>
<Noconfig>True</Noconfig>
</PropertyGroup>
<ItemGroup>
<Compile Include="NullCheckAssertion2.cs" />
</ItemGroup>
</Project>
| -1 |
dotnet/runtime | 66,018 | Some more clean up for RegexOptions.NonBacktracking | - Deleted some dead code
- Avoiding passing things around as IEnumerable when we could pass a stronger type
- Cleaned up BitVector and made it a struct
- Moved code out of the Algebras folder into the main folder | stephentoub | 2022-03-01T18:59:28Z | 2022-03-04T15:52:45Z | 47191c04d8aeca28adbb6fd1ce0f878a87655aa4 | 6dcefe002035fa19c3288d54d8d10f6533cb94fc | Some more clean up for RegexOptions.NonBacktracking. - Deleted some dead code
- Avoiding passing things around as IEnumerable when we could pass a stronger type
- Cleaned up BitVector and made it a struct
- Moved code out of the Algebras folder into the main folder | ./src/tests/JIT/jit64/gc/misc/struct1_5.csproj | <Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<OutputType>Exe</OutputType>
<CLRTestPriority>1</CLRTestPriority>
</PropertyGroup>
<PropertyGroup>
<DebugType>Full</DebugType>
<Optimize>False</Optimize>
</PropertyGroup>
<ItemGroup>
<Compile Include="struct1_5.cs" />
</ItemGroup>
</Project>
| <Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<OutputType>Exe</OutputType>
<CLRTestPriority>1</CLRTestPriority>
</PropertyGroup>
<PropertyGroup>
<DebugType>Full</DebugType>
<Optimize>False</Optimize>
</PropertyGroup>
<ItemGroup>
<Compile Include="struct1_5.cs" />
</ItemGroup>
</Project>
| -1 |
dotnet/runtime | 66,018 | Some more clean up for RegexOptions.NonBacktracking | - Deleted some dead code
- Avoiding passing things around as IEnumerable when we could pass a stronger type
- Cleaned up BitVector and made it a struct
- Moved code out of the Algebras folder into the main folder | stephentoub | 2022-03-01T18:59:28Z | 2022-03-04T15:52:45Z | 47191c04d8aeca28adbb6fd1ce0f878a87655aa4 | 6dcefe002035fa19c3288d54d8d10f6533cb94fc | Some more clean up for RegexOptions.NonBacktracking. - Deleted some dead code
- Avoiding passing things around as IEnumerable when we could pass a stronger type
- Cleaned up BitVector and made it a struct
- Moved code out of the Algebras folder into the main folder | ./src/tests/GC/Scenarios/GCSimulator/GCSimulator_154.csproj | <Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<OutputType>Exe</OutputType>
<AllowUnsafeBlocks>true</AllowUnsafeBlocks>
<GCStressIncompatible>true</GCStressIncompatible>
<CLRTestExecutionArguments>-t 1 -tp 0 -dz 17 -sdz 8517 -dc 10000 -sdc 5000 -lt 4 -f -dp 0.4 -dw 0.4</CLRTestExecutionArguments>
<IsGCSimulatorTest>true</IsGCSimulatorTest>
<CLRTestProjectToRun>GCSimulator.csproj</CLRTestProjectToRun>
<CLRTestPriority>1</CLRTestPriority>
</PropertyGroup>
<ItemGroup>
<Compile Include="GCSimulator.cs" />
<Compile Include="lifetimefx.cs" />
</ItemGroup>
</Project>
| <Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<OutputType>Exe</OutputType>
<AllowUnsafeBlocks>true</AllowUnsafeBlocks>
<GCStressIncompatible>true</GCStressIncompatible>
<CLRTestExecutionArguments>-t 1 -tp 0 -dz 17 -sdz 8517 -dc 10000 -sdc 5000 -lt 4 -f -dp 0.4 -dw 0.4</CLRTestExecutionArguments>
<IsGCSimulatorTest>true</IsGCSimulatorTest>
<CLRTestProjectToRun>GCSimulator.csproj</CLRTestProjectToRun>
<CLRTestPriority>1</CLRTestPriority>
</PropertyGroup>
<ItemGroup>
<Compile Include="GCSimulator.cs" />
<Compile Include="lifetimefx.cs" />
</ItemGroup>
</Project>
| -1 |
dotnet/runtime | 66,018 | Some more clean up for RegexOptions.NonBacktracking | - Deleted some dead code
- Avoiding passing things around as IEnumerable when we could pass a stronger type
- Cleaned up BitVector and made it a struct
- Moved code out of the Algebras folder into the main folder | stephentoub | 2022-03-01T18:59:28Z | 2022-03-04T15:52:45Z | 47191c04d8aeca28adbb6fd1ce0f878a87655aa4 | 6dcefe002035fa19c3288d54d8d10f6533cb94fc | Some more clean up for RegexOptions.NonBacktracking. - Deleted some dead code
- Avoiding passing things around as IEnumerable when we could pass a stronger type
- Cleaned up BitVector and made it a struct
- Moved code out of the Algebras folder into the main folder | ./src/coreclr/pal/tests/palsuite/c_runtime/vprintf/test2/test2.cpp | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*============================================================================
**
** Source: test2.cpp
**
** Purpose: Test #2 for the vprintf function. Tests the string specifier
** (%s).
**
**
**==========================================================================*/
#include <palsuite.h>
#include "../vprintf.h"
PALTEST(c_runtime_vprintf_test2_paltest_vprintf_test2, "c_runtime/vprintf/test2/paltest_vprintf_test2")
{
if (PAL_Initialize(argc, argv))
{
return FAIL;
}
DoStrTest("foo %s", "bar", "foo bar");
DoStrTest("foo %hs", "bar", "foo bar");
DoWStrTest("foo %ls", convert("bar"), "foo bar");
DoWStrTest("foo %ws", convert("bar"), "foo bar");
DoStrTest("foo %Ls", "bar", "foo bar");
DoStrTest("foo %I64s", "bar", "foo bar");
DoStrTest("foo %5s", "bar", "foo bar");
DoStrTest("foo %.2s", "bar", "foo ba");
DoStrTest("foo %5.2s", "bar", "foo ba");
DoStrTest("foo %-5s", "bar", "foo bar ");
DoStrTest("foo %05s", "bar", "foo 00bar");
DoStrTest("foo %s", NULL, "foo (null)");
DoStrTest("foo %hs", NULL, "foo (null)");
DoWStrTest("foo %ls", NULL, "foo (null)");
DoWStrTest("foo %ws", NULL, "foo (null)");
DoStrTest("foo %Ls", NULL, "foo (null)");
DoStrTest("foo %I64s", NULL, "foo (null)");
PAL_Terminate();
return PASS;
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*============================================================================
**
** Source: test2.cpp
**
** Purpose: Test #2 for the vprintf function. Tests the string specifier
** (%s).
**
**
**==========================================================================*/
#include <palsuite.h>
#include "../vprintf.h"
PALTEST(c_runtime_vprintf_test2_paltest_vprintf_test2, "c_runtime/vprintf/test2/paltest_vprintf_test2")
{
if (PAL_Initialize(argc, argv))
{
return FAIL;
}
DoStrTest("foo %s", "bar", "foo bar");
DoStrTest("foo %hs", "bar", "foo bar");
DoWStrTest("foo %ls", convert("bar"), "foo bar");
DoWStrTest("foo %ws", convert("bar"), "foo bar");
DoStrTest("foo %Ls", "bar", "foo bar");
DoStrTest("foo %I64s", "bar", "foo bar");
DoStrTest("foo %5s", "bar", "foo bar");
DoStrTest("foo %.2s", "bar", "foo ba");
DoStrTest("foo %5.2s", "bar", "foo ba");
DoStrTest("foo %-5s", "bar", "foo bar ");
DoStrTest("foo %05s", "bar", "foo 00bar");
DoStrTest("foo %s", NULL, "foo (null)");
DoStrTest("foo %hs", NULL, "foo (null)");
DoWStrTest("foo %ls", NULL, "foo (null)");
DoWStrTest("foo %ws", NULL, "foo (null)");
DoStrTest("foo %Ls", NULL, "foo (null)");
DoStrTest("foo %I64s", NULL, "foo (null)");
PAL_Terminate();
return PASS;
}
| -1 |
dotnet/runtime | 66,007 | [monoapi] Add mono_method_get_unmanaged_callers_only_ftnptr | Like `RuntimeMethodHandle.GetFunctionPointer`, but callable from native code | lambdageek | 2022-03-01T15:38:09Z | 2022-03-03T15:38:37Z | faa272f860f83802a9cee65619e6e2e841b05433 | 1cfa6d6964d890f9673b25d2165532f9a43710a7 | [monoapi] Add mono_method_get_unmanaged_callers_only_ftnptr. Like `RuntimeMethodHandle.GetFunctionPointer`, but callable from native code | ./src/mono/mono/metadata/external-only.c | /**
* Functions that are in the (historical) embedding API
* but must not be used by the runtime. Often
* just a thin wrapper mono_foo => mono_foo_internal.
*
* Copyright 2018 Microsoft
* Licensed under the MIT license. See LICENSE file in the project root for full license information.
*/
// FIXME In order to confirm this is all extern_only,
// a variant of the runtime should be linked without it.
#include "config.h"
#include "class-internals.h"
#include "domain-internals.h"
#include "mono-hash-internals.h"
#include "mono-config-internals.h"
#include "object-internals.h"
#include "class-init.h"
#include <mono/metadata/assembly.h>
#include "marshal.h"
#include <mono/metadata/object.h>
#include "assembly-internals.h"
#include "external-only.h"
#include <mono/metadata/threads.h>
#include "threads-types.h"
#include "jit-info.h"
/**
* mono_gchandle_new:
* \param obj managed object to get a handle for
* \param pinned whether the object should be pinned
 * This returns a handle that wraps the object; it is used to keep a
 * reference to a managed object from the unmanaged world and prevents the
 * object from being disposed.
*
 * If \p pinned is false the address of the object cannot be obtained; if it is
 * true the address of the object can be obtained. This will also pin the
 * object so that a moving garbage collector cannot relocate it.
*
* \returns a handle that can be used to access the object from unmanaged code.
*/
uint32_t
mono_gchandle_new (MonoObject *obj, mono_bool pinned)
{
MONO_EXTERNAL_ONLY_GC_UNSAFE (uint32_t, (uint32_t)(size_t)mono_gchandle_new_internal (obj, pinned));
}
MonoGCHandle
mono_gchandle_new_v2 (MonoObject *obj, mono_bool pinned)
{
MONO_EXTERNAL_ONLY_GC_UNSAFE (MonoGCHandle, mono_gchandle_new_internal (obj, pinned));
}
/**
* mono_gchandle_new_weakref:
* \param obj managed object to get a handle for
 * \param track_resurrection Determines how long to track the object; if set to TRUE, the object is tracked after finalization; if FALSE, the object is only tracked up until the point of finalization.
*
* This returns a weak handle that wraps the object, this is used to
* keep a reference to a managed object from the unmanaged world.
 * Unlike \c mono_gchandle_new_internal, the object can be reclaimed by the
* garbage collector. In this case the value of the GCHandle will be
* set to zero.
*
* If \p track_resurrection is TRUE the object will be tracked through
* finalization and if the object is resurrected during the execution
* of the finalizer, then the returned weakref will continue to hold
* a reference to the object. If \p track_resurrection is FALSE, then
* the weak reference's target will become NULL as soon as the object
* is passed on to the finalizer.
*
* \returns a handle that can be used to access the object from
* unmanaged code.
*/
uint32_t
mono_gchandle_new_weakref (MonoObject *obj, mono_bool track_resurrection)
{
MONO_EXTERNAL_ONLY_GC_UNSAFE (uint32_t, (uint32_t)(size_t)mono_gchandle_new_weakref_internal (obj, track_resurrection));
}
MonoGCHandle
mono_gchandle_new_weakref_v2 (MonoObject *obj, mono_bool track_resurrection)
{
MONO_EXTERNAL_ONLY_GC_UNSAFE (MonoGCHandle, mono_gchandle_new_weakref_internal (obj, track_resurrection));
}
/**
* mono_gchandle_get_target:
* \param gchandle a GCHandle's handle.
*
* The handle was previously created by calling \c mono_gchandle_new or
* \c mono_gchandle_new_weakref.
*
* \returns a pointer to the \c MonoObject* represented by the handle or
* NULL for a collected object if using a weakref handle.
*/
MonoObject*
mono_gchandle_get_target (uint32_t gchandle)
{
MONO_EXTERNAL_ONLY_GC_UNSAFE (MonoObject*, mono_gchandle_get_target_internal ((MonoGCHandle)(size_t)gchandle));
}
MonoObject*
mono_gchandle_get_target_v2 (MonoGCHandle gchandle)
{
MONO_EXTERNAL_ONLY_GC_UNSAFE (MonoObject*, mono_gchandle_get_target_internal (gchandle));
}
/**
* mono_gchandle_free:
* \param gchandle a GCHandle's handle.
*
* Frees the \p gchandle handle. If there are no outstanding
* references, the garbage collector can reclaim the memory of the
* object wrapped.
*/
void
mono_gchandle_free (uint32_t gchandle)
{
/* Xamarin.Mac and Xamarin.iOS can call this from a worker thread
* that's not attached to the runtime. This is okay for SGen because
* the gchandle code is lockfree. SGen calls back into Mono which
* fires a profiler event, so the profiler must be prepared to be
* called from threads that aren't attached to Mono. */
MONO_EXTERNAL_ONLY_VOID (mono_gchandle_free_internal ((MonoGCHandle)(size_t)gchandle));
}
void
mono_gchandle_free_v2 (MonoGCHandle gchandle)
{
MONO_EXTERNAL_ONLY_VOID (mono_gchandle_free_internal (gchandle));
}
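/* Illustrative usage sketch, not part of the original file: a typical
 * embedding pattern for the v2 GC handle API documented above. `my_obj`
 * stands for any MonoObject* obtained elsewhere; error handling is elided.
 * Kept under #if 0 so it is clearly an example rather than live code. */
#if 0
static void
example_gchandle_usage (MonoObject *my_obj)
{
	/* Keep my_obj alive across native code without pinning it. */
	MonoGCHandle handle = mono_gchandle_new_v2 (my_obj, /*pinned*/ FALSE);

	/* Later, recover the object; a weakref handle would return NULL once collected. */
	MonoObject *obj = mono_gchandle_get_target_v2 (handle);
	(void)obj;

	/* Release the handle so the GC may reclaim the object. */
	mono_gchandle_free_v2 (handle);
}
#endif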
/* GC write barriers support */
/**
* mono_gc_wbarrier_set_field:
* \param obj object containing the destination field
* \param field_ptr address of field inside the object
* \param value reference to the object to be stored
* Stores an object reference inside another object, executing a write barrier
* if needed.
*/
void
mono_gc_wbarrier_set_field (MonoObject *obj, void* field_ptr, MonoObject* value)
{
MONO_EXTERNAL_ONLY_GC_UNSAFE_VOID (mono_gc_wbarrier_set_field_internal (obj, field_ptr, value));
}
/**
* mono_gc_wbarrier_set_arrayref:
* \param arr array containing the destination slot
* \param slot_ptr address of slot inside the array
* \param value reference to the object to be stored
* Stores an object reference inside an array of objects, executing a write
* barrier if needed.
*/
void
mono_gc_wbarrier_set_arrayref (MonoArray *arr, void* slot_ptr, MonoObject* value)
{
MONO_EXTERNAL_ONLY_GC_UNSAFE_VOID (mono_gc_wbarrier_set_arrayref_internal (arr, slot_ptr, value));
}
/**
* mono_gc_wbarrier_arrayref_copy:
* \param dest_ptr destination slot address
* \param src_ptr source slot address
* \param count number of references to copy
* Copies \p count references from one array to another, executing a write
* barrier if needed.
*/
void
mono_gc_wbarrier_arrayref_copy (void* dest_ptr, /*const*/ void* src_ptr, int count)
{
MONO_EXTERNAL_ONLY_GC_UNSAFE_VOID (mono_gc_wbarrier_arrayref_copy_internal (dest_ptr, src_ptr, count));
}
/**
* mono_gc_wbarrier_generic_store:
* \param ptr address of field
 * \param value object to store
* Stores the \p value object inside the field represented by \p ptr,
* executing a write barrier if needed.
*/
void
mono_gc_wbarrier_generic_store (void* ptr, MonoObject* value)
{
MONO_EXTERNAL_ONLY_GC_UNSAFE_VOID (mono_gc_wbarrier_generic_store_internal (ptr, value));
}
/**
* mono_gc_wbarrier_generic_store_atomic:
* Same as \c mono_gc_wbarrier_generic_store but performs the store
* as an atomic operation with release semantics.
*/
void
mono_gc_wbarrier_generic_store_atomic (void *ptr, MonoObject *value)
{
MONO_EXTERNAL_ONLY_GC_UNSAFE_VOID (mono_gc_wbarrier_generic_store_atomic_internal (ptr, value));
}
/**
* mono_gc_wbarrier_generic_nostore:
* Executes a write barrier for an address, informing the GC that
* the reference stored at that address has been changed.
*/
void
mono_gc_wbarrier_generic_nostore (void* ptr)
{
MONO_EXTERNAL_ONLY_GC_UNSAFE_VOID (mono_gc_wbarrier_generic_nostore_internal (ptr));
}
/**
 * mono_gc_wbarrier_value_copy:
* \param dest destination address
* \param src source address
* \param count number of elements to copy
* \param klass type of elements to copy
* Copies \p count elements of type \p klass from \p src address to
 * \p dest address, executing any necessary write barriers.
*/
void
mono_gc_wbarrier_value_copy (void* dest, /*const*/ void* src, int count, MonoClass *klass)
{
MONO_EXTERNAL_ONLY_GC_UNSAFE_VOID (mono_gc_wbarrier_value_copy_internal (dest, src, count, klass));
}
/**
* mono_gc_wbarrier_object_copy:
* \param obj destination object
* \param src source object
* Copies contents of \p src to \p obj, executing any necessary write
* barriers.
*/
void
mono_gc_wbarrier_object_copy (MonoObject* obj, MonoObject *src)
{
MONO_EXTERNAL_ONLY_GC_UNSAFE_VOID (mono_gc_wbarrier_object_copy_internal (obj, src));
}
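/* Illustrative usage sketch, not part of the original file: storing an object
 * reference from native code with the proper write barrier. `MyStruct` and its
 * `ref_field` are hypothetical; without the barrier a generational GC could
 * miss the newly stored reference. Kept under #if 0 so it is clearly an
 * example rather than live code. */
#if 0
typedef struct {
	MonoObject object;
	MonoObject *ref_field;
} MyStruct;

static void
example_store_reference (MyStruct *dest, MonoObject *value)
{
	/* Equivalent to `dest->ref_field = value;` plus the required barrier. */
	mono_gc_wbarrier_set_field ((MonoObject *)dest, &dest->ref_field, value);
}
#endif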
/**
* mono_class_init:
* \param klass the class to initialize
*
* Compute the \c instance_size, \c class_size and other infos that cannot be
* computed at \c mono_class_get time. Also compute vtable_size if possible.
* Initializes the following fields in \p klass:
* - all the fields initialized by \c mono_class_init_sizes
* - has_cctor
* - ghcimpl
* - inited
*
* LOCKING: Acquires the loader lock.
*
* \returns TRUE on success or FALSE if there was a problem in loading
* the type (incorrect assemblies, missing assemblies, methods, etc).
*/
mono_bool
mono_class_init (MonoClass *klass)
{
MONO_EXTERNAL_ONLY_GC_UNSAFE (gboolean, mono_class_init_internal (klass));
}
/**
* mono_g_hash_table_new_type:
*/
MonoGHashTable*
mono_g_hash_table_new_type (GHashFunc hash_func, GEqualFunc key_equal_func, MonoGHashGCType type, MonoGCRootSource source, void *key, const char *msg)
{
MONO_EXTERNAL_ONLY_GC_UNSAFE (MonoGHashTable*, mono_g_hash_table_new_type_internal (hash_func, key_equal_func, type, source, key, msg));
}
/**
* mono_config_for_assembly:
*/
void
mono_config_for_assembly (MonoImage *assembly)
{
}
/**
* mono_class_get_property_from_name:
* \param klass a class
* \param name name of the property to lookup in the specified class
*
* Use this method to lookup a property in a class
* \returns the \c MonoProperty with the given name, or NULL if the property
* does not exist on the \p klass.
*/
MonoProperty*
mono_class_get_property_from_name (MonoClass *klass, const char *name)
{
MONO_EXTERNAL_ONLY_GC_UNSAFE (MonoProperty*, mono_class_get_property_from_name_internal (klass, name));
}
/**
* mono_class_is_subclass_of:
* \param klass class to probe if it is a subclass of another one
* \param klassc the class we suspect is the base class
* \param check_interfaces whether we should perform interface checks
*
* This method determines whether \p klass is a subclass of \p klassc.
*
* If the \p check_interfaces flag is set, then if \p klassc is an interface
 * this method returns TRUE if \p klass implements the interface or,
 * if \p klass is an interface, if one of its base classes is \p klassc.
*
* If \p check_interfaces is false, then if \p klass is not an interface,
* it returns TRUE if the \p klass is a subclass of \p klassc.
*
 * If \p klass is an interface and \p klassc is \c System.Object, then this function
* returns TRUE.
*
*/
gboolean
mono_class_is_subclass_of (MonoClass *klass, MonoClass *klassc, gboolean check_interfaces)
{
MONO_EXTERNAL_ONLY_GC_UNSAFE (gboolean, mono_class_is_subclass_of_internal (klass, klassc, check_interfaces));
}
/**
* mono_domain_set_internal:
* \param domain the new domain
*
* Sets the current domain to \p domain.
*/
void
mono_domain_set_internal (MonoDomain *domain)
{
MONO_EXTERNAL_ONLY_GC_UNSAFE_VOID (mono_domain_set_internal_with_options (domain, TRUE));
}
/**
* mono_domain_set:
* \param domain domain
* \param force force setting.
*
* Set the current appdomain to \p domain. If \p force is set, set it even
* if it is being unloaded.
*
* \returns TRUE on success; FALSE if the domain is unloaded
*/
gboolean
mono_domain_set (MonoDomain *domain, gboolean force)
{
MONO_EXTERNAL_ONLY_GC_UNSAFE_VOID (mono_domain_set_internal_with_options (domain, TRUE));
return TRUE;
}
/**
* mono_assembly_name_free:
* \param aname assembly name to free
*
* Frees the provided assembly name object.
 * (it does not free the object itself, only the name members).
*/
void
mono_assembly_name_free (MonoAssemblyName *aname)
{
if (!aname)
return;
MONO_EXTERNAL_ONLY_GC_UNSAFE_VOID (mono_assembly_name_free_internal (aname));
}
/**
* mono_thread_manage:
*
*/
void
mono_thread_manage (void)
{
MONO_EXTERNAL_ONLY_GC_UNSAFE_VOID (mono_thread_manage_internal ());
}
void
mono_register_config_for_assembly (const char* assembly_name, const char* config_xml)
{
}
/**
* mono_domain_free:
* \param domain the domain to release
* \param force if TRUE, it allows the root domain to be released (used at shutdown only).
*
* This releases the resources associated with the specific domain.
* This is a low-level function that is invoked by the AppDomain infrastructure
* when necessary.
*
* In theory, this is dead code on netcore and thus does not need to be ALC-aware.
*/
void
mono_domain_free (MonoDomain *domain, gboolean force)
{
g_assert_not_reached ();
}
/**
* mono_domain_get_id:
*
* A domain ID is guaranteed to be unique for as long as the domain
* using it is alive. It may be reused later once the domain has been
* unloaded.
*
* \returns The unique ID for \p domain.
*/
gint32
mono_domain_get_id (MonoDomain *domain)
{
return domain->domain_id;
}
/**
* mono_domain_get_friendly_name:
*
* The returned string's lifetime is the same as \p domain's. Consider
* copying it if you need to store it somewhere.
*
* \returns The friendly name of \p domain. Can be NULL if not yet set.
*/
const char *
mono_domain_get_friendly_name (MonoDomain *domain)
{
return domain->friendly_name;
}
/**
* mono_domain_is_unloading:
*/
gboolean
mono_domain_is_unloading (MonoDomain *domain)
{
return FALSE;
}
/**
* mono_domain_from_appdomain:
*/
MonoDomain *
mono_domain_from_appdomain (MonoAppDomain *appdomain_raw)
{
return mono_get_root_domain ();
}
/**
* mono_context_set:
*/
void
mono_context_set (MonoAppContext * new_context)
{
}
/**
* mono_context_get:
*
* Returns: the current Mono Application Context.
*/
MonoAppContext *
mono_context_get (void)
{
return NULL;
}
/**
* mono_context_get_id:
* \param context the context to operate on.
*
* Context IDs are guaranteed to be unique for the duration of a Mono
* process; they are never reused.
*
* \returns The unique ID for \p context.
*/
gint32
mono_context_get_id (MonoAppContext *context)
{
return context->context_id;
}
/**
* mono_context_get_domain_id:
* \param context the context to operate on.
* \returns The ID of the domain that \p context was created in.
*/
gint32
mono_context_get_domain_id (MonoAppContext *context)
{
return context->domain_id;
}
/**
* mono_string_equal:
* \param s1 First string to compare
* \param s2 Second string to compare
*
* Compares two \c MonoString* instances ordinally for equality.
*
* \returns FALSE if the strings differ.
*/
gboolean
mono_string_equal (MonoString *s1, MonoString *s2)
{
MONO_EXTERNAL_ONLY (gboolean, mono_string_equal_internal (s1, s2));
}
/**
* mono_string_hash:
* \param s the string to hash
*
* Compute the hash for a \c MonoString*
* \returns the hash for the string.
*/
guint
mono_string_hash (MonoString *s)
{
MONO_EXTERNAL_ONLY (guint, mono_string_hash_internal (s));
}
/**
* mono_domain_create:
*
* Creates a new application domain, the unmanaged representation
* of the actual domain.
*
 * Application domains provide an isolation facility for assemblies. You
* can load assemblies and execute code in them that will not be visible
* to other application domains. This is a runtime-based virtualization
* technology.
*
* It is possible to unload domains, which unloads the assemblies and
* data that was allocated in that domain.
*
* When a domain is created a mempool is allocated for domain-specific
 * structures, along with a dedicated code manager to hold code that is
* associated with the domain.
*
* \returns New initialized \c MonoDomain, with no configuration or assemblies
* loaded into it.
*/
MonoDomain *
mono_domain_create (void)
{
g_assert_not_reached ();
}
/**
* mono_domain_get_by_id:
* \param domainid the ID
* \returns the domain for a specific domain id.
*/
MonoDomain *
mono_domain_get_by_id (gint32 domainid)
{
MonoDomain * domain = mono_get_root_domain ();
if (domain->domain_id == domainid)
return domain;
else
return NULL;
}
/**
* mono_domain_assembly_open:
* \param domain the application domain
* \param name file name of the assembly
*/
MonoAssembly *
mono_domain_assembly_open (MonoDomain *domain, const char *name)
{
MonoAssembly *result;
MONO_ENTER_GC_UNSAFE;
result = mono_domain_assembly_open_internal (mono_alc_get_default (), name);
MONO_EXIT_GC_UNSAFE;
return result;
}
void
mono_domain_ensure_entry_assembly (MonoDomain *domain, MonoAssembly *assembly)
{
mono_runtime_ensure_entry_assembly (assembly);
}
/**
* mono_domain_foreach:
* \param func function to invoke with the domain data
 * \param user_data user-defined pointer that is passed to the supplied \p func for each domain
*
* Use this method to safely iterate over all the loaded application
* domains in the current runtime. The provided \p func is invoked with a
* pointer to the \c MonoDomain and is given the value of the \p user_data
* parameter which can be used to pass state to your called routine.
*/
void
mono_domain_foreach (MonoDomainFunc func, gpointer user_data)
{
MONO_ENTER_GC_UNSAFE;
func (mono_get_root_domain (), user_data);
MONO_EXIT_GC_UNSAFE;
}
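/* Illustrative usage sketch, not part of the original file: counting loaded
 * domains via mono_domain_foreach. `count_domain` is a hypothetical callback
 * name; on this runtime only the root domain is visited. Kept under #if 0 so
 * it is clearly an example rather than live code. */
#if 0
static void
count_domain (MonoDomain *domain, gpointer user_data)
{
	int *count = (int *)user_data;
	(*count)++;
}

static int
example_count_domains (void)
{
	int count = 0;
	mono_domain_foreach (count_domain, &count);
	return count;
}
#endif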
/**
* mono_context_init:
* \param domain The domain where the \c System.Runtime.Remoting.Context.Context is initialized
* Initializes the \p domain's default \c System.Runtime.Remoting 's Context.
*/
void
mono_context_init (MonoDomain *domain)
{
}
/**
* mono_domain_set_config:
* \param domain \c MonoDomain initialized with the appdomain we want to change
* \param base_dir new base directory for the appdomain
* \param config_file_name path to the new configuration for the app domain
*
* Used to set the system configuration for an appdomain
*
* Without using this, embedded builds will get 'System.Configuration.ConfigurationErrorsException:
* Error Initializing the configuration system. ---> System.ArgumentException:
* The 'ExeConfigFilename' argument cannot be null.' for some managed calls.
*/
void
mono_domain_set_config (MonoDomain *domain, const char *base_dir, const char *config_file_name)
{
g_assert_not_reached ();
}
/**
* mono_domain_try_type_resolve:
* \param domain application domain in which to resolve the type
* \param name the name of the type to resolve or NULL.
* \param typebuilder A \c System.Reflection.Emit.TypeBuilder, used if name is NULL.
*
* This routine invokes the internal \c System.AppDomain.DoTypeResolve and returns
* the assembly that matches name, or ((TypeBuilder)typebuilder).FullName.
*
* \returns A \c MonoReflectionAssembly or NULL if not found
*/
MonoReflectionAssembly *
mono_domain_try_type_resolve (MonoDomain *domain, char *name, MonoObject *typebuilder_raw)
{
HANDLE_FUNCTION_ENTER ();
g_assert (domain);
g_assert (name || typebuilder_raw);
ERROR_DECL (error);
MonoReflectionAssemblyHandle ret = NULL_HANDLE_INIT;
// This will not work correctly on netcore
if (name) {
MonoStringHandle name_handle = mono_string_new_handle (name, error);
goto_if_nok (error, exit);
ret = mono_domain_try_type_resolve_name (NULL, name_handle, error);
} else {
// TODO: make this work on netcore when working on SRE.TypeBuilder
g_assert_not_reached ();
}
exit:
mono_error_cleanup (error);
HANDLE_FUNCTION_RETURN_OBJ (ret);
}
/**
* mono_jit_info_table_find:
* \param domain Domain that you want to look up
* \param addr Points to an address with JITed code.
*
* Use this function to obtain a \c MonoJitInfo* object that can be used to get
* some statistics. You should provide both the \p domain on which you will be
* performing the probe, and an address. Since application domains can share code
* the same address can be in use by multiple domains at once.
*
* This does not return any results for trampolines.
*
* \returns NULL if the address does not belong to JITed code (it might be native
* code or a trampoline) or a valid pointer to a \c MonoJitInfo* .
*/
MonoJitInfo*
mono_jit_info_table_find (MonoDomain *domain, gpointer addr)
{
return mono_jit_info_table_find_internal (addr, TRUE, FALSE);
}
/**
* mono_domain_owns_vtable_slot:
* \returns Whether \p vtable_slot is inside a vtable which belongs to \p domain.
*/
gboolean
mono_domain_owns_vtable_slot (MonoDomain *domain, gpointer vtable_slot)
{
return mono_mem_manager_mp_contains_addr (mono_mem_manager_get_ambient (), vtable_slot);
}
| /**
* Functions that are in the (historical) embedding API
* but must not be used by the runtime. Often
* just a thin wrapper mono_foo => mono_foo_internal.
*
* Copyright 2018 Microsoft
* Licensed under the MIT license. See LICENSE file in the project root for full license information.
*/
// FIXME In order to confirm this is all extern_only,
// a variant of the runtime should be linked without it.
#include "config.h"
#include "class-internals.h"
#include "domain-internals.h"
#include "mono-hash-internals.h"
#include "mono-config-internals.h"
#include "object-internals.h"
#include "class-init.h"
#include <mono/metadata/assembly.h>
#include "marshal.h"
#include <mono/metadata/object.h>
#include "assembly-internals.h"
#include "external-only.h"
#include <mono/metadata/threads.h>
#include <mono/metadata/mono-private-unstable.h>
#include "threads-types.h"
#include "jit-info.h"
/**
* mono_gchandle_new:
* \param obj managed object to get a handle for
* \param pinned whether the object should be pinned
 * This returns a handle that wraps the object; it is used to keep a
 * reference to a managed object from the unmanaged world and prevents the
 * object from being disposed.
*
 * If \p pinned is false the address of the object cannot be obtained; if it is
 * true the address of the object can be obtained. This will also pin the
 * object so that a moving garbage collector cannot relocate it.
*
* \returns a handle that can be used to access the object from unmanaged code.
*/
uint32_t
mono_gchandle_new (MonoObject *obj, mono_bool pinned)
{
MONO_EXTERNAL_ONLY_GC_UNSAFE (uint32_t, (uint32_t)(size_t)mono_gchandle_new_internal (obj, pinned));
}
MonoGCHandle
mono_gchandle_new_v2 (MonoObject *obj, mono_bool pinned)
{
MONO_EXTERNAL_ONLY_GC_UNSAFE (MonoGCHandle, mono_gchandle_new_internal (obj, pinned));
}
/**
* mono_gchandle_new_weakref:
* \param obj managed object to get a handle for
 * \param track_resurrection Determines how long to track the object; if set to TRUE, the object is tracked after finalization; if FALSE, the object is only tracked up until the point of finalization.
*
* This returns a weak handle that wraps the object, this is used to
* keep a reference to a managed object from the unmanaged world.
 * Unlike \c mono_gchandle_new_internal, the object can be reclaimed by the
* garbage collector. In this case the value of the GCHandle will be
* set to zero.
*
* If \p track_resurrection is TRUE the object will be tracked through
* finalization and if the object is resurrected during the execution
* of the finalizer, then the returned weakref will continue to hold
* a reference to the object. If \p track_resurrection is FALSE, then
* the weak reference's target will become NULL as soon as the object
* is passed on to the finalizer.
*
* \returns a handle that can be used to access the object from
* unmanaged code.
*/
uint32_t
mono_gchandle_new_weakref (MonoObject *obj, mono_bool track_resurrection)
{
MONO_EXTERNAL_ONLY_GC_UNSAFE (uint32_t, (uint32_t)(size_t)mono_gchandle_new_weakref_internal (obj, track_resurrection));
}
MonoGCHandle
mono_gchandle_new_weakref_v2 (MonoObject *obj, mono_bool track_resurrection)
{
MONO_EXTERNAL_ONLY_GC_UNSAFE (MonoGCHandle, mono_gchandle_new_weakref_internal (obj, track_resurrection));
}
/**
* mono_gchandle_get_target:
* \param gchandle a GCHandle's handle.
*
* The handle was previously created by calling \c mono_gchandle_new or
* \c mono_gchandle_new_weakref.
*
* \returns a pointer to the \c MonoObject* represented by the handle or
* NULL for a collected object if using a weakref handle.
*/
MonoObject*
mono_gchandle_get_target (uint32_t gchandle)
{
MONO_EXTERNAL_ONLY_GC_UNSAFE (MonoObject*, mono_gchandle_get_target_internal ((MonoGCHandle)(size_t)gchandle));
}
MonoObject*
mono_gchandle_get_target_v2 (MonoGCHandle gchandle)
{
MONO_EXTERNAL_ONLY_GC_UNSAFE (MonoObject*, mono_gchandle_get_target_internal (gchandle));
}
/**
* mono_gchandle_free:
* \param gchandle a GCHandle's handle.
*
* Frees the \p gchandle handle. If there are no outstanding
* references, the garbage collector can reclaim the memory of the
* object wrapped.
*/
void
mono_gchandle_free (uint32_t gchandle)
{
/* Xamarin.Mac and Xamarin.iOS can call this from a worker thread
* that's not attached to the runtime. This is okay for SGen because
* the gchandle code is lockfree. SGen calls back into Mono which
* fires a profiler event, so the profiler must be prepared to be
* called from threads that aren't attached to Mono. */
MONO_EXTERNAL_ONLY_VOID (mono_gchandle_free_internal ((MonoGCHandle)(size_t)gchandle));
}
void
mono_gchandle_free_v2 (MonoGCHandle gchandle)
{
MONO_EXTERNAL_ONLY_VOID (mono_gchandle_free_internal (gchandle));
}
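/* Illustrative usage sketch, not part of the original file: a typical
 * embedding pattern for the v2 GC handle API documented above. `my_obj`
 * stands for any MonoObject* obtained elsewhere; error handling is elided.
 * Kept under #if 0 so it is clearly an example rather than live code. */
#if 0
static void
example_gchandle_usage (MonoObject *my_obj)
{
	/* Keep my_obj alive across native code without pinning it. */
	MonoGCHandle handle = mono_gchandle_new_v2 (my_obj, /*pinned*/ FALSE);

	/* Later, recover the object; a weakref handle would return NULL once collected. */
	MonoObject *obj = mono_gchandle_get_target_v2 (handle);
	(void)obj;

	/* Release the handle so the GC may reclaim the object. */
	mono_gchandle_free_v2 (handle);
}
#endif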
/* GC write barriers support */
/**
* mono_gc_wbarrier_set_field:
* \param obj object containing the destination field
* \param field_ptr address of field inside the object
* \param value reference to the object to be stored
* Stores an object reference inside another object, executing a write barrier
* if needed.
*/
void
mono_gc_wbarrier_set_field (MonoObject *obj, void* field_ptr, MonoObject* value)
{
MONO_EXTERNAL_ONLY_GC_UNSAFE_VOID (mono_gc_wbarrier_set_field_internal (obj, field_ptr, value));
}
/**
* mono_gc_wbarrier_set_arrayref:
* \param arr array containing the destination slot
* \param slot_ptr address of slot inside the array
* \param value reference to the object to be stored
* Stores an object reference inside an array of objects, executing a write
* barrier if needed.
*/
void
mono_gc_wbarrier_set_arrayref (MonoArray *arr, void* slot_ptr, MonoObject* value)
{
MONO_EXTERNAL_ONLY_GC_UNSAFE_VOID (mono_gc_wbarrier_set_arrayref_internal (arr, slot_ptr, value));
}
/**
* mono_gc_wbarrier_arrayref_copy:
* \param dest_ptr destination slot address
* \param src_ptr source slot address
* \param count number of references to copy
* Copies \p count references from one array to another, executing a write
* barrier if needed.
*/
void
mono_gc_wbarrier_arrayref_copy (void* dest_ptr, /*const*/ void* src_ptr, int count)
{
MONO_EXTERNAL_ONLY_GC_UNSAFE_VOID (mono_gc_wbarrier_arrayref_copy_internal (dest_ptr, src_ptr, count));
}
/**
* mono_gc_wbarrier_generic_store:
* \param ptr address of field
 * \param value object to store
* Stores the \p value object inside the field represented by \p ptr,
* executing a write barrier if needed.
*/
void
mono_gc_wbarrier_generic_store (void* ptr, MonoObject* value)
{
MONO_EXTERNAL_ONLY_GC_UNSAFE_VOID (mono_gc_wbarrier_generic_store_internal (ptr, value));
}
/**
* mono_gc_wbarrier_generic_store_atomic:
* Same as \c mono_gc_wbarrier_generic_store but performs the store
* as an atomic operation with release semantics.
*/
void
mono_gc_wbarrier_generic_store_atomic (void *ptr, MonoObject *value)
{
MONO_EXTERNAL_ONLY_GC_UNSAFE_VOID (mono_gc_wbarrier_generic_store_atomic_internal (ptr, value));
}
/**
* mono_gc_wbarrier_generic_nostore:
* Executes a write barrier for an address, informing the GC that
* the reference stored at that address has been changed.
*/
void
mono_gc_wbarrier_generic_nostore (void* ptr)
{
MONO_EXTERNAL_ONLY_GC_UNSAFE_VOID (mono_gc_wbarrier_generic_nostore_internal (ptr));
}
/**
 * mono_gc_wbarrier_value_copy:
* \param dest destination address
* \param src source address
* \param count number of elements to copy
* \param klass type of elements to copy
* Copies \p count elements of type \p klass from \p src address to
 * \p dest address, executing any necessary write barriers.
*/
void
mono_gc_wbarrier_value_copy (void* dest, /*const*/ void* src, int count, MonoClass *klass)
{
MONO_EXTERNAL_ONLY_GC_UNSAFE_VOID (mono_gc_wbarrier_value_copy_internal (dest, src, count, klass));
}
/**
* mono_gc_wbarrier_object_copy:
* \param obj destination object
* \param src source object
* Copies contents of \p src to \p obj, executing any necessary write
* barriers.
*/
void
mono_gc_wbarrier_object_copy (MonoObject* obj, MonoObject *src)
{
MONO_EXTERNAL_ONLY_GC_UNSAFE_VOID (mono_gc_wbarrier_object_copy_internal (obj, src));
}
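/* Illustrative usage sketch, not part of the original file: storing an object
 * reference from native code with the proper write barrier. `MyStruct` and its
 * `ref_field` are hypothetical; without the barrier a generational GC could
 * miss the newly stored reference. Kept under #if 0 so it is clearly an
 * example rather than live code. */
#if 0
typedef struct {
	MonoObject object;
	MonoObject *ref_field;
} MyStruct;

static void
example_store_reference (MyStruct *dest, MonoObject *value)
{
	/* Equivalent to `dest->ref_field = value;` plus the required barrier. */
	mono_gc_wbarrier_set_field ((MonoObject *)dest, &dest->ref_field, value);
}
#endif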
/**
* mono_class_init:
* \param klass the class to initialize
*
* Compute the \c instance_size, \c class_size and other infos that cannot be
* computed at \c mono_class_get time. Also compute vtable_size if possible.
* Initializes the following fields in \p klass:
* - all the fields initialized by \c mono_class_init_sizes
* - has_cctor
* - ghcimpl
* - inited
*
* LOCKING: Acquires the loader lock.
*
* \returns TRUE on success or FALSE if there was a problem in loading
* the type (incorrect assemblies, missing assemblies, methods, etc).
*/
mono_bool
mono_class_init (MonoClass *klass)
{
MONO_EXTERNAL_ONLY_GC_UNSAFE (gboolean, mono_class_init_internal (klass));
}
/**
* mono_g_hash_table_new_type:
*/
MonoGHashTable*
mono_g_hash_table_new_type (GHashFunc hash_func, GEqualFunc key_equal_func, MonoGHashGCType type, MonoGCRootSource source, void *key, const char *msg)
{
MONO_EXTERNAL_ONLY_GC_UNSAFE (MonoGHashTable*, mono_g_hash_table_new_type_internal (hash_func, key_equal_func, type, source, key, msg));
}
/**
* mono_config_for_assembly:
*/
void
mono_config_for_assembly (MonoImage *assembly)
{
}
/**
* mono_class_get_property_from_name:
* \param klass a class
* \param name name of the property to lookup in the specified class
*
* Use this method to lookup a property in a class
* \returns the \c MonoProperty with the given name, or NULL if the property
* does not exist on the \p klass.
*/
MonoProperty*
mono_class_get_property_from_name (MonoClass *klass, const char *name)
{
MONO_EXTERNAL_ONLY_GC_UNSAFE (MonoProperty*, mono_class_get_property_from_name_internal (klass, name));
}
/**
* mono_class_is_subclass_of:
* \param klass class to probe if it is a subclass of another one
* \param klassc the class we suspect is the base class
* \param check_interfaces whether we should perform interface checks
*
* This method determines whether \p klass is a subclass of \p klassc.
*
* If the \p check_interfaces flag is set, then if \p klassc is an interface
 * this method returns TRUE if \p klass implements the interface or,
 * if \p klass is an interface, if one of its base classes is \p klassc.
*
* If \p check_interfaces is false, then if \p klass is not an interface,
* it returns TRUE if the \p klass is a subclass of \p klassc.
*
 * If \p klass is an interface and \p klassc is \c System.Object, then this function
* returns TRUE.
*
*/
gboolean
mono_class_is_subclass_of (MonoClass *klass, MonoClass *klassc, gboolean check_interfaces)
{
MONO_EXTERNAL_ONLY_GC_UNSAFE (gboolean, mono_class_is_subclass_of_internal (klass, klassc, check_interfaces));
}
/**
* mono_domain_set_internal:
* \param domain the new domain
*
* Sets the current domain to \p domain.
*/
void
mono_domain_set_internal (MonoDomain *domain)
{
MONO_EXTERNAL_ONLY_GC_UNSAFE_VOID (mono_domain_set_internal_with_options (domain, TRUE));
}
/**
* mono_domain_set:
* \param domain domain
* \param force force setting.
*
* Set the current appdomain to \p domain. If \p force is set, set it even
* if it is being unloaded.
*
* \returns TRUE on success; FALSE if the domain is unloaded
*/
gboolean
mono_domain_set (MonoDomain *domain, gboolean force)
{
MONO_EXTERNAL_ONLY_GC_UNSAFE_VOID (mono_domain_set_internal_with_options (domain, TRUE));
return TRUE;
}
/**
* mono_assembly_name_free:
* \param aname assembly name to free
*
* Frees the provided assembly name object.
 * (it does not free the object itself, only the name members).
*/
void
mono_assembly_name_free (MonoAssemblyName *aname)
{
if (!aname)
return;
MONO_EXTERNAL_ONLY_GC_UNSAFE_VOID (mono_assembly_name_free_internal (aname));
}
/**
* mono_thread_manage:
*
*/
void
mono_thread_manage (void)
{
MONO_EXTERNAL_ONLY_GC_UNSAFE_VOID (mono_thread_manage_internal ());
}
void
mono_register_config_for_assembly (const char* assembly_name, const char* config_xml)
{
}
/**
* mono_domain_free:
* \param domain the domain to release
* \param force if TRUE, it allows the root domain to be released (used at shutdown only).
*
* This releases the resources associated with the specific domain.
* This is a low-level function that is invoked by the AppDomain infrastructure
* when necessary.
*
* In theory, this is dead code on netcore and thus does not need to be ALC-aware.
*/
void
mono_domain_free (MonoDomain *domain, gboolean force)
{
g_assert_not_reached ();
}
/**
* mono_domain_get_id:
*
* A domain ID is guaranteed to be unique for as long as the domain
* using it is alive. It may be reused later once the domain has been
* unloaded.
*
* \returns The unique ID for \p domain.
*/
gint32
mono_domain_get_id (MonoDomain *domain)
{
return domain->domain_id;
}
/**
* mono_domain_get_friendly_name:
*
* The returned string's lifetime is the same as \p domain's. Consider
* copying it if you need to store it somewhere.
*
* \returns The friendly name of \p domain. Can be NULL if not yet set.
*/
const char *
mono_domain_get_friendly_name (MonoDomain *domain)
{
return domain->friendly_name;
}
/**
* mono_domain_is_unloading:
*/
gboolean
mono_domain_is_unloading (MonoDomain *domain)
{
return FALSE;
}
/**
* mono_domain_from_appdomain:
*/
MonoDomain *
mono_domain_from_appdomain (MonoAppDomain *appdomain_raw)
{
return mono_get_root_domain ();
}
/**
* mono_context_set:
*/
void
mono_context_set (MonoAppContext * new_context)
{
}
/**
* mono_context_get:
*
* Returns: the current Mono Application Context.
*/
MonoAppContext *
mono_context_get (void)
{
return NULL;
}
/**
* mono_context_get_id:
* \param context the context to operate on.
*
* Context IDs are guaranteed to be unique for the duration of a Mono
* process; they are never reused.
*
* \returns The unique ID for \p context.
*/
gint32
mono_context_get_id (MonoAppContext *context)
{
return context->context_id;
}
/**
* mono_context_get_domain_id:
* \param context the context to operate on.
* \returns The ID of the domain that \p context was created in.
*/
gint32
mono_context_get_domain_id (MonoAppContext *context)
{
return context->domain_id;
}
/**
* mono_string_equal:
* \param s1 First string to compare
* \param s2 Second string to compare
*
* Compares two \c MonoString* instances ordinally for equality.
*
* \returns FALSE if the strings differ.
*/
gboolean
mono_string_equal (MonoString *s1, MonoString *s2)
{
MONO_EXTERNAL_ONLY (gboolean, mono_string_equal_internal (s1, s2));
}
/**
* mono_string_hash:
* \param s the string to hash
*
* Compute the hash for a \c MonoString*
* \returns the hash for the string.
*/
guint
mono_string_hash (MonoString *s)
{
MONO_EXTERNAL_ONLY (guint, mono_string_hash_internal (s));
}
/**
* mono_domain_create:
*
* Creates a new application domain, the unmanaged representation
* of the actual domain.
*
 * Application domains provide an isolation facility for assemblies. You
* can load assemblies and execute code in them that will not be visible
* to other application domains. This is a runtime-based virtualization
* technology.
*
* It is possible to unload domains, which unloads the assemblies and
* data that was allocated in that domain.
*
* When a domain is created a mempool is allocated for domain-specific
 * structures, along with a dedicated code manager to hold code that is
* associated with the domain.
*
* \returns New initialized \c MonoDomain, with no configuration or assemblies
* loaded into it.
*/
MonoDomain *
mono_domain_create (void)
{
g_assert_not_reached ();
}
/**
* mono_domain_get_by_id:
* \param domainid the ID
* \returns the domain for a specific domain id.
*/
MonoDomain *
mono_domain_get_by_id (gint32 domainid)
{
MonoDomain * domain = mono_get_root_domain ();
if (domain->domain_id == domainid)
return domain;
else
return NULL;
}
/**
* mono_domain_assembly_open:
* \param domain the application domain
* \param name file name of the assembly
*/
MonoAssembly *
mono_domain_assembly_open (MonoDomain *domain, const char *name)
{
MonoAssembly *result;
MONO_ENTER_GC_UNSAFE;
result = mono_domain_assembly_open_internal (mono_alc_get_default (), name);
MONO_EXIT_GC_UNSAFE;
return result;
}
void
mono_domain_ensure_entry_assembly (MonoDomain *domain, MonoAssembly *assembly)
{
mono_runtime_ensure_entry_assembly (assembly);
}
/**
* mono_domain_foreach:
* \param func function to invoke with the domain data
 * \param user_data user-defined pointer that is passed to the supplied \p func for each domain
*
* Use this method to safely iterate over all the loaded application
* domains in the current runtime. The provided \p func is invoked with a
* pointer to the \c MonoDomain and is given the value of the \p user_data
* parameter which can be used to pass state to your called routine.
*/
void
mono_domain_foreach (MonoDomainFunc func, gpointer user_data)
{
MONO_ENTER_GC_UNSAFE;
func (mono_get_root_domain (), user_data);
MONO_EXIT_GC_UNSAFE;
}
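/* Illustrative usage sketch, not part of the original file: counting loaded
 * domains via mono_domain_foreach. `count_domain` is a hypothetical callback
 * name; on this runtime only the root domain is visited. Kept under #if 0 so
 * it is clearly an example rather than live code. */
#if 0
static void
count_domain (MonoDomain *domain, gpointer user_data)
{
	int *count = (int *)user_data;
	(*count)++;
}

static int
example_count_domains (void)
{
	int count = 0;
	mono_domain_foreach (count_domain, &count);
	return count;
}
#endif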
/**
* mono_context_init:
* \param domain The domain where the \c System.Runtime.Remoting.Context.Context is initialized
* Initializes the \p domain's default \c System.Runtime.Remoting 's Context.
*/
void
mono_context_init (MonoDomain *domain)
{
}
/**
* mono_domain_set_config:
* \param domain \c MonoDomain initialized with the appdomain we want to change
* \param base_dir new base directory for the appdomain
* \param config_file_name path to the new configuration for the app domain
*
* Used to set the system configuration for an appdomain
*
* Without using this, embedded builds will get 'System.Configuration.ConfigurationErrorsException:
* Error Initializing the configuration system. ---> System.ArgumentException:
* The 'ExeConfigFilename' argument cannot be null.' for some managed calls.
*/
void
mono_domain_set_config (MonoDomain *domain, const char *base_dir, const char *config_file_name)
{
g_assert_not_reached ();
}
/**
* mono_domain_try_type_resolve:
* \param domain application domain in which to resolve the type
* \param name the name of the type to resolve or NULL.
* \param typebuilder A \c System.Reflection.Emit.TypeBuilder, used if name is NULL.
*
* This routine invokes the internal \c System.AppDomain.DoTypeResolve and returns
* the assembly that matches name, or ((TypeBuilder)typebuilder).FullName.
*
* \returns A \c MonoReflectionAssembly or NULL if not found
*/
MonoReflectionAssembly *
mono_domain_try_type_resolve (MonoDomain *domain, char *name, MonoObject *typebuilder_raw)
{
HANDLE_FUNCTION_ENTER ();
g_assert (domain);
g_assert (name || typebuilder_raw);
ERROR_DECL (error);
MonoReflectionAssemblyHandle ret = NULL_HANDLE_INIT;
// This will not work correctly on netcore
if (name) {
MonoStringHandle name_handle = mono_string_new_handle (name, error);
goto_if_nok (error, exit);
ret = mono_domain_try_type_resolve_name (NULL, name_handle, error);
} else {
// TODO: make this work on netcore when working on SRE.TypeBuilder
g_assert_not_reached ();
}
exit:
mono_error_cleanup (error);
HANDLE_FUNCTION_RETURN_OBJ (ret);
}
/**
* mono_jit_info_table_find:
* \param domain Domain that you want to look up
* \param addr Points to an address with JITed code.
*
* Use this function to obtain a \c MonoJitInfo* object that can be used to get
* some statistics. You should provide both the \p domain on which you will be
* performing the probe, and an address. Since application domains can share code
* the same address can be in use by multiple domains at once.
*
* This does not return any results for trampolines.
*
* \returns NULL if the address does not belong to JITed code (it might be native
* code or a trampoline) or a valid pointer to a \c MonoJitInfo* .
*/
MonoJitInfo*
mono_jit_info_table_find (MonoDomain *domain, gpointer addr)
{
return mono_jit_info_table_find_internal (addr, TRUE, FALSE);
}
/**
* mono_domain_owns_vtable_slot:
* \returns Whether \p vtable_slot is inside a vtable which belongs to \p domain.
*/
gboolean
mono_domain_owns_vtable_slot (MonoDomain *domain, gpointer vtable_slot)
{
return mono_mem_manager_mp_contains_addr (mono_mem_manager_get_ambient (), vtable_slot);
}
/**
* mono_method_get_unmanaged_callers_only_ftnptr:
* \param method method to generate a thunk for.
* \param error set on error
*
* Returns a function pointer for calling the given UnmanagedCallersOnly method from native code.
* The function pointer will use the calling convention specified on the UnmanagedCallersOnly
* attribute (or the default platform calling convention if omitted).
*
 * Unlike \c mono_method_get_unmanaged_thunk, only minimal marshaling is applied to the
 * method parameters in the wrapper. See
 * https://docs.microsoft.com/en-us/dotnet/api/system.runtime.interopservices.unmanagedcallersonlyattribute?view=net-6.0
 * The method must be static and use only blittable argument types. There is no exception out-argument.
 */
void*
mono_method_get_unmanaged_callers_only_ftnptr (MonoMethod *method, MonoError *error)
{
MONO_EXTERNAL_ONLY_GC_UNSAFE (gpointer, mono_method_get_unmanaged_wrapper_ftnptr_internal (method, TRUE, error));
}
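/* Illustrative usage sketch, not part of the original file: calling the new
 * API from an embedder. The target managed method must be static, marked
 * [UnmanagedCallersOnly], and use only blittable parameter types; `add_fn`
 * and the int(int,int) shape are hypothetical. Kept under #if 0 so it is
 * clearly an example rather than live code. */
#if 0
typedef int (*add_fn) (int, int);

static int
example_call_unmanaged_callers_only (MonoMethod *method)
{
	ERROR_DECL (error);
	add_fn fn = (add_fn) mono_method_get_unmanaged_callers_only_ftnptr (method, error);
	if (!is_ok (error)) {
		mono_error_cleanup (error);
		return -1;
	}
	/* Invoke the managed [UnmanagedCallersOnly] method through the native pointer. */
	return fn (2, 3);
}
#endif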
| 1 |
dotnet/runtime | 66,007 | [monoapi] Add mono_method_get_unmanaged_callers_only_ftnptr | Like `RuntimeMethodHandle.GetFunctionPointer`, but callable from native code | lambdageek | 2022-03-01T15:38:09Z | 2022-03-03T15:38:37Z | faa272f860f83802a9cee65619e6e2e841b05433 | 1cfa6d6964d890f9673b25d2165532f9a43710a7 | [monoapi] Add mono_method_get_unmanaged_callers_only_ftnptr. Like `RuntimeMethodHandle.GetFunctionPointer`, but callable from native code | ./src/mono/mono/metadata/icall.c | /**
* \file
*
* Authors:
* Dietmar Maurer ([email protected])
* Paolo Molaro ([email protected])
* Patrik Torstensson ([email protected])
* Marek Safar ([email protected])
* Aleksey Kliger ([email protected])
*
* Copyright 2001-2003 Ximian, Inc (http://www.ximian.com)
* Copyright 2004-2009 Novell, Inc (http://www.novell.com)
* Copyright 2011-2015 Xamarin Inc (http://www.xamarin.com).
* Licensed under the MIT license. See LICENSE file in the project root for full license information.
*/
#include <config.h>
#if defined(TARGET_WIN32) || defined(HOST_WIN32)
#include <stdio.h>
#endif
#include <glib.h>
#include <stdarg.h>
#include <string.h>
#include <ctype.h>
#ifdef HAVE_ALLOCA_H
#include <alloca.h>
#endif
#ifdef HAVE_SYS_TIME_H
#include <sys/time.h>
#endif
#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif
#if defined (HAVE_WCHAR_H)
#include <wchar.h>
#endif
#include "mono/metadata/icall-internals.h"
#include "mono/utils/mono-membar.h"
#include <mono/metadata/object.h>
#include <mono/metadata/threads.h>
#include <mono/metadata/threads-types.h>
#include <mono/metadata/monitor.h>
#include <mono/metadata/reflection.h>
#include <mono/metadata/image-internals.h>
#include <mono/metadata/assembly.h>
#include <mono/metadata/assembly-internals.h>
#include <mono/metadata/tabledefs.h>
#include <mono/metadata/exception.h>
#include <mono/metadata/exception-internals.h>
#include <mono/metadata/w32file.h>
#include <mono/metadata/mono-endian.h>
#include <mono/metadata/tokentype.h>
#include <mono/metadata/metadata-internals.h>
#include <mono/metadata/metadata-update.h>
#include <mono/metadata/class-internals.h>
#include <mono/metadata/class-init.h>
#include <mono/metadata/reflection-internals.h>
#include <mono/metadata/marshal.h>
#include <mono/metadata/gc-internals.h>
#include <mono/metadata/mono-gc.h>
#include <mono/metadata/appdomain-icalls.h>
#include <mono/metadata/string-icalls.h>
#include <mono/metadata/debug-helpers.h>
#include <mono/metadata/environment.h>
#include <mono/metadata/profiler-private.h>
#include <mono/metadata/mono-config.h>
#include <mono/metadata/cil-coff.h>
#include <mono/metadata/mono-perfcounters.h>
#include <mono/metadata/mono-debug.h>
#include <mono/metadata/mono-ptr-array.h>
#include <mono/metadata/verify-internals.h>
#include <mono/metadata/runtime.h>
#include <mono/metadata/seq-points-data.h>
#include <mono/metadata/icall-table.h>
#include <mono/metadata/handle.h>
#include <mono/metadata/w32event.h>
#include <mono/metadata/abi-details.h>
#include <mono/metadata/loader-internals.h>
#include <mono/utils/monobitset.h>
#include <mono/utils/mono-time.h>
#include <mono/utils/mono-proclib.h>
#include <mono/utils/mono-string.h>
#include <mono/utils/mono-error-internals.h>
#include <mono/utils/mono-mmap.h>
#include <mono/utils/mono-digest.h>
#include <mono/utils/bsearch.h>
#include <mono/utils/mono-os-mutex.h>
#include <mono/utils/mono-threads.h>
#include <mono/metadata/w32error.h>
#include <mono/utils/w32api.h>
#include <mono/utils/mono-logger-internals.h>
#include <mono/utils/mono-math.h>
#if !defined(HOST_WIN32) && defined(HAVE_SYS_UTSNAME_H)
#include <sys/utsname.h>
#endif
#if defined(HOST_WIN32)
#include <windows.h>
#endif
#include "icall-decl.h"
#include "mono/utils/mono-threads-coop.h"
#include "mono/metadata/icall-signatures.h"
#include "mono/utils/mono-signal-handler.h"
#if _MSC_VER
#pragma warning(disable:4047) // FIXME differs in levels of indirection
#endif
//#define MONO_DEBUG_ICALLARRAY
// In line with CoreCLR heuristics, https://github.com/dotnet/runtime/blob/69e114c1abf91241a0eeecf1ecceab4711b8aa62/src/coreclr/vm/threads.cpp#L6408.
// Minimum stack size should be sufficient to allow a typical non-recursive call chain to execute,
// including potential exception handling and garbage collection. Used for probing for available
// stack space through RuntimeHelpers.EnsureSufficientExecutionStack.
#if TARGET_SIZEOF_VOID_P == 8
#define MONO_MIN_EXECUTION_STACK_SIZE (128 * 1024)
#else
#define MONO_MIN_EXECUTION_STACK_SIZE (64 * 1024)
#endif
#ifdef MONO_DEBUG_ICALLARRAY
static char debug_icallarray; // 0:uninitialized 1:true 2:false
static gboolean
icallarray_print_enabled (void)
{
if (!debug_icallarray)
debug_icallarray = MONO_TRACE_IS_TRACED (G_LOG_LEVEL_DEBUG, MONO_TRACE_ICALLARRAY) ? 1 : 2;
return debug_icallarray == 1;
}
static void
icallarray_print (const char *format, ...)
{
if (!icallarray_print_enabled ())
return;
va_list args;
va_start (args, format);
g_printv (format, args);
va_end (args);
}
#else
#define icallarray_print_enabled() (FALSE)
#define icallarray_print(...) /* nothing */
#endif
/* Lazy class loading functions */
static GENERATE_GET_CLASS_WITH_CACHE (module, "System.Reflection", "Module")
static void
array_set_value_impl (MonoArrayHandle arr, MonoObjectHandle value, guint32 pos, gboolean strict_enums, gboolean strict_signs, MonoError *error);
static MonoArrayHandle
type_array_from_modifiers (MonoType *type, int optional, MonoError *error);
static inline MonoBoolean
is_generic_parameter (MonoType *type)
{
return !m_type_is_byref (type) && (type->type == MONO_TYPE_VAR || type->type == MONO_TYPE_MVAR);
}
#ifdef HOST_WIN32
static void
mono_icall_make_platform_path (gchar *path)
{
for (size_t i = strlen (path); i > 0; i--)
if (path [i-1] == '\\')
path [i-1] = '/';
}
static const gchar *
mono_icall_get_file_path_prefix (const gchar *path)
{
if (*path == '/' && *(path + 1) == '/') {
return "file:";
} else {
return "file:///";
}
}
#else
static inline void
mono_icall_make_platform_path (gchar *path)
{
return;
}
static inline const gchar *
mono_icall_get_file_path_prefix (const gchar *path)
{
return "file://";
}
#endif /* HOST_WIN32 */
MonoJitICallInfos mono_jit_icall_info;
MonoObjectHandle
ves_icall_System_Array_GetValueImpl (MonoArrayHandle array, guint32 pos, MonoError *error)
{
MonoClass * const array_class = mono_handle_class (array);
MonoClass * const element_class = m_class_get_element_class (array_class);
if (m_class_is_native_pointer (element_class)) {
mono_error_set_not_supported (error, NULL);
return NULL_HANDLE;
}
if (m_class_is_valuetype (element_class)) {
gsize element_size = mono_array_element_size (array_class);
gpointer element_address = mono_array_addr_with_size_fast (MONO_HANDLE_RAW (array), element_size, (gsize)pos);
return mono_value_box_handle (element_class, element_address, error);
}
MonoObjectHandle result = mono_new_null ();
mono_handle_array_getref (result, array, pos);
return result;
}
void
ves_icall_System_Array_SetValueImpl (MonoArrayHandle arr, MonoObjectHandle value, guint32 pos, MonoError *error)
{
array_set_value_impl (arr, value, pos, TRUE, TRUE, error);
}
static inline void
set_invalid_cast (MonoError *error, MonoClass *src_class, MonoClass *dst_class)
{
mono_get_runtime_callbacks ()->set_cast_details (src_class, dst_class);
mono_error_set_invalid_cast (error);
}
void
ves_icall_System_Array_SetValueRelaxedImpl (MonoArrayHandle arr, MonoObjectHandle value, guint32 pos, MonoError *error)
{
array_set_value_impl (arr, value, pos, FALSE, FALSE, error);
}
// Copied from CoreCLR: https://github.com/dotnet/coreclr/blob/d3e39bc2f81e3dbf9e4b96347f62b49d8700336c/src/vm/invokeutil.cpp#L33
#define PT_Primitive 0x01000000
static const guint32 primitive_conversions [] = {
0x00, // MONO_TYPE_END
0x00, // MONO_TYPE_VOID
PT_Primitive | 0x0004, // MONO_TYPE_BOOLEAN
PT_Primitive | 0x3F88, // MONO_TYPE_CHAR (W = U2, CHAR, I4, U4, I8, U8, R4, R8)
PT_Primitive | 0x3550, // MONO_TYPE_I1 (W = I1, I2, I4, I8, R4, R8)
PT_Primitive | 0x3FE8, // MONO_TYPE_U1 (W = CHAR, U1, I2, U2, I4, U4, I8, U8, R4, R8)
PT_Primitive | 0x3540, // MONO_TYPE_I2 (W = I2, I4, I8, R4, R8)
PT_Primitive | 0x3F88, // MONO_TYPE_U2 (W = U2, CHAR, I4, U4, I8, U8, R4, R8)
PT_Primitive | 0x3500, // MONO_TYPE_I4 (W = I4, I8, R4, R8)
PT_Primitive | 0x3E00, // MONO_TYPE_U4 (W = U4, I8, R4, R8)
PT_Primitive | 0x3400, // MONO_TYPE_I8 (W = I8, R4, R8)
PT_Primitive | 0x3800, // MONO_TYPE_U8 (W = U8, R4, R8)
PT_Primitive | 0x3000, // MONO_TYPE_R4 (W = R4, R8)
PT_Primitive | 0x2000, // MONO_TYPE_R8 (W = R8)
};
// Copied from CoreCLR: https://github.com/dotnet/coreclr/blob/030a3ea9b8dbeae89c90d34441d4d9a1cf4a7de6/src/vm/invokeutil.h#L176
static
gboolean can_primitive_widen (MonoTypeEnum src_type, MonoTypeEnum dest_type)
{
if (dest_type > MONO_TYPE_R8 || src_type > MONO_TYPE_R8) {
return (MONO_TYPE_I == dest_type && MONO_TYPE_I == src_type) || (MONO_TYPE_U == dest_type && MONO_TYPE_U == src_type);
}
return ((1 << dest_type) & primitive_conversions [src_type]) != 0;
}
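/*
 * Worked example (editor's note): can_primitive_widen (MONO_TYPE_I1,
 * MONO_TYPE_I4) tests (1 << 0x08) = 0x100 against
 * primitive_conversions [MONO_TYPE_I1] = 0x3550; the bit is set, so the
 * widening is allowed. can_primitive_widen (MONO_TYPE_I1, MONO_TYPE_U1)
 * tests (1 << 0x05) = 0x20, which is clear in 0x3550, so the
 * signed-to-unsigned conversion is rejected.
 */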
// Copied from CoreCLR: https://github.com/dotnet/coreclr/blob/eafa8648ebee92de1380278b15cd5c2b6ef11218/src/vm/array.cpp#L1406
static MonoTypeEnum
get_normalized_integral_array_element_type (MonoTypeEnum elementType)
{
// Array Primitive types such as E_T_I4 and E_T_U4 are interchangeable
// Enums with interchangeable underlying types are interchangeable
// BOOL is NOT interchangeable with I1/U1, nor CHAR with I2/U2
switch (elementType) {
case MONO_TYPE_U1:
case MONO_TYPE_U2:
case MONO_TYPE_U4:
case MONO_TYPE_U8:
case MONO_TYPE_U:
return (MonoTypeEnum) (elementType - 1); // normalize to signed type
}
return elementType;
}
MonoBoolean
ves_icall_System_Array_CanChangePrimitive (MonoReflectionType *volatile* ref_src_type_handle, MonoReflectionType *volatile* ref_dst_type_handle, MonoBoolean reliable)
{
MonoReflectionType* const ref_src_type = *ref_src_type_handle;
MonoReflectionType* const ref_dst_type = *ref_dst_type_handle;
MonoType *src_type = ref_src_type->type;
MonoType *dst_type = ref_dst_type->type;
g_assert (mono_type_is_primitive (src_type));
g_assert (mono_type_is_primitive (dst_type));
MonoTypeEnum normalized_src_type = get_normalized_integral_array_element_type (src_type->type);
MonoTypeEnum normalized_dst_type = get_normalized_integral_array_element_type (dst_type->type);
// Allow conversions like int <-> uint
if (normalized_src_type == normalized_dst_type) {
return TRUE;
}
// Widening is not allowed if reliable is true.
if (reliable) {
return FALSE;
}
// NOTE: we don't use normalized types here, so int -> ulong will be false
// see https://github.com/dotnet/coreclr/pull/25209#issuecomment-505952295
return can_primitive_widen (src_type->type, dst_type->type);
}
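/*
 * Example (editor's note): for uint[] <-> int[] both element types
 * normalize to MONO_TYPE_I4, so CanChangePrimitive returns TRUE even with
 * reliable == TRUE. For int -> long the normalized types differ; with
 * reliable == FALSE the result comes from
 * can_primitive_widen (MONO_TYPE_I4, MONO_TYPE_I8), which is TRUE.
 */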
static void
array_set_value_impl (MonoArrayHandle arr_handle, MonoObjectHandle value_handle, guint32 pos, gboolean strict_enums, gboolean strict_signs, MonoError *error)
{
MonoClass *ac, *vc, *ec;
gint32 esize, vsize;
gpointer *ea = NULL, *va = NULL;
guint64 u64 = 0;
gint64 i64 = 0;
gdouble r64 = 0;
gboolean castOk = FALSE;
gboolean et_isenum = FALSE;
gboolean vt_isenum = FALSE;
if (!MONO_HANDLE_IS_NULL (value_handle))
vc = mono_handle_class (value_handle);
else
vc = NULL;
ac = mono_handle_class (arr_handle);
ec = m_class_get_element_class (ac);
esize = mono_array_element_size (ac);
if (mono_class_is_nullable (ec)) {
if (vc && m_class_is_primitive (vc) && vc != m_class_get_nullable_elem_class (ec)) {
// T -> Nullable<T> T must be exact
set_invalid_cast (error, vc, ec);
goto leave;
}
MONO_ENTER_NO_SAFEPOINTS;
ea = (gpointer*) mono_array_addr_with_size_internal (MONO_HANDLE_RAW (arr_handle), esize, pos);
if (!MONO_HANDLE_IS_NULL (value_handle))
va = (gpointer*) mono_object_unbox_internal (MONO_HANDLE_RAW (value_handle));
mono_nullable_init_unboxed ((guint8*)ea, va, ec);
MONO_EXIT_NO_SAFEPOINTS;
goto leave;
}
if (MONO_HANDLE_IS_NULL (value_handle)) {
MONO_ENTER_NO_SAFEPOINTS;
ea = (gpointer*) mono_array_addr_with_size_internal (MONO_HANDLE_RAW (arr_handle), esize, pos);
mono_gc_bzero_atomic (ea, esize);
MONO_EXIT_NO_SAFEPOINTS;
goto leave;
}
#define WIDENING_MSG NULL
#define WIDENING_ARG NULL
#define NO_WIDENING_CONVERSION G_STMT_START{ \
mono_error_set_argument (error, WIDENING_ARG, WIDENING_MSG); \
break; \
}G_STMT_END
#define CHECK_WIDENING_CONVERSION(extra) G_STMT_START{ \
if (esize < vsize + (extra)) { \
mono_error_set_argument (error, WIDENING_ARG, WIDENING_MSG); \
break; \
} \
}G_STMT_END
#define INVALID_CAST G_STMT_START{ \
mono_get_runtime_callbacks ()->set_cast_details (vc, ec); \
mono_error_set_invalid_cast (error); \
break; \
}G_STMT_END
MonoTypeEnum et;
et = m_class_get_byval_arg (ec)->type;
MonoTypeEnum vt;
vt = m_class_get_byval_arg (vc)->type;
/* Check element (destination) type. */
switch (et) {
case MONO_TYPE_STRING:
switch (vt) {
case MONO_TYPE_STRING:
break;
default:
INVALID_CAST;
}
break;
case MONO_TYPE_BOOLEAN:
switch (vt) {
case MONO_TYPE_BOOLEAN:
break;
case MONO_TYPE_CHAR:
case MONO_TYPE_U1:
case MONO_TYPE_U2:
case MONO_TYPE_U4:
case MONO_TYPE_U8:
case MONO_TYPE_I1:
case MONO_TYPE_I2:
case MONO_TYPE_I4:
case MONO_TYPE_I8:
case MONO_TYPE_R4:
case MONO_TYPE_R8:
NO_WIDENING_CONVERSION;
break;
default:
INVALID_CAST;
}
break;
default:
break;
}
if (!is_ok (error))
goto leave;
castOk = mono_object_handle_isinst_mbyref_raw (value_handle, ec, error);
if (!is_ok (error))
goto leave;
if (!m_class_is_valuetype (ec)) {
if (!castOk)
INVALID_CAST;
if (is_ok (error))
MONO_HANDLE_ARRAY_SETREF (arr_handle, pos, value_handle);
goto leave;
}
if (castOk) {
MONO_ENTER_NO_SAFEPOINTS;
ea = (gpointer*) mono_array_addr_with_size_internal (MONO_HANDLE_RAW (arr_handle), esize, pos);
va = (gpointer*) mono_object_unbox_internal (MONO_HANDLE_RAW (value_handle));
if (m_class_has_references (ec))
mono_value_copy_internal (ea, va, ec);
else
mono_gc_memmove_atomic (ea, va, esize);
MONO_EXIT_NO_SAFEPOINTS;
goto leave;
}
if (!m_class_is_valuetype (vc))
INVALID_CAST;
if (!is_ok (error))
goto leave;
vsize = mono_class_value_size (vc, NULL);
et_isenum = et == MONO_TYPE_VALUETYPE && m_class_is_enumtype (m_class_get_byval_arg (ec)->data.klass);
vt_isenum = vt == MONO_TYPE_VALUETYPE && m_class_is_enumtype (m_class_get_byval_arg (vc)->data.klass);
if (strict_enums && et_isenum && !vt_isenum) {
INVALID_CAST;
goto leave;
}
if (et_isenum)
et = mono_class_enum_basetype_internal (m_class_get_byval_arg (ec)->data.klass)->type;
if (vt_isenum)
vt = mono_class_enum_basetype_internal (m_class_get_byval_arg (vc)->data.klass)->type;
// Treat MONO_TYPE_U/I as MONO_TYPE_U8/I8/U4/I4
#if SIZEOF_VOID_P == 8
vt = vt == MONO_TYPE_U ? MONO_TYPE_U8 : (vt == MONO_TYPE_I ? MONO_TYPE_I8 : vt);
et = et == MONO_TYPE_U ? MONO_TYPE_U8 : (et == MONO_TYPE_I ? MONO_TYPE_I8 : et);
#else
vt = vt == MONO_TYPE_U ? MONO_TYPE_U4 : (vt == MONO_TYPE_I ? MONO_TYPE_I4 : vt);
et = et == MONO_TYPE_U ? MONO_TYPE_U4 : (et == MONO_TYPE_I ? MONO_TYPE_I4 : et);
#endif
#define ASSIGN_UNSIGNED(etype) G_STMT_START{\
switch (vt) { \
case MONO_TYPE_U1: \
case MONO_TYPE_U2: \
case MONO_TYPE_U4: \
case MONO_TYPE_U8: \
case MONO_TYPE_CHAR: \
CHECK_WIDENING_CONVERSION(0); \
*(etype *) ea = (etype) u64; \
break; \
/* You can't assign a signed value to an unsigned array. */ \
case MONO_TYPE_I1: \
case MONO_TYPE_I2: \
case MONO_TYPE_I4: \
case MONO_TYPE_I8: \
if (!strict_signs) { \
CHECK_WIDENING_CONVERSION(0); \
*(etype *) ea = (etype) i64; \
break; \
} \
/* You can't assign a floating point number to an integer array. */ \
case MONO_TYPE_R4: \
case MONO_TYPE_R8: \
NO_WIDENING_CONVERSION; \
break; \
default: \
INVALID_CAST; \
break; \
} \
}G_STMT_END
#define ASSIGN_SIGNED(etype) G_STMT_START{\
switch (vt) { \
case MONO_TYPE_I1: \
case MONO_TYPE_I2: \
case MONO_TYPE_I4: \
case MONO_TYPE_I8: \
CHECK_WIDENING_CONVERSION(0); \
*(etype *) ea = (etype) i64; \
break; \
/* You can assign an unsigned value to a signed array if the array's */ \
/* element size is larger than the value size. */ \
case MONO_TYPE_U1: \
case MONO_TYPE_U2: \
case MONO_TYPE_U4: \
case MONO_TYPE_U8: \
case MONO_TYPE_CHAR: \
CHECK_WIDENING_CONVERSION(strict_signs ? 1 : 0); \
*(etype *) ea = (etype) u64; \
break; \
/* You can't assign a floating point number to an integer array. */ \
case MONO_TYPE_R4: \
case MONO_TYPE_R8: \
NO_WIDENING_CONVERSION; \
break; \
default: \
INVALID_CAST; \
break; \
} \
}G_STMT_END
#define ASSIGN_REAL(etype) G_STMT_START{\
switch (vt) { \
case MONO_TYPE_R4: \
case MONO_TYPE_R8: \
CHECK_WIDENING_CONVERSION(0); \
*(etype *) ea = (etype) r64; \
break; \
/* All integer values fit into a floating point array, so we don't */ \
/* need to CHECK_WIDENING_CONVERSION here. */ \
case MONO_TYPE_I1: \
case MONO_TYPE_I2: \
case MONO_TYPE_I4: \
case MONO_TYPE_I8: \
*(etype *) ea = (etype) i64; \
break; \
case MONO_TYPE_U1: \
case MONO_TYPE_U2: \
case MONO_TYPE_U4: \
case MONO_TYPE_U8: \
case MONO_TYPE_CHAR: \
*(etype *) ea = (etype) u64; \
break; \
default: \
INVALID_CAST; \
break; \
} \
}G_STMT_END
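/*
 * Example (editor's note): storing a boxed gint16 into a gint32[] element
 * reaches ASSIGN_SIGNED (gint32) with vt == MONO_TYPE_I2, esize == 4 and
 * vsize == 2; CHECK_WIDENING_CONVERSION (0) passes and the store is
 * *(gint32 *) ea = (gint32) i64. Storing a boxed guint32 into the same
 * array with strict_signs set requires esize >= vsize + 1, i.e. 4 >= 5,
 * which fails and surfaces as an ArgumentException.
 */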
MONO_ENTER_NO_SAFEPOINTS;
g_assert (!MONO_HANDLE_IS_NULL (value_handle));
g_assert (m_class_is_valuetype (vc));
va = (gpointer*) mono_object_unbox_internal (MONO_HANDLE_RAW (value_handle));
ea = (gpointer*) mono_array_addr_with_size_internal (MONO_HANDLE_RAW (arr_handle), esize, pos);
switch (vt) {
case MONO_TYPE_U1:
u64 = *(guint8 *) va;
break;
case MONO_TYPE_U2:
u64 = *(guint16 *) va;
break;
case MONO_TYPE_U4:
u64 = *(guint32 *) va;
break;
case MONO_TYPE_U8:
u64 = *(guint64 *) va;
break;
case MONO_TYPE_I1:
i64 = *(gint8 *) va;
break;
case MONO_TYPE_I2:
i64 = *(gint16 *) va;
break;
case MONO_TYPE_I4:
i64 = *(gint32 *) va;
break;
case MONO_TYPE_I8:
i64 = *(gint64 *) va;
break;
case MONO_TYPE_R4:
r64 = *(gfloat *) va;
break;
case MONO_TYPE_R8:
r64 = *(gdouble *) va;
break;
case MONO_TYPE_CHAR:
u64 = *(guint16 *) va;
break;
case MONO_TYPE_BOOLEAN:
/* Boolean is only compatible with itself. */
switch (et) {
case MONO_TYPE_CHAR:
case MONO_TYPE_U1:
case MONO_TYPE_U2:
case MONO_TYPE_U4:
case MONO_TYPE_U8:
case MONO_TYPE_I1:
case MONO_TYPE_I2:
case MONO_TYPE_I4:
case MONO_TYPE_I8:
case MONO_TYPE_R4:
case MONO_TYPE_R8:
NO_WIDENING_CONVERSION;
break;
default:
INVALID_CAST;
}
break;
default:
break;
}
/* If we can't do a direct copy, let's try a widening conversion. */
if (is_ok (error)) {
switch (et) {
case MONO_TYPE_CHAR:
ASSIGN_UNSIGNED (guint16);
break;
case MONO_TYPE_U1:
ASSIGN_UNSIGNED (guint8);
break;
case MONO_TYPE_U2:
ASSIGN_UNSIGNED (guint16);
break;
case MONO_TYPE_U4:
ASSIGN_UNSIGNED (guint32);
break;
case MONO_TYPE_U8:
ASSIGN_UNSIGNED (guint64);
break;
case MONO_TYPE_I1:
ASSIGN_SIGNED (gint8);
break;
case MONO_TYPE_I2:
ASSIGN_SIGNED (gint16);
break;
case MONO_TYPE_I4:
ASSIGN_SIGNED (gint32);
break;
case MONO_TYPE_I8:
ASSIGN_SIGNED (gint64);
break;
case MONO_TYPE_R4:
ASSIGN_REAL (gfloat);
break;
case MONO_TYPE_R8:
ASSIGN_REAL (gdouble);
break;
default:
INVALID_CAST;
}
}
MONO_EXIT_NO_SAFEPOINTS;
#undef INVALID_CAST
#undef NO_WIDENING_CONVERSION
#undef CHECK_WIDENING_CONVERSION
#undef ASSIGN_UNSIGNED
#undef ASSIGN_SIGNED
#undef ASSIGN_REAL
leave:
return;
}
void
ves_icall_System_Array_InternalCreate (MonoArray *volatile* result, MonoType* type, gint32 rank, gint32* pLengths, gint32* pLowerBounds)
{
ERROR_DECL (error);
MonoClass* klass = mono_class_from_mono_type_internal (type);
if (!mono_class_init_checked (klass, error))
goto exit;
if (m_class_get_byval_arg (m_class_get_element_class (klass))->type == MONO_TYPE_VOID) {
mono_error_set_not_supported (error, "Arrays of System.Void are not supported.");
goto exit;
}
if (m_type_is_byref (type) || m_class_is_byreflike (klass)) {
mono_error_set_not_supported (error, NULL);
goto exit;
}
MonoGenericClass *gklass;
gklass = mono_class_try_get_generic_class (klass);
if (is_generic_parameter (type) || mono_class_is_gtd (klass) || (gklass && gklass->context.class_inst->is_open)) {
mono_error_set_not_supported (error, NULL);
goto exit;
}
/* vectors are not the same as one-dimensional arrays with non-zero bounds */
gboolean bounded;
bounded = pLowerBounds != NULL && rank == 1 && pLowerBounds [0] != 0;
MonoClass* aklass;
aklass = mono_class_create_bounded_array (klass, rank, bounded);
uintptr_t aklass_rank;
aklass_rank = m_class_get_rank (aklass);
uintptr_t* sizes;
sizes = g_newa (uintptr_t, aklass_rank * 2);
intptr_t* lower_bounds;
lower_bounds = (intptr_t*)(sizes + aklass_rank);
// Copy lengths and lower_bounds from gint32 to [u]intptr_t.
for (uintptr_t i = 0; i < aklass_rank; ++i) {
if (pLowerBounds != NULL) {
lower_bounds [i] = pLowerBounds [i];
if ((gint64) pLowerBounds [i] + (gint64) pLengths [i] > G_MAXINT32) {
mono_error_set_argument_out_of_range (error, NULL, "Length + bound must not exceed Int32.MaxValue.");
goto exit;
}
} else {
lower_bounds [i] = 0;
}
sizes [i] = pLengths [i];
}
*result = mono_array_new_full_checked (aklass, sizes, lower_bounds, error);
exit:
mono_error_set_pending_exception (error);
}
gint32
ves_icall_System_Array_GetCorElementTypeOfElementType (MonoArrayHandle arr, MonoError *error)
{
MonoType *type = mono_type_get_underlying_type (m_class_get_byval_arg (m_class_get_element_class (mono_handle_class (arr))));
return type->type;
}
gint32
ves_icall_System_Array_IsValueOfElementType (MonoArrayHandle arr, MonoObjectHandle obj, MonoError *error)
{
return m_class_get_element_class (mono_handle_class (arr)) == mono_handle_class (obj);
}
static mono_array_size_t
mono_array_get_length (MonoArrayHandle arr, gint32 dimension, MonoError *error)
{
if (dimension < 0 || dimension >= m_class_get_rank (mono_handle_class (arr))) {
mono_error_set_index_out_of_range (error);
return 0;
}
return MONO_HANDLE_GETVAL (arr, bounds) ? MONO_HANDLE_GETVAL (arr, bounds [dimension].length)
: MONO_HANDLE_GETVAL (arr, max_length);
}
gint32
ves_icall_System_Array_GetLength (MonoArrayHandle arr, gint32 dimension, MonoError *error)
{
icallarray_print ("%s arr:%p dimension:%d\n", __func__, MONO_HANDLE_RAW (arr), (int)dimension);
mono_array_size_t const length = mono_array_get_length (arr, dimension, error);
if (length > G_MAXINT32) {
mono_error_set_overflow (error);
return 0;
}
return (gint32)length;
}
gint32
ves_icall_System_Array_GetLowerBound (MonoArrayHandle arr, gint32 dimension, MonoError *error)
{
icallarray_print ("%s arr:%p dimension:%d\n", __func__, MONO_HANDLE_RAW (arr), (int)dimension);
if (dimension < 0 || dimension >= m_class_get_rank (mono_handle_class (arr))) {
mono_error_set_index_out_of_range (error);
return 0;
}
return MONO_HANDLE_GETVAL (arr, bounds) ? MONO_HANDLE_GETVAL (arr, bounds [dimension].lower_bound)
: 0;
}
MonoBoolean
ves_icall_System_Array_FastCopy (MonoArrayHandle source, int source_idx, MonoArrayHandle dest, int dest_idx, int length, MonoError *error)
{
MonoVTable * const src_vtable = MONO_HANDLE_GETVAL (source, obj.vtable);
MonoVTable * const dest_vtable = MONO_HANDLE_GETVAL (dest, obj.vtable);
if (src_vtable->rank != dest_vtable->rank)
return FALSE;
MonoArrayBounds *source_bounds = MONO_HANDLE_GETVAL (source, bounds);
MonoArrayBounds *dest_bounds = MONO_HANDLE_GETVAL (dest, bounds);
for (int i = 0; i < src_vtable->rank; i++) {
if ((source_bounds && source_bounds [i].lower_bound > 0) ||
(dest_bounds && dest_bounds [i].lower_bound > 0))
return FALSE;
}
/* there's no integer overflow since mono_array_length_internal returns an unsigned integer */
if ((dest_idx + length > mono_array_handle_length (dest)) ||
(source_idx + length > mono_array_handle_length (source)))
return FALSE;
MonoClass * const src_class = m_class_get_element_class (src_vtable->klass);
MonoClass * const dest_class = m_class_get_element_class (dest_vtable->klass);
/*
* Handle common cases.
*/
/* Case 1: object[] -> valuetype[] (ArrayList::ToArray).
We fall back to managed here since we need to typecheck each boxed valuetype before storing it in the dest array.
*/
if (src_class == mono_defaults.object_class && m_class_is_valuetype (dest_class))
return FALSE;
/* Check if we're copying a char[] <==> (u)short[] */
if (src_class != dest_class) {
if (m_class_is_valuetype (dest_class) || m_class_is_enumtype (dest_class) ||
m_class_is_valuetype (src_class) || m_class_is_enumtype (src_class))
return FALSE;
/* It's only safe to copy between arrays if we can ensure the source will always have a subtype of the destination. We bail otherwise. */
if (!mono_class_is_subclass_of_internal (src_class, dest_class, FALSE))
return FALSE;
if (m_class_is_native_pointer (src_class) || m_class_is_native_pointer (dest_class))
return FALSE;
}
if (m_class_is_valuetype (dest_class)) {
gsize const element_size = mono_array_element_size (MONO_HANDLE_GETVAL (source, obj.vtable->klass));
MONO_ENTER_NO_SAFEPOINTS; // gchandle would also work here, is slow, breaks profiler tests.
gconstpointer const source_addr =
mono_array_addr_with_size_fast (MONO_HANDLE_RAW (source), element_size, source_idx);
if (m_class_has_references (dest_class)) {
mono_value_copy_array_handle (dest, dest_idx, source_addr, length);
} else {
gpointer const dest_addr =
mono_array_addr_with_size_fast (MONO_HANDLE_RAW (dest), element_size, dest_idx);
mono_gc_memmove_atomic (dest_addr, source_addr, element_size * length);
}
MONO_EXIT_NO_SAFEPOINTS;
} else {
mono_array_handle_memcpy_refs (dest, dest_idx, source, source_idx, length);
}
return TRUE;
}
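/*
 * Illustrative cases (editor's note): copying string[] into object[] stays
 * on this fast path, since string subclasses object and both are reference
 * arrays, so mono_array_handle_memcpy_refs is used. Copying object[] into
 * int[] returns FALSE at the first common case above, and the managed
 * Array.Copy implementation performs per-element type checks instead.
 */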
void
ves_icall_System_Array_GetGenericValue_icall (MonoArray **arr, guint32 pos, gpointer value)
{
icallarray_print ("%s arr:%p pos:%u value:%p\n", __func__, *arr, pos, value);
MONO_REQ_GC_UNSAFE_MODE; // because of gpointer value
MonoClass * const ac = mono_object_class (*arr);
gsize const esize = mono_array_element_size (ac);
gconstpointer * const ea = (gconstpointer*)((char*)(*arr)->vector + (pos * esize));
mono_gc_memmove_atomic (value, ea, esize);
}
void
ves_icall_System_Array_SetGenericValue_icall (MonoArray **arr, guint32 pos, gpointer value)
{
icallarray_print ("%s arr:%p pos:%u value:%p\n", __func__, *arr, pos, value);
MONO_REQ_GC_UNSAFE_MODE; // because of gpointer value
MonoClass * const ac = mono_object_class (*arr);
MonoClass * const ec = m_class_get_element_class (ac);
gsize const esize = mono_array_element_size (ac);
gpointer * const ea = (gpointer*)((char*)(*arr)->vector + (pos * esize));
if (MONO_TYPE_IS_REFERENCE (m_class_get_byval_arg (ec))) {
g_assert (esize == sizeof (gpointer));
mono_gc_wbarrier_generic_store_internal (ea, *(MonoObject **)value);
} else {
g_assert (m_class_is_inited (ec));
g_assert (esize == mono_class_value_size (ec, NULL));
if (m_class_has_references (ec))
mono_gc_wbarrier_value_copy_internal (ea, value, 1, ec);
else
mono_gc_memmove_atomic (ea, value, esize);
}
}
void
ves_icall_System_Runtime_RuntimeImports_Memmove (guint8 *destination, guint8 *source, size_t byte_count)
{
mono_gc_memmove_atomic (destination, source, byte_count);
}
void
ves_icall_System_Buffer_BulkMoveWithWriteBarrier (guint8 *destination, guint8 *source, size_t len, MonoType *type)
{
if (MONO_TYPE_IS_REFERENCE (type))
mono_gc_wbarrier_arrayref_copy_internal (destination, source, (guint)len);
else
mono_gc_wbarrier_value_copy_internal (destination, source, (guint)len, mono_class_from_mono_type_internal (type));
}
void
ves_icall_System_Runtime_RuntimeImports_ZeroMemory (guint8 *p, size_t byte_length)
{
memset (p, 0, byte_length);
}
gpointer
ves_icall_System_Runtime_CompilerServices_RuntimeHelpers_GetSpanDataFrom (MonoClassField *field_handle, MonoType_ptr targetTypeHandle, gpointer countPtr, MonoError *error)
{
gint32* count = (gint32*)countPtr;
MonoType *field_type = mono_field_get_type_checked (field_handle, error);
if (!field_type) {
mono_error_set_argument (error, "fldHandle", "fldHandle invalid");
return NULL;
}
if (!(field_type->attrs & FIELD_ATTRIBUTE_HAS_FIELD_RVA)) {
mono_error_set_argument_format (error, "field_handle", "Field '%s' doesn't have an RVA", mono_field_get_name (field_handle));
return NULL;
}
MonoType *type = targetTypeHandle;
if (MONO_TYPE_IS_REFERENCE (type) || type->type == MONO_TYPE_VALUETYPE) {
mono_error_set_argument (error, "array", "Cannot initialize array of non-primitive type");
return NULL;
}
int swizzle = 1;
int align;
#if G_BYTE_ORDER != G_LITTLE_ENDIAN
swizzle = mono_type_size (type, &align);
#endif
int dummy;
*count = mono_type_size (field_type, &dummy)/mono_type_size (type, &align);
return (gpointer)mono_field_get_rva (field_handle, swizzle);
}
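/*
 * Example (editor's sketch, compiler-dependent): for a C#
 * "static ReadOnlySpan<int> Data => new int[] { 1, 2, 3 };" the compiler
 * may emit a 12-byte RVA-backed field, so with an int32 target type
 * *count becomes 12 / 4 = 3 and the returned pointer addresses the
 * metadata blob directly, with no copy.
 */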
void
ves_icall_System_Runtime_CompilerServices_RuntimeHelpers_InitializeArray (MonoArrayHandle array, MonoClassField *field_handle, MonoError *error)
{
MonoClass *klass = mono_handle_class (array);
guint32 size = mono_array_element_size (klass);
MonoType *type = mono_type_get_underlying_type (m_class_get_byval_arg (m_class_get_element_class (klass)));
int align;
const char *field_data;
if (MONO_TYPE_IS_REFERENCE (type) || type->type == MONO_TYPE_VALUETYPE) {
mono_error_set_argument (error, "array", "Cannot initialize array of non-primitive type");
return;
}
MonoType *field_type = mono_field_get_type_checked (field_handle, error);
if (!field_type)
return;
if (!(field_type->attrs & FIELD_ATTRIBUTE_HAS_FIELD_RVA)) {
mono_error_set_argument_format (error, "field_handle", "Field '%s' doesn't have an RVA", mono_field_get_name (field_handle));
return;
}
size *= MONO_HANDLE_GETVAL(array, max_length);
field_data = mono_field_get_data (field_handle);
if (size > mono_type_size (field_handle->type, &align)) {
mono_error_set_argument (error, "field_handle", "Field not large enough to fill array");
return;
}
#if G_BYTE_ORDER != G_LITTLE_ENDIAN
#define SWAP(n) { \
guint ## n *data = (guint ## n *) mono_array_addr_internal (MONO_HANDLE_RAW(array), char, 0); \
guint ## n *src = (guint ## n *) field_data; \
int i, \
nEnt = (size / sizeof(guint ## n)); \
\
for (i = 0; i < nEnt; i++) { \
data[i] = read ## n (&src[i]); \
} \
}
/* printf ("Initialize array with elements of %s type\n", klass->element_class->name); */
switch (type->type) {
case MONO_TYPE_CHAR:
case MONO_TYPE_I2:
case MONO_TYPE_U2:
SWAP (16);
break;
case MONO_TYPE_I4:
case MONO_TYPE_U4:
case MONO_TYPE_R4:
SWAP (32);
break;
case MONO_TYPE_I8:
case MONO_TYPE_U8:
case MONO_TYPE_R8:
SWAP (64);
break;
default:
memcpy (mono_array_addr_internal (MONO_HANDLE_RAW(array), char, 0), field_data, size);
break;
}
#else
memcpy (mono_array_addr_internal (MONO_HANDLE_RAW(array), char, 0), field_data, size);
#endif
}
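/*
 * Example (editor's note): on a big-endian host, initializing an int[]
 * from an RVA blob holding the little-endian bytes 04 03 02 01 goes
 * through SWAP (32), and read32 yields 0x01020304 for that element; on
 * little-endian hosts the plain memcpy path is used unchanged.
 */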
MonoObjectHandle
ves_icall_System_Runtime_CompilerServices_RuntimeHelpers_GetObjectValue (MonoObjectHandle obj, MonoError *error)
{
if (MONO_HANDLE_IS_NULL (obj) || !m_class_is_valuetype (mono_handle_class (obj)))
return obj;
return mono_object_clone_handle (obj, error);
}
void
ves_icall_System_Runtime_CompilerServices_RuntimeHelpers_RunClassConstructor (MonoType *handle, MonoError *error)
{
MonoClass *klass;
MonoVTable *vtable;
MONO_CHECK_ARG_NULL (handle,);
klass = mono_class_from_mono_type_internal (handle);
MONO_CHECK_ARG (handle, klass,);
if (mono_class_is_gtd (klass))
return;
vtable = mono_class_vtable_checked (klass, error);
return_if_nok (error);
/* This will call the type constructor */
mono_runtime_class_init_full (vtable, error);
}
void
ves_icall_System_Runtime_CompilerServices_RuntimeHelpers_RunModuleConstructor (MonoImage *image, MonoError *error)
{
mono_image_check_for_module_cctor (image);
if (!image->has_module_cctor)
return;
MonoClass *module_klass = mono_class_get_checked (image, MONO_TOKEN_TYPE_DEF | 1, error);
return_if_nok (error);
MonoVTable * vtable = mono_class_vtable_checked (module_klass, error);
return_if_nok (error);
mono_runtime_class_init_full (vtable, error);
}
MonoBoolean
ves_icall_System_Runtime_CompilerServices_RuntimeHelpers_SufficientExecutionStack (void)
{
MonoThreadInfo *thread = mono_thread_info_current ();
void *current = &thread;
// Stack upper/lower bound should have been calculated and set as part of register_thread.
// If not, we are optimistic and assume there is enough room.
if (!thread->stack_start_limit || !thread->stack_end)
return TRUE;
// Stack start limit is stack lower bound. Make sure there is enough room left.
void *limit = ((uint8_t *)thread->stack_start_limit) + ALIGN_TO (MONO_STACK_OVERFLOW_GUARD_SIZE + MONO_MIN_EXECUTION_STACK_SIZE, ((gssize)mono_pagesize ()));
if (current < limit)
return FALSE;
if (mono_get_runtime_callbacks ()->is_interpreter_enabled () &&
!mono_get_runtime_callbacks ()->interp_sufficient_stack (MONO_MIN_EXECUTION_STACK_SIZE))
return FALSE;
return TRUE;
}
MonoObjectHandle
ves_icall_System_Runtime_CompilerServices_RuntimeHelpers_GetUninitializedObjectInternal (MonoType *handle, MonoError *error)
{
MonoClass *klass;
MonoVTable *vtable;
g_assert (handle);
klass = mono_class_from_mono_type_internal (handle);
if (m_class_is_string (klass)) {
mono_error_set_argument (error, NULL, NULL);
return NULL_HANDLE;
}
if (mono_class_is_array (klass) || mono_class_is_pointer (klass) || m_type_is_byref (handle)) {
mono_error_set_argument (error, NULL, NULL);
return NULL_HANDLE;
}
if (MONO_TYPE_IS_VOID (handle)) {
mono_error_set_argument (error, NULL, NULL);
return NULL_HANDLE;
}
if (m_class_is_abstract (klass) || m_class_is_interface (klass) || m_class_is_gtd (klass)) {
mono_error_set_member_access (error, NULL);
return NULL_HANDLE;
}
if (m_class_is_byreflike (klass)) {
mono_error_set_not_supported (error, NULL);
return NULL_HANDLE;
}
if (!mono_class_is_before_field_init (klass)) {
vtable = mono_class_vtable_checked (klass, error);
return_val_if_nok (error, NULL_HANDLE);
mono_runtime_class_init_full (vtable, error);
return_val_if_nok (error, NULL_HANDLE);
}
if (m_class_is_nullable (klass))
return mono_object_new_handle (m_class_get_nullable_elem_class (klass), error);
else
return mono_object_new_handle (klass, error);
}
void
ves_icall_System_Runtime_CompilerServices_RuntimeHelpers_PrepareMethod (MonoMethod *method, gpointer inst_types, int n_inst_types, MonoError *error)
{
if (method->flags & METHOD_ATTRIBUTE_ABSTRACT) {
mono_error_set_argument (error, NULL, NULL);
return;
}
MonoGenericContainer *container = NULL;
if (method->is_generic)
container = mono_method_get_generic_container (method);
else if (m_class_is_gtd (method->klass))
container = mono_class_get_generic_container (method->klass);
if (container) {
int nparams = container->type_argc + (container->parent ? container->parent->type_argc : 0);
if (nparams != n_inst_types) {
mono_error_set_argument (error, NULL, NULL);
return;
}
}
// FIXME: Implement
}
MonoObjectHandle
ves_icall_System_Object_MemberwiseClone (MonoObjectHandle this_obj, MonoError *error)
{
return mono_object_clone_handle (this_obj, error);
}
gint32
ves_icall_System_ValueType_InternalGetHashCode (MonoObjectHandle this_obj, MonoArrayHandleOut fields, MonoError *error)
{
MonoClass *klass;
MonoClassField **unhandled = NULL;
int count = 0;
gint32 result = (int)(gsize)mono_defaults.int32_class;
MonoClassField* field;
gpointer iter;
klass = mono_handle_class (this_obj);
if (mono_class_num_fields (klass) == 0)
return result;
/*
* Compute the starting value of the hashcode for fields of primitive
* types, and return the remaining fields in an array to the managed side.
* This way, we can avoid costly reflection operations in managed code.
*/
iter = NULL;
while ((field = mono_class_get_fields_internal (klass, &iter))) {
if (field->type->attrs & FIELD_ATTRIBUTE_STATIC)
continue;
if (mono_field_is_deleted (field))
continue;
gpointer addr = (guint8*)MONO_HANDLE_RAW (this_obj) + field->offset;
/* FIXME: Add more types */
switch (field->type->type) {
case MONO_TYPE_I4:
result ^= *(gint32*)addr;
break;
case MONO_TYPE_PTR:
result ^= mono_aligned_addr_hash (*(gpointer*)addr);
break;
case MONO_TYPE_STRING: {
MonoString *s;
s = *(MonoString**)addr;
if (s != NULL)
result ^= mono_string_hash_internal (s);
break;
}
default:
if (!unhandled)
unhandled = g_newa (MonoClassField*, mono_class_num_fields (klass));
unhandled [count ++] = field;
}
}
if (unhandled) {
MonoArrayHandle fields_arr = mono_array_new_handle (mono_defaults.object_class, count, error);
return_val_if_nok (error, 0);
MONO_HANDLE_ASSIGN (fields, fields_arr);
MonoObjectHandle h = MONO_HANDLE_NEW (MonoObject, NULL);
for (int i = 0; i < count; ++i) {
MonoObject *o = mono_field_get_value_object_checked (unhandled [i], MONO_HANDLE_RAW (this_obj), error);
return_val_if_nok (error, 0);
MONO_HANDLE_ASSIGN_RAW (h, o);
mono_array_handle_setref (fields_arr, i, h);
}
} else {
MONO_HANDLE_ASSIGN (fields, NULL_HANDLE);
}
return result;
}
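/*
 * Example (editor's note): for "struct S { int a; string b; DateTime c; }"
 * this icall XORs the seed with the gint32 field and the string hash, then
 * returns the DateTime field in "fields" so the managed side computes the
 * remaining hash via reflection only for the type the switch above does
 * not cover.
 */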
MonoBoolean
ves_icall_System_ValueType_Equals (MonoObjectHandle this_obj, MonoObjectHandle that, MonoArrayHandleOut fields, MonoError *error)
{
MonoClass *klass;
MonoClassField **unhandled = NULL;
MonoClassField* field;
gpointer iter;
int count = 0;
MONO_CHECK_ARG_NULL_HANDLE (that, FALSE);
MONO_HANDLE_ASSIGN (fields, NULL_HANDLE);
if (mono_handle_vtable (this_obj) != mono_handle_vtable (that))
return FALSE;
klass = mono_handle_class (this_obj);
if (m_class_is_enumtype (klass) && mono_class_enum_basetype_internal (klass) && mono_class_enum_basetype_internal (klass)->type == MONO_TYPE_I4)
return *(gint32*)mono_handle_get_data_unsafe (this_obj) == *(gint32*)mono_handle_get_data_unsafe (that);
/*
* Do the comparison for fields of primitive type and return a result if
* possible. Otherwise, return the remaining fields in an array to the
* managed side. This way, we can avoid costly reflection operations in
* managed code.
*/
iter = NULL;
while ((field = mono_class_get_fields_internal (klass, &iter))) {
if (field->type->attrs & FIELD_ATTRIBUTE_STATIC)
continue;
if (mono_field_is_deleted (field))
continue;
guint8 *this_field = (guint8 *)MONO_HANDLE_RAW (this_obj) + field->offset;
guint8 *that_field = (guint8 *)MONO_HANDLE_RAW (that) + field->offset;
#define UNALIGNED_COMPARE(type) \
do { \
type left, right; \
memcpy (&left, this_field, sizeof (type)); \
memcpy (&right, that_field, sizeof (type)); \
if (left != right) \
return FALSE; \
} while (0)
/* FIXME: Add more types */
switch (field->type->type) {
case MONO_TYPE_U1:
case MONO_TYPE_I1:
case MONO_TYPE_BOOLEAN:
if (*this_field != *that_field)
return FALSE;
break;
case MONO_TYPE_U2:
case MONO_TYPE_I2:
case MONO_TYPE_CHAR:
#ifdef NO_UNALIGNED_ACCESS
if (G_UNLIKELY ((intptr_t) this_field & 1 || (intptr_t) that_field & 1))
UNALIGNED_COMPARE (gint16);
else
#endif
if (*(gint16 *) this_field != *(gint16 *) that_field)
return FALSE;
break;
case MONO_TYPE_U4:
case MONO_TYPE_I4:
#ifdef NO_UNALIGNED_ACCESS
if (G_UNLIKELY ((intptr_t) this_field & 3 || (intptr_t) that_field & 3))
UNALIGNED_COMPARE (gint32);
else
#endif
if (*(gint32 *) this_field != *(gint32 *) that_field)
return FALSE;
break;
case MONO_TYPE_U8:
case MONO_TYPE_I8:
#ifdef NO_UNALIGNED_ACCESS
if (G_UNLIKELY ((intptr_t) this_field & 7 || (intptr_t) that_field & 7))
UNALIGNED_COMPARE (gint64);
else
#endif
if (*(gint64 *) this_field != *(gint64 *) that_field)
return FALSE;
break;
case MONO_TYPE_R4: {
float d1, d2;
#ifdef NO_UNALIGNED_ACCESS
memcpy (&d1, this_field, sizeof (float));
memcpy (&d2, that_field, sizeof (float));
#else
d1 = *(float *) this_field;
d2 = *(float *) that_field;
#endif
if (d1 != d2 && !(mono_isnan (d1) && mono_isnan (d2)))
return FALSE;
break;
}
case MONO_TYPE_R8: {
double d1, d2;
#ifdef NO_UNALIGNED_ACCESS
memcpy (&d1, this_field, sizeof (double));
memcpy (&d2, that_field, sizeof (double));
#else
d1 = *(double *) this_field;
d2 = *(double *) that_field;
#endif
if (d1 != d2 && !(mono_isnan (d1) && mono_isnan (d2)))
return FALSE;
break;
}
case MONO_TYPE_PTR:
#ifdef NO_UNALIGNED_ACCESS
if (G_UNLIKELY ((intptr_t) this_field & 7 || (intptr_t) that_field & 7))
UNALIGNED_COMPARE (gpointer);
else
#endif
if (*(gpointer *) this_field != *(gpointer *) that_field)
return FALSE;
break;
case MONO_TYPE_STRING: {
MonoString *s1, *s2;
guint32 s1len, s2len;
s1 = *(MonoString**)this_field;
s2 = *(MonoString**)that_field;
if (s1 == s2)
break;
if ((s1 == NULL) || (s2 == NULL))
return FALSE;
s1len = mono_string_length_internal (s1);
s2len = mono_string_length_internal (s2);
if (s1len != s2len)
return FALSE;
if (memcmp (mono_string_chars_internal (s1), mono_string_chars_internal (s2), s1len * sizeof (gunichar2)) != 0)
return FALSE;
break;
}
default:
if (!unhandled)
unhandled = g_newa (MonoClassField*, mono_class_num_fields (klass));
unhandled [count ++] = field;
}
#undef UNALIGNED_COMPARE
if (m_class_is_enumtype (klass))
/* enums only have one non-static field */
break;
}
if (unhandled) {
MonoArrayHandle fields_arr = mono_array_new_handle (mono_defaults.object_class, count * 2, error);
return_val_if_nok (error, 0);
MONO_HANDLE_ASSIGN (fields, fields_arr);
MonoObjectHandle h = MONO_HANDLE_NEW (MonoObject, NULL);
for (int i = 0; i < count; ++i) {
MonoObject *o = mono_field_get_value_object_checked (unhandled [i], MONO_HANDLE_RAW (this_obj), error);
return_val_if_nok (error, FALSE);
MONO_HANDLE_ASSIGN_RAW (h, o);
mono_array_handle_setref (fields_arr, i * 2, h);
o = mono_field_get_value_object_checked (unhandled [i], MONO_HANDLE_RAW (that), error);
return_val_if_nok (error, FALSE);
MONO_HANDLE_ASSIGN_RAW (h, o);
mono_array_handle_setref (fields_arr, (i * 2) + 1, h);
}
return FALSE;
} else {
return TRUE;
}
}
static gboolean
get_executing (MonoMethod *m, gint32 no, gint32 ilo, gboolean managed, gpointer data)
{
MonoMethod **dest = (MonoMethod **)data;
/* skip unmanaged frames */
if (!managed)
return FALSE;
if (!(*dest)) {
if (!strcmp (m_class_get_name_space (m->klass), "System.Reflection"))
return FALSE;
*dest = m;
return TRUE;
}
return FALSE;
}
static gboolean
in_corlib_name_space (MonoClass *klass, const char *name_space)
{
return m_class_get_image (klass) == mono_defaults.corlib &&
!strcmp (m_class_get_name_space (klass), name_space);
}
static gboolean
get_caller_no_reflection (MonoMethod *m, gint32 no, gint32 ilo, gboolean managed, gpointer data)
{
MonoMethod **dest = (MonoMethod **)data;
/* skip unmanaged frames */
if (!managed)
return FALSE;
if (m->wrapper_type != MONO_WRAPPER_NONE)
return FALSE;
if (m == *dest) {
*dest = NULL;
return FALSE;
}
if (in_corlib_name_space (m->klass, "System.Reflection"))
return FALSE;
if (!(*dest)) {
*dest = m;
return TRUE;
}
return FALSE;
}
static gboolean
get_caller_no_system_or_reflection (MonoMethod *m, gint32 no, gint32 ilo, gboolean managed, gpointer data)
{
MonoMethod **dest = (MonoMethod **)data;
/* skip unmanaged frames */
if (!managed)
return FALSE;
if (m->wrapper_type != MONO_WRAPPER_NONE)
return FALSE;
if (m == *dest) {
*dest = NULL;
return FALSE;
}
if (in_corlib_name_space (m->klass, "System.Reflection") || in_corlib_name_space (m->klass, "System"))
return FALSE;
if (!(*dest)) {
*dest = m;
return TRUE;
}
return FALSE;
}
/**
* mono_runtime_get_caller_no_system_or_reflection:
*
* Walk the stack of the current thread and find the first managed method that
* is not in the mscorlib System or System.Reflection namespace. This skips
* unmanaged callers and wrapper methods.
*
* \returns a pointer to the \c MonoMethod or NULL if we walked past all the
* callers.
*/
MonoMethod*
mono_runtime_get_caller_no_system_or_reflection (void)
{
MonoMethod *dest = NULL;
mono_stack_walk_no_il (get_caller_no_system_or_reflection, &dest);
return dest;
}
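/*
 * Example (editor's note): for a stack of UserLib.Main ->
 * System.Activator.CreateInstance -> reflection wrappers, the walk skips
 * unmanaged frames, wrapper methods and corlib frames in the System and
 * System.Reflection namespaces, so the MonoMethod returned here is
 * UserLib.Main.
 */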
/*
* mono_runtime_get_caller_from_stack_mark:
*
* Walk the stack and return the assembly of the method referenced
* by the stack mark STACK_MARK.
*/
MonoAssembly*
mono_runtime_get_caller_from_stack_mark (MonoStackCrawlMark *stack_mark)
{
// FIXME: Use the stack mark
MonoMethod *dest = NULL;
mono_stack_walk_no_il (get_caller_no_system_or_reflection, &dest);
if (dest)
return m_class_get_image (dest->klass)->assembly;
else
return NULL;
}
static MonoReflectionType*
type_from_parsed_name (MonoTypeNameParse *info, MonoStackCrawlMark *stack_mark, MonoBoolean ignoreCase, MonoAssembly **caller_assembly, MonoError *error)
{
MonoMethod *m;
MonoType *type = NULL;
MonoAssembly *assembly = NULL;
gboolean type_resolve = FALSE;
MonoImage *rootimage = NULL;
MonoAssemblyLoadContext *alc = mono_alc_get_ambient ();
/*
* We must compute the calling assembly as type loading must happen under a metadata context.
* For example: the main assembly is a.exe and Type.GetType is called from dir/b.dll. Without
* the metadata context (currently the basedir) set to dir/b.dll we won't be able to load dir/c.dll.
*/
m = mono_method_get_last_managed ();
if (m && m_class_get_image (m->klass) != mono_defaults.corlib) {
/* Happens with inlining */
assembly = m_class_get_image (m->klass)->assembly;
} else {
assembly = mono_runtime_get_caller_from_stack_mark (stack_mark);
}
if (assembly) {
type_resolve = TRUE;
rootimage = assembly->image;
} else {
// FIXME: once wasm can use stack marks, consider turning all this into an assert
g_warning (G_STRLOC);
}
*caller_assembly = assembly;
if (info->assembly.name) {
MonoAssemblyByNameRequest req;
mono_assembly_request_prepare_byname (&req, alc);
req.requesting_assembly = assembly;
req.basedir = assembly ? assembly->basedir : NULL;
assembly = mono_assembly_request_byname (&info->assembly, &req, NULL);
}
if (assembly) {
/* When loading from the current assembly, AppDomain.TypeResolve will not be called yet */
type = mono_reflection_get_type_checked (alc, rootimage, assembly->image, info, ignoreCase, TRUE, &type_resolve, error);
goto_if_nok (error, fail);
}
// XXXX - aleksey -
// Say we're looking for System.Generic.Dict<int, Local>
// we FAIL the get type above, because S.G.Dict isn't in assembly->image. So we drop down here.
// but then we FAIL AGAIN because now we pass null as the image and the rootimage and everything
// is messed up when we go to construct the Local as the type arg...
//
// By contrast, if we started with Mine<System.Generic.Dict<int, Local>> we'd go in with assembly->image
// as the root and then even the detour into generics would still not cause issues when we went to load Local.
if (!info->assembly.name && !type) {
/* try mscorlib */
type = mono_reflection_get_type_checked (alc, rootimage, NULL, info, ignoreCase, TRUE, &type_resolve, error);
goto_if_nok (error, fail);
}
if (assembly && !type && type_resolve) {
type_resolve = FALSE; /* This will invoke TypeResolve if not done in the first 'if' */
type = mono_reflection_get_type_checked (alc, rootimage, assembly->image, info, ignoreCase, TRUE, &type_resolve, error);
goto_if_nok (error, fail);
}
if (!type)
goto fail;
return mono_type_get_object_checked (type, error);
fail:
return NULL;
}
void
ves_icall_System_RuntimeTypeHandle_internal_from_name (char *name,
MonoStackCrawlMark *stack_mark,
MonoObjectHandleOnStack res,
MonoBoolean throwOnError,
MonoBoolean ignoreCase,
MonoError *error)
{
MonoTypeNameParse info;
gboolean free_info = FALSE;
MonoAssembly *caller_assembly;
free_info = TRUE;
if (!mono_reflection_parse_type_checked (name, &info, error))
goto leave;
/* mono_reflection_parse_type() mangles the string */
HANDLE_ON_STACK_SET (res, type_from_parsed_name (&info, (MonoStackCrawlMark*)stack_mark, ignoreCase, &caller_assembly, error));
goto_if_nok (error, leave);
if (!(*res)) {
if (throwOnError) {
char *tname = info.name_space ? g_strdup_printf ("%s.%s", info.name_space, info.name) : g_strdup (info.name);
char *aname;
if (info.assembly.name)
aname = mono_stringify_assembly_name (&info.assembly);
else if (caller_assembly)
aname = mono_stringify_assembly_name (mono_assembly_get_name_internal (caller_assembly));
else
aname = g_strdup ("");
mono_error_set_type_load_name (error, tname, aname, "");
}
goto leave;
}
leave:
if (free_info)
mono_reflection_free_type_info (&info);
if (!is_ok (error)) {
if (!throwOnError) {
mono_error_cleanup (error);
error_init (error);
}
}
}
MonoReflectionTypeHandle
ves_icall_System_Type_internal_from_handle (MonoType *handle, MonoError *error)
{
return mono_type_get_object_handle (handle, error);
}
MonoType*
ves_icall_Mono_RuntimeClassHandle_GetTypeFromClass (MonoClass *klass)
{
return m_class_get_byval_arg (klass);
}
void
ves_icall_Mono_RuntimeGPtrArrayHandle_GPtrArrayFree (GPtrArray *ptr_array)
{
g_ptr_array_free (ptr_array, TRUE);
}
void
ves_icall_Mono_SafeStringMarshal_GFree (void *c_str)
{
g_free (c_str);
}
char*
ves_icall_Mono_SafeStringMarshal_StringToUtf8 (MonoString *volatile* s)
{
ERROR_DECL (error);
char *result = mono_string_to_utf8_checked_internal (*s, error);
mono_error_set_pending_exception (error);
return result;
}
/* System.TypeCode */
typedef enum {
TYPECODE_EMPTY,
TYPECODE_OBJECT,
TYPECODE_DBNULL,
TYPECODE_BOOLEAN,
TYPECODE_CHAR,
TYPECODE_SBYTE,
TYPECODE_BYTE,
TYPECODE_INT16,
TYPECODE_UINT16,
TYPECODE_INT32,
TYPECODE_UINT32,
TYPECODE_INT64,
TYPECODE_UINT64,
TYPECODE_SINGLE,
TYPECODE_DOUBLE,
TYPECODE_DECIMAL,
TYPECODE_DATETIME,
TYPECODE_STRING = 18
} TypeCode;
MonoBoolean
ves_icall_RuntimeTypeHandle_type_is_assignable_from (MonoQCallTypeHandle type_handle, MonoQCallTypeHandle c_handle, MonoError *error)
{
MonoType *type = type_handle.type;
MonoClass *klass = mono_class_from_mono_type_internal (type);
MonoType *ctype = c_handle.type;
MonoClass *klassc = mono_class_from_mono_type_internal (ctype);
if (m_type_is_byref (type) ^ m_type_is_byref (ctype))
return FALSE;
if (m_type_is_byref (type)) {
return mono_byref_type_is_assignable_from (type, ctype, FALSE);
}
gboolean result;
mono_class_is_assignable_from_checked (klass, klassc, &result, error);
return result;
}
MonoBoolean
ves_icall_RuntimeTypeHandle_is_subclass_of (MonoQCallTypeHandle child_handle, MonoQCallTypeHandle base_handle, MonoError *error)
{
MonoType *childType = child_handle.type;
MonoType *baseType = base_handle.type;
mono_bool result = FALSE;
MonoClass *childClass;
MonoClass *baseClass;
childClass = mono_class_from_mono_type_internal (childType);
baseClass = mono_class_from_mono_type_internal (baseType);
if (G_UNLIKELY (m_type_is_byref (childType)))
return !m_type_is_byref (baseType) && baseClass == mono_defaults.object_class;
if (G_UNLIKELY (m_type_is_byref (baseType)))
return FALSE;
if (childType == baseType)
/* .NET IsSubclassOf is not reflexive */
return FALSE;
if (G_UNLIKELY (is_generic_parameter (childType))) {
/* slow path: walk the type hierarchy looking at base types
* until we see baseType. If the current type is not a gparam,
* break out of the loop and use is_subclass_of.
*/
MonoClass *c = mono_generic_param_get_base_type (childClass);
result = FALSE;
while (c != NULL) {
if (c == baseClass)
return TRUE;
if (!is_generic_parameter (m_class_get_byval_arg (c)))
return mono_class_is_subclass_of_internal (c, baseClass, FALSE);
else
c = mono_generic_param_get_base_type (c);
}
return result;
} else {
return mono_class_is_subclass_of_internal (childClass, baseClass, FALSE);
}
}
guint32
ves_icall_RuntimeTypeHandle_IsInstanceOfType (MonoQCallTypeHandle type_handle, MonoObjectHandle obj, MonoError *error)
{
MonoType *type = type_handle.type;
MonoClass *klass = mono_class_from_mono_type_internal (type);
mono_class_init_checked (klass, error);
return_val_if_nok (error, FALSE);
MonoObjectHandle inst = mono_object_handle_isinst (obj, klass, error);
return_val_if_nok (error, FALSE);
return !MONO_HANDLE_IS_NULL (inst);
}
guint32
ves_icall_RuntimeTypeHandle_GetAttributes (MonoQCallTypeHandle type_handle)
{
MonoType *type = type_handle.type;
if (m_type_is_byref (type) || type->type == MONO_TYPE_PTR || type->type == MONO_TYPE_FNPTR)
return TYPE_ATTRIBUTE_PUBLIC;
MonoClass *klass = mono_class_from_mono_type_internal (type);
return mono_class_get_flags (klass);
}
guint32
ves_icall_RuntimeTypeHandle_GetMetadataToken (MonoQCallTypeHandle type_handle, MonoError *error)
{
MonoType *type = type_handle.type;
MonoClass *mc = mono_class_from_mono_type_internal (type);
if (!mono_class_init_internal (mc)) {
mono_error_set_for_class_failure (error, mc);
return 0;
}
return m_class_get_type_token (mc);
}
MonoReflectionMarshalAsAttributeHandle
ves_icall_System_Reflection_FieldInfo_get_marshal_info (MonoReflectionFieldHandle field_h, MonoError *error)
{
MonoClassField *field = MONO_HANDLE_GETVAL (field_h, field);
MonoClass *klass = m_field_get_parent (field);
MonoGenericClass *gklass = mono_class_try_get_generic_class (klass);
if (mono_class_is_gtd (klass) ||
(gklass && gklass->context.class_inst->is_open))
return MONO_HANDLE_CAST (MonoReflectionMarshalAsAttribute, NULL_HANDLE);
MonoType *ftype = mono_field_get_type_internal (field);
if (ftype && !(ftype->attrs & FIELD_ATTRIBUTE_HAS_FIELD_MARSHAL))
return MONO_HANDLE_CAST (MonoReflectionMarshalAsAttribute, NULL_HANDLE);
MonoMarshalType *info = mono_marshal_load_type_info (klass);
for (int i = 0; i < info->num_fields; ++i) {
if (info->fields [i].field == field) {
if (!info->fields [i].mspec)
return MONO_HANDLE_CAST (MonoReflectionMarshalAsAttribute, NULL_HANDLE);
else {
return mono_reflection_marshal_as_attribute_from_marshal_spec (klass, info->fields [i].mspec, error);
}
}
}
return MONO_HANDLE_CAST (MonoReflectionMarshalAsAttribute, NULL_HANDLE);
}
MonoReflectionFieldHandle
ves_icall_System_Reflection_FieldInfo_internal_from_handle_type (MonoClassField *handle, MonoType *type, MonoError *error)
{
MonoClass *klass;
g_assert (handle);
if (!type) {
klass = m_field_get_parent (handle);
} else {
klass = mono_class_from_mono_type_internal (type);
gboolean found = klass == m_field_get_parent (handle) || mono_class_has_parent (klass, m_field_get_parent (handle));
if (!found)
/* The managed code will throw the exception */
return MONO_HANDLE_CAST (MonoReflectionField, NULL_HANDLE);
}
return mono_field_get_object_handle (klass, handle, error);
}
MonoReflectionEventHandle
ves_icall_System_Reflection_EventInfo_internal_from_handle_type (MonoEvent *handle, MonoType *type, MonoError *error)
{
MonoClass *klass;
g_assert (handle);
if (!type) {
klass = handle->parent;
} else {
klass = mono_class_from_mono_type_internal (type);
gboolean found = klass == handle->parent || mono_class_has_parent (klass, handle->parent);
if (!found)
/* Managed code will throw an exception */
return MONO_HANDLE_CAST (MonoReflectionEvent, NULL_HANDLE);
}
return mono_event_get_object_handle (klass, handle, error);
}
MonoReflectionPropertyHandle
ves_icall_System_Reflection_RuntimePropertyInfo_internal_from_handle_type (MonoProperty *handle, MonoType *type, MonoError *error)
{
MonoClass *klass;
g_assert (handle);
if (!type) {
klass = handle->parent;
} else {
klass = mono_class_from_mono_type_internal (type);
gboolean found = klass == handle->parent || mono_class_has_parent (klass, handle->parent);
if (!found)
/* Managed code will throw an exception */
return MONO_HANDLE_CAST (MonoReflectionProperty, NULL_HANDLE);
}
return mono_property_get_object_handle (klass, handle, error);
}
MonoArrayHandle
ves_icall_System_Reflection_FieldInfo_GetTypeModifiers (MonoReflectionFieldHandle field_h, MonoBoolean optional, MonoError *error)
{
MonoClassField *field = MONO_HANDLE_GETVAL (field_h, field);
MonoType *type = mono_field_get_type_checked (field, error);
return_val_if_nok (error, NULL_HANDLE_ARRAY);
return type_array_from_modifiers (type, optional, error);
}
int
ves_icall_get_method_attributes (MonoMethod *method)
{
return method->flags;
}
void
ves_icall_get_method_info (MonoMethod *method, MonoMethodInfo *info, MonoError *error)
{
MonoMethodSignature* sig = mono_method_signature_checked (method, error);
return_if_nok (error);
MonoReflectionTypeHandle rt = mono_type_get_object_handle (m_class_get_byval_arg (method->klass), error);
return_if_nok (error);
MONO_STRUCT_SETREF_INTERNAL (info, parent, MONO_HANDLE_RAW (rt));
MONO_HANDLE_ASSIGN (rt, mono_type_get_object_handle (sig->ret, error));
return_if_nok (error);
MONO_STRUCT_SETREF_INTERNAL (info, ret, MONO_HANDLE_RAW (rt));
info->attrs = method->flags;
info->implattrs = method->iflags;
guint32 callconv;
if (sig->call_convention == MONO_CALL_DEFAULT)
callconv = sig->sentinelpos >= 0 ? 2 : 1;
else {
if (sig->call_convention == MONO_CALL_VARARG || sig->sentinelpos >= 0)
callconv = 2;
else
callconv = 1;
}
callconv |= (sig->hasthis << 5) | (sig->explicit_this << 6);
info->callconv = callconv;
}
MonoArrayHandle
ves_icall_System_Reflection_MonoMethodInfo_get_parameter_info (MonoMethod *method, MonoReflectionMethodHandle member, MonoError *error)
{
MonoReflectionTypeHandle reftype = MONO_HANDLE_NEW (MonoReflectionType, NULL);
MONO_HANDLE_GET (reftype, member, reftype);
MonoClass *klass = NULL;
if (!MONO_HANDLE_IS_NULL (reftype))
klass = mono_class_from_mono_type_internal (MONO_HANDLE_GETVAL (reftype, type));
return mono_param_get_objects_internal (method, klass, error);
}
MonoReflectionMarshalAsAttributeHandle
ves_icall_System_MonoMethodInfo_get_retval_marshal (MonoMethod *method, MonoError *error)
{
MonoReflectionMarshalAsAttributeHandle res = MONO_HANDLE_NEW (MonoReflectionMarshalAsAttribute, NULL);
MonoMarshalSpec **mspecs = g_new (MonoMarshalSpec*, mono_method_signature_internal (method)->param_count + 1);
mono_method_get_marshal_info (method, mspecs);
if (mspecs [0]) {
MONO_HANDLE_ASSIGN (res, mono_reflection_marshal_as_attribute_from_marshal_spec (method->klass, mspecs [0], error));
goto_if_nok (error, leave);
}
leave:
for (int i = mono_method_signature_internal (method)->param_count; i >= 0; i--)
if (mspecs [i])
mono_metadata_free_marshal_spec (mspecs [i]);
g_free (mspecs);
return res;
}
gint32
ves_icall_RuntimeFieldInfo_GetFieldOffset (MonoReflectionFieldHandle field, MonoError *error)
{
MonoClassField *class_field = MONO_HANDLE_GETVAL (field, field);
mono_class_setup_fields (m_field_get_parent (class_field));
return class_field->offset - MONO_ABI_SIZEOF (MonoObject);
}
MonoReflectionTypeHandle
ves_icall_RuntimeFieldInfo_GetParentType (MonoReflectionFieldHandle field, MonoBoolean declaring, MonoError *error)
{
MonoClass *parent;
if (declaring) {
MonoClassField *f = MONO_HANDLE_GETVAL (field, field);
parent = m_field_get_parent (f);
} else {
parent = MONO_HANDLE_GETVAL (field, klass);
}
return mono_type_get_object_handle (m_class_get_byval_arg (parent), error);
}
MonoObjectHandle
ves_icall_RuntimeFieldInfo_GetValueInternal (MonoReflectionFieldHandle field_handle, MonoObjectHandle obj_handle, MonoError *error)
{
MonoReflectionField * const field = MONO_HANDLE_RAW (field_handle);
MonoClassField *cf = field->field;
MonoObject * const obj = MONO_HANDLE_RAW (obj_handle);
MonoObject *result;
result = mono_field_get_value_object_checked (cf, obj, error);
return MONO_HANDLE_NEW (MonoObject, result);
}
void
ves_icall_RuntimeFieldInfo_SetValueInternal (MonoReflectionFieldHandle field, MonoObjectHandle obj, MonoObjectHandle value, MonoError *error)
{
MonoClassField *cf = MONO_HANDLE_GETVAL (field, field);
MonoType *type = mono_field_get_type_checked (cf, error);
return_if_nok (error);
gboolean isref = FALSE;
MonoGCHandle value_gchandle = 0;
gchar *v = NULL;
if (!m_type_is_byref (type)) {
switch (type->type) {
case MONO_TYPE_U1:
case MONO_TYPE_I1:
case MONO_TYPE_BOOLEAN:
case MONO_TYPE_U2:
case MONO_TYPE_I2:
case MONO_TYPE_CHAR:
case MONO_TYPE_U:
case MONO_TYPE_I:
case MONO_TYPE_U4:
case MONO_TYPE_I4:
case MONO_TYPE_R4:
case MONO_TYPE_U8:
case MONO_TYPE_I8:
case MONO_TYPE_R8:
case MONO_TYPE_VALUETYPE:
case MONO_TYPE_PTR:
isref = FALSE;
if (!MONO_HANDLE_IS_NULL (value)) {
if (m_class_is_valuetype (mono_handle_class (value)))
v = (char*)mono_object_handle_pin_unbox (value, &value_gchandle);
else {
char* n = g_strdup_printf ("Object of type '%s' cannot be converted to type '%s'.", m_class_get_name (mono_handle_class (value)), m_class_get_name (mono_class_from_mono_type_internal (type)));
mono_error_set_argument (error, cf->name, n);
g_free (n);
return;
}
}
break;
case MONO_TYPE_STRING:
case MONO_TYPE_OBJECT:
case MONO_TYPE_CLASS:
case MONO_TYPE_ARRAY:
case MONO_TYPE_SZARRAY:
/* Do nothing */
isref = TRUE;
break;
case MONO_TYPE_GENERICINST: {
MonoGenericClass *gclass = type->data.generic_class;
g_assert (!gclass->context.class_inst->is_open);
if (mono_class_is_nullable (mono_class_from_mono_type_internal (type))) {
MonoClass *nklass = mono_class_from_mono_type_internal (type);
/*
* Convert the boxed vtype into a Nullable structure.
* This is complicated by the fact that Nullables have
* a variable structure.
*/
MonoObjectHandle nullable = mono_object_new_handle (nklass, error);
return_if_nok (error);
MonoGCHandle nullable_gchandle = 0;
guint8 *nval = (guint8*)mono_object_handle_pin_unbox (nullable, &nullable_gchandle);
mono_nullable_init_from_handle (nval, value, nklass);
isref = FALSE;
value_gchandle = nullable_gchandle;
v = (gchar*)nval;
}
else {
isref = !m_class_is_valuetype (gclass->container_class);
if (!isref && !MONO_HANDLE_IS_NULL (value)) {
v = (char*)mono_object_handle_pin_unbox (value, &value_gchandle);
}
}
break;
}
default:
g_error ("type 0x%x not handled in "
"ves_icall_FieldInfo_SetValueInternal", type->type);
return;
}
}
/* either value is a reference type, or it's a value type and we pinned
* it and v points to the payload. */
g_assert ((isref && v == NULL && value_gchandle == 0) ||
(!isref && v != NULL && value_gchandle != 0) ||
(!isref && v == NULL && value_gchandle == 0));
if (type->attrs & FIELD_ATTRIBUTE_STATIC) {
MonoVTable *vtable = mono_class_vtable_checked (m_field_get_parent (cf), error);
goto_if_nok (error, leave);
if (!vtable->initialized) {
if (!mono_runtime_class_init_full (vtable, error))
goto leave;
}
if (isref)
mono_field_static_set_value_internal (vtable, cf, MONO_HANDLE_RAW (value)); /* FIXME make mono_field_static_set_value work with handles for value */
else
mono_field_static_set_value_internal (vtable, cf, v);
} else {
if (isref)
MONO_HANDLE_SET_FIELD_REF (obj, cf, value);
else
mono_field_set_value_internal (MONO_HANDLE_RAW (obj), cf, v); /* FIXME: make mono_field_set_value take a handle for obj */
}
leave:
if (value_gchandle)
mono_gchandle_free_internal (value_gchandle);
}
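/*
 * Box the value referred to by a MonoTypedRef: reference types are returned
 * as-is, pointers are boxed as UIntPtr, and value types are boxed normally.
 */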
static MonoObjectHandle
typed_reference_to_object (MonoTypedRef *tref, MonoError *error)
{
HANDLE_FUNCTION_ENTER ();
MonoObjectHandle result;
if (MONO_TYPE_IS_REFERENCE (tref->type)) {
MonoObject** objp = (MonoObject **)tref->value;
result = MONO_HANDLE_NEW (MonoObject, *objp);
} else if (mono_type_is_pointer (tref->type)) {
/* Boxed as UIntPtr */
result = mono_value_box_handle (mono_get_uintptr_class (), tref->value, error);
} else {
result = mono_value_box_handle (tref->klass, tref->value, error);
}
HANDLE_FUNCTION_RETURN_REF (MonoObject, result);
}
MonoObjectHandle
ves_icall_System_RuntimeFieldHandle_GetValueDirect (MonoReflectionFieldHandle field_h, MonoReflectionTypeHandle field_type_h, MonoTypedRef *obj, MonoReflectionTypeHandle context_type_h, MonoError *error)
{
MonoClassField *field = MONO_HANDLE_GETVAL (field_h, field);
MonoClass *klass = mono_class_from_mono_type_internal (field->type);
if (!MONO_TYPE_ISSTRUCT (m_class_get_byval_arg (m_field_get_parent (field)))) {
mono_error_set_not_implemented (error, "");
return MONO_HANDLE_NEW (MonoObject, NULL);
} else if (MONO_TYPE_IS_REFERENCE (field->type)) {
return MONO_HANDLE_NEW (MonoObject, *(MonoObject**)((guint8*)obj->value + field->offset - sizeof (MonoObject)));
} else {
return mono_value_box_handle (klass, (guint8*)obj->value + field->offset - sizeof (MonoObject), error);
}
}
void
ves_icall_System_RuntimeFieldHandle_SetValueDirect (MonoReflectionFieldHandle field_h, MonoReflectionTypeHandle field_type_h, MonoTypedRef *obj, MonoObjectHandle value_h, MonoReflectionTypeHandle context_type_h, MonoError *error)
{
MonoClassField *f = MONO_HANDLE_GETVAL (field_h, field);
g_assert (obj);
mono_class_setup_fields (m_field_get_parent (f));
if (!MONO_TYPE_ISSTRUCT (m_class_get_byval_arg (m_field_get_parent (f)))) {
MonoObjectHandle objHandle = typed_reference_to_object (obj, error);
return_if_nok (error);
ves_icall_RuntimeFieldInfo_SetValueInternal (field_h, objHandle, value_h, error);
} else if (MONO_TYPE_IS_REFERENCE (f->type)) {
mono_copy_value (f->type, (guint8*)obj->value + m_field_get_offset (f) - sizeof (MonoObject), MONO_HANDLE_RAW (value_h), FALSE);
} else {
MonoGCHandle gchandle = NULL;
g_assert (MONO_HANDLE_RAW (value_h));
mono_copy_value (f->type, (guint8*)obj->value + m_field_get_offset (f) - sizeof (MonoObject), mono_object_handle_pin_unbox (value_h, &gchandle), FALSE);
mono_gchandle_free_internal (gchandle);
}
}
MonoObjectHandle
ves_icall_RuntimeFieldInfo_GetRawConstantValue (MonoReflectionFieldHandle rfield, MonoError* error)
{
MonoObjectHandle o_handle = NULL_HANDLE_INIT;
MonoObject *o = NULL;
MonoClassField *field = MONO_HANDLE_GETVAL (rfield, field);
MonoClass *klass;
gchar *v;
MonoTypeEnum def_type;
const char *def_value;
MonoType *t;
MonoStringHandle string_handle = MONO_HANDLE_NEW (MonoString, NULL); // FIXME? Not always needed.
mono_class_init_internal (m_field_get_parent (field));
t = mono_field_get_type_checked (field, error);
goto_if_nok (error, return_null);
if (!(t->attrs & FIELD_ATTRIBUTE_HAS_DEFAULT))
goto invalid_operation;
if (image_is_dynamic (m_class_get_image (m_field_get_parent (field)))) {
MonoClass *klass = m_field_get_parent (field);
int fidx = field - m_class_get_fields (klass);
MonoFieldDefaultValue *def_values = mono_class_get_field_def_values (klass);
g_assert (def_values);
def_type = def_values [fidx].def_type;
def_value = def_values [fidx].data;
if (def_type == MONO_TYPE_END)
goto invalid_operation;
} else {
def_value = mono_class_get_field_default_value (field, &def_type);
/* FIXME, maybe we should try to raise TLE if field->parent is broken */
if (!def_value)
goto invalid_operation;
}
/*FIXME unify this with reflection.c:mono_get_object_from_blob*/
switch (def_type) {
case MONO_TYPE_U1:
case MONO_TYPE_I1:
case MONO_TYPE_BOOLEAN:
case MONO_TYPE_U2:
case MONO_TYPE_I2:
case MONO_TYPE_CHAR:
case MONO_TYPE_U:
case MONO_TYPE_I:
case MONO_TYPE_U4:
case MONO_TYPE_I4:
case MONO_TYPE_R4:
case MONO_TYPE_U8:
case MONO_TYPE_I8:
case MONO_TYPE_R8: {
MonoType *t;
/* boxed value type */
t = g_new0 (MonoType, 1);
t->type = def_type;
klass = mono_class_from_mono_type_internal (t);
g_free (t);
o = mono_object_new_checked (klass, error);
goto_if_nok (error, return_null);
o_handle = MONO_HANDLE_NEW (MonoObject, o);
v = ((gchar *) o) + sizeof (MonoObject);
(void)mono_get_constant_value_from_blob (def_type, def_value, v, string_handle, error);
goto_if_nok (error, return_null);
break;
}
case MONO_TYPE_STRING:
case MONO_TYPE_CLASS:
(void)mono_get_constant_value_from_blob (def_type, def_value, &o, string_handle, error);
goto_if_nok (error, return_null);
o_handle = MONO_HANDLE_NEW (MonoObject, o);
break;
default:
g_assert_not_reached ();
}
goto exit;
invalid_operation:
mono_error_set_invalid_operation (error, NULL);
// fall through
return_null:
o_handle = NULL_HANDLE;
// fall through
exit:
return o_handle;
}
MonoReflectionTypeHandle
ves_icall_RuntimeFieldInfo_ResolveType (MonoReflectionFieldHandle ref_field, MonoError *error)
{
MonoClassField *field = MONO_HANDLE_GETVAL (ref_field, field);
MonoType *type = mono_field_get_type_checked (field, error);
return_val_if_nok (error, MONO_HANDLE_CAST (MonoReflectionType, NULL_HANDLE));
return mono_type_get_object_handle (type, error);
}
void
ves_icall_RuntimePropertyInfo_get_property_info (MonoReflectionPropertyHandle property, MonoPropertyInfo *info, PInfo req_info, MonoError *error)
{
const MonoProperty *pproperty = MONO_HANDLE_GETVAL (property, property);
if ((req_info & PInfo_ReflectedType) != 0) {
MonoClass *klass = MONO_HANDLE_GETVAL (property, klass);
MonoReflectionTypeHandle rt = mono_type_get_object_handle (m_class_get_byval_arg (klass), error);
return_if_nok (error);
MONO_STRUCT_SETREF_INTERNAL (info, parent, MONO_HANDLE_RAW (rt));
}
if ((req_info & PInfo_DeclaringType) != 0) {
MonoReflectionTypeHandle rt = mono_type_get_object_handle (m_class_get_byval_arg (pproperty->parent), error);
return_if_nok (error);
MONO_STRUCT_SETREF_INTERNAL (info, declaring_type, MONO_HANDLE_RAW (rt));
}
if ((req_info & PInfo_Name) != 0) {
MonoStringHandle name = mono_string_new_handle (pproperty->name, error);
return_if_nok (error);
MONO_STRUCT_SETREF_INTERNAL (info, name, MONO_HANDLE_RAW (name));
}
if ((req_info & PInfo_Attributes) != 0)
info->attrs = pproperty->attrs;
if ((req_info & PInfo_GetMethod) != 0) {
MonoClass *property_klass = MONO_HANDLE_GETVAL (property, klass);
MonoReflectionMethodHandle rm;
if (pproperty->get &&
(((pproperty->get->flags & METHOD_ATTRIBUTE_MEMBER_ACCESS_MASK) != METHOD_ATTRIBUTE_PRIVATE) ||
pproperty->get->klass == property_klass)) {
rm = mono_method_get_object_handle (pproperty->get, property_klass, error);
return_if_nok (error);
} else {
rm = MONO_HANDLE_NEW (MonoReflectionMethod, NULL);
}
MONO_STRUCT_SETREF_INTERNAL (info, get, MONO_HANDLE_RAW (rm));
}
if ((req_info & PInfo_SetMethod) != 0) {
MonoClass *property_klass = MONO_HANDLE_GETVAL (property, klass);
MonoReflectionMethodHandle rm;
if (pproperty->set &&
(((pproperty->set->flags & METHOD_ATTRIBUTE_MEMBER_ACCESS_MASK) != METHOD_ATTRIBUTE_PRIVATE) ||
pproperty->set->klass == property_klass)) {
rm = mono_method_get_object_handle (pproperty->set, property_klass, error);
return_if_nok (error);
} else {
rm = MONO_HANDLE_NEW (MonoReflectionMethod, NULL);
}
MONO_STRUCT_SETREF_INTERNAL (info, set, MONO_HANDLE_RAW (rm));
}
/*
* There may be other methods defined for properties, though, it seems they are not exposed
* in the reflection API
*/
}
static gboolean
add_event_other_methods_to_array (MonoMethod *m, MonoArrayHandle dest, int i, MonoError *error)
{
HANDLE_FUNCTION_ENTER ();
MonoReflectionMethodHandle rm = mono_method_get_object_handle (m, NULL, error);
goto_if_nok (error, leave);
MONO_HANDLE_ARRAY_SETREF (dest, i, rm);
leave:
HANDLE_FUNCTION_RETURN_VAL (is_ok (error));
}
void
ves_icall_RuntimeEventInfo_get_event_info (MonoReflectionMonoEventHandle ref_event, MonoEventInfo *info, MonoError *error)
{
MonoClass *klass = MONO_HANDLE_GETVAL (ref_event, klass);
MonoEvent *event = MONO_HANDLE_GETVAL (ref_event, event);
MonoReflectionTypeHandle rt = mono_type_get_object_handle (m_class_get_byval_arg (klass), error);
return_if_nok (error);
MONO_STRUCT_SETREF_INTERNAL (info, reflected_type, MONO_HANDLE_RAW (rt));
rt = mono_type_get_object_handle (m_class_get_byval_arg (event->parent), error);
return_if_nok (error);
MONO_STRUCT_SETREF_INTERNAL (info, declaring_type, MONO_HANDLE_RAW (rt));
MonoStringHandle ev_name = mono_string_new_handle (event->name, error);
return_if_nok (error);
MONO_STRUCT_SETREF_INTERNAL (info, name, MONO_HANDLE_RAW (ev_name));
info->attrs = event->attrs;
MonoReflectionMethodHandle rm;
if (event->add) {
rm = mono_method_get_object_handle (event->add, klass, error);
return_if_nok (error);
} else {
rm = MONO_HANDLE_NEW (MonoReflectionMethod, NULL);
}
MONO_STRUCT_SETREF_INTERNAL (info, add_method, MONO_HANDLE_RAW (rm));
if (event->remove) {
rm = mono_method_get_object_handle (event->remove, klass, error);
return_if_nok (error);
} else {
rm = MONO_HANDLE_NEW (MonoReflectionMethod, NULL);
}
MONO_STRUCT_SETREF_INTERNAL (info, remove_method, MONO_HANDLE_RAW (rm));
if (event->raise) {
rm = mono_method_get_object_handle (event->raise, klass, error);
return_if_nok (error);
} else {
rm = MONO_HANDLE_NEW (MonoReflectionMethod, NULL);
}
MONO_STRUCT_SETREF_INTERNAL (info, raise_method, MONO_HANDLE_RAW (rm));
#ifndef MONO_SMALL_CONFIG
if (event->other) {
int i, n = 0;
while (event->other [n])
n++;
MonoArrayHandle info_arr = mono_array_new_handle (mono_defaults.method_info_class, n, error);
return_if_nok (error);
MONO_STRUCT_SETREF_INTERNAL (info, other_methods, MONO_HANDLE_RAW (info_arr));
for (i = 0; i < n; i++)
if (!add_event_other_methods_to_array (event->other [i], info_arr, i, error))
return;
}
#endif
}
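/*
 * Recursively add every interface implemented by klass (and by those
 * interfaces in turn) to the ifaces hash table, which is used as a set.
 */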
static void
collect_interfaces (MonoClass *klass, GHashTable *ifaces, MonoError *error)
{
int i;
MonoClass *ic;
mono_class_setup_interfaces (klass, error);
return_if_nok (error);
int klass_interface_count = m_class_get_interface_count (klass);
MonoClass **klass_interfaces = m_class_get_interfaces (klass);
for (i = 0; i < klass_interface_count; i++) {
ic = klass_interfaces [i];
g_hash_table_insert (ifaces, ic, ic);
collect_interfaces (ic, ifaces, error);
return_if_nok (error);
}
}
typedef struct {
MonoArrayHandle iface_array;
MonoGenericContext *context;
MonoError *error;
int next_idx;
} FillIfaceArrayData;
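/*
 * g_hash_table_foreach callback: for each collected interface, inflate open
 * generic instances with data->context and store the reflection type object
 * at the next free index of data->iface_array.
 */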
static void
fill_iface_array (gpointer key, gpointer value, gpointer user_data)
{
HANDLE_FUNCTION_ENTER ();
FillIfaceArrayData *data = (FillIfaceArrayData *)user_data;
MonoClass *ic = (MonoClass *)key;
MonoType *ret = m_class_get_byval_arg (ic), *inflated = NULL;
MonoError *error = data->error;
goto_if_nok (error, leave);
if (data->context && mono_class_is_ginst (ic) && mono_class_get_generic_class (ic)->context.class_inst->is_open) {
inflated = ret = mono_class_inflate_generic_type_checked (ret, data->context, error);
goto_if_nok (error, leave);
}
MonoReflectionTypeHandle rt;
rt = mono_type_get_object_handle (ret, error);
goto_if_nok (error, leave);
MONO_HANDLE_ARRAY_SETREF (data->iface_array, data->next_idx, rt);
data->next_idx++;
if (inflated)
mono_metadata_free_type (inflated);
leave:
HANDLE_FUNCTION_RETURN ();
}
static guint
get_interfaces_hash (gconstpointer v1)
{
MonoClass *k = (MonoClass*)v1;
return m_class_get_type_token (k);
}
void
ves_icall_RuntimeType_GetInterfaces (MonoQCallTypeHandle type_handle, MonoObjectHandleOnStack res, MonoError *error)
{
MonoType *type = type_handle.type;
MonoClass *klass = mono_class_from_mono_type_internal (type);
GHashTable *iface_hash = g_hash_table_new (get_interfaces_hash, NULL);
MonoGenericContext *context = NULL;
if (mono_class_is_ginst (klass) && mono_class_get_generic_class (klass)->context.class_inst->is_open) {
context = mono_class_get_context (klass);
klass = mono_class_get_generic_class (klass)->container_class;
}
for (MonoClass *parent = klass; parent; parent = m_class_get_parent (parent)) {
mono_class_setup_interfaces (parent, error);
goto_if_nok (error, fail);
collect_interfaces (parent, iface_hash, error);
goto_if_nok (error, fail);
}
MonoDomain *domain = mono_get_root_domain ();
int len;
len = g_hash_table_size (iface_hash);
if (len == 0) {
g_hash_table_destroy (iface_hash);
if (!domain->empty_types) {
domain->empty_types = mono_array_new_cached (mono_defaults.runtimetype_class, 0, error);
goto_if_nok (error, fail);
}
HANDLE_ON_STACK_SET (res, domain->empty_types);
return;
}
FillIfaceArrayData data;
data.iface_array = MONO_HANDLE_NEW (MonoArray, mono_array_new_cached (mono_defaults.runtimetype_class, len, error));
goto_if_nok (error, fail);
data.context = context;
data.error = error;
data.next_idx = 0;
g_hash_table_foreach (iface_hash, fill_iface_array, &data);
goto_if_nok (error, fail);
g_hash_table_destroy (iface_hash);
HANDLE_ON_STACK_SET (res, MONO_HANDLE_RAW (data.iface_array));
return;
fail:
g_hash_table_destroy (iface_hash);
}
static gboolean
method_is_reabstracted (MonoMethod *method)
{
/* only on interfaces */
/* method is marked "final abstract" */
/* FIXME: we need some other way to detect reabstracted methods. "final" is an incidental detail of the spec. */
return m_method_is_final (method) && m_method_is_abstract (method);
}
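/*
 * For illustration only (hypothetical C# shape; the runtime only inspects the
 * IL flags checked above): a default interface method is reabstracted by a
 * derived interface like this, and the compiler typically emits the
 * reabstracted slot as "virtual abstract final":
 *
 *   interface IBase { void M () { } }                          // DIM with a body
 *   interface IDerived : IBase { abstract void IBase.M (); }   // reabstracted
 */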
static gboolean
method_is_dim (MonoMethod *method)
{
/* only valid on interface methods*/
/* method is marked "virtual" but not "virtual abstract" */
return m_method_is_virtual (method) && !m_method_is_abstract (method);
}
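/*
 * Fill slot i of the interface map: store the reflection object for the
 * interface method in `methods`, and the implementing method found at vtable
 * slot (ioffset + i) of klass in `targets` - or NULL_HANDLE when the method
 * is a conflicting or reabstracted DIM with no usable implementation.
 */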
static gboolean
set_interface_map_data_method_object (MonoMethod *method, MonoClass *iclass, int ioffset, MonoClass *klass, MonoArrayHandle targets, MonoArrayHandle methods, int i, MonoError *error)
{
HANDLE_FUNCTION_ENTER ();
MonoReflectionMethodHandle member = mono_method_get_object_handle (method, iclass, error);
goto_if_nok (error, leave);
MONO_HANDLE_ARRAY_SETREF (methods, i, member);
MonoMethod* foundMethod = m_class_get_vtable (klass) [i + ioffset];
if (mono_class_has_dim_conflicts (klass) && mono_class_is_interface (foundMethod->klass)) {
GSList* conflicts = mono_class_get_dim_conflicts (klass);
GSList* l;
MonoMethod* decl = method;
if (decl->is_inflated)
decl = ((MonoMethodInflated*)decl)->declaring;
gboolean in_conflict = FALSE;
for (l = conflicts; l; l = l->next) {
if (decl == l->data) {
in_conflict = TRUE;
break;
}
}
if (in_conflict) {
MONO_HANDLE_ARRAY_SETREF (targets, i, NULL_HANDLE);
goto leave;
}
}
/*
 * If the interface method is reabstracted, and either the found implementation method is abstract, or the found
 * implementation method is from another DIM (meaning neither klass nor any of its ancestor classes implemented
 * the method), then report the target method as null.
 */
if (method_is_reabstracted (method) &&
(m_method_is_abstract (foundMethod) ||
(mono_class_is_interface (foundMethod->klass) && method_is_dim (foundMethod))))
MONO_HANDLE_ARRAY_SETREF (targets, i, NULL_HANDLE);
else if (mono_class_is_interface (foundMethod->klass) && method_is_reabstracted (foundMethod) && !m_class_is_abstract (klass)) {
/* if the method we found is a reabstracted DIM method, but the class isn't abstract, return NULL */
/*
* (C# doesn't seem to allow constructing such types, it requires the whole class to be abstract - in
* which case we are supposed to return the reabstracted interface method. But in IL we can make a
* non-abstract class with reabstracted interface methods - which is supposed to fail with an
* EntryPointNotFoundException at invoke time, but does not prevent the class from loading.)
*/
MONO_HANDLE_ARRAY_SETREF (targets, i, NULL_HANDLE);
} else {
MONO_HANDLE_ASSIGN (member, mono_method_get_object_handle (foundMethod, mono_class_is_interface (foundMethod->klass) ? foundMethod->klass : klass, error));
goto_if_nok (error, leave);
MONO_HANDLE_ARRAY_SETREF (targets, i, member);
}
leave:
HANDLE_FUNCTION_RETURN_VAL (is_ok (error));
}
void
ves_icall_RuntimeType_GetInterfaceMapData (MonoQCallTypeHandle type_handle, MonoQCallTypeHandle iface_handle, MonoArrayHandleOut targets, MonoArrayHandleOut methods, MonoError *error)
{
MonoType *type = type_handle.type;
MonoClass *klass = mono_class_from_mono_type_internal (type);
MonoType *iface = iface_handle.type;
MonoClass *iclass = mono_class_from_mono_type_internal (iface);
mono_class_init_checked (klass, error);
return_if_nok (error);
mono_class_init_checked (iclass, error);
return_if_nok (error);
mono_class_setup_vtable (klass);
gboolean variance_used;
int ioffset = mono_class_interface_offset_with_variance (klass, iclass, &variance_used);
if (ioffset == -1)
return;
MonoMethod* method;
int i = 0;
gpointer iter = NULL;
while ((method = mono_class_get_methods (iclass, &iter))) {
if (method->flags & METHOD_ATTRIBUTE_VIRTUAL)
i++;
}
MonoArrayHandle targets_arr = mono_array_new_handle (mono_defaults.method_info_class, i, error);
return_if_nok (error);
MONO_HANDLE_ASSIGN (targets, targets_arr);
MonoArrayHandle methods_arr = mono_array_new_handle (mono_defaults.method_info_class, i, error);
return_if_nok (error);
MONO_HANDLE_ASSIGN (methods, methods_arr);
i = 0;
iter = NULL;
while ((method = mono_class_get_methods (iclass, &iter))) {
if (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL))
continue;
if (!set_interface_map_data_method_object (method, iclass, ioffset, klass, targets, methods, i, error))
return;
i ++;
}
}
void
ves_icall_RuntimeType_GetPacking (MonoQCallTypeHandle type_handle, guint32 *packing, guint32 *size, MonoError *error)
{
MonoType *type = type_handle.type;
MonoClass *klass = mono_class_from_mono_type_internal (type);
mono_class_init_checked (klass, error);
return_if_nok (error);
if (image_is_dynamic (m_class_get_image (klass))) {
MonoGCHandle ref_info_handle = mono_class_get_ref_info_handle (klass);
g_assert (ref_info_handle);
MonoReflectionTypeBuilder *tb = (MonoReflectionTypeBuilder*)mono_gchandle_get_target_internal (ref_info_handle);
g_assert (tb);
*packing = tb->packing_size;
*size = tb->class_size;
} else {
mono_metadata_packing_from_typedef (m_class_get_image (klass), m_class_get_type_token (klass), packing, size);
}
}
void
ves_icall_RuntimeTypeHandle_GetElementType (MonoQCallTypeHandle type_handle, MonoObjectHandleOnStack res, MonoError *error)
{
MonoType *type = type_handle.type;
if (!m_type_is_byref (type) && type->type == MONO_TYPE_SZARRAY) {
HANDLE_ON_STACK_SET (res, mono_type_get_object_checked (m_class_get_byval_arg (type->data.klass), error));
return;
}
MonoClass *klass = mono_class_from_mono_type_internal (type);
mono_class_init_checked (klass, error);
return_if_nok (error);
// GetElementType should only return a type for:
// Array Pointer PassedByRef
if (m_type_is_byref (type))
HANDLE_ON_STACK_SET (res, mono_type_get_object_checked (m_class_get_byval_arg (klass), error));
else if (m_class_get_element_class (klass) && MONO_CLASS_IS_ARRAY (klass))
HANDLE_ON_STACK_SET (res, mono_type_get_object_checked (m_class_get_byval_arg (m_class_get_element_class (klass)), error));
else if (m_class_get_element_class (klass) && type->type == MONO_TYPE_PTR)
HANDLE_ON_STACK_SET (res, mono_type_get_object_checked (m_class_get_byval_arg (m_class_get_element_class (klass)), error));
else
HANDLE_ON_STACK_SET (res, NULL);
}
void
ves_icall_RuntimeTypeHandle_GetBaseType (MonoQCallTypeHandle type_handle, MonoObjectHandleOnStack res, MonoError *error)
{
MonoType *type = type_handle.type;
if (m_type_is_byref (type))
return;
MonoClass *klass = mono_class_from_mono_type_internal (type);
if (!m_class_get_parent (klass))
return;
HANDLE_ON_STACK_SET (res, mono_type_get_object_checked (m_class_get_byval_arg (m_class_get_parent (klass)), error));
}
guint32
ves_icall_RuntimeTypeHandle_GetCorElementType (MonoQCallTypeHandle type_handle)
{
MonoType *type = type_handle.type;
if (m_type_is_byref (type))
return MONO_TYPE_BYREF;
else
return (guint32)type->type;
}
MonoBoolean
ves_icall_RuntimeTypeHandle_HasReferences (MonoQCallTypeHandle type_handle, MonoError *error)
{
MonoType *type = type_handle.type;
MonoClass *klass;
klass = mono_class_from_mono_type_internal (type);
mono_class_init_internal (klass);
return m_class_has_references (klass);
}
MonoBoolean
ves_icall_RuntimeTypeHandle_IsByRefLike (MonoQCallTypeHandle type_handle, MonoError *error)
{
MonoType *type = type_handle.type;
/* .NET Core says byref types are not IsByRefLike */
if (m_type_is_byref (type))
return FALSE;
MonoClass *klass = mono_class_from_mono_type_internal (type);
return m_class_is_byreflike (klass);
}
MonoBoolean
ves_icall_RuntimeTypeHandle_IsComObject (MonoQCallTypeHandle type_handle, MonoError *error)
{
MonoType *type = type_handle.type;
MonoClass *klass = mono_class_from_mono_type_internal (type);
mono_class_init_checked (klass, error);
return_val_if_nok (error, FALSE);
return mono_class_is_com_object (klass);
}
guint32
ves_icall_reflection_get_token (MonoObjectHandle obj, MonoError *error)
{
return mono_reflection_get_token_checked (obj, error);
}
void
ves_icall_RuntimeTypeHandle_GetModule (MonoQCallTypeHandle type_handle, MonoObjectHandleOnStack res, MonoError *error)
{
MonoType *t = type_handle.type;
MonoClass *klass = mono_class_from_mono_type_internal (t);
MonoReflectionModuleHandle module;
module = mono_module_get_object_handle (m_class_get_image (klass), error);
return_if_nok (error);
HANDLE_ON_STACK_SET (res, MONO_HANDLE_RAW (module));
}
void
ves_icall_RuntimeTypeHandle_GetAssembly (MonoQCallTypeHandle type_handle, MonoObjectHandleOnStack res, MonoError *error)
{
MonoType *t = type_handle.type;
MonoClass *klass = mono_class_from_mono_type_internal (t);
MonoReflectionAssemblyHandle assembly;
assembly = mono_assembly_get_object_handle (m_class_get_image (klass)->assembly, error);
return_if_nok (error);
HANDLE_ON_STACK_SET (res, MONO_HANDLE_RAW (assembly));
}
void
ves_icall_RuntimeType_GetDeclaringType (MonoQCallTypeHandle type_handle, MonoObjectHandleOnStack res, MonoError *error)
{
MonoType *type = type_handle.type;
MonoClass *klass;
if (m_type_is_byref (type))
return;
if (type->type == MONO_TYPE_VAR) {
MonoGenericContainer *param = mono_type_get_generic_param_owner (type);
klass = param ? param->owner.klass : NULL;
} else if (type->type == MONO_TYPE_MVAR) {
MonoGenericContainer *param = mono_type_get_generic_param_owner (type);
klass = param ? param->owner.method->klass : NULL;
} else {
klass = m_class_get_nested_in (mono_class_from_mono_type_internal (type));
}
if (!klass)
return;
HANDLE_ON_STACK_SET (res, mono_type_get_object_checked (m_class_get_byval_arg (klass), error));
}
void
ves_icall_RuntimeType_GetName (MonoQCallTypeHandle type_handle, MonoObjectHandleOnStack res, MonoError *error)
{
MonoType *type = type_handle.type;
MonoClass *klass = mono_class_from_mono_type_internal (type);
// FIXME: this should be escaped in some scenarios with mono_identifier_escape_type_name_chars
// Determining exactly when to do so is fairly difficult, so for now we don't bother to avoid regressions
const char *klass_name = m_class_get_name (klass);
if (m_type_is_byref (type)) {
char *n = g_strdup_printf ("%s&", klass_name);
HANDLE_ON_STACK_SET (res, mono_string_new_checked (n, error));
g_free (n);
} else {
HANDLE_ON_STACK_SET (res, mono_string_new_checked (klass_name, error));
}
}
void
ves_icall_RuntimeType_GetNamespace (MonoQCallTypeHandle type_handle, MonoObjectHandleOnStack res, MonoError *error)
{
MonoType *type = type_handle.type;
MonoClass *klass = mono_class_from_mono_type_internal (type);
MonoClass *klass_nested_in;
while ((klass_nested_in = m_class_get_nested_in (klass)))
klass = klass_nested_in;
if (m_class_get_name_space (klass) [0] == '\0')
return;
char *escaped = mono_identifier_escape_type_name_chars (m_class_get_name_space (klass));
HANDLE_ON_STACK_SET (res, mono_string_new_checked (escaped, error));
g_free (escaped);
}
gint32
ves_icall_RuntimeTypeHandle_GetArrayRank (MonoQCallTypeHandle type_handle, MonoError *error)
{
MonoType *type = type_handle.type;
if (type->type != MONO_TYPE_ARRAY && type->type != MONO_TYPE_SZARRAY) {
mono_error_set_argument (error, "type", "Type must be an array type");
return 0;
}
MonoClass *klass = mono_class_from_mono_type_internal (type);
return m_class_get_rank (klass);
}
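/* Allocate a System.Type[] or a RuntimeType[] depending on runtimeTypeArray. */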
static MonoArrayHandle
create_type_array (MonoBoolean runtimeTypeArray, int count, MonoError *error)
{
return mono_array_new_handle (runtimeTypeArray ? mono_defaults.runtimetype_class : mono_defaults.systemtype_class, count, error);
}
static gboolean
set_type_object_in_array (MonoType *type, MonoArrayHandle dest, int i, MonoError *error)
{
HANDLE_FUNCTION_ENTER ();
MonoReflectionTypeHandle rt = mono_type_get_object_handle (type, error);
goto_if_nok (error, leave);
MONO_HANDLE_ARRAY_SETREF (dest, i, rt);
leave:
HANDLE_FUNCTION_RETURN_VAL (is_ok (error));
}
void
ves_icall_RuntimeType_GetGenericArgumentsInternal (MonoQCallTypeHandle type_handle, MonoObjectHandleOnStack res_handle, MonoBoolean runtimeTypeArray, MonoError *error)
{
MonoType *type = type_handle.type;
MonoClass *klass = mono_class_from_mono_type_internal (type);
MonoArrayHandle res = MONO_HANDLE_NEW (MonoArray, NULL);
if (mono_class_is_gtd (klass)) {
MonoGenericContainer *container = mono_class_get_generic_container (klass);
MONO_HANDLE_ASSIGN (res, create_type_array (runtimeTypeArray, container->type_argc, error));
return_if_nok (error);
for (int i = 0; i < container->type_argc; ++i) {
MonoClass *pklass = mono_class_create_generic_parameter (mono_generic_container_get_param (container, i));
if (!set_type_object_in_array (m_class_get_byval_arg (pklass), res, i, error))
return;
}
} else if (mono_class_is_ginst (klass)) {
MonoGenericInst *inst = mono_class_get_generic_class (klass)->context.class_inst;
MONO_HANDLE_ASSIGN (res, create_type_array (runtimeTypeArray, inst->type_argc, error));
return_if_nok (error);
for (int i = 0; i < inst->type_argc; ++i) {
if (!set_type_object_in_array (inst->type_argv [i], res, i, error))
return;
}
}
HANDLE_ON_STACK_SET (res_handle, MONO_HANDLE_RAW (res));
}
MonoBoolean
ves_icall_RuntimeTypeHandle_IsGenericTypeDefinition (MonoQCallTypeHandle type_handle)
{
MonoType *type = type_handle.type;
if (m_type_is_byref (type))
return FALSE;
MonoClass *klass = mono_class_from_mono_type_internal (type);
return mono_class_is_gtd (klass);
}
void
ves_icall_RuntimeTypeHandle_GetGenericTypeDefinition_impl (MonoQCallTypeHandle type_handle, MonoObjectHandleOnStack res, MonoError *error)
{
MonoType *type = type_handle.type;
if (m_type_is_byref (type))
return;
MonoClass *klass;
klass = mono_class_from_mono_type_internal (type);
if (mono_class_is_gtd (klass)) {
HANDLE_ON_STACK_SET (res, NULL);
return;
}
if (mono_class_is_ginst (klass)) {
MonoClass *generic_class = mono_class_get_generic_class (klass)->container_class;
MonoGCHandle ref_info_handle = mono_class_get_ref_info_handle (generic_class);
if (m_class_was_typebuilder (generic_class) && ref_info_handle) {
MonoObjectHandle tb = mono_gchandle_get_target_handle (ref_info_handle);
g_assert (!MONO_HANDLE_IS_NULL (tb));
HANDLE_ON_STACK_SET (res, MONO_HANDLE_RAW (tb));
} else {
HANDLE_ON_STACK_SET (res, mono_type_get_object_checked (m_class_get_byval_arg (generic_class), error));
}
}
}
void
ves_icall_RuntimeType_MakeGenericType (MonoReflectionTypeHandle reftype, MonoArrayHandle type_array, MonoObjectHandleOnStack res, MonoError *error)
{
g_assert (IS_MONOTYPE_HANDLE (reftype));
MonoType *type = MONO_HANDLE_GETVAL (reftype, type);
mono_class_init_checked (mono_class_from_mono_type_internal (type), error);
return_if_nok (error);
int count = mono_array_handle_length (type_array);
MonoType **types = g_new0 (MonoType *, count);
MonoReflectionTypeHandle t = MONO_HANDLE_NEW (MonoReflectionType, NULL);
for (int i = 0; i < count; i++) {
MONO_HANDLE_ARRAY_GETREF (t, type_array, i);
types [i] = MONO_HANDLE_GETVAL (t, type);
}
MonoType *geninst = mono_reflection_bind_generic_parameters (reftype, count, types, error);
g_free (types);
if (!geninst)
return;
MonoClass *klass = mono_class_from_mono_type_internal (geninst);
/*we might inflate to the GTD*/
if (mono_class_is_ginst (klass) && !mono_verifier_class_is_valid_generic_instantiation (klass)) {
mono_error_set_argument (error, "typeArguments", "Invalid generic arguments");
return;
}
HANDLE_ON_STACK_SET (res, mono_type_get_object_checked (geninst, error));
}
MonoBoolean
ves_icall_RuntimeTypeHandle_HasInstantiation (MonoQCallTypeHandle type_handle)
{
MonoClass *klass;
MonoType *type = type_handle.type;
if (m_type_is_byref (type))
return FALSE;
klass = mono_class_from_mono_type_internal (type);
return mono_class_is_ginst (klass) || mono_class_is_gtd (klass);
}
gint32
ves_icall_RuntimeType_GetGenericParameterPosition (MonoQCallTypeHandle type_handle)
{
MonoType *type = type_handle.type;
if (is_generic_parameter (type))
return mono_type_get_generic_param_num (type);
return -1;
}
MonoGenericParamInfo *
ves_icall_RuntimeTypeHandle_GetGenericParameterInfo (MonoQCallTypeHandle type_handle, MonoError *error)
{
MonoType *type = type_handle.type;
return mono_generic_param_info (type->data.generic_param);
}
MonoReflectionMethodHandle
ves_icall_RuntimeType_GetCorrespondingInflatedMethod (MonoQCallTypeHandle type_handle,
MonoReflectionMethodHandle generic,
MonoError *error)
{
MonoType *type = type_handle.type;
MonoClass *klass = mono_class_from_mono_type_internal (type);
mono_class_init_checked (klass, error);
return_val_if_nok (error, MONO_HANDLE_CAST (MonoReflectionMethod, NULL_HANDLE));
MonoMethod *generic_method = MONO_HANDLE_GETVAL (generic, method);
MonoReflectionMethodHandle ret = MONO_HANDLE_CAST (MonoReflectionMethod, NULL_HANDLE);
MonoMethod *method;
gpointer iter = NULL;
while ((method = mono_class_get_methods (klass, &iter))) {
if (method->token == generic_method->token) {
ret = mono_method_get_object_handle (method, klass, error);
return_val_if_nok (error, MONO_HANDLE_CAST (MonoReflectionMethod, NULL_HANDLE));
}
}
return ret;
}
void
ves_icall_RuntimeType_GetDeclaringMethod (MonoQCallTypeHandle type_handle, MonoObjectHandleOnStack res, MonoError *error)
{
MonoType *type = type_handle.type;
if (m_type_is_byref (type) || (type->type != MONO_TYPE_MVAR && type->type != MONO_TYPE_VAR)) {
mono_error_set_invalid_operation (error, "DeclaringMethod can only be used on generic arguments");
return;
}
if (type->type == MONO_TYPE_VAR)
return;
MonoMethod *method;
method = mono_type_get_generic_param_owner (type)->owner.method;
g_assert (method);
HANDLE_ON_STACK_SET (res, mono_method_get_object_checked (method, method->klass, error));
}
void
ves_icall_RuntimeMethodInfo_GetPInvoke (MonoReflectionMethodHandle ref_method, int* flags, MonoStringHandleOut entry_point, MonoStringHandleOut dll_name, MonoError *error)
{
MonoMethod *method = MONO_HANDLE_GETVAL (ref_method, method);
MonoImage *image = m_class_get_image (method->klass);
MonoMethodPInvoke *piinfo = (MonoMethodPInvoke *)method;
MonoTableInfo *tables = image->tables;
MonoTableInfo *im = &tables [MONO_TABLE_IMPLMAP];
MonoTableInfo *mr = &tables [MONO_TABLE_MODULEREF];
guint32 im_cols [MONO_IMPLMAP_SIZE];
guint32 scope_token;
const char *import = NULL;
const char *scope = NULL;
if (image_is_dynamic (image)) {
MonoReflectionMethodAux *method_aux =
(MonoReflectionMethodAux *)g_hash_table_lookup (((MonoDynamicImage*)image)->method_aux_hash, method);
if (method_aux) {
import = method_aux->dllentry;
scope = method_aux->dll;
}
if (!import || !scope) {
mono_error_set_argument (error, "method", "System.Reflection.Emit method with invalid pinvoke information");
return;
}
}
else {
if (piinfo->implmap_idx) {
mono_metadata_decode_row (im, piinfo->implmap_idx - 1, im_cols, MONO_IMPLMAP_SIZE);
piinfo->piflags = im_cols [MONO_IMPLMAP_FLAGS];
import = mono_metadata_string_heap (image, im_cols [MONO_IMPLMAP_NAME]);
scope_token = mono_metadata_decode_row_col (mr, im_cols [MONO_IMPLMAP_SCOPE] - 1, MONO_MODULEREF_NAME);
scope = mono_metadata_string_heap (image, scope_token);
}
}
*flags = piinfo->piflags;
MONO_HANDLE_ASSIGN (entry_point, mono_string_new_handle (import, error));
return_if_nok (error);
MONO_HANDLE_ASSIGN (dll_name, mono_string_new_handle (scope, error));
}
MonoReflectionMethodHandle
ves_icall_RuntimeMethodInfo_GetGenericMethodDefinition (MonoReflectionMethodHandle ref_method, MonoError *error)
{
MonoMethod *method = MONO_HANDLE_GETVAL (ref_method, method);
if (method->is_generic)
return ref_method;
if (!method->is_inflated)
return MONO_HANDLE_CAST (MonoReflectionMethod, NULL_HANDLE);
MonoMethodInflated *imethod = (MonoMethodInflated *) method;
MonoMethod *result = imethod->declaring;
/* Not a generic method. */
if (!result->is_generic)
return MONO_HANDLE_CAST (MonoReflectionMethod, NULL_HANDLE);
if (image_is_dynamic (m_class_get_image (method->klass))) {
MonoDynamicImage *image = (MonoDynamicImage*)m_class_get_image (method->klass);
/*
* FIXME: Why is this stuff needed at all? Why can't the code below work for
* the dynamic case as well?
*/
mono_image_lock ((MonoImage*)image);
MonoReflectionMethodHandle res = MONO_HANDLE_NEW (MonoReflectionMethod, (MonoReflectionMethod*)mono_g_hash_table_lookup (image->generic_def_objects, imethod));
mono_image_unlock ((MonoImage*)image);
if (!MONO_HANDLE_IS_NULL (res))
return res;
}
if (imethod->context.class_inst) {
MonoClass *klass = ((MonoMethod *) imethod)->klass;
/*Generic methods gets the context of the GTD.*/
if (mono_class_get_context (klass)) {
result = mono_class_inflate_generic_method_full_checked (result, klass, mono_class_get_context (klass), error);
return_val_if_nok (error, MONO_HANDLE_CAST (MonoReflectionMethod, NULL_HANDLE));
}
}
return mono_method_get_object_handle (result, NULL, error);
}
static GENERATE_TRY_GET_CLASS_WITH_CACHE (stream, "System.IO", "Stream")
static int io_stream_begin_read_slot = -1;
static int io_stream_begin_write_slot = -1;
static int io_stream_end_read_slot = -1;
static int io_stream_end_write_slot = -1;
static gboolean io_stream_slots_set = FALSE;
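/*
 * Cache the vtable slots of Stream.BeginRead/EndRead/BeginWrite/EndWrite once,
 * so the HasOverriddenBeginEnd{Read,Write} icalls below can compare vtable
 * entries instead of walking methods. A slot stays -1 when the trimmer removed
 * the corresponding method from Stream.
 */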
static void
init_io_stream_slots (void)
{
MonoClass* klass = mono_class_try_get_stream_class ();
mono_class_setup_vtable (klass);
MonoMethod **klass_methods = m_class_get_methods (klass);
if (!klass_methods) {
mono_class_setup_methods (klass);
klass_methods = m_class_get_methods (klass);
}
int method_count = mono_class_get_method_count (klass);
int methods_found = 0;
for (int i = 0; i < method_count; i++) {
// find slots for Begin(End)Read and Begin(End)Write
MonoMethod* m = klass_methods [i];
if (m->slot == -1)
continue;
if (!strcmp (m->name, "BeginRead")) {
methods_found++;
io_stream_begin_read_slot = m->slot;
} else if (!strcmp (m->name, "BeginWrite")) {
methods_found++;
io_stream_begin_write_slot = m->slot;
} else if (!strcmp (m->name, "EndRead")) {
methods_found++;
io_stream_end_read_slot = m->slot;
} else if (!strcmp (m->name, "EndWrite")) {
methods_found++;
io_stream_end_write_slot = m->slot;
}
}
g_assert (methods_found <= 4); // some of them can be linked out
io_stream_slots_set = TRUE;
}
MonoBoolean
ves_icall_System_IO_Stream_HasOverriddenBeginEndRead (MonoObjectHandle stream, MonoError *error)
{
MonoClass* curr_klass = MONO_HANDLE_GET_CLASS (stream);
MonoClass* base_klass = mono_class_try_get_stream_class ();
if (!io_stream_slots_set)
init_io_stream_slots ();
// a slot can still be -1, which means the trimmer removed the method from the base class (Stream);
// in this case we can safely assume the methods are not overridden
// otherwise - check the vtable
MonoMethod **curr_klass_vtable = m_class_get_vtable (curr_klass);
gboolean begin_read_is_overridden = io_stream_begin_read_slot != -1 && curr_klass_vtable [io_stream_begin_read_slot]->klass != base_klass;
gboolean end_read_is_overridden = io_stream_end_read_slot != -1 && curr_klass_vtable [io_stream_end_read_slot]->klass != base_klass;
// return TRUE if BeginRead or EndRead was overridden
return begin_read_is_overridden || end_read_is_overridden;
}
MonoBoolean
ves_icall_System_IO_Stream_HasOverriddenBeginEndWrite (MonoObjectHandle stream, MonoError *error)
{
MonoClass* curr_klass = MONO_HANDLE_GETVAL (stream, vtable)->klass;
MonoClass* base_klass = mono_class_try_get_stream_class ();
if (!io_stream_slots_set)
init_io_stream_slots ();
// a slot can still be -1, which means the trimmer removed the method from the base class (Stream);
// in this case we can safely assume the methods are not overridden
// otherwise - check the vtable
MonoMethod **curr_klass_vtable = m_class_get_vtable (curr_klass);
gboolean begin_write_is_overridden = io_stream_begin_write_slot != -1 && curr_klass_vtable [io_stream_begin_write_slot]->klass != base_klass;
gboolean end_write_is_overridden = io_stream_end_write_slot != -1 && curr_klass_vtable [io_stream_end_write_slot]->klass != base_klass;
// return TRUE if BeginWrite or EndWrite was overridden
return begin_write_is_overridden || end_write_is_overridden;
}
MonoBoolean
ves_icall_RuntimeMethodInfo_get_IsGenericMethod (MonoReflectionMethodHandle ref_method, MonoError *error)
{
MonoMethod *method = MONO_HANDLE_GETVAL (ref_method, method);
return mono_method_signature_internal (method)->generic_param_count != 0;
}
MonoBoolean
ves_icall_RuntimeMethodInfo_get_IsGenericMethodDefinition (MonoReflectionMethodHandle ref_method, MonoError *error)
{
MonoMethod *method = MONO_HANDLE_GETVAL (ref_method, method);
return method->is_generic;
}
static gboolean
set_array_generic_argument_handle_inflated (MonoGenericInst *inst, int i, MonoArrayHandle arr, MonoError *error)
{
HANDLE_FUNCTION_ENTER ();
MonoReflectionTypeHandle rt = mono_type_get_object_handle (inst->type_argv [i], error);
goto_if_nok (error, leave);
MONO_HANDLE_ARRAY_SETREF (arr, i, rt);
leave:
HANDLE_FUNCTION_RETURN_VAL (is_ok (error));
}
static gboolean
set_array_generic_argument_handle_gparam (MonoGenericContainer *container, int i, MonoArrayHandle arr, MonoError *error)
{
HANDLE_FUNCTION_ENTER ();
MonoGenericParam *param = mono_generic_container_get_param (container, i);
MonoClass *pklass = mono_class_create_generic_parameter (param);
MonoReflectionTypeHandle rt = mono_type_get_object_handle (m_class_get_byval_arg (pklass), error);
goto_if_nok (error, leave);
MONO_HANDLE_ARRAY_SETREF (arr, i, rt);
leave:
HANDLE_FUNCTION_RETURN_VAL (is_ok (error));
}
MonoArrayHandle
ves_icall_RuntimeMethodInfo_GetGenericArguments (MonoReflectionMethodHandle ref_method, MonoError *error)
{
MonoMethod *method = MONO_HANDLE_GETVAL (ref_method, method);
if (method->is_inflated) {
MonoGenericInst *inst = mono_method_get_context (method)->method_inst;
if (inst) {
int count = inst->type_argc;
MonoArrayHandle res = mono_array_new_handle (mono_defaults.systemtype_class, count, error);
return_val_if_nok (error, NULL_HANDLE_ARRAY);
for (int i = 0; i < count; i++) {
if (!set_array_generic_argument_handle_inflated (inst, i, res, error))
break;
}
return_val_if_nok (error, NULL_HANDLE_ARRAY);
return res;
}
}
int count = mono_method_signature_internal (method)->generic_param_count;
MonoArrayHandle res = mono_array_new_handle (mono_defaults.systemtype_class, count, error);
return_val_if_nok (error, NULL_HANDLE_ARRAY);
MonoGenericContainer *container = mono_method_get_generic_container (method);
for (int i = 0; i < count; i++) {
if (!set_array_generic_argument_handle_gparam (container, i, res, error))
break;
}
return_val_if_nok (error, NULL_HANDLE_ARRAY);
return res;
}
MonoObjectHandle
ves_icall_InternalInvoke (MonoReflectionMethodHandle method_handle, MonoObjectHandle this_arg_handle,
MonoSpanOfObjects *params_span, MonoExceptionHandleOut exception_out, MonoError *error)
{
MonoReflectionMethod* const method = MONO_HANDLE_RAW (method_handle);
MonoObject* const this_arg = MONO_HANDLE_RAW (this_arg_handle);
g_assert (params_span != NULL);
/*
* Invoke from reflection is supposed to always be a virtual call (the API
* is stupid), mono_runtime_invoke_*() calls the provided method, allowing
* greater flexibility.
*/
MonoMethod *m = method->method;
MonoMethodSignature* const sig = mono_method_signature_internal (m);
int pcount = 0;
void *obj = this_arg;
MonoObject *result = NULL;
MonoArray *arr = NULL;
MonoException *exception = NULL;
*MONO_HANDLE_REF (exception_out) = NULL;
if (!(m->flags & METHOD_ATTRIBUTE_STATIC)) {
if (!mono_class_vtable_checked (m->klass, error)) {
mono_error_cleanup (error); /* FIXME does this make sense? */
error_init_reuse (error);
exception = mono_class_get_exception_for_failure (m->klass);
goto return_null;
}
if (this_arg) {
m = mono_object_get_virtual_method_internal (this_arg, m);
/* must pass the pointer to the value for valuetype methods */
if (m_class_is_valuetype (m->klass)) {
obj = mono_object_unbox_internal (this_arg);
// FIXMEcoop? Does obj need to be put into a handle?
}
} else if (strcmp (m->name, ".ctor") && !m->wrapper_type) {
exception = mono_exception_from_name_msg (mono_defaults.corlib, "System.Reflection", "TargetException", "Non-static method requires a target.");
goto return_null;
}
}
/* Array constructor */
if (m_class_get_rank (m->klass) && !strcmp (m->name, ".ctor")) {
int i;
pcount = mono_span_length (params_span);
uintptr_t * const lengths = g_newa (uintptr_t, pcount);
/* Note: the synthesized array .ctors have int32 as argument type */
for (i = 0; i < pcount; ++i)
lengths [i] = *(int32_t*) ((char*)mono_span_get (params_span, MonoObject*, i) + sizeof (MonoObject));
if (m_class_get_rank (m->klass) == 1 && sig->param_count == 2 && m_class_get_rank (m_class_get_element_class (m->klass))) {
/* This is a ctor for jagged arrays. MS creates an array of arrays. */
arr = mono_array_new_full_checked (m->klass, lengths, NULL, error);
goto_if_nok (error, return_null);
MonoArrayHandle subarray_handle = MONO_HANDLE_NEW (MonoArray, NULL);
for (i = 0; i < mono_array_length_internal (arr); ++i) {
MonoArray *subarray = mono_array_new_full_checked (m_class_get_element_class (m->klass), &lengths [1], NULL, error);
goto_if_nok (error, return_null);
MONO_HANDLE_ASSIGN_RAW (subarray_handle, subarray); // FIXME? Overkill?
mono_array_setref_fast (arr, i, subarray);
}
goto exit;
}
if (m_class_get_rank (m->klass) == pcount) {
/* Only lengths provided. */
arr = mono_array_new_full_checked (m->klass, lengths, NULL, error);
goto_if_nok (error, return_null);
goto exit;
} else {
g_assert (pcount == (m_class_get_rank (m->klass) * 2));
/* The arguments are lower-bound-length pairs */
intptr_t * const lower_bounds = (intptr_t *)g_alloca (sizeof (intptr_t) * pcount);
for (i = 0; i < pcount / 2; ++i) {
lower_bounds [i] = *(int32_t*) ((char*)mono_span_get (params_span, MonoObject*, (i * 2)) + sizeof (MonoObject));
lengths [i] = *(int32_t*) ((char*)mono_span_get (params_span, MonoObject*, (i * 2) + 1) + sizeof (MonoObject));
}
arr = mono_array_new_full_checked (m->klass, lengths, lower_bounds, error);
goto_if_nok (error, return_null);
goto exit;
}
}
result = mono_runtime_invoke_span_checked (m, obj, params_span, error);
goto exit;
return_null:
result = NULL;
arr = NULL;
exit:
if (exception) {
MONO_HANDLE_NEW (MonoException, exception); // FIXME? overkill?
mono_gc_wbarrier_generic_store_internal (MONO_HANDLE_REF (exception_out), (MonoObject*)exception);
}
g_assert (!result || !arr); // only one, or neither, should be set
return result ? MONO_HANDLE_NEW (MonoObject, result) : arr ? MONO_HANDLE_NEW (MonoObject, (MonoObject*)arr) : NULL_HANDLE;
}
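/*
 * Read an enum's underlying value from memory according to its element type,
 * widening to 64 bits (sign-extending the signed types). The read16/32/64
 * helpers take care of unaligned access.
 */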
static guint64
read_enum_value (const char *mem, int type)
{
switch (type) {
case MONO_TYPE_BOOLEAN:
case MONO_TYPE_U1:
return *(guint8*)mem;
case MONO_TYPE_I1:
return *(gint8*)mem;
case MONO_TYPE_CHAR:
case MONO_TYPE_U2:
return read16 (mem);
case MONO_TYPE_I2:
return (gint16) read16 (mem);
case MONO_TYPE_U4:
case MONO_TYPE_R4:
return read32 (mem);
case MONO_TYPE_I4:
return (gint32) read32 (mem);
case MONO_TYPE_U8:
case MONO_TYPE_I8:
case MONO_TYPE_R8:
return read64 (mem);
case MONO_TYPE_U:
case MONO_TYPE_I:
#if SIZEOF_REGISTER == 8
return read64 (mem);
#else
return read32 (mem);
#endif
default:
g_assert_not_reached ();
}
return 0;
}
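/*
 * Inverse of read_enum_value: store the 64-bit value into memory, truncated
 * to the storage size of the given element type.
 */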
static void
write_enum_value (void *mem, int type, guint64 value)
{
switch (type) {
case MONO_TYPE_U1:
case MONO_TYPE_I1:
case MONO_TYPE_BOOLEAN: {
guint8 *p = (guint8*)mem;
*p = value;
break;
}
case MONO_TYPE_U2:
case MONO_TYPE_I2:
case MONO_TYPE_CHAR: {
guint16 *p = (guint16 *)mem;
*p = value;
break;
}
case MONO_TYPE_U4:
case MONO_TYPE_I4:
case MONO_TYPE_R4: {
guint32 *p = (guint32 *)mem;
*p = value;
break;
}
case MONO_TYPE_U8:
case MONO_TYPE_I8:
case MONO_TYPE_R8: {
guint64 *p = (guint64 *)mem;
*p = value;
break;
}
case MONO_TYPE_U:
case MONO_TYPE_I: {
#if SIZEOF_REGISTER == 8
guint64 *p = (guint64 *)mem;
*p = value;
#else
guint32 *p = (guint32 *)mem;
*p = value;
break;
#endif
break;
}
default:
g_assert_not_reached ();
}
return;
}
void
ves_icall_System_Enum_InternalBoxEnum (MonoQCallTypeHandle enum_handle, MonoObjectHandleOnStack res, guint64 value, MonoError *error)
{
MonoClass *enumc;
MonoObjectHandle resultHandle;
MonoType *etype;
enumc = mono_class_from_mono_type_internal (enum_handle.type);
mono_class_init_checked (enumc, error);
return_if_nok (error);
etype = mono_class_enum_basetype_internal (enumc);
resultHandle = mono_object_new_handle (enumc, error);
return_if_nok (error);
write_enum_value (mono_handle_unbox_unsafe (resultHandle), etype->type, value);
HANDLE_ON_STACK_SET (res, MONO_HANDLE_RAW (resultHandle));
}
void
ves_icall_System_Enum_InternalGetUnderlyingType (MonoQCallTypeHandle type_handle, MonoObjectHandleOnStack res, MonoError *error)
{
MonoType *etype;
MonoClass *klass;
klass = mono_class_from_mono_type_internal (type_handle.type);
mono_class_init_checked (klass, error);
return_if_nok (error);
etype = mono_class_enum_basetype_internal (klass);
if (!etype) {
mono_error_set_argument (error, "enumType", "Type provided must be an Enum.");
return;
}
HANDLE_ON_STACK_SET (res, mono_type_get_object_checked (etype, error));
}
int
ves_icall_System_Enum_InternalGetCorElementType (MonoQCallTypeHandle type_handle)
{
MonoClass *klass = mono_class_from_mono_type_internal (type_handle.type);
return (int)m_class_get_byval_arg (m_class_get_element_class (klass))->type;
}
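/*
 * Per-field helper for GetEnumValuesAndNames: skip non-static, deleted and
 * the "value__" fields, append the field name and its constant to
 * names/values at index *j, and track via *sorted whether the constants
 * appear in ascending order.
 */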
static void
get_enum_field (MonoArrayHandle names, MonoArrayHandle values, int base_type, MonoClassField *field, guint* j, guint64 *previous_value, gboolean *sorted, MonoError *error)
{
HANDLE_FUNCTION_ENTER ();
guint64 field_value;
const char *p;
MonoTypeEnum def_type;
if (!(field->type->attrs & FIELD_ATTRIBUTE_STATIC))
goto leave;
if (strcmp ("value__", mono_field_get_name (field)) == 0)
goto leave;
if (mono_field_is_deleted (field))
goto leave;
MonoStringHandle name;
name = mono_string_new_handle (mono_field_get_name (field), error);
goto_if_nok (error, leave);
MONO_HANDLE_ARRAY_SETREF (names, *j, name);
p = mono_class_get_field_default_value (field, &def_type);
/* len = */ mono_metadata_decode_blob_size (p, &p);
field_value = read_enum_value (p, base_type);
MONO_HANDLE_ARRAY_SETVAL (values, guint64, *j, field_value);
if (*previous_value > field_value)
*sorted = FALSE;
*previous_value = field_value;
(*j)++;
leave:
HANDLE_FUNCTION_RETURN ();
}
MonoBoolean
ves_icall_System_Enum_GetEnumValuesAndNames (MonoQCallTypeHandle type_handle, MonoArrayHandleOut values, MonoArrayHandleOut names, MonoError *error)
{
MonoClass *enumc = mono_class_from_mono_type_internal (type_handle.type);
guint j = 0, nvalues;
gpointer iter;
MonoClassField *field;
int base_type;
guint64 previous_value = 0;
gboolean sorted = TRUE;
mono_class_init_checked (enumc, error);
return_val_if_nok (error, FALSE);
if (!m_class_is_enumtype (enumc)) {
mono_error_set_argument (error, NULL, "Type provided must be an Enum.");
return TRUE;
}
base_type = mono_class_enum_basetype_internal (enumc)->type;
nvalues = mono_class_num_fields (enumc) > 0 ? mono_class_num_fields (enumc) - 1 : 0;
MONO_HANDLE_ASSIGN (names, mono_array_new_handle (mono_defaults.string_class, nvalues, error));
return_val_if_nok (error, FALSE);
MONO_HANDLE_ASSIGN (values, mono_array_new_handle (mono_defaults.uint64_class, nvalues, error));
return_val_if_nok (error, FALSE);
iter = NULL;
while ((field = mono_class_get_fields_internal (enumc, &iter))) {
get_enum_field (names, values, base_type, field, &j, &previous_value, &sorted, error);
if (!is_ok (error))
break;
}
return_val_if_nok (error, FALSE);
return sorted || base_type == MONO_TYPE_R4 || base_type == MONO_TYPE_R8;
}
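/* These mirror the managed System.Reflection.BindingFlags values. */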
enum {
BFLAGS_IgnoreCase = 1,
BFLAGS_DeclaredOnly = 2,
BFLAGS_Instance = 4,
BFLAGS_Static = 8,
BFLAGS_Public = 0x10,
BFLAGS_NonPublic = 0x20,
BFLAGS_FlattenHierarchy = 0x40,
BFLAGS_InvokeMethod = 0x100,
BFLAGS_CreateInstance = 0x200,
BFLAGS_GetField = 0x400,
BFLAGS_SetField = 0x800,
BFLAGS_GetProperty = 0x1000,
BFLAGS_SetProperty = 0x2000,
BFLAGS_ExactBinding = 0x10000,
BFLAGS_SuppressChangeType = 0x20000,
BFLAGS_OptionalParamBinding = 0x40000
};
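/* These mirror the managed MemberListType values used by the member caches. */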
enum {
MLISTTYPE_All = 0,
MLISTTYPE_CaseSensitive = 1,
MLISTTYPE_CaseInsensitive = 2,
MLISTTYPE_HandleToInfo = 3
};
GPtrArray*
ves_icall_RuntimeType_GetFields_native (MonoQCallTypeHandle type_handle, char *utf8_name, guint32 bflags, guint32 mlisttype, MonoError *error)
{
MonoType *type = type_handle.type;
if (m_type_is_byref (type))
return g_ptr_array_new ();
int (*compare_func) (const char *s1, const char *s2) = NULL;
compare_func = ((bflags & BFLAGS_IgnoreCase) || (mlisttype == MLISTTYPE_CaseInsensitive)) ? mono_utf8_strcasecmp : strcmp;
MonoClass *startklass, *klass;
klass = startklass = mono_class_from_mono_type_internal (type);
GPtrArray *ptr_array = g_ptr_array_sized_new (16);
handle_parent:
if (mono_class_has_failure (klass)) {
mono_error_set_for_class_failure (error, klass);
goto fail;
}
MonoClassField *field;
gpointer iter;
iter = NULL;
while ((field = mono_class_get_fields_lazy (klass, &iter))) {
guint32 flags = mono_field_get_flags (field);
int match = 0;
if (mono_field_is_deleted_with_flags (field, flags))
continue;
if ((flags & FIELD_ATTRIBUTE_FIELD_ACCESS_MASK) == FIELD_ATTRIBUTE_PUBLIC) {
if (bflags & BFLAGS_Public)
match++;
} else if ((klass == startklass) || (flags & FIELD_ATTRIBUTE_FIELD_ACCESS_MASK) != FIELD_ATTRIBUTE_PRIVATE) {
if (bflags & BFLAGS_NonPublic) {
match++;
}
}
if (!match)
continue;
match = 0;
if (flags & FIELD_ATTRIBUTE_STATIC) {
if (bflags & BFLAGS_Static)
if ((bflags & BFLAGS_FlattenHierarchy) || (klass == startklass))
match++;
} else {
if (bflags & BFLAGS_Instance)
match++;
}
if (!match)
continue;
if (((mlisttype != MLISTTYPE_All) && (utf8_name != NULL)) && compare_func (mono_field_get_name (field), utf8_name))
continue;
g_ptr_array_add (ptr_array, field);
}
if (!(bflags & BFLAGS_DeclaredOnly) && (klass = m_class_get_parent (klass)))
goto handle_parent;
return ptr_array;
fail:
g_ptr_array_free (ptr_array, TRUE);
return NULL;
}
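/*
 * Returns TRUE if `method` should match a BFLAGS_NonPublic search: assembly
 * and family accessibility always match, private methods only when the
 * search started on the declaring class itself.
 */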
static gboolean
method_nonpublic (MonoMethod* method, gboolean start_klass)
{
switch (method->flags & METHOD_ATTRIBUTE_MEMBER_ACCESS_MASK) {
case METHOD_ATTRIBUTE_ASSEM:
return TRUE;
case METHOD_ATTRIBUTE_PRIVATE:
return start_klass;
case METHOD_ATTRIBUTE_PUBLIC:
return FALSE;
default:
return TRUE;
}
}
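/*
 * Collect the methods of klass (and of its parents, unless BFLAGS_DeclaredOnly
 * is set) that match the given name and binding flags. A bitmask indexed by
 * vtable slot records slots already emitted, so a parent method that is
 * overridden further down the hierarchy is skipped; methods marked NEW_SLOT
 * do not claim their slot.
 */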
GPtrArray*
mono_class_get_methods_by_name (MonoClass *klass, const char *name, guint32 bflags, guint32 mlisttype, gboolean allow_ctors, MonoError *error)
{
GPtrArray *array;
MonoClass *startklass;
MonoMethod *method;
gpointer iter;
int match, nslots;
/*FIXME, use MonoBitSet*/
guint32 method_slots_default [8];
guint32 *method_slots = NULL;
int (*compare_func) (const char *s1, const char *s2) = NULL;
array = g_ptr_array_new ();
startklass = klass;
compare_func = ((bflags & BFLAGS_IgnoreCase) || (mlisttype == MLISTTYPE_CaseInsensitive)) ? mono_utf8_strcasecmp : strcmp;
/* An optimization for calls made from Delegate::CreateDelegate () */
if (m_class_is_delegate (klass) && klass != mono_defaults.delegate_class && klass != mono_defaults.multicastdelegate_class && name && !strcmp (name, "Invoke") && (bflags == (BFLAGS_Public | BFLAGS_Static | BFLAGS_Instance))) {
method = mono_get_delegate_invoke_internal (klass);
g_assert (method);
g_ptr_array_add (array, method);
return array;
}
mono_class_setup_methods (klass);
mono_class_setup_vtable (klass);
if (mono_class_has_failure (klass))
goto loader_error;
if (is_generic_parameter (m_class_get_byval_arg (klass)))
nslots = mono_class_get_vtable_size (m_class_get_parent (klass));
else
nslots = MONO_CLASS_IS_INTERFACE_INTERNAL (klass) ? mono_class_num_methods (klass) : mono_class_get_vtable_size (klass);
if (nslots >= sizeof (method_slots_default) * 8) {
method_slots = g_new0 (guint32, nslots / 32 + 1);
} else {
method_slots = method_slots_default;
memset (method_slots, 0, sizeof (method_slots_default));
}
handle_parent:
mono_class_setup_methods (klass);
mono_class_setup_vtable (klass);
if (mono_class_has_failure (klass))
goto loader_error;
iter = NULL;
while ((method = mono_class_get_methods (klass, &iter))) {
match = 0;
if (method->slot != -1) {
g_assert (method->slot < nslots);
if (method_slots [method->slot >> 5] & (1 << (method->slot & 0x1f)))
continue;
if (!(method->flags & METHOD_ATTRIBUTE_NEW_SLOT))
method_slots [method->slot >> 5] |= 1 << (method->slot & 0x1f);
}
if (!allow_ctors && method->name [0] == '.' && (strcmp (method->name, ".ctor") == 0 || strcmp (method->name, ".cctor") == 0))
continue;
if ((method->flags & METHOD_ATTRIBUTE_MEMBER_ACCESS_MASK) == METHOD_ATTRIBUTE_PUBLIC) {
if (bflags & BFLAGS_Public)
match++;
} else if ((bflags & BFLAGS_NonPublic) && method_nonpublic (method, (klass == startklass))) {
match++;
}
if (!match)
continue;
match = 0;
if (method->flags & METHOD_ATTRIBUTE_STATIC) {
if (bflags & BFLAGS_Static)
if ((bflags & BFLAGS_FlattenHierarchy) || (klass == startklass))
match++;
} else {
if (bflags & BFLAGS_Instance)
match++;
}
if (!match)
continue;
if ((mlisttype != MLISTTYPE_All) && (name != NULL)) {
if (compare_func (name, method->name))
continue;
}
match = 0;
g_ptr_array_add (array, method);
}
if (!(bflags & BFLAGS_DeclaredOnly) && (klass = m_class_get_parent (klass)))
goto handle_parent;
if (method_slots != method_slots_default)
g_free (method_slots);
return array;
loader_error:
if (method_slots != method_slots_default)
g_free (method_slots);
g_ptr_array_free (array, TRUE);
g_assert (mono_class_has_failure (klass));
mono_error_set_for_class_failure (error, klass);
return NULL;
}
GPtrArray*
ves_icall_RuntimeType_GetMethodsByName_native (MonoQCallTypeHandle type_handle, const char *mname, guint32 bflags, guint32 mlisttype, MonoError *error)
{
MonoType *type = type_handle.type;
MonoClass *klass = mono_class_from_mono_type_internal (type);
if (m_type_is_byref (type))
return g_ptr_array_new ();
return mono_class_get_methods_by_name (klass, mname, bflags, mlisttype, FALSE, error);
}
GPtrArray*
ves_icall_RuntimeType_GetConstructors_native (MonoQCallTypeHandle type_handle, guint32 bflags, MonoError *error)
{
MonoType *type = type_handle.type;
if (m_type_is_byref (type)) {
return g_ptr_array_new ();
}
MonoClass *startklass, *klass;
klass = startklass = mono_class_from_mono_type_internal (type);
mono_class_setup_methods (klass);
if (mono_class_has_failure (klass)) {
mono_error_set_for_class_failure (error, klass);
return NULL;
}
GPtrArray *res_array = g_ptr_array_sized_new (4); /* FIXME, guesstimating */
MonoMethod *method;
gpointer iter = NULL;
while ((method = mono_class_get_methods (klass, &iter))) {
int match = 0;
if (strcmp (method->name, ".ctor") && strcmp (method->name, ".cctor"))
continue;
if ((method->flags & METHOD_ATTRIBUTE_MEMBER_ACCESS_MASK) == METHOD_ATTRIBUTE_PUBLIC) {
if (bflags & BFLAGS_Public)
match++;
} else {
if (bflags & BFLAGS_NonPublic)
match++;
}
if (!match)
continue;
match = 0;
if (method->flags & METHOD_ATTRIBUTE_STATIC) {
if (bflags & BFLAGS_Static)
if ((bflags & BFLAGS_FlattenHierarchy) || (klass == startklass))
match++;
} else {
if (bflags & BFLAGS_Instance)
match++;
}
if (!match)
continue;
g_ptr_array_add (res_array, method);
}
return res_array;
}
static guint
property_hash (gconstpointer data)
{
MonoProperty *prop = (MonoProperty*)data;
return g_str_hash (prop->name);
}
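/*
 * property_accessor_override:
 *
 *   Returns TRUE if one accessor overrides the other: either both occupy the
 * same vtable slot, or their signatures match (compared uninflated when both
 * methods come from the same generic type definition).
 */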
static gboolean
property_accessor_override (MonoMethod *method1, MonoMethod *method2)
{
if (method1->slot != -1 && method1->slot == method2->slot)
return TRUE;
if (mono_class_get_generic_type_definition (method1->klass) == mono_class_get_generic_type_definition (method2->klass)) {
if (method1->is_inflated)
method1 = ((MonoMethodInflated*) method1)->declaring;
if (method2->is_inflated)
method2 = ((MonoMethodInflated*) method2)->declaring;
}
return mono_metadata_signature_equal (mono_method_signature_internal (method1), mono_method_signature_internal (method2));
}
static gboolean
property_equal (MonoProperty *prop1, MonoProperty *prop2)
{
// Properties are hide-by-name-and-signature
if (!g_str_equal (prop1->name, prop2->name))
return FALSE;
/* If we see a property on a generic class, we want to
compare the generic signatures, not the inflated signatures,
because we might conflate two properties that were
distinct:
class Foo<T,U> {
public T this[T t] { get { return t; } } // method 1
public U this[U u] { get { return u; } } // method 2
}
If we see int Foo<int,int>::Item[int] we need to know if
the indexer came from method 1 or from method 2, and we
shouldn't conflate them. (Bugzilla 36283)
*/
if (prop1->get && prop2->get && !property_accessor_override (prop1->get, prop2->get))
return FALSE;
if (prop1->set && prop2->set && !property_accessor_override (prop1->set, prop2->set))
return FALSE;
return TRUE;
}
static gboolean
property_accessor_nonpublic (MonoMethod* accessor, gboolean start_klass)
{
if (!accessor)
return FALSE;
return method_nonpublic (accessor, start_klass);
}
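/*
 * ves_icall_RuntimeType_GetPropertiesByName_native:
 *
 *   Collects the properties of the type (walking up the hierarchy unless
 * BFLAGS_DeclaredOnly is set) that match BFLAGS and PROPNAME. A hash table
 * keyed on name and signature implements hide-by-name-and-signature.
 */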
GPtrArray*
ves_icall_RuntimeType_GetPropertiesByName_native (MonoQCallTypeHandle type_handle, gchar *propname, guint32 bflags, guint32 mlisttype, MonoError *error)
{
// Fetch non-public properties as well because they can hide public properties with the same name in base classes
bflags |= BFLAGS_NonPublic;
MonoType *type = type_handle.type;
if (m_type_is_byref (type))
return g_ptr_array_new ();
MonoClass *startklass, *klass;
klass = startklass = mono_class_from_mono_type_internal (type);
int (*compare_func) (const char *s1, const char *s2) = (mlisttype == MLISTTYPE_CaseInsensitive) ? mono_utf8_strcasecmp : strcmp;
GPtrArray *res_array = g_ptr_array_sized_new (8); /* This is the average for ASP.NET types */
GHashTable *properties = g_hash_table_new (property_hash, (GEqualFunc)property_equal);
handle_parent:
mono_class_setup_methods (klass);
mono_class_setup_vtable (klass);
if (mono_class_has_failure (klass)) {
mono_error_set_for_class_failure (error, klass);
goto loader_error;
}
MonoProperty *prop;
gpointer iter;
iter = NULL;
while ((prop = mono_class_get_properties (klass, &iter))) {
int match = 0;
MonoMethod *method = prop->get;
if (!method)
method = prop->set;
guint32 flags = 0;
if (method)
flags = method->flags;
if ((prop->get && ((prop->get->flags & METHOD_ATTRIBUTE_MEMBER_ACCESS_MASK) == METHOD_ATTRIBUTE_PUBLIC)) ||
(prop->set && ((prop->set->flags & METHOD_ATTRIBUTE_MEMBER_ACCESS_MASK) == METHOD_ATTRIBUTE_PUBLIC))) {
if (bflags & BFLAGS_Public)
match++;
} else if (bflags & BFLAGS_NonPublic) {
if (property_accessor_nonpublic(prop->get, startklass == klass) ||
property_accessor_nonpublic(prop->set, startklass == klass)) {
match++;
}
}
if (!match)
continue;
match = 0;
if (flags & METHOD_ATTRIBUTE_STATIC) {
if (bflags & BFLAGS_Static)
if ((bflags & BFLAGS_FlattenHierarchy) || (klass == startklass))
match++;
} else {
if (bflags & BFLAGS_Instance)
match++;
}
if (!match)
continue;
match = 0;
if ((mlisttype != MLISTTYPE_All) && (propname != NULL) && compare_func (propname, prop->name))
continue;
if (g_hash_table_lookup (properties, prop))
continue;
g_ptr_array_add (res_array, prop);
g_hash_table_insert (properties, prop, prop);
}
if (!(bflags & BFLAGS_DeclaredOnly) && (klass = m_class_get_parent (klass))) {
// BFLAGS_NonPublic should be excluded for base classes
bflags &= ~BFLAGS_NonPublic;
goto handle_parent;
}
g_hash_table_destroy (properties);
return res_array;
loader_error:
if (properties)
g_hash_table_destroy (properties);
g_ptr_array_free (res_array, TRUE);
return NULL;
}
static guint
event_hash (gconstpointer data)
{
MonoEvent *event = (MonoEvent*)data;
return g_str_hash (event->name);
}
static gboolean
event_equal (MonoEvent *event1, MonoEvent *event2)
{
// Events are hide-by-name
return g_str_equal (event1->name, event2->name);
}
GPtrArray*
ves_icall_RuntimeType_GetEvents_native (MonoQCallTypeHandle type_handle, char *utf8_name, guint32 mlisttype, MonoError *error)
{
MonoType *type = type_handle.type;
if (m_type_is_byref (type))
return g_ptr_array_new ();
int (*compare_func) (const char *s1, const char *s2) = (mlisttype == MLISTTYPE_CaseInsensitive) ? mono_utf8_strcasecmp : strcmp;
GPtrArray *res_array = g_ptr_array_sized_new (4);
MonoClass *startklass, *klass;
klass = startklass = mono_class_from_mono_type_internal (type);
GHashTable *events = g_hash_table_new (event_hash, (GEqualFunc)event_equal);
handle_parent:
mono_class_setup_methods (klass);
mono_class_setup_vtable (klass);
if (mono_class_has_failure (klass)) {
mono_error_set_for_class_failure (error, klass);
goto failure;
}
MonoEvent *event;
gpointer iter;
iter = NULL;
while ((event = mono_class_get_events (klass, &iter))) {
// Skip inherited private events and inherited events
// that lack add/remove/raise methods
if (klass != startklass)
{
MonoMethod *method = event->add;
if (!method)
method = event->remove;
if (!method)
method = event->raise;
if (!method)
continue;
if ((method->flags & METHOD_ATTRIBUTE_MEMBER_ACCESS_MASK) == METHOD_ATTRIBUTE_PRIVATE)
continue;
}
if ((mlisttype != MLISTTYPE_All) && (utf8_name != NULL) && compare_func (event->name, utf8_name))
continue;
if (g_hash_table_lookup (events, event))
continue;
g_ptr_array_add (res_array, event);
g_hash_table_insert (events, event, event);
}
if ((klass = m_class_get_parent (klass)))
goto handle_parent;
g_hash_table_destroy (events);
return res_array;
failure:
if (events != NULL)
g_hash_table_destroy (events);
g_ptr_array_free (res_array, TRUE);
return NULL;
}
GPtrArray *
ves_icall_RuntimeType_GetNestedTypes_native (MonoQCallTypeHandle type_handle, char *str, guint32 bflags, guint32 mlisttype, MonoError *error)
{
MonoType *type = type_handle.type;
if (m_type_is_byref (type))
return g_ptr_array_new ();
int (*compare_func) (const char *s1, const char *s2) = ((bflags & BFLAGS_IgnoreCase) || (mlisttype == MLISTTYPE_CaseInsensitive)) ? mono_utf8_strcasecmp : strcmp;
MonoClass *klass = mono_class_from_mono_type_internal (type);
/*
* If a nested type is generic, return its generic type definition.
* Note that this means that the return value is essentially the set
* of nested types of the generic type definition of @klass.
*
* A note in MSDN claims that a generic type definition can have
* nested types that aren't generic. In any case, the container of that
* nested type would be the generic type definition.
*/
if (mono_class_is_ginst (klass))
klass = mono_class_get_generic_class (klass)->container_class;
GPtrArray *res_array = g_ptr_array_new ();
MonoClass *nested;
gpointer iter = NULL;
while ((nested = mono_class_get_nested_types (klass, &iter))) {
int match = 0;
if ((mono_class_get_flags (nested) & TYPE_ATTRIBUTE_VISIBILITY_MASK) == TYPE_ATTRIBUTE_NESTED_PUBLIC) {
if (bflags & BFLAGS_Public)
match++;
} else {
if (bflags & BFLAGS_NonPublic)
match++;
}
if (!match)
continue;
if ((mlisttype != MLISTTYPE_All) && (str != NULL) && compare_func (m_class_get_name (nested), str))
continue;
g_ptr_array_add (res_array, m_class_get_byval_arg (nested));
}
return res_array;
}
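/*
 * get_type_from_module_builder_module:
 *
 *   Looks up the type described by INFO in the image of the I-th
 * ModuleBuilder stored in MODULES; returns NULL if it is not found there.
 */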
static MonoType*
get_type_from_module_builder_module (MonoAssemblyLoadContext *alc, MonoArrayHandle modules, int i, MonoTypeNameParse *info, MonoBoolean ignoreCase, gboolean *type_resolve, MonoError *error)
{
HANDLE_FUNCTION_ENTER ();
MonoType *type = NULL;
MonoReflectionModuleBuilderHandle mb = MONO_HANDLE_NEW (MonoReflectionModuleBuilder, NULL);
MONO_HANDLE_ARRAY_GETREF (mb, modules, i);
MonoDynamicImage *dynamic_image = MONO_HANDLE_GETVAL (mb, dynamic_image);
type = mono_reflection_get_type_checked (alc, &dynamic_image->image, &dynamic_image->image, info, ignoreCase, FALSE, type_resolve, error);
HANDLE_FUNCTION_RETURN_VAL (type);
}
static MonoType*
get_type_from_module_builder_loaded_modules (MonoAssemblyLoadContext *alc, MonoArrayHandle loaded_modules, int i, MonoTypeNameParse *info, MonoBoolean ignoreCase, gboolean *type_resolve, MonoError *error)
{
HANDLE_FUNCTION_ENTER ();
MonoType *type = NULL;
MonoReflectionModuleHandle mod = MONO_HANDLE_NEW (MonoReflectionModule, NULL);
MONO_HANDLE_ARRAY_GETREF (mod, loaded_modules, i);
MonoImage *image = MONO_HANDLE_GETVAL (mod, image);
type = mono_reflection_get_type_checked (alc, image, image, info, ignoreCase, FALSE, type_resolve, error);
HANDLE_FUNCTION_RETURN_VAL (type);
}
MonoReflectionTypeHandle
ves_icall_System_Reflection_Assembly_InternalGetType (MonoReflectionAssemblyHandle assembly_h, MonoReflectionModuleHandle module, MonoStringHandle name, MonoBoolean throwOnError, MonoBoolean ignoreCase, MonoError *error)
{
ERROR_DECL (parse_error);
MonoTypeNameParse info;
gboolean type_resolve;
MonoAssemblyLoadContext *alc = mono_alc_get_ambient ();
/* On MS.NET, this does not fire a TypeResolve event */
type_resolve = TRUE;
char *str = mono_string_handle_to_utf8 (name, error);
goto_if_nok (error, fail);
/*g_print ("requested type %s in %s\n", str, assembly->assembly->aname.name);*/
if (!mono_reflection_parse_type_checked (str, &info, parse_error)) {
g_free (str);
mono_reflection_free_type_info (&info);
mono_error_cleanup (parse_error);
if (throwOnError) {
mono_error_set_argument (error, "typeName@0", "failed to parse the type");
goto fail;
}
/*g_print ("failed parse\n");*/
return MONO_HANDLE_CAST (MonoReflectionType, NULL_HANDLE);
}
if (info.assembly.name) {
g_free (str);
mono_reflection_free_type_info (&info);
if (throwOnError) {
mono_error_set_argument (error, NULL, "Type names passed to Assembly.GetType() must not specify an assembly.");
goto fail;
}
return MONO_HANDLE_CAST (MonoReflectionType, NULL_HANDLE);
}
MonoType *type;
type = NULL;
if (!MONO_HANDLE_IS_NULL (module)) {
MonoImage *image = MONO_HANDLE_GETVAL (module, image);
if (image) {
type = mono_reflection_get_type_checked (alc, image, image, &info, ignoreCase, FALSE, &type_resolve, error);
if (!is_ok (error)) {
g_free (str);
mono_reflection_free_type_info (&info);
goto fail;
}
}
}
else {
MonoAssembly *assembly = MONO_HANDLE_GETVAL (assembly_h, assembly);
if (assembly_is_dynamic (assembly)) {
/* Enumerate all modules */
MonoReflectionAssemblyBuilderHandle abuilder = MONO_HANDLE_NEW (MonoReflectionAssemblyBuilder, NULL);
MONO_HANDLE_ASSIGN (abuilder, assembly_h);
int i;
MonoArrayHandle modules = MONO_HANDLE_NEW (MonoArray, NULL);
MONO_HANDLE_GET (modules, abuilder, modules);
if (!MONO_HANDLE_IS_NULL (modules)) {
int n = mono_array_handle_length (modules);
for (i = 0; i < n; ++i) {
type = get_type_from_module_builder_module (alc, modules, i, &info, ignoreCase, &type_resolve, error);
if (!is_ok (error)) {
g_free (str);
mono_reflection_free_type_info (&info);
goto fail;
}
if (type)
break;
}
}
MonoArrayHandle loaded_modules = MONO_HANDLE_NEW (MonoArray, NULL);
MONO_HANDLE_GET (loaded_modules, abuilder, loaded_modules);
if (!type && !MONO_HANDLE_IS_NULL (loaded_modules)) {
int n = mono_array_handle_length (loaded_modules);
for (i = 0; i < n; ++i) {
type = get_type_from_module_builder_loaded_modules (alc, loaded_modules, i, &info, ignoreCase, &type_resolve, error);
if (!is_ok (error)) {
g_free (str);
mono_reflection_free_type_info (&info);
goto fail;
}
if (type)
break;
}
}
}
else {
type = mono_reflection_get_type_checked (alc, assembly->image, assembly->image, &info, ignoreCase, FALSE, &type_resolve, error);
if (!is_ok (error)) {
g_free (str);
mono_reflection_free_type_info (&info);
goto fail;
}
}
}
g_free (str);
mono_reflection_free_type_info (&info);
if (!type) {
if (throwOnError) {
ERROR_DECL (inner_error);
char *type_name = mono_string_handle_to_utf8 (name, inner_error);
mono_error_assert_ok (inner_error);
MonoAssembly *assembly = MONO_HANDLE_GETVAL (assembly_h, assembly);
char *assmname = mono_stringify_assembly_name (&assembly->aname);
mono_error_set_type_load_name (error, type_name, assmname, "%s", "");
goto fail;
}
return MONO_HANDLE_CAST (MonoReflectionType, NULL_HANDLE);
}
if (type->type == MONO_TYPE_CLASS) {
MonoClass *klass = mono_type_get_class_internal (type);
/* need to report exceptions? */
if (throwOnError && mono_class_has_failure (klass)) {
/* report SecurityException (or others) that occurred when loading the assembly */
mono_error_set_for_class_failure (error, klass);
goto fail;
}
}
/* g_print ("got it\n"); */
return mono_type_get_object_handle (type, error);
fail:
g_assert (!is_ok (error));
return MONO_HANDLE_CAST (MonoReflectionType, NULL_HANDLE);
}
/* This corresponds to RuntimeAssembly.AssemblyInfoKind */
typedef enum {
ASSEMBLY_INFO_KIND_LOCATION = 1,
ASSEMBLY_INFO_KIND_CODEBASE = 2,
ASSEMBLY_INFO_KIND_FULLNAME = 3,
ASSEMBLY_INFO_KIND_VERSION = 4
} MonoAssemblyInfoKind;
void
ves_icall_System_Reflection_RuntimeAssembly_GetInfo (MonoQCallAssemblyHandle assembly_h, MonoObjectHandleOnStack res, guint32 int_kind, MonoError *error)
{
MonoAssembly *assembly = assembly_h.assembly;
MonoAssemblyInfoKind kind = (MonoAssemblyInfoKind)int_kind;
switch (kind) {
case ASSEMBLY_INFO_KIND_LOCATION: {
const char *image_name = m_image_get_filename (assembly->image);
HANDLE_ON_STACK_SET (res, mono_string_new_checked (image_name != NULL ? image_name : "", error));
break;
}
case ASSEMBLY_INFO_KIND_CODEBASE: {
/* return NULL for bundled assemblies in single-file scenarios */
const char* filename = m_image_get_filename (assembly->image);
if (!filename)
break;
gchar *absolute;
if (g_path_is_absolute (filename))
absolute = g_strdup (filename);
else
absolute = g_build_filename (assembly->basedir, filename, (const char*)NULL);
mono_icall_make_platform_path (absolute);
const gchar *prepend = mono_icall_get_file_path_prefix (absolute);
gchar *uri = g_strconcat (prepend, absolute, (const char*)NULL);
g_free (absolute);
if (uri) {
HANDLE_ON_STACK_SET (res, mono_string_new_checked (uri, error));
g_free (uri);
return_if_nok (error);
}
break;
}
case ASSEMBLY_INFO_KIND_FULLNAME: {
char *name = mono_stringify_assembly_name (&assembly->aname);
HANDLE_ON_STACK_SET (res, mono_string_new_checked (name, error));
g_free (name);
return_if_nok (error);
break;
}
case ASSEMBLY_INFO_KIND_VERSION: {
HANDLE_ON_STACK_SET (res, mono_string_new_checked (assembly->image->version, error));
return_if_nok (error);
break;
}
default:
g_assert_not_reached ();
}
}
void
ves_icall_System_Reflection_RuntimeAssembly_GetEntryPoint (MonoQCallAssemblyHandle assembly_h, MonoObjectHandleOnStack res, MonoError *error)
{
MonoAssembly *assembly = assembly_h.assembly;
MonoMethod *method;
guint32 token = mono_image_get_entry_point (assembly->image);
if (!token)
return;
method = mono_get_method_checked (assembly->image, token, NULL, NULL, error);
return_if_nok (error);
HANDLE_ON_STACK_SET (res, mono_method_get_object_checked (method, NULL, error));
}
void
ves_icall_System_Reflection_Assembly_GetManifestModuleInternal (MonoQCallAssemblyHandle assembly_h, MonoObjectHandleOnStack res, MonoError *error)
{
MonoAssembly *a = assembly_h.assembly;
HANDLE_ON_STACK_SET (res, MONO_HANDLE_RAW (mono_module_get_object_handle (a->image, error)));
}
static gboolean
add_manifest_resource_name_to_array (MonoImage *image, MonoTableInfo *table, int i, MonoArrayHandle dest, MonoError *error)
{
HANDLE_FUNCTION_ENTER ();
const char *val = mono_metadata_string_heap (image, mono_metadata_decode_row_col (table, i, MONO_MANIFEST_NAME));
MonoStringHandle str = mono_string_new_handle (val, error);
goto_if_nok (error, leave);
MONO_HANDLE_ARRAY_SETREF (dest, i, str);
leave:
HANDLE_FUNCTION_RETURN_VAL (is_ok (error));
}
void
ves_icall_System_Reflection_RuntimeAssembly_GetManifestResourceNames (MonoQCallAssemblyHandle assembly_h, MonoObjectHandleOnStack res, MonoError *error)
{
MonoAssembly *assembly = assembly_h.assembly;
MonoTableInfo *table = &assembly->image->tables [MONO_TABLE_MANIFESTRESOURCE];
/* FIXME: metadata-update */
int rows = table_info_get_rows (table);
MonoArrayHandle result = mono_array_new_handle (mono_defaults.string_class, rows, error);
return_if_nok (error);
for (int i = 0; i < rows; ++i) {
if (!add_manifest_resource_name_to_array (assembly->image, table, i, result, error))
return;
}
HANDLE_ON_STACK_SET (res, MONO_HANDLE_RAW (result));
}
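/*
 * create_referenced_assembly_name:
 *
 *   Builds a heap-allocated MonoAssemblyName from the I-th ASSEMBLYREF row
 * of IMAGE. Ownership passes to the caller; managed code eventually releases
 * it through Mono.RuntimeMarshal.FreeAssemblyName.
 */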
static MonoAssemblyName*
create_referenced_assembly_name (MonoImage *image, int i, MonoError *error)
{
MonoAssemblyName *aname = g_new0 (MonoAssemblyName, 1);
mono_assembly_get_assemblyref_checked (image, i, aname, error);
return_val_if_nok (error, NULL);
aname->hash_alg = ASSEMBLY_HASH_SHA1 /* SHA1 (default) */;
/* name and culture are pointers into the image tables, but we need
* real malloc'd strings (so that we can g_free() them later from
* Mono.RuntimeMarshal.FreeAssemblyName) */
aname->name = g_strdup (aname->name);
aname->culture = g_strdup (aname->culture);
/* Don't need the hash value in managed */
aname->hash_value = NULL;
aname->hash_len = 0;
g_assert (aname->public_key == NULL);
/* note: this function doesn't return the codebase on purpose (i.e. it can
be used under partial trust as path information isn't present). */
return aname;
}
GPtrArray*
ves_icall_System_Reflection_Assembly_InternalGetReferencedAssemblies (MonoReflectionAssemblyHandle assembly_h, MonoError *error)
{
MonoAssembly *assembly = MONO_HANDLE_GETVAL (assembly_h, assembly);
MonoImage *image = assembly->image;
int count;
/* FIXME: metadata-update */
if (image_is_dynamic (assembly->image)) {
MonoDynamicTable *t = &(((MonoDynamicImage*) image)->tables [MONO_TABLE_ASSEMBLYREF]);
count = t->rows;
}
else {
MonoTableInfo *t = &image->tables [MONO_TABLE_ASSEMBLYREF];
count = table_info_get_rows (t);
}
GPtrArray *result = g_ptr_array_sized_new (count);
for (int i = 0; i < count; i++) {
MonoAssemblyName *aname = create_referenced_assembly_name (image, i, error);
if (!is_ok (error))
break;
g_ptr_array_add (result, aname);
}
return result;
}
/* move this to some file in mono/util/ */
static char *
g_concat_dir_and_file (const char *dir, const char *file)
{
g_return_val_if_fail (dir != NULL, NULL);
g_return_val_if_fail (file != NULL, NULL);
/*
* If the directory name doesn't have a / on the end, we need
* to add one so we get a proper path to the file
*/
if (dir [strlen(dir) - 1] != G_DIR_SEPARATOR)
return g_strconcat (dir, G_DIR_SEPARATOR_S, file, (const char*)NULL);
else
return g_strconcat (dir, file, (const char*)NULL);
}
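/*
 * try_resource_resolve_name:
 *
 *   Raises the managed AssemblyLoadContext.OnResourceResolve event so
 * handlers can supply the assembly that satisfies a resource request.
 * Returns NULL when the runtime is in no-exec mode, the hook is missing,
 * or the invocation fails.
 */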
static MonoReflectionAssemblyHandle
try_resource_resolve_name (MonoReflectionAssemblyHandle assembly_handle, MonoStringHandle name_handle)
{
MonoObjectHandle ret;
ERROR_DECL (error);
HANDLE_FUNCTION_ENTER ();
if (mono_runtime_get_no_exec ())
goto return_null;
MONO_STATIC_POINTER_INIT (MonoMethod, resolve_method)
static gboolean inited;
if (!inited) {
MonoClass *alc_class = mono_class_get_assembly_load_context_class ();
g_assert (alc_class);
resolve_method = mono_class_get_method_from_name_checked (alc_class, "OnResourceResolve", -1, 0, error);
inited = TRUE;
}
mono_error_cleanup (error);
error_init_reuse (error);
MONO_STATIC_POINTER_INIT_END (MonoMethod, resolve_method)
if (!resolve_method)
goto return_null;
gpointer args [2];
args [0] = MONO_HANDLE_RAW (assembly_handle);
args [1] = MONO_HANDLE_RAW (name_handle);
ret = mono_runtime_try_invoke_handle (resolve_method, NULL_HANDLE, args, error);
goto_if_nok (error, return_null);
goto exit;
return_null:
ret = NULL_HANDLE;
exit:
HANDLE_FUNCTION_RETURN_REF (MonoReflectionAssembly, MONO_HANDLE_CAST (MonoReflectionAssembly, ret));
}
void *
ves_icall_System_Reflection_RuntimeAssembly_GetManifestResourceInternal (MonoQCallAssemblyHandle assembly_h, MonoStringHandle name, gint32 *size, MonoObjectHandleOnStack ref_module, MonoError *error)
{
MonoAssembly *assembly = assembly_h.assembly;
MonoTableInfo *table = &assembly->image->tables [MONO_TABLE_MANIFESTRESOURCE];
guint32 i;
guint32 cols [MONO_MANIFEST_SIZE];
guint32 impl, file_idx;
const char *val;
MonoImage *module;
char *n = mono_string_handle_to_utf8 (name, error);
return_val_if_nok (error, NULL);
/* FIXME: metadata update */
int rows = table_info_get_rows (table);
for (i = 0; i < rows; ++i) {
mono_metadata_decode_row (table, i, cols, MONO_MANIFEST_SIZE);
val = mono_metadata_string_heap (assembly->image, cols [MONO_MANIFEST_NAME]);
if (strcmp (val, n) == 0)
break;
}
g_free (n);
if (i == rows)
return NULL;
/* FIXME */
impl = cols [MONO_MANIFEST_IMPLEMENTATION];
if (impl) {
/*
* this code should only be called after obtaining the
* ResourceInfo and handling the other cases.
*/
g_assert ((impl & MONO_IMPLEMENTATION_MASK) == MONO_IMPLEMENTATION_FILE);
file_idx = impl >> MONO_IMPLEMENTATION_BITS;
module = mono_image_load_file_for_image_checked (assembly->image, file_idx, error);
if (!is_ok (error) || !module)
return NULL;
} else {
module = assembly->image;
}
MonoReflectionModuleHandle rm = mono_module_get_object_handle (module, error);
return_val_if_nok (error, NULL);
HANDLE_ON_STACK_SET (ref_module, MONO_HANDLE_RAW (rm));
return (void*)mono_image_get_resource (module, cols [MONO_MANIFEST_OFFSET], (guint32*)size);
}
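/*
 * get_manifest_resource_info_internal:
 *
 *   Fills INFO with the location, file name and owning assembly of the
 * manifest resource NAME, following FILE implementations and recursing into
 * ASSEMBLYREF implementations. Returns TRUE if the resource was found.
 */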
static gboolean
get_manifest_resource_info_internal (MonoAssembly *assembly, MonoStringHandle name, MonoManifestResourceInfoHandle info, MonoError *error)
{
HANDLE_FUNCTION_ENTER ();
MonoTableInfo *table = &assembly->image->tables [MONO_TABLE_MANIFESTRESOURCE];
int i;
guint32 cols [MONO_MANIFEST_SIZE];
guint32 file_cols [MONO_FILE_SIZE];
const char *val;
char *n;
gboolean result = FALSE;
n = mono_string_handle_to_utf8 (name, error);
goto_if_nok (error, leave);
int rows = table_info_get_rows (table);
for (i = 0; i < rows; ++i) {
mono_metadata_decode_row (table, i, cols, MONO_MANIFEST_SIZE);
val = mono_metadata_string_heap (assembly->image, cols [MONO_MANIFEST_NAME]);
if (strcmp (val, n) == 0)
break;
}
g_free (n);
if (i == rows)
goto leave;
if (!cols [MONO_MANIFEST_IMPLEMENTATION]) {
MONO_HANDLE_SETVAL (info, location, guint32, RESOURCE_LOCATION_EMBEDDED | RESOURCE_LOCATION_IN_MANIFEST);
}
else {
switch (cols [MONO_MANIFEST_IMPLEMENTATION] & MONO_IMPLEMENTATION_MASK) {
case MONO_IMPLEMENTATION_FILE:
i = cols [MONO_MANIFEST_IMPLEMENTATION] >> MONO_IMPLEMENTATION_BITS;
table = &assembly->image->tables [MONO_TABLE_FILE];
mono_metadata_decode_row (table, i - 1, file_cols, MONO_FILE_SIZE);
val = mono_metadata_string_heap (assembly->image, file_cols [MONO_FILE_NAME]);
MONO_HANDLE_SET (info, filename, mono_string_new_handle (val, error));
if (file_cols [MONO_FILE_FLAGS] & FILE_CONTAINS_NO_METADATA)
MONO_HANDLE_SETVAL (info, location, guint32, 0);
else
MONO_HANDLE_SETVAL (info, location, guint32, RESOURCE_LOCATION_EMBEDDED);
break;
case MONO_IMPLEMENTATION_ASSEMBLYREF:
i = cols [MONO_MANIFEST_IMPLEMENTATION] >> MONO_IMPLEMENTATION_BITS;
mono_assembly_load_reference (assembly->image, i - 1);
if (assembly->image->references [i - 1] == REFERENCE_MISSING) {
mono_error_set_file_not_found (error, NULL, "Assembly %d referenced from assembly %s not found", i - 1, assembly->image->name);
goto leave;
}
MonoReflectionAssemblyHandle assm_obj;
assm_obj = mono_assembly_get_object_handle (assembly->image->references [i - 1], error);
goto_if_nok (error, leave);
MONO_HANDLE_SET (info, assembly, assm_obj);
/* Obtain info recursively */
get_manifest_resource_info_internal (MONO_HANDLE_GETVAL (assm_obj, assembly), name, info, error);
goto_if_nok (error, leave);
guint32 location;
location = MONO_HANDLE_GETVAL (info, location);
location |= RESOURCE_LOCATION_ANOTHER_ASSEMBLY;
MONO_HANDLE_SETVAL (info, location, guint32, location);
break;
case MONO_IMPLEMENTATION_EXP_TYPE:
g_assert_not_reached ();
break;
}
}
result = TRUE;
leave:
HANDLE_FUNCTION_RETURN_VAL (result);
}
MonoBoolean
ves_icall_System_Reflection_RuntimeAssembly_GetManifestResourceInfoInternal (MonoQCallAssemblyHandle assembly_h, MonoStringHandle name, MonoManifestResourceInfoHandle info_h, MonoError *error)
{
return get_manifest_resource_info_internal (assembly_h.assembly, name, info_h, error);
}
static gboolean
add_module_to_modules_array (MonoArrayHandle dest, int *dest_idx, MonoImage* module, MonoError *error)
{
HANDLE_FUNCTION_ENTER ();
if (module) {
MonoReflectionModuleHandle rm = mono_module_get_object_handle (module, error);
goto_if_nok (error, leave);
MONO_HANDLE_ARRAY_SETREF (dest, *dest_idx, rm);
++(*dest_idx);
}
leave:
HANDLE_FUNCTION_RETURN_VAL (is_ok (error));
}
static gboolean
add_file_to_modules_array (MonoArrayHandle dest, int dest_idx, MonoImage *image, MonoTableInfo *table, int table_idx, MonoError *error)
{
HANDLE_FUNCTION_ENTER ();
guint32 cols [MONO_FILE_SIZE];
mono_metadata_decode_row (table, table_idx, cols, MONO_FILE_SIZE);
if (cols [MONO_FILE_FLAGS] & FILE_CONTAINS_NO_METADATA) {
MonoReflectionModuleHandle rm = mono_module_file_get_object_handle (image, table_idx, error);
goto_if_nok (error, leave);
MONO_HANDLE_ARRAY_SETREF (dest, dest_idx, rm);
} else {
MonoImage *m = mono_image_load_file_for_image_checked (image, table_idx + 1, error);
goto_if_nok (error, leave);
if (!m) {
const char *filename = mono_metadata_string_heap (image, cols [MONO_FILE_NAME]);
mono_error_set_simple_file_not_found (error, filename);
goto leave;
}
MonoReflectionModuleHandle rm = mono_module_get_object_handle (m, error);
goto_if_nok (error, leave);
MONO_HANDLE_ARRAY_SETREF (dest, dest_idx, rm);
}
leave:
HANDLE_FUNCTION_RETURN_VAL (is_ok (error));
}
void
ves_icall_System_Reflection_RuntimeAssembly_GetModulesInternal (MonoQCallAssemblyHandle assembly_h, MonoObjectHandleOnStack res_h, MonoError *error)
{
MonoAssembly *assembly = assembly_h.assembly;
MonoClass *klass;
int i, j, file_count = 0;
MonoImage **modules;
guint32 module_count, real_module_count;
MonoTableInfo *table;
MonoImage *image = assembly->image;
g_assert (image != NULL);
g_assert (!assembly_is_dynamic (assembly));
table = &image->tables [MONO_TABLE_FILE];
file_count = table_info_get_rows (table);
modules = image->modules;
module_count = image->module_count;
real_module_count = 0;
for (i = 0; i < module_count; ++i)
if (modules [i])
real_module_count ++;
klass = mono_class_get_module_class ();
MonoArrayHandle res = mono_array_new_handle (klass, 1 + real_module_count + file_count, error);
return_if_nok (error);
MonoReflectionModuleHandle image_obj = mono_module_get_object_handle (image, error);
return_if_nok (error);
MONO_HANDLE_ARRAY_SETREF (res, 0, image_obj);
j = 1;
for (i = 0; i < module_count; ++i)
if (!add_module_to_modules_array (res, &j, modules[i], error))
return;
for (i = 0; i < file_count; ++i, ++j) {
if (!add_file_to_modules_array (res, j, image, table, i, error))
return;
}
HANDLE_ON_STACK_SET (res_h, MONO_HANDLE_RAW (res));
}
MonoReflectionMethodHandle
ves_icall_GetCurrentMethod (MonoError *error)
{
MonoMethod *m = mono_method_get_last_managed ();
if (!m) {
mono_error_set_not_supported (error, "Stack walks are not supported on this platform.");
return MONO_HANDLE_CAST (MonoReflectionMethod, NULL_HANDLE);
}
while (m->is_inflated)
m = ((MonoMethodInflated*)m)->declaring;
return mono_method_get_object_handle (m, NULL, error);
}
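/*
 * mono_method_get_equivalent_method:
 *
 *   Returns the method of KLASS that corresponds to METHOD, which belongs to
 * a different instantiation of the same type definition: either re-inflates
 * METHOD with KLASS's generic context, or returns the method found at the
 * same index in KLASS's method table.
 */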
static MonoMethod*
mono_method_get_equivalent_method (MonoMethod *method, MonoClass *klass)
{
int offset = -1, i;
if (method->is_inflated && ((MonoMethodInflated*)method)->context.method_inst) {
ERROR_DECL (error);
MonoMethod *result;
MonoMethodInflated *inflated = (MonoMethodInflated*)method;
// method is inflated, we should inflate it on the other class
MonoGenericContext ctx;
ctx.method_inst = inflated->context.method_inst;
ctx.class_inst = inflated->context.class_inst;
if (mono_class_is_ginst (klass))
ctx.class_inst = mono_class_get_generic_class (klass)->context.class_inst;
else if (mono_class_is_gtd (klass))
ctx.class_inst = mono_class_get_generic_container (klass)->context.class_inst;
result = mono_class_inflate_generic_method_full_checked (inflated->declaring, klass, &ctx, error);
g_assert (is_ok (error)); /* FIXME don't swallow the error */
return result;
}
mono_class_setup_methods (method->klass);
if (mono_class_has_failure (method->klass))
return NULL;
int mcount = mono_class_get_method_count (method->klass);
MonoMethod **method_klass_methods = m_class_get_methods (method->klass);
for (i = 0; i < mcount; ++i) {
if (method_klass_methods [i] == method) {
offset = i;
break;
}
}
mono_class_setup_methods (klass);
if (mono_class_has_failure (klass))
return NULL;
g_assert (offset >= 0 && offset < mono_class_get_method_count (klass));
return m_class_get_methods (klass) [offset];
}
MonoReflectionMethodHandle
ves_icall_System_Reflection_RuntimeMethodInfo_GetMethodFromHandleInternalType_native (MonoMethod *method, MonoType *type, MonoBoolean generic_check, MonoError *error)
{
MonoClass *klass;
if (type && generic_check) {
klass = mono_class_from_mono_type_internal (type);
if (mono_class_get_generic_type_definition (method->klass) != mono_class_get_generic_type_definition (klass))
return MONO_HANDLE_CAST (MonoReflectionMethod, NULL_HANDLE);
if (method->klass != klass) {
method = mono_method_get_equivalent_method (method, klass);
if (!method)
return MONO_HANDLE_CAST (MonoReflectionMethod, NULL_HANDLE);
}
} else if (type)
klass = mono_class_from_mono_type_internal (type);
else
klass = method->klass;
return mono_method_get_object_handle (method, klass, error);
}
MonoReflectionMethodBodyHandle
ves_icall_System_Reflection_RuntimeMethodInfo_GetMethodBodyInternal (MonoMethod *method, MonoError *error)
{
return mono_method_body_get_object_handle (method, error);
}
MonoReflectionAssemblyHandle
ves_icall_System_Reflection_Assembly_GetExecutingAssembly (MonoStackCrawlMark *stack_mark, MonoError *error)
{
MonoAssembly *assembly;
assembly = mono_runtime_get_caller_from_stack_mark (stack_mark);
g_assert (assembly);
return mono_assembly_get_object_handle (assembly, error);
}
MonoReflectionAssemblyHandle
ves_icall_System_Reflection_Assembly_GetEntryAssembly (MonoError *error)
{
MonoAssembly *assembly = mono_runtime_get_entry_assembly ();
if (!assembly)
return MONO_HANDLE_CAST (MonoReflectionAssembly, NULL_HANDLE);
return mono_assembly_get_object_handle (assembly, error);
}
MonoReflectionAssemblyHandle
ves_icall_System_Reflection_Assembly_GetCallingAssembly (MonoError *error)
{
MonoMethod *m;
MonoMethod *dest;
dest = NULL;
mono_stack_walk_no_il (get_executing, &dest);
m = dest;
mono_stack_walk_no_il (get_caller_no_reflection, &dest);
if (!dest)
dest = m;
if (!m) {
mono_error_set_not_supported (error, "Stack walks are not supported on this platform.");
return MONO_HANDLE_CAST (MonoReflectionAssembly, NULL_HANDLE);
}
return mono_assembly_get_object_handle (m_class_get_image (dest->klass)->assembly, error);
}
void
ves_icall_System_RuntimeType_getFullName (MonoQCallTypeHandle type_handle, MonoObjectHandleOnStack res, MonoBoolean full_name,
MonoBoolean assembly_qualified, MonoError *error)
{
MonoType *type = type_handle.type;
MonoTypeNameFormat format;
gchar *name;
if (full_name)
format = assembly_qualified ?
MONO_TYPE_NAME_FORMAT_ASSEMBLY_QUALIFIED :
MONO_TYPE_NAME_FORMAT_FULL_NAME;
else
format = MONO_TYPE_NAME_FORMAT_REFLECTION;
name = mono_type_get_name_full (type, format);
if (!name)
return;
if (full_name && (type->type == MONO_TYPE_VAR || type->type == MONO_TYPE_MVAR)) {
g_free (name);
return;
}
HANDLE_ON_STACK_SET (res, mono_string_new_checked (name, error));
g_free (name);
}
MonoAssemblyName *
ves_icall_System_Reflection_AssemblyName_GetNativeName (MonoAssembly *mass)
{
return &mass->aname;
}
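/*
 * mono_module_type_is_visible:
 *
 *   TYPE is a 1-based TYPEDEF row index. Returns TRUE if the type and every
 * type enclosing it are public.
 */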
static gboolean
mono_module_type_is_visible (MonoTableInfo *tdef, MonoImage *image, int type)
{
guint32 attrs, visibility;
do {
attrs = mono_metadata_decode_row_col (tdef, type - 1, MONO_TYPEDEF_FLAGS);
visibility = attrs & TYPE_ATTRIBUTE_VISIBILITY_MASK;
if (visibility != TYPE_ATTRIBUTE_PUBLIC && visibility != TYPE_ATTRIBUTE_NESTED_PUBLIC)
return FALSE;
} while ((type = mono_metadata_token_index (mono_metadata_nested_in_typedef (image, type))));
return TRUE;
}
static void
image_get_type (MonoImage *image, MonoTableInfo *tdef, int table_idx, int count, MonoArrayHandle res, MonoArrayHandle exceptions, MonoBoolean exportedOnly, MonoError *error)
{
HANDLE_FUNCTION_ENTER ();
ERROR_DECL (klass_error);
MonoClass *klass = mono_class_get_checked (image, table_idx | MONO_TOKEN_TYPE_DEF, klass_error);
if (klass) {
MonoReflectionTypeHandle rt = mono_type_get_object_handle (m_class_get_byval_arg (klass), error);
return_if_nok (error);
MONO_HANDLE_ARRAY_SETREF (res, count, rt);
} else {
MonoExceptionHandle ex = mono_error_convert_to_exception_handle (klass_error);
MONO_HANDLE_ARRAY_SETREF (exceptions, count, ex);
}
HANDLE_FUNCTION_RETURN ();
}
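/*
 * mono_module_get_types:
 *
 *   Returns a System.RuntimeType array for the TYPEDEF rows of IMAGE,
 * skipping the special <Module> type and, when EXPORTEDONLY is set, types
 * that are not publicly visible. A load failure leaves a NULL entry in the
 * result and stores the exception at the same index in EXCEPTIONS.
 */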
static MonoArrayHandle
mono_module_get_types (MonoImage *image, MonoArrayHandleOut exceptions, MonoBoolean exportedOnly, MonoError *error)
{
/* FIXME: metadata-update */
MonoTableInfo *tdef = &image->tables [MONO_TABLE_TYPEDEF];
int rows = table_info_get_rows (tdef);
int i, count;
/* we start the count from 1 because we skip the special type <Module> */
if (exportedOnly) {
count = 0;
for (i = 1; i < rows; ++i) {
if (mono_module_type_is_visible (tdef, image, i + 1))
count++;
}
} else {
count = rows - 1;
}
MonoArrayHandle res = mono_array_new_handle (mono_defaults.runtimetype_class, count, error);
return_val_if_nok (error, NULL_HANDLE_ARRAY);
MONO_HANDLE_ASSIGN (exceptions, mono_array_new_handle (mono_defaults.exception_class, count, error));
return_val_if_nok (error, NULL_HANDLE_ARRAY);
count = 0;
for (i = 1; i < rows; ++i) {
if (!exportedOnly || mono_module_type_is_visible (tdef, image, i+1)) {
image_get_type (image, tdef, i + 1, count, res, exceptions, exportedOnly, error);
return_val_if_nok (error, NULL_HANDLE_ARRAY);
count++;
}
}
return res;
}
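/*
 * append_module_types:
 *
 *   Loads the types of IMAGE and appends them, together with their per-type
 * exceptions, to RES and EXCEPTIONS, reallocating both arrays.
 */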
static void
append_module_types (MonoArrayHandleOut res, MonoArrayHandleOut exceptions, MonoImage *image, MonoBoolean exportedOnly, MonoError *error)
{
HANDLE_FUNCTION_ENTER ();
MonoArrayHandle ex2 = MONO_HANDLE_NEW (MonoArray, NULL);
MonoArrayHandle res2 = mono_module_get_types (image, ex2, exportedOnly, error);
goto_if_nok (error, leave);
/* Append the new types to the end of the array */
if (mono_array_handle_length (res2) > 0) {
guint32 len1, len2;
len1 = mono_array_handle_length (res);
len2 = mono_array_handle_length (res2);
MonoArrayHandle res3 = mono_array_new_handle (mono_defaults.runtimetype_class, len1 + len2, error);
goto_if_nok (error, leave);
mono_array_handle_memcpy_refs (res3, 0, res, 0, len1);
mono_array_handle_memcpy_refs (res3, len1, res2, 0, len2);
MONO_HANDLE_ASSIGN (res, res3);
MonoArrayHandle ex3 = mono_array_new_handle (mono_defaults.exception_class, len1 + len2, error); /* this array holds exceptions, not types */
goto_if_nok (error, leave);
mono_array_handle_memcpy_refs (ex3, 0, exceptions, 0, len1);
mono_array_handle_memcpy_refs (ex3, len1, ex2, 0, len2);
MONO_HANDLE_ASSIGN (exceptions, ex3);
}
leave:
HANDLE_FUNCTION_RETURN ();
}
static void
set_class_failure_in_array (MonoArrayHandle exl, int i, MonoClass *klass)
{
HANDLE_FUNCTION_ENTER ();
ERROR_DECL (unboxed_error);
mono_error_set_for_class_failure (unboxed_error, klass);
MonoExceptionHandle exc = MONO_HANDLE_NEW (MonoException, mono_error_convert_to_exception (unboxed_error));
MONO_HANDLE_ARRAY_SETREF (exl, i, exc);
HANDLE_FUNCTION_RETURN ();
}
void
ves_icall_System_Reflection_RuntimeAssembly_GetExportedTypes (MonoQCallAssemblyHandle assembly_handle, MonoObjectHandleOnStack res_h,
MonoError *error)
{
MonoArrayHandle exceptions = MONO_HANDLE_NEW(MonoArray, NULL);
MonoAssembly *assembly = assembly_handle.assembly;
int i;
g_assert (!assembly_is_dynamic (assembly));
MonoImage *image = assembly->image;
MonoTableInfo *table = &image->tables [MONO_TABLE_FILE];
MonoArrayHandle res = mono_module_get_types (image, exceptions, TRUE, error);
return_if_nok (error);
/* Append data from all modules in the assembly */
int rows = table_info_get_rows (table);
for (i = 0; i < rows; ++i) {
if (!(mono_metadata_decode_row_col (table, i, MONO_FILE_FLAGS) & FILE_CONTAINS_NO_METADATA)) {
MonoImage *loaded_image = mono_assembly_load_module_checked (image->assembly, i + 1, error);
return_if_nok (error);
if (loaded_image) {
append_module_types (res, exceptions, loaded_image, TRUE, error);
return_if_nok (error);
}
}
}
/* The ReflectionTypeLoadException must have all the types (Types property),
* with NULL replacing the types that failed to load. The LoaderExceptions
* property must contain an exception for each NULL entry.
*/
int len = mono_array_handle_length (res);
int ex_count = 0;
GList *list = NULL;
MonoReflectionTypeHandle t = MONO_HANDLE_NEW (MonoReflectionType, NULL);
for (i = 0; i < len; i++) {
MONO_HANDLE_ARRAY_GETREF (t, res, i);
if (!MONO_HANDLE_IS_NULL (t)) {
MonoClass *klass = mono_type_get_class_internal (MONO_HANDLE_GETVAL (t, type));
if ((klass != NULL) && mono_class_has_failure (klass)) {
/* keep the class in the list */
list = g_list_append (list, klass);
/* and replace Type with NULL */
MONO_HANDLE_ARRAY_SETREF (res, i, NULL_HANDLE);
}
} else {
ex_count ++;
}
}
if (list || ex_count) {
GList *tmp = NULL;
int j, length = g_list_length (list) + ex_count;
MonoArrayHandle exl = mono_array_new_handle (mono_defaults.exception_class, length, error);
if (!is_ok (error)) {
g_list_free (list);
return;
}
/* Types for which mono_class_get_checked () succeeded */
MonoExceptionHandle exc = MONO_HANDLE_NEW (MonoException, NULL);
for (i = 0, tmp = list; tmp; i++, tmp = tmp->next) {
set_class_failure_in_array (exl, i, (MonoClass*)tmp->data);
}
/* Types for which it didn't */
for (j = 0; j < mono_array_handle_length (exceptions); ++j) {
MONO_HANDLE_ARRAY_GETREF (exc, exceptions, j);
if (!MONO_HANDLE_IS_NULL (exc)) {
g_assert (i < length);
MONO_HANDLE_ARRAY_SETREF (exl, i, exc);
i ++;
}
}
g_list_free (list);
list = NULL;
MONO_HANDLE_ASSIGN (exc, mono_get_exception_reflection_type_load_checked (res, exl, error));
return_if_nok (error);
mono_error_set_exception_handle (error, exc);
return;
}
HANDLE_ON_STACK_SET (res_h, MONO_HANDLE_RAW (res));
}
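/*
 * get_top_level_forwarded_type:
 *
 *   If EXPORTEDTYPE row I is a type forwarder, resolves the forwarded type
 * in the referenced assembly and stores it in TYPES at *AINDEX; on failure
 * it stores NULL there, records the exception in EXCEPTIONS and bumps
 * *EXCEPTION_COUNT.
 */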
static void
get_top_level_forwarded_type (MonoImage *image, MonoTableInfo *table, int i, MonoArrayHandle types, MonoArrayHandle exceptions, int *aindex, int *exception_count)
{
ERROR_DECL (local_error);
guint32 cols [MONO_EXP_TYPE_SIZE];
MonoClass *klass;
MonoReflectionTypeHandle rt;
mono_metadata_decode_row (table, i, cols, MONO_EXP_TYPE_SIZE);
if (!(cols [MONO_EXP_TYPE_FLAGS] & TYPE_ATTRIBUTE_FORWARDER))
return;
guint32 impl = cols [MONO_EXP_TYPE_IMPLEMENTATION];
const char *name = mono_metadata_string_heap (image, cols [MONO_EXP_TYPE_NAME]);
const char *nspace = mono_metadata_string_heap (image, cols [MONO_EXP_TYPE_NAMESPACE]);
g_assert ((impl & MONO_IMPLEMENTATION_MASK) == MONO_IMPLEMENTATION_ASSEMBLYREF);
guint32 assembly_idx = impl >> MONO_IMPLEMENTATION_BITS;
mono_assembly_load_reference (image, assembly_idx - 1);
g_assert (image->references [assembly_idx - 1]);
HANDLE_FUNCTION_ENTER ();
if (image->references [assembly_idx - 1] == REFERENCE_MISSING) {
MonoExceptionHandle ex = MONO_HANDLE_NEW (MonoException, mono_get_exception_bad_image_format ("Invalid image"));
MONO_HANDLE_ARRAY_SETREF (types, *aindex, NULL_HANDLE);
MONO_HANDLE_ARRAY_SETREF (exceptions, *aindex, ex);
(*exception_count)++; (*aindex)++;
goto exit;
}
klass = mono_class_from_name_checked (image->references [assembly_idx - 1]->image, nspace, name, local_error);
if (!is_ok (local_error)) {
MonoExceptionHandle ex = mono_error_convert_to_exception_handle (local_error);
MONO_HANDLE_ARRAY_SETREF (types, *aindex, NULL_HANDLE);
MONO_HANDLE_ARRAY_SETREF (exceptions, *aindex, ex);
mono_error_cleanup (local_error);
(*exception_count)++; (*aindex)++;
goto exit;
}
rt = mono_type_get_object_handle (m_class_get_byval_arg (klass), local_error);
if (!is_ok (local_error)) {
MonoExceptionHandle ex = mono_error_convert_to_exception_handle (local_error);
MONO_HANDLE_ARRAY_SETREF (types, *aindex, NULL_HANDLE);
MONO_HANDLE_ARRAY_SETREF (exceptions, *aindex, ex);
mono_error_cleanup (local_error);
(*exception_count)++; (*aindex)++;
goto exit;
}
MONO_HANDLE_ARRAY_SETREF (types, *aindex, rt);
MONO_HANDLE_ARRAY_SETREF (exceptions, *aindex, NULL_HANDLE);
(*aindex)++;
exit:
HANDLE_FUNCTION_RETURN ();
}
void
ves_icall_System_Reflection_RuntimeAssembly_GetTopLevelForwardedTypes (MonoQCallAssemblyHandle assembly_h, MonoObjectHandleOnStack res,
MonoError *error)
{
MonoAssembly *assembly = assembly_h.assembly;
MonoImage *image = assembly->image;
int count = 0;
g_assert (!assembly_is_dynamic (assembly));
MonoTableInfo *table = &image->tables [MONO_TABLE_EXPORTEDTYPE];
int rows = table_info_get_rows (table);
for (int i = 0; i < rows; ++i) {
if (mono_metadata_decode_row_col (table, i, MONO_EXP_TYPE_FLAGS) & TYPE_ATTRIBUTE_FORWARDER)
count ++;
}
MonoArrayHandle types = mono_array_new_handle (mono_defaults.runtimetype_class, count, error);
return_if_nok (error);
MonoArrayHandle exceptions = mono_array_new_handle (mono_defaults.exception_class, count, error);
return_if_nok (error);
int aindex = 0;
int exception_count = 0;
for (int i = 0; i < rows; ++i)
get_top_level_forwarded_type (image, table, i, types, exceptions, &aindex, &exception_count);
if (exception_count > 0) {
MonoExceptionHandle exc = MONO_HANDLE_NEW (MonoException, NULL);
MONO_HANDLE_ASSIGN (exc, mono_get_exception_reflection_type_load_checked (types, exceptions, error));
return_if_nok (error);
mono_error_set_exception_handle (error, exc);
return;
}
HANDLE_ON_STACK_SET (res, MONO_HANDLE_RAW (types));
}
void
ves_icall_Mono_RuntimeMarshal_FreeAssemblyName (MonoAssemblyName *aname, MonoBoolean free_struct)
{
mono_assembly_name_free_internal (aname);
if (free_struct)
g_free (aname);
}
void
ves_icall_AssemblyExtensions_ApplyUpdate (MonoAssembly *assm,
gconstpointer dmeta_bytes, int32_t dmeta_len,
gconstpointer dil_bytes, int32_t dil_len,
gconstpointer dpdb_bytes, int32_t dpdb_len)
{
ERROR_DECL (error);
g_assert (assm);
g_assert (dmeta_len >= 0);
MonoImage *image_base = assm->image;
g_assert (image_base);
#ifndef HOST_WASM
if (mono_is_debugger_attached ()) {
mono_error_set_not_supported (error, "Cannot use System.Reflection.Metadata.MetadataUpdater.ApplyChanges while debugger is attached");
mono_error_set_pending_exception (error);
return;
}
#endif
mono_image_load_enc_delta (MONO_ENC_DELTA_API, image_base, dmeta_bytes, dmeta_len, dil_bytes, dil_len, dpdb_bytes, dpdb_len, error);
mono_error_set_pending_exception (error);
}
gint32 ves_icall_AssemblyExtensions_ApplyUpdateEnabled (gint32 just_component_check)
{
// if just_component_check is true, we only care whether the hot_reload component is enabled,
// not whether the environment is appropriately set up to apply updates.
return mono_metadata_update_available () && (just_component_check || mono_metadata_update_enabled (NULL));
}
MonoReflectionTypeHandle
ves_icall_System_Reflection_RuntimeModule_GetGlobalType (MonoImage *image, MonoError *error)
{
MonoClass *klass;
g_assert (image);
MonoReflectionTypeHandle ret = MONO_HANDLE_CAST (MonoReflectionType, NULL_HANDLE);
if (image_is_dynamic (image) && ((MonoDynamicImage*)image)->initial_image)
/* These images do not have a global type */
goto leave;
klass = mono_class_get_checked (image, 1 | MONO_TOKEN_TYPE_DEF, error);
goto_if_nok (error, leave);
ret = mono_type_get_object_handle (m_class_get_byval_arg (klass), error);
leave:
return ret;
}
void
ves_icall_System_Reflection_RuntimeModule_GetGuidInternal (MonoImage *image, MonoArrayHandle guid_h, MonoError *error)
{
g_assert (mono_array_handle_length (guid_h) == 16);
if (!image->metadata_only) {
g_assert (image->heap_guid.data);
g_assert (image->heap_guid.size >= 16);
MONO_ENTER_NO_SAFEPOINTS;
guint8 *data = (guint8*) mono_array_addr_with_size_internal (MONO_HANDLE_RAW (guid_h), 1, 0);
memcpy (data, (guint8*)image->heap_guid.data, 16);
MONO_EXIT_NO_SAFEPOINTS;
} else {
MONO_ENTER_NO_SAFEPOINTS;
guint8 *data = (guint8*) mono_array_addr_with_size_internal (MONO_HANDLE_RAW (guid_h), 1, 0);
memset (data, 0, 16);
MONO_EXIT_NO_SAFEPOINTS;
}
}
void
ves_icall_System_Reflection_RuntimeModule_GetPEKind (MonoImage *image, gint32 *pe_kind, gint32 *machine, MonoError *error)
{
if (image_is_dynamic (image)) {
MonoDynamicImage *dyn = (MonoDynamicImage*)image;
*pe_kind = dyn->pe_kind;
*machine = dyn->machine;
}
else {
*pe_kind = (image->image_info->cli_cli_header.ch_flags & 0x3);
*machine = image->image_info->cli_header.coff.coff_machine;
}
}
gint32
ves_icall_System_Reflection_RuntimeModule_GetMDStreamVersion (MonoImage *image, MonoError *error)
{
return (image->md_version_major << 16) | (image->md_version_minor);
}
MonoArrayHandle
ves_icall_System_Reflection_RuntimeModule_InternalGetTypes (MonoImage *image, MonoError *error)
{
if (!image) {
MonoArrayHandle arr = mono_array_new_handle (mono_defaults.runtimetype_class, 0, error);
return arr;
} else {
MonoArrayHandle exceptions = MONO_HANDLE_NEW (MonoArray, NULL);
MonoArrayHandle res = mono_module_get_types (image, exceptions, FALSE, error);
return_val_if_nok (error, MONO_HANDLE_CAST(MonoArray, NULL_HANDLE));
int n = mono_array_handle_length (exceptions);
MonoExceptionHandle ex = MONO_HANDLE_NEW (MonoException, NULL);
for (int i = 0; i < n; ++i) {
MONO_HANDLE_ARRAY_GETREF(ex, exceptions, i);
if (!MONO_HANDLE_IS_NULL (ex)) {
mono_error_set_exception_handle (error, ex);
return MONO_HANDLE_CAST(MonoArray, NULL_HANDLE);
}
}
return res;
}
}
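/*
 * mono_memberref_is_method:
 *
 *   Returns TRUE if the MEMBERREF token refers to a method. For non-dynamic
 * images this decodes the signature blob: field signatures start with the
 * FIELD calling-convention byte (0x6, ECMA-335 II.23.2.4), so any other
 * leading byte denotes a method.
 */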
static gboolean
mono_memberref_is_method (MonoImage *image, guint32 token)
{
if (!image_is_dynamic (image)) {
int idx = mono_metadata_token_index (token);
if (idx <= 0 || mono_metadata_table_bounds_check (image, MONO_TABLE_MEMBERREF, idx)) {
return FALSE;
}
guint32 cols [MONO_MEMBERREF_SIZE];
const MonoTableInfo *table = &image->tables [MONO_TABLE_MEMBERREF];
mono_metadata_decode_row (table, idx - 1, cols, MONO_MEMBERREF_SIZE);
const char *sig = mono_metadata_blob_heap (image, cols [MONO_MEMBERREF_SIGNATURE]);
mono_metadata_decode_blob_size (sig, &sig);
return (*sig != 0x6);
} else {
ERROR_DECL (error);
MonoClass *handle_class;
if (!mono_lookup_dynamic_token_class (image, token, FALSE, &handle_class, NULL, error)) {
mono_error_cleanup (error); /* just probing, ignore error */
return FALSE;
}
return mono_defaults.methodhandle_class == handle_class;
}
}
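/*
 * get_generic_inst_from_array_handle:
 *
 *   Builds a MonoGenericInst on the stack from a managed array of MonoType*
 * and returns the canonical interned copy, so the alloca'd memory never
 * escapes this function.
 */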
static MonoGenericInst *
get_generic_inst_from_array_handle (MonoArrayHandle type_args)
{
int type_argc = mono_array_handle_length (type_args);
int size = MONO_SIZEOF_GENERIC_INST + type_argc * sizeof (MonoType *);
MonoGenericInst *ginst = (MonoGenericInst *)g_alloca (size);
memset (ginst, 0, MONO_SIZEOF_GENERIC_INST);
ginst->type_argc = type_argc;
for (int i = 0; i < type_argc; i++) {
MONO_HANDLE_ARRAY_GETVAL (ginst->type_argv[i], type_args, MonoType*, i);
}
ginst->is_open = FALSE;
for (int i = 0; i < type_argc; i++) {
if (mono_class_is_open_constructed_type (ginst->type_argv[i])) {
ginst->is_open = TRUE;
break;
}
}
return mono_metadata_get_canonical_generic_inst (ginst);
}
static void
init_generic_context_from_args_handles (MonoGenericContext *context, MonoArrayHandle type_args, MonoArrayHandle method_args)
{
if (!MONO_HANDLE_IS_NULL (type_args)) {
context->class_inst = get_generic_inst_from_array_handle (type_args);
} else {
context->class_inst = NULL;
}
if (!MONO_HANDLE_IS_NULL (method_args)) {
context->method_inst = get_generic_inst_from_array_handle (method_args);
} else {
context->method_inst = NULL;
}
}
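/*
 * module_resolve_type_token:
 *
 *   Resolves a TYPEDEF/TYPEREF/TYPESPEC token in IMAGE, inflating the result
 * with the generic context built from TYPE_ARGS and METHOD_ARGS. On failure,
 * sets *RESOLVE_ERROR and returns NULL.
 */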
static MonoType*
module_resolve_type_token (MonoImage *image, guint32 token, MonoArrayHandle type_args, MonoArrayHandle method_args, MonoResolveTokenError *resolve_error, MonoError *error)
{
HANDLE_FUNCTION_ENTER ();
MonoType *result = NULL;
MonoClass *klass;
int table = mono_metadata_token_table (token);
int index = mono_metadata_token_index (token);
MonoGenericContext context;
*resolve_error = ResolveTokenError_Other;
/* Validate token */
if ((table != MONO_TABLE_TYPEDEF) && (table != MONO_TABLE_TYPEREF) &&
(table != MONO_TABLE_TYPESPEC)) {
*resolve_error = ResolveTokenError_BadTable;
goto leave;
}
if (image_is_dynamic (image)) {
if ((table == MONO_TABLE_TYPEDEF) || (table == MONO_TABLE_TYPEREF)) {
ERROR_DECL (inner_error);
klass = (MonoClass *)mono_lookup_dynamic_token_class (image, token, FALSE, NULL, NULL, inner_error);
mono_error_cleanup (inner_error);
result = klass ? m_class_get_byval_arg (klass) : NULL;
goto leave;
}
init_generic_context_from_args_handles (&context, type_args, method_args);
ERROR_DECL (inner_error);
klass = (MonoClass *)mono_lookup_dynamic_token_class (image, token, FALSE, NULL, &context, inner_error);
mono_error_cleanup (inner_error);
result = klass ? m_class_get_byval_arg (klass) : NULL;
goto leave;
}
if ((index <= 0) || mono_metadata_table_bounds_check (image, table, index)) {
*resolve_error = ResolveTokenError_OutOfRange;
goto leave;
}
init_generic_context_from_args_handles (&context, type_args, method_args);
klass = mono_class_get_checked (image, token, error);
if (klass)
klass = mono_class_inflate_generic_class_checked (klass, &context, error);
goto_if_nok (error, leave);
if (klass)
result = m_class_get_byval_arg (klass);
leave:
HANDLE_FUNCTION_RETURN_VAL (result);
}
MonoType*
ves_icall_System_Reflection_RuntimeModule_ResolveTypeToken (MonoImage *image, guint32 token, MonoArrayHandle type_args, MonoArrayHandle method_args, MonoResolveTokenError *resolve_error, MonoError *error)
{
return module_resolve_type_token (image, token, type_args, method_args, resolve_error, error);
}
static MonoMethod*
module_resolve_method_token (MonoImage *image, guint32 token, MonoArrayHandle type_args, MonoArrayHandle method_args, MonoResolveTokenError *resolve_error, MonoError *error)
{
HANDLE_FUNCTION_ENTER ();
MonoMethod *method = NULL;
int table = mono_metadata_token_table (token);
int index = mono_metadata_token_index (token);
MonoGenericContext context;
*resolve_error = ResolveTokenError_Other;
/* Validate token */
if ((table != MONO_TABLE_METHOD) && (table != MONO_TABLE_METHODSPEC) &&
(table != MONO_TABLE_MEMBERREF)) {
*resolve_error = ResolveTokenError_BadTable;
goto leave;
}
if (image_is_dynamic (image)) {
if (table == MONO_TABLE_METHOD) {
ERROR_DECL (inner_error);
method = (MonoMethod *)mono_lookup_dynamic_token_class (image, token, FALSE, NULL, NULL, inner_error);
mono_error_cleanup (inner_error);
goto leave;
}
if ((table == MONO_TABLE_MEMBERREF) && !(mono_memberref_is_method (image, token))) {
*resolve_error = ResolveTokenError_BadTable;
goto leave;
}
init_generic_context_from_args_handles (&context, type_args, method_args);
ERROR_DECL (inner_error);
method = (MonoMethod *)mono_lookup_dynamic_token_class (image, token, FALSE, NULL, &context, inner_error);
mono_error_cleanup (inner_error);
goto leave;
}
if ((index <= 0) || mono_metadata_table_bounds_check (image, table, index)) {
*resolve_error = ResolveTokenError_OutOfRange;
goto leave;
}
if ((table == MONO_TABLE_MEMBERREF) && (!mono_memberref_is_method (image, token))) {
*resolve_error = ResolveTokenError_BadTable;
goto leave;
}
init_generic_context_from_args_handles (&context, type_args, method_args);
method = mono_get_method_checked (image, token, NULL, &context, error);
leave:
HANDLE_FUNCTION_RETURN_VAL (method);
}
MonoMethod*
ves_icall_System_Reflection_RuntimeModule_ResolveMethodToken (MonoImage *image, guint32 token, MonoArrayHandle type_args, MonoArrayHandle method_args, MonoResolveTokenError *resolve_error, MonoError *error)
{
return module_resolve_method_token (image, token, type_args, method_args, resolve_error, error);
}
MonoStringHandle
ves_icall_System_Reflection_RuntimeModule_ResolveStringToken (MonoImage *image, guint32 token, MonoResolveTokenError *resolve_error, MonoError *error)
{
int index = mono_metadata_token_index (token);
*resolve_error = ResolveTokenError_Other;
/* Validate token */
if (mono_metadata_token_code (token) != MONO_TOKEN_STRING) {
*resolve_error = ResolveTokenError_BadTable;
return NULL_HANDLE_STRING;
}
if (image_is_dynamic (image)) {
ERROR_DECL (ignore_inner_error);
// FIXME ignoring error
// FIXME Push MONO_HANDLE_NEW to lower layers.
MonoStringHandle result = MONO_HANDLE_NEW (MonoString, (MonoString*)mono_lookup_dynamic_token_class (image, token, FALSE, NULL, NULL, ignore_inner_error));
mono_error_cleanup (ignore_inner_error);
return result;
}
if ((index <= 0) || (index >= image->heap_us.size)) {
*resolve_error = ResolveTokenError_OutOfRange;
return NULL_HANDLE_STRING;
}
/* FIXME: What to do if the index points into the middle of a string? */
return mono_ldstr_handle (image, index, error);
}
static MonoClassField*
module_resolve_field_token (MonoImage *image, guint32 token, MonoArrayHandle type_args, MonoArrayHandle method_args, MonoResolveTokenError *resolve_error, MonoError *error)
{
HANDLE_FUNCTION_ENTER ();
MonoClass *klass;
int table = mono_metadata_token_table (token);
int index = mono_metadata_token_index (token);
MonoGenericContext context;
MonoClassField *field = NULL;
*resolve_error = ResolveTokenError_Other;
/* Validate token */
if ((table != MONO_TABLE_FIELD) && (table != MONO_TABLE_MEMBERREF)) {
*resolve_error = ResolveTokenError_BadTable;
goto leave;
}
if (image_is_dynamic (image)) {
if (table == MONO_TABLE_FIELD) {
ERROR_DECL (inner_error);
field = (MonoClassField *)mono_lookup_dynamic_token_class (image, token, FALSE, NULL, NULL, inner_error);
mono_error_cleanup (inner_error);
goto leave;
}
if (mono_memberref_is_method (image, token)) {
*resolve_error = ResolveTokenError_BadTable;
goto leave;
}
init_generic_context_from_args_handles (&context, type_args, method_args);
ERROR_DECL (inner_error);
field = (MonoClassField *)mono_lookup_dynamic_token_class (image, token, FALSE, NULL, &context, inner_error);
mono_error_cleanup (inner_error);
goto leave;
}
if ((index <= 0) || mono_metadata_table_bounds_check (image, table, index)) {
*resolve_error = ResolveTokenError_OutOfRange;
goto leave;
}
if ((table == MONO_TABLE_MEMBERREF) && (mono_memberref_is_method (image, token))) {
*resolve_error = ResolveTokenError_BadTable;
goto leave;
}
init_generic_context_from_args_handles (&context, type_args, method_args);
field = mono_field_from_token_checked (image, token, &klass, &context, error);
leave:
HANDLE_FUNCTION_RETURN_VAL (field);
}
MonoClassField*
ves_icall_System_Reflection_RuntimeModule_ResolveFieldToken (MonoImage *image, guint32 token, MonoArrayHandle type_args, MonoArrayHandle method_args, MonoResolveTokenError *resolve_error, MonoError *error)
{
return module_resolve_field_token (image, token, type_args, method_args, resolve_error, error);
}
MonoObjectHandle
ves_icall_System_Reflection_RuntimeModule_ResolveMemberToken (MonoImage *image, guint32 token, MonoArrayHandle type_args, MonoArrayHandle method_args, MonoResolveTokenError *error, MonoError *merror)
{
int table = mono_metadata_token_table (token);
*error = ResolveTokenError_Other;
switch (table) {
case MONO_TABLE_TYPEDEF:
case MONO_TABLE_TYPEREF:
case MONO_TABLE_TYPESPEC: {
MonoType *t = module_resolve_type_token (image, token, type_args, method_args, error, merror);
if (t) {
return MONO_HANDLE_CAST (MonoObject, mono_type_get_object_handle (t, merror));
}
else
return NULL_HANDLE;
}
case MONO_TABLE_METHOD:
case MONO_TABLE_METHODSPEC: {
MonoMethod *m = module_resolve_method_token (image, token, type_args, method_args, error, merror);
if (m) {
return MONO_HANDLE_CAST (MonoObject, mono_method_get_object_handle (m, m->klass, merror));
} else
return NULL_HANDLE;
}
case MONO_TABLE_FIELD: {
MonoClassField *f = module_resolve_field_token (image, token, type_args, method_args, error, merror);
if (f) {
return MONO_HANDLE_CAST (MonoObject, mono_field_get_object_handle (m_field_get_parent (f), f, merror));
}
else
return NULL_HANDLE;
}
case MONO_TABLE_MEMBERREF:
if (mono_memberref_is_method (image, token)) {
MonoMethod *m = module_resolve_method_token (image, token, type_args, method_args, error, merror);
if (m) {
return MONO_HANDLE_CAST (MonoObject, mono_method_get_object_handle (m, m->klass, merror));
} else
return NULL_HANDLE;
}
else {
MonoClassField *f = module_resolve_field_token (image, token, type_args, method_args, error, merror);
if (f) {
return MONO_HANDLE_CAST (MonoObject, mono_field_get_object_handle (m_field_get_parent (f), f, merror));
}
else
return NULL_HANDLE;
}
break;
default:
*error = ResolveTokenError_BadTable;
}
return NULL_HANDLE;
}
MonoArrayHandle
ves_icall_System_Reflection_RuntimeModule_ResolveSignature (MonoImage *image, guint32 token, MonoResolveTokenError *resolve_error, MonoError *error)
{
int table = mono_metadata_token_table (token);
int idx = mono_metadata_token_index (token);
MonoTableInfo *tables = image->tables;
guint32 sig, len;
const char *ptr;
*resolve_error = ResolveTokenError_OutOfRange;
/* FIXME: Support other tables ? */
if (table != MONO_TABLE_STANDALONESIG)
return NULL_HANDLE_ARRAY;
if (image_is_dynamic (image))
return NULL_HANDLE_ARRAY;
if ((idx == 0) || mono_metadata_table_bounds_check (image, MONO_TABLE_STANDALONESIG, idx))
return NULL_HANDLE_ARRAY;
sig = mono_metadata_decode_row_col (&tables [MONO_TABLE_STANDALONESIG], idx - 1, 0);
ptr = mono_metadata_blob_heap (image, sig);
len = mono_metadata_decode_blob_size (ptr, &ptr);
MonoArrayHandle res = mono_array_new_handle (mono_defaults.byte_class, len, error);
return_val_if_nok (error, NULL_HANDLE_ARRAY);
// FIXME MONO_ENTER_NO_SAFEPOINTS instead of pin/gchandle.
MonoGCHandle h;
gpointer array_base = MONO_ARRAY_HANDLE_PIN (res, guint8, 0, &h);
memcpy (array_base, ptr, len);
mono_gchandle_free_internal (h);
return res;
}
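/*
 * Worked example: token 0x11000001 decodes to table 0x11
 * (MONO_TABLE_STANDALONESIG) and index 1, so row 0 of the standalone-signature
 * table is read and its signature blob is copied into the returned byte array.
 */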
static void
check_for_invalid_array_type (MonoType *type, MonoError *error)
{
gboolean allowed = TRUE;
char *name;
if (m_type_is_byref (type))
allowed = FALSE;
else if (type->type == MONO_TYPE_TYPEDBYREF)
allowed = FALSE;
MonoClass *klass = mono_class_from_mono_type_internal (type);
if (m_class_is_byreflike (klass))
allowed = FALSE;
if (allowed)
return;
name = mono_type_get_full_name (klass);
mono_error_set_type_load_name (error, name, g_strdup (""), "");
}
static void
check_for_invalid_byref_or_pointer_type (MonoClass *klass, MonoError *error)
{
return;
}
void
ves_icall_RuntimeType_make_array_type (MonoQCallTypeHandle type_handle, int rank, MonoObjectHandleOnStack res, MonoError *error)
{
MonoType *type = type_handle.type;
check_for_invalid_array_type (type, error);
return_if_nok (error);
MonoClass *klass = mono_class_from_mono_type_internal (type);
MonoClass *aklass;
	if (rank == 0) // single-dimension array
aklass = mono_class_create_array (klass, 1);
else
aklass = mono_class_create_bounded_array (klass, rank, TRUE);
if (mono_class_has_failure (aklass)) {
mono_error_set_for_class_failure (error, aklass);
return;
}
HANDLE_ON_STACK_SET (res, mono_type_get_object_checked (m_class_get_byval_arg (aklass), error));
}
void
ves_icall_RuntimeType_make_byref_type (MonoQCallTypeHandle type_handle, MonoObjectHandleOnStack res, MonoError *error)
{
MonoType *type = type_handle.type;
MonoClass *klass = mono_class_from_mono_type_internal (type);
mono_class_init_checked (klass, error);
return_if_nok (error);
check_for_invalid_byref_or_pointer_type (klass, error);
return_if_nok (error);
HANDLE_ON_STACK_SET (res, mono_type_get_object_checked (m_class_get_this_arg (klass), error));
}
void
ves_icall_RuntimeType_make_pointer_type (MonoQCallTypeHandle type_handle, MonoObjectHandleOnStack res, MonoError *error)
{
MonoType *type = type_handle.type;
MonoClass *klass = mono_class_from_mono_type_internal (type);
mono_class_init_checked (klass, error);
return_if_nok (error);
check_for_invalid_byref_or_pointer_type (klass, error);
return_if_nok (error);
MonoClass *pklass = mono_class_create_ptr (type);
HANDLE_ON_STACK_SET (res, mono_type_get_object_checked (m_class_get_byval_arg (pklass), error));
}
MonoObjectHandle
ves_icall_System_Delegate_CreateDelegate_internal (MonoQCallTypeHandle type_handle, MonoObjectHandle target,
MonoReflectionMethodHandle info, MonoBoolean throwOnBindFailure, MonoError *error)
{
MonoType *type = type_handle.type;
MonoClass *delegate_class = mono_class_from_mono_type_internal (type);
MonoMethod *method = MONO_HANDLE_GETVAL (info, method);
MonoMethodSignature *sig = mono_method_signature_internal (method);
mono_class_init_checked (delegate_class, error);
return_val_if_nok (error, NULL_HANDLE);
if (!(m_class_get_parent (delegate_class) == mono_defaults.multicastdelegate_class)) {
/* FIXME improve this exception message */
mono_error_set_execution_engine (error, "file %s: line %d (%s): assertion failed: (%s)", __FILE__, __LINE__,
__func__,
"delegate_class->parent == mono_defaults.multicastdelegate_class");
return NULL_HANDLE;
}
if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
if (!method->is_inflated) {
mono_error_set_argument (error, "method", " Cannot bind to the target method because its signature differs from that of the delegate type");
return NULL_HANDLE;
}
}
MonoObjectHandle delegate = mono_object_new_handle (delegate_class, error);
return_val_if_nok (error, NULL_HANDLE);
if (!method_is_dynamic (method) && (!MONO_HANDLE_IS_NULL (target) && method->flags & METHOD_ATTRIBUTE_VIRTUAL && method->klass != mono_handle_class (target))) {
method = mono_object_handle_get_virtual_method (target, method, error);
return_val_if_nok (error, NULL_HANDLE);
}
mono_delegate_ctor (delegate, target, NULL, method, error);
return_val_if_nok (error, NULL_HANDLE);
return delegate;
}
MonoMulticastDelegateHandle
ves_icall_System_Delegate_AllocDelegateLike_internal (MonoDelegateHandle delegate, MonoError *error)
{
MonoClass *klass = mono_handle_class (delegate);
g_assert (mono_class_has_parent (klass, mono_defaults.multicastdelegate_class));
MonoMulticastDelegateHandle ret = MONO_HANDLE_CAST (MonoMulticastDelegate, mono_object_new_handle (klass, error));
return_val_if_nok (error, MONO_HANDLE_CAST (MonoMulticastDelegate, NULL_HANDLE));
mono_get_runtime_callbacks ()->init_delegate (MONO_HANDLE_CAST (MonoDelegate, ret), NULL_HANDLE, NULL, NULL, error);
return ret;
}
MonoReflectionMethodHandle
ves_icall_System_Delegate_GetVirtualMethod_internal (MonoDelegateHandle delegate, MonoError *error)
{
MonoObjectHandle delegate_target = MONO_HANDLE_NEW_GET (MonoObject, delegate, target);
MonoMethod *m = mono_object_handle_get_virtual_method (delegate_target, MONO_HANDLE_GETVAL (delegate, method), error);
return_val_if_nok (error, MONO_HANDLE_CAST (MonoReflectionMethod, NULL_HANDLE));
return mono_method_get_object_handle (m, m->klass, error);
}
/* System.Buffer */
static gint32
mono_array_get_byte_length (MonoArrayHandle array)
{
int length;
MonoClass * const klass = mono_handle_class (array);
// This resembles mono_array_get_length, but adds the loop.
if (mono_handle_array_has_bounds (array)) {
length = 1;
const int klass_rank = m_class_get_rank (klass);
for (int i = 0; i < klass_rank; ++ i)
length *= MONO_HANDLE_GETVAL (array, bounds [i].length);
} else {
length = mono_array_handle_length (array);
}
switch (m_class_get_byval_arg (m_class_get_element_class (klass))->type) {
case MONO_TYPE_I1:
case MONO_TYPE_U1:
case MONO_TYPE_BOOLEAN:
return length;
case MONO_TYPE_I2:
case MONO_TYPE_U2:
case MONO_TYPE_CHAR:
return length << 1;
case MONO_TYPE_I4:
case MONO_TYPE_U4:
case MONO_TYPE_R4:
return length << 2;
case MONO_TYPE_I:
case MONO_TYPE_U:
return length * sizeof (gpointer);
case MONO_TYPE_I8:
case MONO_TYPE_U8:
case MONO_TYPE_R8:
return length << 3;
default:
return -1;
}
}
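/*
 * Worked example: a one-dimensional int[3] reports 3 << 2 = 12 bytes, while a
 * rectangular double[2,4] multiplies the bounds to 8 elements and reports
 * 8 << 3 = 64 bytes; element types the switch does not cover yield -1.
 */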
/* System.Environment */
MonoArrayHandle
ves_icall_System_Environment_GetCommandLineArgs (MonoError *error)
{
MonoArrayHandle result = mono_runtime_get_main_args_handle (error);
return result;
}
void
ves_icall_System_Environment_Exit (int result)
{
mono_environment_exitcode_set (result);
if (!mono_runtime_try_shutdown ())
mono_thread_exit ();
mono_runtime_quit_internal ();
/* we may need to do some cleanup here... */
exit (result);
}
void
ves_icall_System_Environment_FailFast (MonoStringHandle message, MonoExceptionHandle exception, MonoStringHandle errorSource, MonoError *error)
{
if (MONO_HANDLE_IS_NULL (message)) {
g_warning ("Process terminated.");
} else {
char *msg = mono_string_handle_to_utf8 (message, error);
g_warning ("Process terminated due to \"%s\"", msg);
g_free (msg);
}
if (!MONO_HANDLE_IS_NULL (exception)) {
mono_print_unhandled_exception_internal ((MonoObject *) MONO_HANDLE_RAW (exception));
}
// NOTE: While this does trigger WER on Windows it doesn't quite provide all the
// information in the error dump that CoreCLR would. On Windows 7+ we should call
// RaiseFailFastException directly instead of relying on the C runtime doing it
// for us and pass it as much information as possible. On Windows 8+ we can also
// use the __fastfail intrinsic.
abort ();
}
gint32
ves_icall_System_Environment_get_TickCount (void)
{
/* this will overflow after ~24 days */
return (gint32) (mono_msec_boottime () & 0xffffffff);
}
gint64
ves_icall_System_Environment_get_TickCount64 (void)
{
return mono_msec_boottime ();
}
gpointer
ves_icall_RuntimeMethodHandle_GetFunctionPointer (MonoMethod *method, MonoError *error)
{
/* WISH: we should do this in managed */
if (G_UNLIKELY (mono_method_has_unmanaged_callers_only_attribute (method))) {
method = mono_marshal_get_managed_wrapper (method, NULL, (MonoGCHandle)0, error);
return_val_if_nok (error, NULL);
}
return mono_get_runtime_callbacks ()->get_ftnptr (method, error);
}
MonoBoolean
ves_icall_System_Diagnostics_Debugger_IsAttached_internal (void)
{
return mono_is_debugger_attached ();
}
MonoBoolean
ves_icall_System_Diagnostics_Debugger_IsLogging (void)
{
return mono_get_runtime_callbacks ()->debug_log_is_enabled
&& mono_get_runtime_callbacks ()->debug_log_is_enabled ();
}
void
ves_icall_System_Diagnostics_Debugger_Log (int level, MonoString *volatile* category, MonoString *volatile* message)
{
if (mono_get_runtime_callbacks ()->debug_log)
mono_get_runtime_callbacks ()->debug_log (level, *category, *message);
}
/* Only used for value types */
MonoObjectHandle
ves_icall_System_RuntimeType_CreateInstanceInternal (MonoQCallTypeHandle type_handle, MonoError *error)
{
MonoType *type = type_handle.type;
MonoClass *klass = mono_class_from_mono_type_internal (type);
(void)klass;
mono_class_init_checked (klass, error);
return_val_if_nok (error, NULL_HANDLE);
if (mono_class_is_nullable (klass))
/* No arguments -> null */
return NULL_HANDLE;
return mono_object_new_handle (klass, error);
}
MonoReflectionMethodHandle
ves_icall_RuntimeMethodInfo_get_base_method (MonoReflectionMethodHandle m, MonoBoolean definition, MonoError *error)
{
MonoMethod *method = MONO_HANDLE_GETVAL (m, method);
MonoMethod *base = mono_method_get_base_method (method, definition, error);
return_val_if_nok (error, MONO_HANDLE_CAST (MonoReflectionMethod, NULL_HANDLE));
if (base == method) {
/* we want to short-circuit and return 'm' here. But we should
return the same method object that
	   mono_method_get_object_handle below would return. Since
that call takes NULL for the reftype argument, it will take
base->klass as the reflected type for the MonoMethod. So we
need to check that m also has base->klass as the reflected
type. */
MonoReflectionTypeHandle orig_reftype = MONO_HANDLE_NEW_GET (MonoReflectionType, m, reftype);
MonoClass *orig_klass = mono_class_from_mono_type_internal (MONO_HANDLE_GETVAL (orig_reftype, type));
if (base->klass == orig_klass)
return m;
}
return mono_method_get_object_handle (base, NULL, error);
}
MonoStringHandle
ves_icall_RuntimeMethodInfo_get_name (MonoReflectionMethodHandle m, MonoError *error)
{
MonoMethod *method = MONO_HANDLE_GETVAL (m, method);
MonoStringHandle s = mono_string_new_handle (method->name, error);
return_val_if_nok (error, NULL_HANDLE_STRING);
MONO_HANDLE_SET (m, name, s);
return s;
}
void
ves_icall_System_ArgIterator_Setup (MonoArgIterator *iter, char* argsp, char* start)
{
iter->sig = *(MonoMethodSignature**)argsp;
g_assert (iter->sig->sentinelpos <= iter->sig->param_count);
g_assert (iter->sig->call_convention == MONO_CALL_VARARG);
iter->next_arg = 0;
/* FIXME: it's not documented what start is exactly... */
if (start) {
iter->args = start;
} else {
iter->args = argsp + sizeof (gpointer);
}
iter->num_args = iter->sig->param_count - iter->sig->sentinelpos;
/* g_print ("sig %p, param_count: %d, sent: %d\n", iter->sig, iter->sig->param_count, iter->sig->sentinelpos); */
}
void
ves_icall_System_ArgIterator_IntGetNextArg (MonoArgIterator *iter, MonoTypedRef *res)
{
guint32 i, arg_size;
gint32 align;
i = iter->sig->sentinelpos + iter->next_arg;
g_assert (i < iter->sig->param_count);
res->type = iter->sig->params [i];
res->klass = mono_class_from_mono_type_internal (res->type);
arg_size = mono_type_stack_size (res->type, &align);
#if defined(__arm__) || defined(__mips__)
iter->args = (guint8*)(((gsize)iter->args + (align) - 1) & ~(align - 1));
#endif
res->value = iter->args;
#if G_BYTE_ORDER != G_LITTLE_ENDIAN
if (arg_size <= sizeof (gpointer)) {
int dummy;
int padding = arg_size - mono_type_size (res->type, &dummy);
res->value = (guint8*)res->value + padding;
}
#endif
iter->args = (char*)iter->args + arg_size;
iter->next_arg++;
/* g_print ("returning arg %d, type 0x%02x of size %d at %p\n", i, res->type->type, arg_size, res->value); */
}
void
ves_icall_System_ArgIterator_IntGetNextArgWithType (MonoArgIterator *iter, MonoTypedRef *res, MonoType *type)
{
guint32 i, arg_size;
gint32 align;
i = iter->sig->sentinelpos + iter->next_arg;
g_assert (i < iter->sig->param_count);
	while (i < iter->sig->param_count) {
		if (!mono_metadata_type_equal (type, iter->sig->params [i])) {
			/* Skip the non-matching vararg: consume its stack slot and advance
			 * the indices, otherwise this loop would never terminate. */
			arg_size = mono_type_stack_size (iter->sig->params [i], &align);
			iter->args = (char*)iter->args + arg_size;
			iter->next_arg++;
			i++;
			continue;
		}
res->type = iter->sig->params [i];
res->klass = mono_class_from_mono_type_internal (res->type);
		/* FIXME: endianness issue... */
arg_size = mono_type_stack_size (res->type, &align);
#if defined(__arm__) || defined(__mips__)
iter->args = (guint8*)(((gsize)iter->args + (align) - 1) & ~(align - 1));
#endif
res->value = iter->args;
iter->args = (char*)iter->args + arg_size;
iter->next_arg++;
/* g_print ("returning arg %d, type 0x%02x of size %d at %p\n", i, res.type->type, arg_size, res.value); */
return;
}
/* g_print ("arg type 0x%02x not found\n", res.type->type); */
memset (res, 0, sizeof (MonoTypedRef));
}
MonoType*
ves_icall_System_ArgIterator_IntGetNextArgType (MonoArgIterator *iter)
{
gint i;
i = iter->sig->sentinelpos + iter->next_arg;
g_assert (i < iter->sig->param_count);
return iter->sig->params [i];
}
MonoObjectHandle
ves_icall_System_TypedReference_ToObject (MonoTypedRef* tref, MonoError *error)
{
return typed_reference_to_object (tref, error);
}
void
ves_icall_System_TypedReference_InternalMakeTypedReference (MonoTypedRef *res, MonoObjectHandle target, MonoArrayHandle fields, MonoReflectionTypeHandle last_field, MonoError *error)
{
MonoType *ftype = NULL;
int i;
memset (res, 0, sizeof (MonoTypedRef));
g_assert (mono_array_handle_length (fields) > 0);
(void)mono_handle_class (target);
int offset = 0;
for (i = 0; i < mono_array_handle_length (fields); ++i) {
MonoClassField *f;
MONO_HANDLE_ARRAY_GETVAL (f, fields, MonoClassField*, i);
g_assert (f);
if (i == 0)
offset = f->offset;
else
offset += f->offset - sizeof (MonoObject);
(void)mono_class_from_mono_type_internal (f->type);
ftype = f->type;
}
res->type = ftype;
res->klass = mono_class_from_mono_type_internal (ftype);
res->value = (guint8*)MONO_HANDLE_RAW (target) + offset;
}
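/*
 * Worked example (hypothetical offsets): with fields = { a, b }, where a is a
 * struct-typed field at offset 8 in target's class and b sits at offset 16
 * within a's boxed layout, the loop computes 8 + (16 - sizeof (MonoObject)),
 * so res->value points directly at b embedded inside a inside target.
 */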
void
ves_icall_System_Runtime_InteropServices_Marshal_Prelink (MonoReflectionMethodHandle method_h, MonoError *error)
{
MonoMethod *method = MONO_HANDLE_GETVAL (method_h, method);
if (!(method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL))
return;
mono_lookup_pinvoke_call_internal (method, error);
/* create the wrapper, too? */
}
int
ves_icall_Interop_Sys_DoubleToString(double value, char *format, char *buffer, int bufferLength)
{
#if defined(TARGET_ARM)
/* workaround for faulty vcmp.f64 implementation on some 32bit ARM CPUs */
guint64 bits = *(guint64 *) &value;
if (bits == 0x1) { /* 4.9406564584124654E-324 */
g_assert (!strcmp (format, "%.40e"));
return snprintf (buffer, bufferLength, "%s", "4.9406564584124654417656879286822137236506e-324");
} else if (bits == 0x4) { /* 2E-323 */
g_assert (!strcmp (format, "%.40e"));
return snprintf (buffer, bufferLength, "%s", "1.9762625833649861767062751714728854894602e-323");
}
#endif
return snprintf(buffer, bufferLength, format, value);
}
static gboolean
add_modifier_to_array (MonoType *type, MonoArrayHandle dest, int dest_idx, MonoError *error)
{
HANDLE_FUNCTION_ENTER ();
MonoClass *klass = mono_class_from_mono_type_internal (type);
MonoReflectionTypeHandle rt;
rt = mono_type_get_object_handle (m_class_get_byval_arg (klass), error);
goto_if_nok (error, leave);
MONO_HANDLE_ARRAY_SETREF (dest, dest_idx, rt);
leave:
HANDLE_FUNCTION_RETURN_VAL (is_ok (error));
}
/*
* We return NULL for no modifiers so the corlib code can return Type.EmptyTypes
* and avoid useless allocations.
*/
static MonoArrayHandle
type_array_from_modifiers (MonoType *type, int optional, MonoError *error)
{
int i, count = 0;
int cmod_count = mono_type_custom_modifier_count (type);
if (cmod_count == 0)
goto fail;
for (i = 0; i < cmod_count; ++i) {
gboolean required;
(void) mono_type_get_custom_modifier (type, i, &required, error);
goto_if_nok (error, fail);
if ((optional && !required) || (!optional && required))
count++;
}
if (!count)
goto fail;
MonoArrayHandle res;
res = mono_array_new_handle (mono_defaults.systemtype_class, count, error);
goto_if_nok (error, fail);
count = 0;
for (i = 0; i < cmod_count; ++i) {
gboolean required;
MonoType *cmod_type = mono_type_get_custom_modifier (type, i, &required, error);
goto_if_nok (error, fail);
if ((optional && !required) || (!optional && required)) {
if (!add_modifier_to_array (cmod_type, res, count, error))
goto fail;
count++;
}
}
return res;
fail:
return MONO_HANDLE_NEW (MonoArray, NULL);
}
MonoArrayHandle
ves_icall_RuntimeParameterInfo_GetTypeModifiers (MonoReflectionTypeHandle rt, MonoObjectHandle member, int pos, MonoBoolean optional, MonoError *error)
{
MonoType *type = MONO_HANDLE_GETVAL (rt, type);
MonoClass *member_class = mono_handle_class (member);
MonoMethod *method = NULL;
MonoMethodSignature *sig;
if (mono_class_is_reflection_method_or_constructor (member_class)) {
method = MONO_HANDLE_GETVAL (MONO_HANDLE_CAST (MonoReflectionMethod, member), method);
} else if (m_class_get_image (member_class) == mono_defaults.corlib && !strcmp ("RuntimePropertyInfo", m_class_get_name (member_class))) {
MonoProperty *prop = MONO_HANDLE_GETVAL (MONO_HANDLE_CAST (MonoReflectionProperty, member), property);
if (!(method = prop->get))
method = prop->set;
g_assert (method);
} else if (strcmp (m_class_get_name (member_class), "DynamicMethod") == 0 && strcmp (m_class_get_name_space (member_class), "System.Reflection.Emit") == 0) {
MonoArrayHandle params = MONO_HANDLE_NEW_GET (MonoArray, MONO_HANDLE_CAST (MonoReflectionDynamicMethod, member), parameters);
MonoReflectionTypeHandle t = MONO_HANDLE_NEW (MonoReflectionType, NULL);
MONO_HANDLE_ARRAY_GETREF (t, params, pos);
type = mono_reflection_type_handle_mono_type (t, error);
return type_array_from_modifiers (type, optional, error);
} else {
char *type_name = mono_type_get_full_name (member_class);
mono_error_set_not_supported (error, "Custom modifiers on a ParamInfo with member %s are not supported", type_name);
g_free (type_name);
return NULL_HANDLE_ARRAY;
}
sig = mono_method_signature_internal (method);
if (pos == -1)
type = sig->ret;
else
type = sig->params [pos];
return type_array_from_modifiers (type, optional, error);
}
static MonoType*
get_property_type (MonoProperty *prop)
{
MonoMethodSignature *sig;
if (prop->get) {
sig = mono_method_signature_internal (prop->get);
return sig->ret;
} else if (prop->set) {
sig = mono_method_signature_internal (prop->set);
return sig->params [sig->param_count - 1];
}
return NULL;
}
MonoArrayHandle
ves_icall_RuntimePropertyInfo_GetTypeModifiers (MonoReflectionPropertyHandle property, MonoBoolean optional, MonoError *error)
{
MonoProperty *prop = MONO_HANDLE_GETVAL (property, property);
MonoType *type = get_property_type (prop);
if (!type)
return NULL_HANDLE_ARRAY;
return type_array_from_modifiers (type, optional, error);
}
/*
 * Construct a MonoType suited to decoding a constant blob object.
*
* @type is the target type which will be constructed
* @blob_type is the blob type, for example, that comes from the constant table
* @real_type is the expected constructed type.
*/
static void
mono_type_from_blob_type (MonoType *type, MonoTypeEnum blob_type, MonoType *real_type)
{
type->type = blob_type;
type->data.klass = NULL;
if (blob_type == MONO_TYPE_CLASS)
type->data.klass = mono_defaults.object_class;
else if (real_type->type == MONO_TYPE_VALUETYPE && m_class_is_enumtype (real_type->data.klass)) {
/* For enums, we need to use the base type */
type->type = MONO_TYPE_VALUETYPE;
type->data.klass = mono_class_from_mono_type_internal (real_type);
} else
type->data.klass = mono_class_from_mono_type_internal (real_type);
}
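/*
 * Example: for a property typed as an enum with underlying type int32, the
 * constant table stores the default value as MONO_TYPE_I4, so blob_type is I4
 * while real_type is the enum's VALUETYPE; the enum branch above rewrites the
 * constructed type back to MONO_TYPE_VALUETYPE with the enum class, letting
 * the blob decoder box the value as the enum rather than as a plain int.
 */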
MonoObjectHandle
ves_icall_property_info_get_default_value (MonoReflectionPropertyHandle property_handle, MonoError* error)
{
MonoReflectionProperty* property = MONO_HANDLE_RAW (property_handle);
MonoType blob_type;
MonoProperty *prop = property->property;
MonoType *type = get_property_type (prop);
MonoTypeEnum def_type;
const char *def_value;
mono_class_init_internal (prop->parent);
if (!(prop->attrs & PROPERTY_ATTRIBUTE_HAS_DEFAULT)) {
mono_error_set_invalid_operation (error, NULL);
return NULL_HANDLE;
}
def_value = mono_class_get_property_default_value (prop, &def_type);
mono_type_from_blob_type (&blob_type, def_type, type);
return mono_get_object_from_blob (&blob_type, def_value, MONO_HANDLE_NEW (MonoString, NULL), error);
}
MonoBoolean
ves_icall_MonoCustomAttrs_IsDefinedInternal (MonoObjectHandle obj, MonoReflectionTypeHandle attr_type, MonoError *error)
{
MonoClass *attr_class = mono_class_from_mono_type_internal (MONO_HANDLE_GETVAL (attr_type, type));
mono_class_init_checked (attr_class, error);
return_val_if_nok (error, FALSE);
MonoCustomAttrInfo *cinfo = mono_reflection_get_custom_attrs_info_checked (obj, error);
return_val_if_nok (error, FALSE);
if (!cinfo)
return FALSE;
gboolean found = mono_custom_attrs_has_attr (cinfo, attr_class);
if (!cinfo->cached)
mono_custom_attrs_free (cinfo);
return found;
}
MonoArrayHandle
ves_icall_MonoCustomAttrs_GetCustomAttributesInternal (MonoObjectHandle obj, MonoReflectionTypeHandle attr_type, MonoBoolean pseudoattrs, MonoError *error)
{
MonoClass *attr_class;
if (MONO_HANDLE_IS_NULL (attr_type))
attr_class = NULL;
else
attr_class = mono_class_from_mono_type_internal (MONO_HANDLE_GETVAL (attr_type, type));
if (attr_class) {
mono_class_init_checked (attr_class, error);
return_val_if_nok (error, NULL_HANDLE_ARRAY);
}
return mono_reflection_get_custom_attrs_by_type_handle (obj, attr_class, error);
}
MonoArrayHandle
ves_icall_MonoCustomAttrs_GetCustomAttributesDataInternal (MonoObjectHandle obj, MonoError *error)
{
return mono_reflection_get_custom_attrs_data_checked (obj, error);
}
#ifndef DISABLE_COM
int
ves_icall_System_Runtime_InteropServices_Marshal_GetHRForException_WinRT(MonoExceptionHandle ex, MonoError *error)
{
mono_error_set_not_implemented (error, "System.Runtime.InteropServices.Marshal.GetHRForException_WinRT internal call is not implemented.");
return 0;
}
MonoObjectHandle
ves_icall_System_Runtime_InteropServices_Marshal_GetNativeActivationFactory(MonoObjectHandle type, MonoError *error)
{
mono_error_set_not_implemented (error, "System.Runtime.InteropServices.Marshal.GetNativeActivationFactory internal call is not implemented.");
return NULL_HANDLE;
}
void*
ves_icall_System_Runtime_InteropServices_Marshal_GetRawIUnknownForComObjectNoAddRef(MonoObjectHandle obj, MonoError *error)
{
mono_error_set_not_implemented (error, "System.Runtime.InteropServices.Marshal.GetRawIUnknownForComObjectNoAddRef internal call is not implemented.");
return NULL;
}
MonoObjectHandle
ves_icall_System_Runtime_InteropServices_WindowsRuntime_UnsafeNativeMethods_GetRestrictedErrorInfo(MonoError *error)
{
mono_error_set_not_implemented (error, "System.Runtime.InteropServices.WindowsRuntime.UnsafeNativeMethods.GetRestrictedErrorInfo internal call is not implemented.");
return NULL_HANDLE;
}
MonoBoolean
ves_icall_System_Runtime_InteropServices_WindowsRuntime_UnsafeNativeMethods_RoOriginateLanguageException (int ierr, MonoStringHandle message, void* languageException, MonoError *error)
{
mono_error_set_not_implemented (error, "System.Runtime.InteropServices.WindowsRuntime.UnsafeNativeMethods.RoOriginateLanguageException internal call is not implemented.");
return FALSE;
}
void
ves_icall_System_Runtime_InteropServices_WindowsRuntime_UnsafeNativeMethods_RoReportUnhandledError (MonoObjectHandle oerr, MonoError *error)
{
mono_error_set_not_implemented (error, "System.Runtime.InteropServices.WindowsRuntime.UnsafeNativeMethods.RoReportUnhandledError internal call is not implemented.");
}
int
ves_icall_System_Runtime_InteropServices_WindowsRuntime_UnsafeNativeMethods_WindowsCreateString(MonoStringHandle sourceString, int length, void** hstring, MonoError *error)
{
mono_error_set_not_implemented (error, "System.Runtime.InteropServices.WindowsRuntime.UnsafeNativeMethods.WindowsCreateString internal call is not implemented.");
return 0;
}
int
ves_icall_System_Runtime_InteropServices_WindowsRuntime_UnsafeNativeMethods_WindowsDeleteString(void* hstring, MonoError *error)
{
mono_error_set_not_implemented (error, "System.Runtime.InteropServices.WindowsRuntime.UnsafeNativeMethods.WindowsDeleteString internal call is not implemented.");
return 0;
}
mono_unichar2*
ves_icall_System_Runtime_InteropServices_WindowsRuntime_UnsafeNativeMethods_WindowsGetStringRawBuffer(void* hstring, unsigned* length, MonoError *error)
{
mono_error_set_not_implemented (error, "System.Runtime.InteropServices.WindowsRuntime.UnsafeNativeMethods.WindowsGetStringRawBuffer internal call is not implemented.");
return NULL;
}
#endif
static const MonoIcallTableCallbacks *icall_table;
static mono_mutex_t icall_mutex;
static GHashTable *icall_hash = NULL;
typedef struct _MonoIcallHashTableValue {
gconstpointer method;
guint32 flags;
} MonoIcallHashTableValue;
void
mono_install_icall_table_callbacks (const MonoIcallTableCallbacks *cb)
{
g_assert (cb->version == MONO_ICALL_TABLE_CALLBACKS_VERSION);
icall_table = cb;
}
void
mono_icall_init (void)
{
#ifndef DISABLE_ICALL_TABLES
mono_icall_table_init ();
#endif
icall_hash = g_hash_table_new_full (g_str_hash, g_str_equal, g_free, g_free);
mono_os_mutex_init (&icall_mutex);
}
static void
mono_icall_lock (void)
{
mono_locks_os_acquire (&icall_mutex, IcallLock);
}
static void
mono_icall_unlock (void)
{
mono_locks_os_release (&icall_mutex, IcallLock);
}
static void
add_internal_call_with_flags (const char *name, gconstpointer method, guint32 flags)
{
char *key = g_strdup (name);
MonoIcallHashTableValue *value = g_new (MonoIcallHashTableValue, 1);
if (key && value) {
value->method = method;
value->flags = flags;
mono_icall_lock ();
g_hash_table_insert (icall_hash, key, (gpointer)value);
mono_icall_unlock ();
}
}
/**
* mono_dangerous_add_internal_call_coop:
* \param name method specification to surface to the managed world
* \param method pointer to a C method to invoke when the method is called
*
* Similar to \c mono_dangerous_add_raw_internal_call.
*
*/
void
mono_dangerous_add_internal_call_coop (const char *name, gconstpointer method)
{
add_internal_call_with_flags (name, method, MONO_ICALL_FLAGS_COOPERATIVE);
}
/**
* mono_dangerous_add_internal_call_no_wrapper:
* \param name method specification to surface to the managed world
* \param method pointer to a C method to invoke when the method is called
*
* Similar to \c mono_dangerous_add_raw_internal_call but with more requirements for correct
* operation.
*
* The \p method must NOT:
*
* Run for an unbounded amount of time without calling the mono runtime.
* Additionally, the method must switch to GC Safe mode to perform all blocking
 * operations: performing blocking I/O, taking locks, etc. The method can't throw or raise
 * exceptions or call other methods that do, since the runtime won't be able to detect the
 * exception and the unwinder won't be able to correctly find the last managed frame in the call stack.
 * This registration method is for icalls that need very low overhead and that follow all of these rules in their implementation.
*
*/
void
mono_dangerous_add_internal_call_no_wrapper (const char *name, gconstpointer method)
{
add_internal_call_with_flags (name, method, MONO_ICALL_FLAGS_NO_WRAPPER);
}
/**
* mono_add_internal_call:
* \param name method specification to surface to the managed world
* \param method pointer to a C method to invoke when the method is called
*
* This method surfaces the C function pointed by \p method as a method
* that has been surfaced in managed code with the method specified in
* \p name as an internal call.
*
 * Internal calls are surfaced to all loaded app domains and are
 * accessible through a type with the specified name.
*
* You must provide a fully qualified type name, that is namespaces
* and type name, followed by a colon and the method name, with an
* optional signature to bind.
*
* For example, the following are all valid declarations:
*
* \c MyApp.Services.ScriptService:Accelerate
*
* \c MyApp.Services.ScriptService:Slowdown(int,bool)
*
 * You include the parameter list in cases where more than one method with
 * the same name is surfaced to managed code. That way you can register
 * different internal calls for different method overloads.
*
* The internal calls are invoked with no marshalling. This means that .NET
* types like \c System.String are exposed as \c MonoString* parameters. This is
* different than the way that strings are surfaced in P/Invoke.
*
* For more information on how the parameters are marshalled, see the
* <a href="http://www.mono-project.com/docs/advanced/embedding/">Mono Embedding</a>
* page.
*
* See the <a href="mono-api-methods.html#method-desc">Method Description</a>
* reference for more information on the format of method descriptions.
*/
void
mono_add_internal_call (const char *name, gconstpointer method)
{
add_internal_call_with_flags (name, method, MONO_ICALL_FLAGS_FOREIGN);
}
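/*
 * A minimal registration sketch, reusing the illustrative names from the
 * comment above (script_service_slowdown is hypothetical, not a real icall):
 *
 *   static MonoBoolean
 *   script_service_slowdown (gint32 amount, MonoBoolean hard)
 *   {
 *       // native implementation invoked from managed code
 *       return amount > 0;
 *   }
 *
 *   mono_add_internal_call ("MyApp.Services.ScriptService:Slowdown(int,bool)",
 *                           (gconstpointer) script_service_slowdown);
 */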
/**
* mono_dangerous_add_raw_internal_call:
* \param name method specification to surface to the managed world
* \param method pointer to a C method to invoke when the method is called
*
* Similar to \c mono_add_internal_call but with more requirements for correct
* operation.
*
* A thread running a dangerous raw internal call will avoid a thread state
 * transition on entry and exit, but it must take responsibility for cooperating
* with the Mono runtime.
*
* The \p method must NOT:
*
* Run for an unbounded amount of time without calling the mono runtime.
* Additionally, the method must switch to GC Safe mode to perform all blocking
* operations: performing blocking I/O, taking locks, etc.
*
*/
void
mono_dangerous_add_raw_internal_call (const char *name, gconstpointer method)
{
add_internal_call_with_flags (name, method, MONO_ICALL_FLAGS_COOPERATIVE);
}
/**
* mono_add_internal_call_with_flags:
* \param name method specification to surface to the managed world
* \param method pointer to a C method to invoke when the method is called
* \param cooperative if \c TRUE, run icall in GC Unsafe (cooperatively suspended) mode,
* otherwise GC Safe (blocking)
*
* Like \c mono_add_internal_call, but if \p cooperative is \c TRUE the added
 * icall promises that it will use the cooperative API to inform the runtime
* when it is running blocking operations, that it will not run for unbounded
* amounts of time without safepointing, and that it will not hold managed
* object references across suspend safepoints.
*
* If \p cooperative is \c FALSE, run the icall in GC Safe mode - the icall may
* block. The icall must obey the GC Safe rules, e.g. it must not touch
* unpinned managed memory.
*
*/
void
mono_add_internal_call_with_flags (const char *name, gconstpointer method, gboolean cooperative)
{
add_internal_call_with_flags (name, method, cooperative ? MONO_ICALL_FLAGS_COOPERATIVE : MONO_ICALL_FLAGS_FOREIGN);
}
void
mono_add_internal_call_internal (const char *name, gconstpointer method)
{
add_internal_call_with_flags (name, method, MONO_ICALL_FLAGS_COOPERATIVE);
}
/*
 * we should probably export this as a helper (handling nested types).
* Returns the number of chars written in buf.
*/
static int
concat_class_name (char *buf, int bufsize, MonoClass *klass)
{
int nspacelen, cnamelen;
nspacelen = strlen (m_class_get_name_space (klass));
cnamelen = strlen (m_class_get_name (klass));
if (nspacelen + cnamelen + 2 > bufsize)
return 0;
if (nspacelen) {
memcpy (buf, m_class_get_name_space (klass), nspacelen);
buf [nspacelen ++] = '.';
}
memcpy (buf + nspacelen, m_class_get_name (klass), cnamelen);
buf [nspacelen + cnamelen] = 0;
return nspacelen + cnamelen;
}
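/*
 * Example: for System.String this writes "System.String" into buf and returns
 * 13; mono_lookup_internal_call_full_with_flags below then appends
 * "::Name(sig)" suffixes such as "::.ctor(char[])" to form the icall hash keys.
 */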
static void
no_icall_table (void)
{
g_assert_not_reached ();
}
gboolean
mono_is_missing_icall_addr (gconstpointer addr)
{
return addr == NULL || addr == no_icall_table;
}
/*
* Returns either NULL or no_icall_table for missing icalls.
*/
gconstpointer
mono_lookup_internal_call_full_with_flags (MonoMethod *method, gboolean warn_on_missing, guint32 *flags)
{
char *sigstart = NULL;
char *tmpsig = NULL;
char mname [2048];
char *classname = NULL;
int typelen = 0, mlen, siglen;
gconstpointer res = NULL;
gboolean locked = FALSE;
g_assert (method != NULL);
if (method->is_inflated)
method = ((MonoMethodInflated *) method)->declaring;
if (m_class_get_nested_in (method->klass)) {
int pos = concat_class_name (mname, sizeof (mname)-2, m_class_get_nested_in (method->klass));
if (!pos)
goto exit;
mname [pos++] = '/';
mname [pos] = 0;
typelen = concat_class_name (mname+pos, sizeof (mname)-pos-1, method->klass);
if (!typelen)
goto exit;
typelen += pos;
} else {
typelen = concat_class_name (mname, sizeof (mname), method->klass);
if (!typelen)
goto exit;
}
classname = g_strdup (mname);
mname [typelen] = ':';
mname [typelen + 1] = ':';
mlen = strlen (method->name);
memcpy (mname + typelen + 2, method->name, mlen);
sigstart = mname + typelen + 2 + mlen;
*sigstart = 0;
tmpsig = mono_signature_get_desc (mono_method_signature_internal (method), TRUE);
siglen = strlen (tmpsig);
if (typelen + mlen + siglen + 6 > sizeof (mname))
goto exit;
sigstart [0] = '(';
memcpy (sigstart + 1, tmpsig, siglen);
sigstart [siglen + 1] = ')';
sigstart [siglen + 2] = 0;
/* mono_marshal_get_native_wrapper () depends on this */
if (method->klass == mono_defaults.string_class && !strcmp (method->name, ".ctor")) {
res = (gconstpointer)ves_icall_System_String_ctor_RedirectToCreateString;
goto exit;
}
mono_icall_lock ();
locked = TRUE;
res = g_hash_table_lookup (icall_hash, mname);
if (res) {
MonoIcallHashTableValue *value = (MonoIcallHashTableValue *)res;
if (flags)
*flags = value->flags;
res = value->method;
goto exit;
}
/* try without signature */
*sigstart = 0;
res = g_hash_table_lookup (icall_hash, mname);
if (res) {
MonoIcallHashTableValue *value = (MonoIcallHashTableValue *)res;
if (flags)
*flags = value->flags;
res = value->method;
goto exit;
}
if (!icall_table) {
/* Fail only when the result is actually used */
res = (gconstpointer)no_icall_table;
goto exit;
} else {
gboolean uses_handles = FALSE;
g_assert (icall_table->lookup);
res = icall_table->lookup (method, classname, sigstart - mlen, sigstart, &uses_handles);
if (res && flags && uses_handles)
*flags = *flags | MONO_ICALL_FLAGS_USES_HANDLES;
mono_icall_unlock ();
locked = FALSE;
if (res)
goto exit;
if (warn_on_missing) {
g_warning ("cant resolve internal call to \"%s\" (tested without signature also)", mname);
g_print ("\nYour mono runtime and class libraries are out of sync.\n");
g_print ("The out of sync library is: %s\n", m_class_get_image (method->klass)->name);
g_print ("\nWhen you update one from git you need to update, compile and install\nthe other too.\n");
g_print ("Do not report this as a bug unless you're sure you have updated correctly:\nyou probably have a broken mono install.\n");
g_print ("If you see other errors or faults after this message they are probably related\n");
g_print ("and you need to fix your mono install first.\n");
}
res = NULL;
}
exit:
if (locked)
mono_icall_unlock ();
g_free (classname);
g_free (tmpsig);
return res;
}
/**
 * mono_lookup_internal_call_full:
 * \param method the method to look up
 * \param warn_on_missing if TRUE, print a warning when the icall cannot be resolved
 * \param uses_handles out argument set if the method needs handles around managed objects.
 * \param foreign out argument set if the icall was registered with \c MONO_ICALL_FLAGS_FOREIGN.
 * \returns a pointer to the icall code for the given method. If
 * \p uses_handles is not NULL, it will be set to TRUE if the method
 * needs managed objects wrapped using the infrastructure in handle.h
 *
 * If the method is not found, warns (when \p warn_on_missing) and returns NULL.
 */
gconstpointer
mono_lookup_internal_call_full (MonoMethod *method, gboolean warn_on_missing, mono_bool *uses_handles, mono_bool *foreign)
{
if (uses_handles)
*uses_handles = FALSE;
if (foreign)
*foreign = FALSE;
guint32 flags = MONO_ICALL_FLAGS_NONE;
gconstpointer addr = mono_lookup_internal_call_full_with_flags (method, warn_on_missing, &flags);
if (uses_handles && (flags & MONO_ICALL_FLAGS_USES_HANDLES))
*uses_handles = TRUE;
if (foreign && (flags & MONO_ICALL_FLAGS_FOREIGN))
*foreign = TRUE;
return addr;
}
/**
* mono_lookup_internal_call:
*/
gpointer
mono_lookup_internal_call (MonoMethod *method)
{
return (gpointer)mono_lookup_internal_call_full (method, TRUE, NULL, NULL);
}
/*
* mono_lookup_icall_symbol:
*
* Given the icall METHOD, returns its C symbol.
*/
const char*
mono_lookup_icall_symbol (MonoMethod *m)
{
if (!icall_table)
return NULL;
g_assert (icall_table->lookup_icall_symbol);
gpointer func;
func = (gpointer)mono_lookup_internal_call_full (m, FALSE, NULL, NULL);
if (!func)
return NULL;
return icall_table->lookup_icall_symbol (func);
}
#if defined(TARGET_WIN32) && defined(TARGET_X86)
/*
* Under windows, the default pinvoke calling convention is STDCALL but
* we need CDECL.
*/
#define MONO_ICALL_SIGNATURE_CALL_CONVENTION MONO_CALL_C
#else
#define MONO_ICALL_SIGNATURE_CALL_CONVENTION 0
#endif
// Storage for these enums is pointer-sized as it gets replaced with MonoType*.
//
// mono_create_icall_signatures depends on this order. Handle with care.
typedef enum ICallSigType {
ICALL_SIG_TYPE_bool = 0x00,
ICALL_SIG_TYPE_boolean = ICALL_SIG_TYPE_bool,
ICALL_SIG_TYPE_double = 0x01,
ICALL_SIG_TYPE_float = 0x02,
ICALL_SIG_TYPE_int = 0x03,
ICALL_SIG_TYPE_int16 = 0x04,
ICALL_SIG_TYPE_int32 = ICALL_SIG_TYPE_int,
ICALL_SIG_TYPE_int8 = 0x05,
ICALL_SIG_TYPE_long = 0x06,
ICALL_SIG_TYPE_obj = 0x07,
ICALL_SIG_TYPE_object = ICALL_SIG_TYPE_obj,
ICALL_SIG_TYPE_ptr = 0x08,
ICALL_SIG_TYPE_ptrref = 0x09,
ICALL_SIG_TYPE_string = 0x0A,
ICALL_SIG_TYPE_uint16 = 0x0B,
ICALL_SIG_TYPE_uint32 = 0x0C,
ICALL_SIG_TYPE_uint8 = 0x0D,
ICALL_SIG_TYPE_ulong = 0x0E,
ICALL_SIG_TYPE_void = 0x0F,
ICALL_SIG_TYPE_sizet = 0x10
} ICallSigType;
#define ICALL_SIG_TYPES_1(a) ICALL_SIG_TYPE_ ## a,
#define ICALL_SIG_TYPES_2(a, b) ICALL_SIG_TYPES_1 (a ) ICALL_SIG_TYPES_1 (b)
#define ICALL_SIG_TYPES_3(a, b, c) ICALL_SIG_TYPES_2 (a, b ) ICALL_SIG_TYPES_1 (c)
#define ICALL_SIG_TYPES_4(a, b, c, d) ICALL_SIG_TYPES_3 (a, b, c ) ICALL_SIG_TYPES_1 (d)
#define ICALL_SIG_TYPES_5(a, b, c, d, e) ICALL_SIG_TYPES_4 (a, b, c, d ) ICALL_SIG_TYPES_1 (e)
#define ICALL_SIG_TYPES_6(a, b, c, d, e, f) ICALL_SIG_TYPES_5 (a, b, c, d, e) ICALL_SIG_TYPES_1 (f)
#define ICALL_SIG_TYPES_7(a, b, c, d, e, f, g) ICALL_SIG_TYPES_6 (a, b, c, d, e, f) ICALL_SIG_TYPES_1 (g)
#define ICALL_SIG_TYPES_8(a, b, c, d, e, f, g, h) ICALL_SIG_TYPES_7 (a, b, c, d, e, f, g) ICALL_SIG_TYPES_1 (h)
#define ICALL_SIG_TYPES(n, types) ICALL_SIG_TYPES_ ## n types
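// Illustrative expansion: ICALL_SIG_TYPES (2, (void, int)) yields
//   ICALL_SIG_TYPE_void, ICALL_SIG_TYPE_int,
// i.e. the return type enum followed by the parameter type enums, which is the
// layout mono_create_icall_signatures later fixes up into real MonoType*s.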
// A scheme to make these const would be nice.
static struct {
#define ICALL_SIG(n, xtypes) \
struct { \
MonoMethodSignature sig; \
gsize types [n]; \
} ICALL_SIG_NAME (n, xtypes);
ICALL_SIGS
MonoMethodSignature end; // terminal zeroed element
} mono_icall_signatures = {
#undef ICALL_SIG
#define ICALL_SIG(n, types) { { \
0, /* ret */ \
n, /* param_count */ \
-1, /* sentinelpos */ \
0, /* generic_param_count */ \
MONO_ICALL_SIGNATURE_CALL_CONVENTION, \
0, /* hasthis */ \
0, /* explicit_this */ \
1, /* pinvoke */ \
0, /* is_inflated */ \
0, /* has_type_parameters */ \
}, /* possible gap here, depending on MONO_ZERO_LEN_ARRAY */ \
{ ICALL_SIG_TYPES (n, types) } }, /* params and ret */
ICALL_SIGS
};
#undef ICALL_SIG
#define ICALL_SIG(n, types) MonoMethodSignature * const ICALL_SIG_NAME (n, types) = &mono_icall_signatures.ICALL_SIG_NAME (n, types).sig;
ICALL_SIGS
#undef ICALL_SIG
void
mono_create_icall_signatures (void)
{
// Fixup the mostly statically initialized icall signatures.
// x = m_class_get_byval_arg (x)
// Initialize ret with params [0] and params [i] with params [i + 1].
// ptrref is special
//
// FIXME This is a bit obscure.
typedef MonoMethodSignature G_MAY_ALIAS MonoMethodSignature_a;
typedef gsize G_MAY_ALIAS gsize_a;
MonoType * const lookup [ ] = {
m_class_get_byval_arg (mono_defaults.boolean_class), // ICALL_SIG_TYPE_bool
m_class_get_byval_arg (mono_defaults.double_class), // ICALL_SIG_TYPE_double
m_class_get_byval_arg (mono_defaults.single_class), // ICALL_SIG_TYPE_float
m_class_get_byval_arg (mono_defaults.int32_class), // ICALL_SIG_TYPE_int
m_class_get_byval_arg (mono_defaults.int16_class), // ICALL_SIG_TYPE_int16
m_class_get_byval_arg (mono_defaults.sbyte_class), // ICALL_SIG_TYPE_int8
m_class_get_byval_arg (mono_defaults.int64_class), // ICALL_SIG_TYPE_long
m_class_get_byval_arg (mono_defaults.object_class), // ICALL_SIG_TYPE_obj
m_class_get_byval_arg (mono_defaults.int_class), // ICALL_SIG_TYPE_ptr
mono_class_get_byref_type (mono_defaults.int_class), // ICALL_SIG_TYPE_ptrref
m_class_get_byval_arg (mono_defaults.string_class), // ICALL_SIG_TYPE_string
m_class_get_byval_arg (mono_defaults.uint16_class), // ICALL_SIG_TYPE_uint16
m_class_get_byval_arg (mono_defaults.uint32_class), // ICALL_SIG_TYPE_uint32
m_class_get_byval_arg (mono_defaults.byte_class), // ICALL_SIG_TYPE_uint8
m_class_get_byval_arg (mono_defaults.uint64_class), // ICALL_SIG_TYPE_ulong
m_class_get_byval_arg (mono_defaults.void_class), // ICALL_SIG_TYPE_void
m_class_get_byval_arg (mono_defaults.int_class), // ICALL_SIG_TYPE_sizet
};
MonoMethodSignature_a *sig = (MonoMethodSignature*)&mono_icall_signatures;
int n;
while ((n = sig->param_count)) {
--sig->param_count; // remove ret
gsize_a *types = (gsize_a*)(sig + 1);
for (int i = 0; i < n; ++i) {
gsize index = *types++;
g_assert (index < G_N_ELEMENTS (lookup));
			// Casts on the next line are an attempt to follow strict aliasing rules,
// to ensure reading from *types precedes writing
// to params [].
*(gsize*)(i ? &sig->params [i - 1] : &sig->ret) = (gsize)lookup [index];
}
sig = (MonoMethodSignature*)types;
}
}
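/*
 * Worked example: an embedded ICALL_SIG (2, (void, int)) starts out with
 * param_count == 2 and types [] == { ICALL_SIG_TYPE_void, ICALL_SIG_TYPE_int };
 * one iteration of the loop above rewrites it to param_count == 1, ret ==
 * void and params [0] == int32, then steps past the two type slots to the
 * next embedded signature, stopping at the zeroed terminal element.
 */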
void
mono_register_jit_icall_info (MonoJitICallInfo *info, gconstpointer func, const char *name, MonoMethodSignature *sig, gboolean avoid_wrapper, const char *c_symbol)
{
// Duplicate initialization is allowed and racy, assuming it is equivalent.
info->name = name;
info->func = func;
info->sig = sig;
info->c_symbol = c_symbol;
	// Fill in the wrapper ahead of time, to just be func, to avoid
	// later initializing it to anything else; i.e. there is no wrapper.
if (avoid_wrapper) {
info->wrapper = func;
} else {
// Leave it alone in case of a race.
}
}
int
ves_icall_System_GC_GetCollectionCount (int generation)
{
return mono_gc_collection_count (generation);
}
int
ves_icall_System_GC_GetGeneration (MonoObjectHandle object, MonoError *error)
{
return mono_gc_get_generation (MONO_HANDLE_RAW (object));
}
int
ves_icall_System_GC_GetMaxGeneration (void)
{
return mono_gc_max_generation ();
}
gint64
ves_icall_System_GC_GetAllocatedBytesForCurrentThread (void)
{
return mono_gc_get_allocated_bytes_for_current_thread ();
}
guint64
ves_icall_System_GC_GetTotalAllocatedBytes (MonoBoolean precise, MonoError* error)
{
return mono_gc_get_total_allocated_bytes (precise);
}
void
ves_icall_System_GC_RecordPressure (gint64 value)
{
mono_gc_add_memory_pressure (value);
}
MonoBoolean
ves_icall_System_Threading_Thread_YieldInternal (void)
{
mono_threads_platform_yield ();
return TRUE;
}
gint32
ves_icall_System_Environment_get_ProcessorCount (void)
{
return mono_cpu_count ();
}
// Generate wrappers.
#define ICALL_TYPE(id,name,first) /* nothing */
#define ICALL(id,name,func) /* nothing */
#define NOHANDLES(inner) /* nothing */
#define MONO_HANDLE_REGISTER_ICALL(func, ret, nargs, argtypes) MONO_HANDLE_REGISTER_ICALL_IMPLEMENT (func, ret, nargs, argtypes)
// Some native functions are exposed via multiple managed names.
// Producing a wrapper for these results in duplicate wrappers with the same names,
// which fails to compile. Do not produce such duplicate wrappers. Alternatively,
// a one-line native function with a different name that calls the main one could be used;
// i.e. the wrapper would then also have a different name.
#define HANDLES_REUSE_WRAPPER(...) /* nothing */
#define HANDLES(id, name, func, ret, nargs, argtypes) \
MONO_HANDLE_DECLARE (id, name, func, ret, nargs, argtypes); \
MONO_HANDLE_IMPLEMENT (id, name, func, ret, nargs, argtypes)
#include "metadata/icall-def.h"
#undef HANDLES
#undef HANDLES_REUSE_WRAPPER
#undef ICALL_TYPE
#undef ICALL
#undef NOHANDLES
#undef MONO_HANDLE_REGISTER_ICALL
| /**
* \file
*
* Authors:
* Dietmar Maurer ([email protected])
* Paolo Molaro ([email protected])
* Patrik Torstensson ([email protected])
* Marek Safar ([email protected])
* Aleksey Kliger ([email protected])
*
* Copyright 2001-2003 Ximian, Inc (http://www.ximian.com)
* Copyright 2004-2009 Novell, Inc (http://www.novell.com)
* Copyright 2011-2015 Xamarin Inc (http://www.xamarin.com).
* Licensed under the MIT license. See LICENSE file in the project root for full license information.
*/
#include <config.h>
#if defined(TARGET_WIN32) || defined(HOST_WIN32)
#include <stdio.h>
#endif
#include <glib.h>
#include <stdarg.h>
#include <string.h>
#include <ctype.h>
#ifdef HAVE_ALLOCA_H
#include <alloca.h>
#endif
#ifdef HAVE_SYS_TIME_H
#include <sys/time.h>
#endif
#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif
#if defined (HAVE_WCHAR_H)
#include <wchar.h>
#endif
#include "mono/metadata/icall-internals.h"
#include "mono/utils/mono-membar.h"
#include <mono/metadata/object.h>
#include <mono/metadata/threads.h>
#include <mono/metadata/threads-types.h>
#include <mono/metadata/monitor.h>
#include <mono/metadata/reflection.h>
#include <mono/metadata/image-internals.h>
#include <mono/metadata/assembly.h>
#include <mono/metadata/assembly-internals.h>
#include <mono/metadata/tabledefs.h>
#include <mono/metadata/exception.h>
#include <mono/metadata/exception-internals.h>
#include <mono/metadata/w32file.h>
#include <mono/metadata/mono-endian.h>
#include <mono/metadata/tokentype.h>
#include <mono/metadata/metadata-internals.h>
#include <mono/metadata/metadata-update.h>
#include <mono/metadata/class-internals.h>
#include <mono/metadata/class-init.h>
#include <mono/metadata/reflection-internals.h>
#include <mono/metadata/marshal.h>
#include <mono/metadata/gc-internals.h>
#include <mono/metadata/mono-gc.h>
#include <mono/metadata/appdomain-icalls.h>
#include <mono/metadata/string-icalls.h>
#include <mono/metadata/debug-helpers.h>
#include <mono/metadata/environment.h>
#include <mono/metadata/profiler-private.h>
#include <mono/metadata/mono-config.h>
#include <mono/metadata/cil-coff.h>
#include <mono/metadata/mono-perfcounters.h>
#include <mono/metadata/mono-debug.h>
#include <mono/metadata/mono-ptr-array.h>
#include <mono/metadata/verify-internals.h>
#include <mono/metadata/runtime.h>
#include <mono/metadata/seq-points-data.h>
#include <mono/metadata/icall-table.h>
#include <mono/metadata/handle.h>
#include <mono/metadata/w32event.h>
#include <mono/metadata/abi-details.h>
#include <mono/metadata/loader-internals.h>
#include <mono/utils/monobitset.h>
#include <mono/utils/mono-time.h>
#include <mono/utils/mono-proclib.h>
#include <mono/utils/mono-string.h>
#include <mono/utils/mono-error-internals.h>
#include <mono/utils/mono-mmap.h>
#include <mono/utils/mono-digest.h>
#include <mono/utils/bsearch.h>
#include <mono/utils/mono-os-mutex.h>
#include <mono/utils/mono-threads.h>
#include <mono/metadata/w32error.h>
#include <mono/utils/w32api.h>
#include <mono/utils/mono-logger-internals.h>
#include <mono/utils/mono-math.h>
#if !defined(HOST_WIN32) && defined(HAVE_SYS_UTSNAME_H)
#include <sys/utsname.h>
#endif
#if defined(HOST_WIN32)
#include <windows.h>
#endif
#include "icall-decl.h"
#include "mono/utils/mono-threads-coop.h"
#include "mono/metadata/icall-signatures.h"
#include "mono/utils/mono-signal-handler.h"
#if _MSC_VER
#pragma warning(disable:4047) // FIXME differs in levels of indirection
#endif
//#define MONO_DEBUG_ICALLARRAY
// In line with CoreCLR heuristics, https://github.com/dotnet/runtime/blob/69e114c1abf91241a0eeecf1ecceab4711b8aa62/src/coreclr/vm/threads.cpp#L6408.
// Minimum stack size should be sufficient to allow a typical non-recursive call chain to execute,
// including potential exception handling and garbage collection. Used for probing for available
// stack space through RuntimeHelpers.EnsureSufficientExecutionStack.
#if TARGET_SIZEOF_VOID_P == 8
#define MONO_MIN_EXECUTION_STACK_SIZE (128 * 1024)
#else
#define MONO_MIN_EXECUTION_STACK_SIZE (64 * 1024)
#endif
#ifdef MONO_DEBUG_ICALLARRAY
static char debug_icallarray; // 0:uninitialized 1:true 2:false
static gboolean
icallarray_print_enabled (void)
{
if (!debug_icallarray)
debug_icallarray = MONO_TRACE_IS_TRACED (G_LOG_LEVEL_DEBUG, MONO_TRACE_ICALLARRAY) ? 1 : 2;
return debug_icallarray == 1;
}
static void
icallarray_print (const char *format, ...)
{
if (!icallarray_print_enabled ())
return;
va_list args;
va_start (args, format);
g_printv (format, args);
va_end (args);
}
#else
#define icallarray_print_enabled() (FALSE)
#define icallarray_print(...) /* nothing */
#endif
/* Lazy class loading functions */
static GENERATE_GET_CLASS_WITH_CACHE (module, "System.Reflection", "Module")
static void
array_set_value_impl (MonoArrayHandle arr, MonoObjectHandle value, guint32 pos, gboolean strict_enums, gboolean strict_signs, MonoError *error);
static MonoArrayHandle
type_array_from_modifiers (MonoType *type, int optional, MonoError *error);
static inline MonoBoolean
is_generic_parameter (MonoType *type)
{
return !m_type_is_byref (type) && (type->type == MONO_TYPE_VAR || type->type == MONO_TYPE_MVAR);
}
#ifdef HOST_WIN32
static void
mono_icall_make_platform_path (gchar *path)
{
for (size_t i = strlen (path); i > 0; i--)
if (path [i-1] == '\\')
path [i-1] = '/';
}
static const gchar *
mono_icall_get_file_path_prefix (const gchar *path)
{
if (*path == '/' && *(path + 1) == '/') {
return "file:";
} else {
return "file:///";
}
}
#else
static inline void
mono_icall_make_platform_path (gchar *path)
{
return;
}
static inline const gchar *
mono_icall_get_file_path_prefix (const gchar *path)
{
return "file://";
}
#endif /* HOST_WIN32 */
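/*
 * Illustrative behavior, assuming the caller concatenates the prefix with the
 * converted path: on Windows "C:\lib\mscorlib.dll" becomes
 * "file:///C:/lib/mscorlib.dll", while a UNC path "\\server\share\a.dll"
 * becomes "file://server/share/a.dll"; on other hosts the prefix is always
 * "file://".
 */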
MonoJitICallInfos mono_jit_icall_info;
MonoObjectHandle
ves_icall_System_Array_GetValueImpl (MonoArrayHandle array, guint32 pos, MonoError *error)
{
MonoClass * const array_class = mono_handle_class (array);
MonoClass * const element_class = m_class_get_element_class (array_class);
if (m_class_is_native_pointer (element_class)) {
mono_error_set_not_supported (error, NULL);
return NULL_HANDLE;
}
if (m_class_is_valuetype (element_class)) {
gsize element_size = mono_array_element_size (array_class);
gpointer element_address = mono_array_addr_with_size_fast (MONO_HANDLE_RAW (array), element_size, (gsize)pos);
return mono_value_box_handle (element_class, element_address, error);
}
MonoObjectHandle result = mono_new_null ();
mono_handle_array_getref (result, array, pos);
return result;
}
void
ves_icall_System_Array_SetValueImpl (MonoArrayHandle arr, MonoObjectHandle value, guint32 pos, MonoError *error)
{
array_set_value_impl (arr, value, pos, TRUE, TRUE, error);
}
static inline void
set_invalid_cast (MonoError *error, MonoClass *src_class, MonoClass *dst_class)
{
mono_get_runtime_callbacks ()->set_cast_details (src_class, dst_class);
mono_error_set_invalid_cast (error);
}
void
ves_icall_System_Array_SetValueRelaxedImpl (MonoArrayHandle arr, MonoObjectHandle value, guint32 pos, MonoError *error)
{
array_set_value_impl (arr, value, pos, FALSE, FALSE, error);
}
// Copied from CoreCLR: https://github.com/dotnet/coreclr/blob/d3e39bc2f81e3dbf9e4b96347f62b49d8700336c/src/vm/invokeutil.cpp#L33
#define PT_Primitive 0x01000000
static const guint32 primitive_conversions [] = {
0x00, // MONO_TYPE_END
0x00, // MONO_TYPE_VOID
PT_Primitive | 0x0004, // MONO_TYPE_BOOLEAN
PT_Primitive | 0x3F88, // MONO_TYPE_CHAR (W = U2, CHAR, I4, U4, I8, U8, R4, R8)
PT_Primitive | 0x3550, // MONO_TYPE_I1 (W = I1, I2, I4, I8, R4, R8)
PT_Primitive | 0x3FE8, // MONO_TYPE_U1 (W = CHAR, U1, I2, U2, I4, U4, I8, U8, R4, R8)
PT_Primitive | 0x3540, // MONO_TYPE_I2 (W = I2, I4, I8, R4, R8)
PT_Primitive | 0x3F88, // MONO_TYPE_U2 (W = U2, CHAR, I4, U4, I8, U8, R4, R8)
PT_Primitive | 0x3500, // MONO_TYPE_I4 (W = I4, I8, R4, R8)
PT_Primitive | 0x3E00, // MONO_TYPE_U4 (W = U4, I8, R4, R8)
PT_Primitive | 0x3400, // MONO_TYPE_I8 (W = I8, R4, R8)
PT_Primitive | 0x3800, // MONO_TYPE_U8 (W = U8, R4, R8)
PT_Primitive | 0x3000, // MONO_TYPE_R4 (W = R4, R8)
PT_Primitive | 0x2000, // MONO_TYPE_R8 (W = R8)
};
// Copied from CoreCLR: https://github.com/dotnet/coreclr/blob/030a3ea9b8dbeae89c90d34441d4d9a1cf4a7de6/src/vm/invokeutil.h#L176
static gboolean
can_primitive_widen (MonoTypeEnum src_type, MonoTypeEnum dest_type)
{
if (dest_type > MONO_TYPE_R8 || src_type > MONO_TYPE_R8) {
return (MONO_TYPE_I == dest_type && MONO_TYPE_I == src_type) || (MONO_TYPE_U == dest_type && MONO_TYPE_U == src_type);
}
return ((1 << dest_type) & primitive_conversions [src_type]) != 0;
}
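/*
 * Worked example: can_primitive_widen (MONO_TYPE_I1, MONO_TYPE_I2) tests bit
 * (1 << MONO_TYPE_I2) == 0x40 against primitive_conversions [MONO_TYPE_I1],
 * whose low bits are 0x3550; 0x3550 & 0x40 is nonzero, so I1 widens to I2,
 * matching the "(W = I1, I2, I4, I8, R4, R8)" annotation in the table above.
 */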
// Copied from CoreCLR: https://github.com/dotnet/coreclr/blob/eafa8648ebee92de1380278b15cd5c2b6ef11218/src/vm/array.cpp#L1406
static MonoTypeEnum
get_normalized_integral_array_element_type (MonoTypeEnum elementType)
{
	// Array primitive types such as E_T_I4 and E_T_U4 are interchangeable.
	// Enums with interchangeable underlying types are interchangeable.
	// BOOL is NOT interchangeable with I1/U1, nor is CHAR with I2/U2.
switch (elementType) {
case MONO_TYPE_U1:
case MONO_TYPE_U2:
case MONO_TYPE_U4:
case MONO_TYPE_U8:
case MONO_TYPE_U:
return (MonoTypeEnum) (elementType - 1); // normalize to signed type
}
return elementType;
}
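/*
 * For example, MONO_TYPE_U4 normalizes to MONO_TYPE_I4 (each unsigned element
 * type sits one enum value above its signed counterpart), which is what lets
 * ves_icall_System_Array_CanChangePrimitive below treat int[] and uint[] as
 * interchangeable.
 */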
MonoBoolean
ves_icall_System_Array_CanChangePrimitive (MonoReflectionType *volatile* ref_src_type_handle, MonoReflectionType *volatile* ref_dst_type_handle, MonoBoolean reliable)
{
MonoReflectionType* const ref_src_type = *ref_src_type_handle;
MonoReflectionType* const ref_dst_type = *ref_dst_type_handle;
MonoType *src_type = ref_src_type->type;
MonoType *dst_type = ref_dst_type->type;
g_assert (mono_type_is_primitive (src_type));
g_assert (mono_type_is_primitive (dst_type));
MonoTypeEnum normalized_src_type = get_normalized_integral_array_element_type (src_type->type);
MonoTypeEnum normalized_dst_type = get_normalized_integral_array_element_type (dst_type->type);
// Allow conversions like int <-> uint
if (normalized_src_type == normalized_dst_type) {
return TRUE;
}
// Widening is not allowed if reliable is true.
if (reliable) {
return FALSE;
}
// NOTE we don't use normalized types here so int -> ulong will be false
// see https://github.com/dotnet/coreclr/pull/25209#issuecomment-505952295
return can_primitive_widen (src_type->type, dst_type->type);
}
static void
array_set_value_impl (MonoArrayHandle arr_handle, MonoObjectHandle value_handle, guint32 pos, gboolean strict_enums, gboolean strict_signs, MonoError *error)
{
MonoClass *ac, *vc, *ec;
gint32 esize, vsize;
gpointer *ea = NULL, *va = NULL;
guint64 u64 = 0;
gint64 i64 = 0;
gdouble r64 = 0;
gboolean castOk = FALSE;
gboolean et_isenum = FALSE;
gboolean vt_isenum = FALSE;
if (!MONO_HANDLE_IS_NULL (value_handle))
vc = mono_handle_class (value_handle);
else
vc = NULL;
ac = mono_handle_class (arr_handle);
ec = m_class_get_element_class (ac);
esize = mono_array_element_size (ac);
if (mono_class_is_nullable (ec)) {
if (vc && m_class_is_primitive (vc) && vc != m_class_get_nullable_elem_class (ec)) {
// T -> Nullable<T> T must be exact
set_invalid_cast (error, vc, ec);
goto leave;
}
MONO_ENTER_NO_SAFEPOINTS;
ea = (gpointer*) mono_array_addr_with_size_internal (MONO_HANDLE_RAW (arr_handle), esize, pos);
if (!MONO_HANDLE_IS_NULL (value_handle))
va = (gpointer*) mono_object_unbox_internal (MONO_HANDLE_RAW (value_handle));
mono_nullable_init_unboxed ((guint8*)ea, va, ec);
MONO_EXIT_NO_SAFEPOINTS;
goto leave;
}
if (MONO_HANDLE_IS_NULL (value_handle)) {
MONO_ENTER_NO_SAFEPOINTS;
ea = (gpointer*) mono_array_addr_with_size_internal (MONO_HANDLE_RAW (arr_handle), esize, pos);
mono_gc_bzero_atomic (ea, esize);
MONO_EXIT_NO_SAFEPOINTS;
goto leave;
}
#define WIDENING_MSG NULL
#define WIDENING_ARG NULL
#define NO_WIDENING_CONVERSION G_STMT_START{ \
mono_error_set_argument (error, WIDENING_ARG, WIDENING_MSG); \
break; \
}G_STMT_END
#define CHECK_WIDENING_CONVERSION(extra) G_STMT_START{ \
if (esize < vsize + (extra)) { \
mono_error_set_argument (error, WIDENING_ARG, WIDENING_MSG); \
break; \
} \
}G_STMT_END
#define INVALID_CAST G_STMT_START{ \
mono_get_runtime_callbacks ()->set_cast_details (vc, ec); \
mono_error_set_invalid_cast (error); \
break; \
}G_STMT_END
MonoTypeEnum et;
et = m_class_get_byval_arg (ec)->type;
MonoTypeEnum vt;
vt = m_class_get_byval_arg (vc)->type;
/* Check element (destination) type. */
switch (et) {
case MONO_TYPE_STRING:
switch (vt) {
case MONO_TYPE_STRING:
break;
default:
INVALID_CAST;
}
break;
case MONO_TYPE_BOOLEAN:
switch (vt) {
case MONO_TYPE_BOOLEAN:
break;
case MONO_TYPE_CHAR:
case MONO_TYPE_U1:
case MONO_TYPE_U2:
case MONO_TYPE_U4:
case MONO_TYPE_U8:
case MONO_TYPE_I1:
case MONO_TYPE_I2:
case MONO_TYPE_I4:
case MONO_TYPE_I8:
case MONO_TYPE_R4:
case MONO_TYPE_R8:
NO_WIDENING_CONVERSION;
break;
default:
INVALID_CAST;
}
break;
default:
break;
}
if (!is_ok (error))
goto leave;
castOk = mono_object_handle_isinst_mbyref_raw (value_handle, ec, error);
if (!is_ok (error))
goto leave;
if (!m_class_is_valuetype (ec)) {
if (!castOk)
INVALID_CAST;
if (is_ok (error))
MONO_HANDLE_ARRAY_SETREF (arr_handle, pos, value_handle);
goto leave;
}
if (castOk) {
MONO_ENTER_NO_SAFEPOINTS;
ea = (gpointer*) mono_array_addr_with_size_internal (MONO_HANDLE_RAW (arr_handle), esize, pos);
va = (gpointer*) mono_object_unbox_internal (MONO_HANDLE_RAW (value_handle));
if (m_class_has_references (ec))
mono_value_copy_internal (ea, va, ec);
else
mono_gc_memmove_atomic (ea, va, esize);
MONO_EXIT_NO_SAFEPOINTS;
goto leave;
}
if (!m_class_is_valuetype (vc))
INVALID_CAST;
if (!is_ok (error))
goto leave;
vsize = mono_class_value_size (vc, NULL);
et_isenum = et == MONO_TYPE_VALUETYPE && m_class_is_enumtype (m_class_get_byval_arg (ec)->data.klass);
vt_isenum = vt == MONO_TYPE_VALUETYPE && m_class_is_enumtype (m_class_get_byval_arg (vc)->data.klass);
if (strict_enums && et_isenum && !vt_isenum) {
INVALID_CAST;
goto leave;
}
if (et_isenum)
et = mono_class_enum_basetype_internal (m_class_get_byval_arg (ec)->data.klass)->type;
if (vt_isenum)
vt = mono_class_enum_basetype_internal (m_class_get_byval_arg (vc)->data.klass)->type;
// Treat MONO_TYPE_U/I as MONO_TYPE_U8/I8/U4/I4
#if SIZEOF_VOID_P == 8
vt = vt == MONO_TYPE_U ? MONO_TYPE_U8 : (vt == MONO_TYPE_I ? MONO_TYPE_I8 : vt);
et = et == MONO_TYPE_U ? MONO_TYPE_U8 : (et == MONO_TYPE_I ? MONO_TYPE_I8 : et);
#else
vt = vt == MONO_TYPE_U ? MONO_TYPE_U4 : (vt == MONO_TYPE_I ? MONO_TYPE_I4 : vt);
et = et == MONO_TYPE_U ? MONO_TYPE_U4 : (et == MONO_TYPE_I ? MONO_TYPE_I4 : et);
#endif
#define ASSIGN_UNSIGNED(etype) G_STMT_START{\
switch (vt) { \
case MONO_TYPE_U1: \
case MONO_TYPE_U2: \
case MONO_TYPE_U4: \
case MONO_TYPE_U8: \
case MONO_TYPE_CHAR: \
CHECK_WIDENING_CONVERSION(0); \
*(etype *) ea = (etype) u64; \
break; \
	/* A signed value can't be assigned to an unsigned array unless sign checks are relaxed. */ \
case MONO_TYPE_I1: \
case MONO_TYPE_I2: \
case MONO_TYPE_I4: \
case MONO_TYPE_I8: \
if (!strict_signs) { \
CHECK_WIDENING_CONVERSION(0); \
*(etype *) ea = (etype) i64; \
break; \
} \
/* You can't assign a floating point number to an integer array. */ \
case MONO_TYPE_R4: \
case MONO_TYPE_R8: \
NO_WIDENING_CONVERSION; \
break; \
default: \
INVALID_CAST; \
break; \
} \
}G_STMT_END
#define ASSIGN_SIGNED(etype) G_STMT_START{\
switch (vt) { \
case MONO_TYPE_I1: \
case MONO_TYPE_I2: \
case MONO_TYPE_I4: \
case MONO_TYPE_I8: \
CHECK_WIDENING_CONVERSION(0); \
*(etype *) ea = (etype) i64; \
break; \
/* You can assign an unsigned value to a signed array if the array's */ \
/* element size is larger than the value size. */ \
case MONO_TYPE_U1: \
case MONO_TYPE_U2: \
case MONO_TYPE_U4: \
case MONO_TYPE_U8: \
case MONO_TYPE_CHAR: \
CHECK_WIDENING_CONVERSION(strict_signs ? 1 : 0); \
*(etype *) ea = (etype) u64; \
break; \
/* You can't assign a floating point number to an integer array. */ \
case MONO_TYPE_R4: \
case MONO_TYPE_R8: \
NO_WIDENING_CONVERSION; \
break; \
default: \
INVALID_CAST; \
break; \
} \
}G_STMT_END
#define ASSIGN_REAL(etype) G_STMT_START{\
switch (vt) { \
case MONO_TYPE_R4: \
case MONO_TYPE_R8: \
CHECK_WIDENING_CONVERSION(0); \
*(etype *) ea = (etype) r64; \
break; \
/* All integer values fit into a floating point array, so we don't */ \
/* need to CHECK_WIDENING_CONVERSION here. */ \
case MONO_TYPE_I1: \
case MONO_TYPE_I2: \
case MONO_TYPE_I4: \
case MONO_TYPE_I8: \
*(etype *) ea = (etype) i64; \
break; \
case MONO_TYPE_U1: \
case MONO_TYPE_U2: \
case MONO_TYPE_U4: \
case MONO_TYPE_U8: \
case MONO_TYPE_CHAR: \
*(etype *) ea = (etype) u64; \
break; \
default: \
INVALID_CAST; \
break; \
} \
}G_STMT_END
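
	/*
	 * Widening examples for the macros above (sketch):
	 *   ushort value into int[]  : esize=4, vsize=2; ASSIGN_SIGNED needs
	 *                              esize >= vsize + 1 under strict signs -> ok.
	 *   uint value into int[]    : esize=4, vsize=4; 4 >= 4 + 1 fails -> argument error.
	 *   long value into double[] : ASSIGN_REAL converts without a size check,
	 *                              since every integer fits in a double
	 *                              (possibly with rounding).
	 */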
MONO_ENTER_NO_SAFEPOINTS;
g_assert (!MONO_HANDLE_IS_NULL (value_handle));
g_assert (m_class_is_valuetype (vc));
va = (gpointer*) mono_object_unbox_internal (MONO_HANDLE_RAW (value_handle));
ea = (gpointer*) mono_array_addr_with_size_internal (MONO_HANDLE_RAW (arr_handle), esize, pos);
switch (vt) {
case MONO_TYPE_U1:
u64 = *(guint8 *) va;
break;
case MONO_TYPE_U2:
u64 = *(guint16 *) va;
break;
case MONO_TYPE_U4:
u64 = *(guint32 *) va;
break;
case MONO_TYPE_U8:
u64 = *(guint64 *) va;
break;
case MONO_TYPE_I1:
i64 = *(gint8 *) va;
break;
case MONO_TYPE_I2:
i64 = *(gint16 *) va;
break;
case MONO_TYPE_I4:
i64 = *(gint32 *) va;
break;
case MONO_TYPE_I8:
i64 = *(gint64 *) va;
break;
case MONO_TYPE_R4:
r64 = *(gfloat *) va;
break;
case MONO_TYPE_R8:
r64 = *(gdouble *) va;
break;
case MONO_TYPE_CHAR:
u64 = *(guint16 *) va;
break;
case MONO_TYPE_BOOLEAN:
/* Boolean is only compatible with itself. */
switch (et) {
case MONO_TYPE_CHAR:
case MONO_TYPE_U1:
case MONO_TYPE_U2:
case MONO_TYPE_U4:
case MONO_TYPE_U8:
case MONO_TYPE_I1:
case MONO_TYPE_I2:
case MONO_TYPE_I4:
case MONO_TYPE_I8:
case MONO_TYPE_R4:
case MONO_TYPE_R8:
NO_WIDENING_CONVERSION;
break;
default:
INVALID_CAST;
}
break;
default:
break;
}
/* If we can't do a direct copy, let's try a widening conversion. */
if (is_ok (error)) {
switch (et) {
case MONO_TYPE_CHAR:
ASSIGN_UNSIGNED (guint16);
break;
case MONO_TYPE_U1:
ASSIGN_UNSIGNED (guint8);
break;
case MONO_TYPE_U2:
ASSIGN_UNSIGNED (guint16);
break;
case MONO_TYPE_U4:
ASSIGN_UNSIGNED (guint32);
break;
case MONO_TYPE_U8:
ASSIGN_UNSIGNED (guint64);
break;
case MONO_TYPE_I1:
ASSIGN_SIGNED (gint8);
break;
case MONO_TYPE_I2:
ASSIGN_SIGNED (gint16);
break;
case MONO_TYPE_I4:
ASSIGN_SIGNED (gint32);
break;
case MONO_TYPE_I8:
ASSIGN_SIGNED (gint64);
break;
case MONO_TYPE_R4:
ASSIGN_REAL (gfloat);
break;
case MONO_TYPE_R8:
ASSIGN_REAL (gdouble);
break;
default:
INVALID_CAST;
}
}
MONO_EXIT_NO_SAFEPOINTS;
#undef INVALID_CAST
#undef NO_WIDENING_CONVERSION
#undef CHECK_WIDENING_CONVERSION
#undef ASSIGN_UNSIGNED
#undef ASSIGN_SIGNED
#undef ASSIGN_REAL
leave:
return;
}
void
ves_icall_System_Array_InternalCreate (MonoArray *volatile* result, MonoType* type, gint32 rank, gint32* pLengths, gint32* pLowerBounds)
{
ERROR_DECL (error);
MonoClass* klass = mono_class_from_mono_type_internal (type);
if (!mono_class_init_checked (klass, error))
goto exit;
if (m_class_get_byval_arg (m_class_get_element_class (klass))->type == MONO_TYPE_VOID) {
mono_error_set_not_supported (error, "Arrays of System.Void are not supported.");
goto exit;
}
if (m_type_is_byref (type) || m_class_is_byreflike (klass)) {
mono_error_set_not_supported (error, NULL);
goto exit;
}
MonoGenericClass *gklass;
gklass = mono_class_try_get_generic_class (klass);
if (is_generic_parameter (type) || mono_class_is_gtd (klass) || (gklass && gklass->context.class_inst->is_open)) {
mono_error_set_not_supported (error, NULL);
goto exit;
}
	/* Vectors are not the same as one-dimensional arrays with non-zero lower bounds. */
gboolean bounded;
bounded = pLowerBounds != NULL && rank == 1 && pLowerBounds [0] != 0;
MonoClass* aklass;
aklass = mono_class_create_bounded_array (klass, rank, bounded);
uintptr_t aklass_rank;
aklass_rank = m_class_get_rank (aklass);
uintptr_t* sizes;
sizes = g_newa (uintptr_t, aklass_rank * 2);
intptr_t* lower_bounds;
lower_bounds = (intptr_t*)(sizes + aklass_rank);
// Copy lengths and lower_bounds from gint32 to [u]intptr_t.
for (uintptr_t i = 0; i < aklass_rank; ++i) {
if (pLowerBounds != NULL) {
lower_bounds [i] = pLowerBounds [i];
if ((gint64) pLowerBounds [i] + (gint64) pLengths [i] > G_MAXINT32) {
mono_error_set_argument_out_of_range (error, NULL, "Length + bound must not exceed Int32.MaxValue.");
goto exit;
}
} else {
lower_bounds [i] = 0;
}
sizes [i] = pLengths [i];
}
*result = mono_array_new_full_checked (aklass, sizes, lower_bounds, error);
exit:
mono_error_set_pending_exception (error);
}
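
/*
 * Illustrative managed call (sketch): Array.CreateInstance (typeof (int),
 * new int[] { 3 }, new int[] { 10 }) reaches this icall with rank == 1,
 * pLengths[0] == 3 and pLowerBounds[0] == 10, producing a bounded "int[*]"
 * array rather than a vector.
 */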
gint32
ves_icall_System_Array_GetCorElementTypeOfElementType (MonoArrayHandle arr, MonoError *error)
{
MonoType *type = mono_type_get_underlying_type (m_class_get_byval_arg (m_class_get_element_class (mono_handle_class (arr))));
return type->type;
}
gint32
ves_icall_System_Array_IsValueOfElementType (MonoArrayHandle arr, MonoObjectHandle obj, MonoError *error)
{
return m_class_get_element_class (mono_handle_class (arr)) == mono_handle_class (obj);
}
static mono_array_size_t
mono_array_get_length (MonoArrayHandle arr, gint32 dimension, MonoError *error)
{
if (dimension < 0 || dimension >= m_class_get_rank (mono_handle_class (arr))) {
mono_error_set_index_out_of_range (error);
return 0;
}
return MONO_HANDLE_GETVAL (arr, bounds) ? MONO_HANDLE_GETVAL (arr, bounds [dimension].length)
: MONO_HANDLE_GETVAL (arr, max_length);
}
gint32
ves_icall_System_Array_GetLength (MonoArrayHandle arr, gint32 dimension, MonoError *error)
{
icallarray_print ("%s arr:%p dimension:%d\n", __func__, MONO_HANDLE_RAW (arr), (int)dimension);
mono_array_size_t const length = mono_array_get_length (arr, dimension, error);
if (length > G_MAXINT32) {
mono_error_set_overflow (error);
return 0;
}
return (gint32)length;
}
gint32
ves_icall_System_Array_GetLowerBound (MonoArrayHandle arr, gint32 dimension, MonoError *error)
{
icallarray_print ("%s arr:%p dimension:%d\n", __func__, MONO_HANDLE_RAW (arr), (int)dimension);
if (dimension < 0 || dimension >= m_class_get_rank (mono_handle_class (arr))) {
mono_error_set_index_out_of_range (error);
return 0;
}
return MONO_HANDLE_GETVAL (arr, bounds) ? MONO_HANDLE_GETVAL (arr, bounds [dimension].lower_bound)
: 0;
}
MonoBoolean
ves_icall_System_Array_FastCopy (MonoArrayHandle source, int source_idx, MonoArrayHandle dest, int dest_idx, int length, MonoError *error)
{
MonoVTable * const src_vtable = MONO_HANDLE_GETVAL (source, obj.vtable);
MonoVTable * const dest_vtable = MONO_HANDLE_GETVAL (dest, obj.vtable);
if (src_vtable->rank != dest_vtable->rank)
return FALSE;
MonoArrayBounds *source_bounds = MONO_HANDLE_GETVAL (source, bounds);
MonoArrayBounds *dest_bounds = MONO_HANDLE_GETVAL (dest, bounds);
for (int i = 0; i < src_vtable->rank; i++) {
if ((source_bounds && source_bounds [i].lower_bound > 0) ||
(dest_bounds && dest_bounds [i].lower_bound > 0))
return FALSE;
}
/* there's no integer overflow since mono_array_length_internal returns an unsigned integer */
if ((dest_idx + length > mono_array_handle_length (dest)) ||
(source_idx + length > mono_array_handle_length (source)))
return FALSE;
MonoClass * const src_class = m_class_get_element_class (src_vtable->klass);
MonoClass * const dest_class = m_class_get_element_class (dest_vtable->klass);
/*
* Handle common cases.
*/
	/* Case 1: object[] -> valuetype[] (ArrayList::ToArray)
	We fall back to managed code here since we need to typecheck each boxed valuetype before storing it in the dest array.
	*/
if (src_class == mono_defaults.object_class && m_class_is_valuetype (dest_class))
return FALSE;
/* Check if we're copying a char[] <==> (u)short[] */
if (src_class != dest_class) {
		if (m_class_is_valuetype (dest_class) || m_class_is_enumtype (dest_class) ||
			m_class_is_valuetype (src_class) || m_class_is_enumtype (src_class))
return FALSE;
/* It's only safe to copy between arrays if we can ensure the source will always have a subtype of the destination. We bail otherwise. */
if (!mono_class_is_subclass_of_internal (src_class, dest_class, FALSE))
return FALSE;
if (m_class_is_native_pointer (src_class) || m_class_is_native_pointer (dest_class))
return FALSE;
}
if (m_class_is_valuetype (dest_class)) {
gsize const element_size = mono_array_element_size (MONO_HANDLE_GETVAL (source, obj.vtable->klass));
		MONO_ENTER_NO_SAFEPOINTS; // a gchandle would also work here, but it is slow and breaks profiler tests.
gconstpointer const source_addr =
mono_array_addr_with_size_fast (MONO_HANDLE_RAW (source), element_size, source_idx);
if (m_class_has_references (dest_class)) {
mono_value_copy_array_handle (dest, dest_idx, source_addr, length);
} else {
gpointer const dest_addr =
mono_array_addr_with_size_fast (MONO_HANDLE_RAW (dest), element_size, dest_idx);
mono_gc_memmove_atomic (dest_addr, source_addr, element_size * length);
}
MONO_EXIT_NO_SAFEPOINTS;
} else {
mono_array_handle_memcpy_refs (dest, dest_idx, source, source_idx, length);
}
return TRUE;
}
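
/*
 * Note (sketch): returning FALSE here is not an error; it tells the managed
 * Array.Copy wrapper to take the slow, fully-checked path instead. For
 * example, copying object[] -> int[] must typecheck every boxed element, so
 * the fast path bails immediately.
 */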
void
ves_icall_System_Array_GetGenericValue_icall (MonoArray **arr, guint32 pos, gpointer value)
{
icallarray_print ("%s arr:%p pos:%u value:%p\n", __func__, *arr, pos, value);
MONO_REQ_GC_UNSAFE_MODE; // because of gpointer value
MonoClass * const ac = mono_object_class (*arr);
gsize const esize = mono_array_element_size (ac);
gconstpointer * const ea = (gconstpointer*)((char*)(*arr)->vector + (pos * esize));
mono_gc_memmove_atomic (value, ea, esize);
}
void
ves_icall_System_Array_SetGenericValue_icall (MonoArray **arr, guint32 pos, gpointer value)
{
icallarray_print ("%s arr:%p pos:%u value:%p\n", __func__, *arr, pos, value);
MONO_REQ_GC_UNSAFE_MODE; // because of gpointer value
MonoClass * const ac = mono_object_class (*arr);
MonoClass * const ec = m_class_get_element_class (ac);
gsize const esize = mono_array_element_size (ac);
gpointer * const ea = (gpointer*)((char*)(*arr)->vector + (pos * esize));
if (MONO_TYPE_IS_REFERENCE (m_class_get_byval_arg (ec))) {
g_assert (esize == sizeof (gpointer));
mono_gc_wbarrier_generic_store_internal (ea, *(MonoObject **)value);
} else {
g_assert (m_class_is_inited (ec));
g_assert (esize == mono_class_value_size (ec, NULL));
if (m_class_has_references (ec))
mono_gc_wbarrier_value_copy_internal (ea, value, 1, ec);
else
mono_gc_memmove_atomic (ea, value, esize);
}
}
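
/*
 * Note (sketch): reference-typed elements must be stored through
 * mono_gc_wbarrier_generic_store_internal so the GC observes the new
 * pointer; a plain memmove for such a store would be a GC hole under a
 * moving collector.
 */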
void
ves_icall_System_Runtime_RuntimeImports_Memmove (guint8 *destination, guint8 *source, size_t byte_count)
{
mono_gc_memmove_atomic (destination, source, byte_count);
}
void
ves_icall_System_Buffer_BulkMoveWithWriteBarrier (guint8 *destination, guint8 *source, size_t len, MonoType *type)
{
if (MONO_TYPE_IS_REFERENCE (type))
mono_gc_wbarrier_arrayref_copy_internal (destination, source, (guint)len);
else
mono_gc_wbarrier_value_copy_internal (destination, source, (guint)len, mono_class_from_mono_type_internal (type));
}
void
ves_icall_System_Runtime_RuntimeImports_ZeroMemory (guint8 *p, size_t byte_length)
{
memset (p, 0, byte_length);
}
gpointer
ves_icall_System_Runtime_CompilerServices_RuntimeHelpers_GetSpanDataFrom (MonoClassField *field_handle, MonoType_ptr targetTypeHandle, gpointer countPtr, MonoError *error)
{
gint32* count = (gint32*)countPtr;
MonoType *field_type = mono_field_get_type_checked (field_handle, error);
if (!field_type) {
mono_error_set_argument (error, "fldHandle", "fldHandle invalid");
return NULL;
}
if (!(field_type->attrs & FIELD_ATTRIBUTE_HAS_FIELD_RVA)) {
mono_error_set_argument_format (error, "field_handle", "Field '%s' doesn't have an RVA", mono_field_get_name (field_handle));
return NULL;
}
MonoType *type = targetTypeHandle;
if (MONO_TYPE_IS_REFERENCE (type) || type->type == MONO_TYPE_VALUETYPE) {
mono_error_set_argument (error, "array", "Cannot initialize array of non-primitive type");
return NULL;
}
int swizzle = 1;
int align;
#if G_BYTE_ORDER != G_LITTLE_ENDIAN
swizzle = mono_type_size (type, &align);
#endif
int dummy;
*count = mono_type_size (field_type, &dummy)/mono_type_size (type, &align);
return (gpointer)mono_field_get_rva (field_handle, swizzle);
}
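
/*
 * Example (sketch): for a compiler-generated RVA field backing a
 * ReadOnlySpan<int> of 4 elements, mono_type_size (field_type) is 16 and
 * mono_type_size (int) is 4, so *count becomes 4 and the returned pointer
 * addresses the raw blob inside the image.
 */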
void
ves_icall_System_Runtime_CompilerServices_RuntimeHelpers_InitializeArray (MonoArrayHandle array, MonoClassField *field_handle, MonoError *error)
{
MonoClass *klass = mono_handle_class (array);
guint32 size = mono_array_element_size (klass);
MonoType *type = mono_type_get_underlying_type (m_class_get_byval_arg (m_class_get_element_class (klass)));
int align;
const char *field_data;
if (MONO_TYPE_IS_REFERENCE (type) || type->type == MONO_TYPE_VALUETYPE) {
mono_error_set_argument (error, "array", "Cannot initialize array of non-primitive type");
return;
}
MonoType *field_type = mono_field_get_type_checked (field_handle, error);
if (!field_type)
return;
if (!(field_type->attrs & FIELD_ATTRIBUTE_HAS_FIELD_RVA)) {
mono_error_set_argument_format (error, "field_handle", "Field '%s' doesn't have an RVA", mono_field_get_name (field_handle));
return;
}
size *= MONO_HANDLE_GETVAL(array, max_length);
field_data = mono_field_get_data (field_handle);
if (size > mono_type_size (field_handle->type, &align)) {
mono_error_set_argument (error, "field_handle", "Field not large enough to fill array");
return;
}
#if G_BYTE_ORDER != G_LITTLE_ENDIAN
#define SWAP(n) { \
guint ## n *data = (guint ## n *) mono_array_addr_internal (MONO_HANDLE_RAW(array), char, 0); \
guint ## n *src = (guint ## n *) field_data; \
int i, \
nEnt = (size / sizeof(guint ## n)); \
\
for (i = 0; i < nEnt; i++) { \
data[i] = read ## n (&src[i]); \
} \
}
/* printf ("Initialize array with elements of %s type\n", klass->element_class->name); */
switch (type->type) {
case MONO_TYPE_CHAR:
case MONO_TYPE_I2:
case MONO_TYPE_U2:
SWAP (16);
break;
case MONO_TYPE_I4:
case MONO_TYPE_U4:
case MONO_TYPE_R4:
SWAP (32);
break;
case MONO_TYPE_I8:
case MONO_TYPE_U8:
case MONO_TYPE_R8:
SWAP (64);
break;
default:
memcpy (mono_array_addr_internal (MONO_HANDLE_RAW(array), char, 0), field_data, size);
break;
}
#else
memcpy (mono_array_addr_internal (MONO_HANDLE_RAW(array), char, 0), field_data, size);
#endif
}
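
/*
 * Illustrative trigger (sketch): a C# initializer such as
 *   static readonly int[] Data = { 1, 2, 3, 4 };
 * compiles to a RuntimeHelpers.InitializeArray call whose field handle
 * (via ldtoken) names an RVA blob in <PrivateImplementationDetails>; this
 * icall block-copies that blob into the array, byte-swapping on big-endian
 * targets.
 */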
MonoObjectHandle
ves_icall_System_Runtime_CompilerServices_RuntimeHelpers_GetObjectValue (MonoObjectHandle obj, MonoError *error)
{
if (MONO_HANDLE_IS_NULL (obj) || !m_class_is_valuetype (mono_handle_class (obj)))
return obj;
return mono_object_clone_handle (obj, error);
}
void
ves_icall_System_Runtime_CompilerServices_RuntimeHelpers_RunClassConstructor (MonoType *handle, MonoError *error)
{
MonoClass *klass;
MonoVTable *vtable;
MONO_CHECK_ARG_NULL (handle,);
klass = mono_class_from_mono_type_internal (handle);
MONO_CHECK_ARG (handle, klass,);
if (mono_class_is_gtd (klass))
return;
vtable = mono_class_vtable_checked (klass, error);
return_if_nok (error);
/* This will call the type constructor */
mono_runtime_class_init_full (vtable, error);
}
void
ves_icall_System_Runtime_CompilerServices_RuntimeHelpers_RunModuleConstructor (MonoImage *image, MonoError *error)
{
mono_image_check_for_module_cctor (image);
if (!image->has_module_cctor)
return;
MonoClass *module_klass = mono_class_get_checked (image, MONO_TOKEN_TYPE_DEF | 1, error);
return_if_nok (error);
MonoVTable * vtable = mono_class_vtable_checked (module_klass, error);
return_if_nok (error);
mono_runtime_class_init_full (vtable, error);
}
MonoBoolean
ves_icall_System_Runtime_CompilerServices_RuntimeHelpers_SufficientExecutionStack (void)
{
MonoThreadInfo *thread = mono_thread_info_current ();
void *current = &thread;
// Stack upper/lower bound should have been calculated and set as part of register_thread.
// If not, we are optimistic and assume there is enough room.
if (!thread->stack_start_limit || !thread->stack_end)
return TRUE;
// Stack start limit is stack lower bound. Make sure there is enough room left.
void *limit = ((uint8_t *)thread->stack_start_limit) + ALIGN_TO (MONO_STACK_OVERFLOW_GUARD_SIZE + MONO_MIN_EXECUTION_STACK_SIZE, ((gssize)mono_pagesize ()));
if (current < limit)
return FALSE;
if (mono_get_runtime_callbacks ()->is_interpreter_enabled () &&
!mono_get_runtime_callbacks ()->interp_sufficient_stack (MONO_MIN_EXECUTION_STACK_SIZE))
return FALSE;
return TRUE;
}
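
/*
 * Sketch: with a 4 KiB page size the check reduces to
 *   current >= stack_start_limit + ALIGN_TO (guard + min_exec_stack, 4096)
 * where "current" is approximated by the address of a local variable.
 */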
MonoObjectHandle
ves_icall_System_Runtime_CompilerServices_RuntimeHelpers_GetUninitializedObjectInternal (MonoType *handle, MonoError *error)
{
MonoClass *klass;
MonoVTable *vtable;
g_assert (handle);
klass = mono_class_from_mono_type_internal (handle);
if (m_class_is_string (klass)) {
mono_error_set_argument (error, NULL, NULL);
return NULL_HANDLE;
}
if (mono_class_is_array (klass) || mono_class_is_pointer (klass) || m_type_is_byref (handle)) {
mono_error_set_argument (error, NULL, NULL);
return NULL_HANDLE;
}
if (MONO_TYPE_IS_VOID (handle)) {
mono_error_set_argument (error, NULL, NULL);
return NULL_HANDLE;
}
if (m_class_is_abstract (klass) || m_class_is_interface (klass) || m_class_is_gtd (klass)) {
mono_error_set_member_access (error, NULL);
return NULL_HANDLE;
}
if (m_class_is_byreflike (klass)) {
mono_error_set_not_supported (error, NULL);
return NULL_HANDLE;
}
if (!mono_class_is_before_field_init (klass)) {
vtable = mono_class_vtable_checked (klass, error);
return_val_if_nok (error, NULL_HANDLE);
mono_runtime_class_init_full (vtable, error);
return_val_if_nok (error, NULL_HANDLE);
}
if (m_class_is_nullable (klass))
return mono_object_new_handle (m_class_get_nullable_elem_class (klass), error);
else
return mono_object_new_handle (klass, error);
}
void
ves_icall_System_Runtime_CompilerServices_RuntimeHelpers_PrepareMethod (MonoMethod *method, gpointer inst_types, int n_inst_types, MonoError *error)
{
if (method->flags & METHOD_ATTRIBUTE_ABSTRACT) {
mono_error_set_argument (error, NULL, NULL);
return;
}
MonoGenericContainer *container = NULL;
if (method->is_generic)
container = mono_method_get_generic_container (method);
else if (m_class_is_gtd (method->klass))
container = mono_class_get_generic_container (method->klass);
if (container) {
int nparams = container->type_argc + (container->parent ? container->parent->type_argc : 0);
if (nparams != n_inst_types) {
mono_error_set_argument (error, NULL, NULL);
return;
}
}
// FIXME: Implement
}
MonoObjectHandle
ves_icall_System_Object_MemberwiseClone (MonoObjectHandle this_obj, MonoError *error)
{
return mono_object_clone_handle (this_obj, error);
}
gint32
ves_icall_System_ValueType_InternalGetHashCode (MonoObjectHandle this_obj, MonoArrayHandleOut fields, MonoError *error)
{
MonoClass *klass;
MonoClassField **unhandled = NULL;
int count = 0;
gint32 result = (int)(gsize)mono_defaults.int32_class;
MonoClassField* field;
gpointer iter;
klass = mono_handle_class (this_obj);
if (mono_class_num_fields (klass) == 0)
return result;
/*
* Compute the starting value of the hashcode for fields of primitive
* types, and return the remaining fields in an array to the managed side.
* This way, we can avoid costly reflection operations in managed code.
*/
iter = NULL;
while ((field = mono_class_get_fields_internal (klass, &iter))) {
if (field->type->attrs & FIELD_ATTRIBUTE_STATIC)
continue;
if (mono_field_is_deleted (field))
continue;
gpointer addr = (guint8*)MONO_HANDLE_RAW (this_obj) + field->offset;
/* FIXME: Add more types */
switch (field->type->type) {
case MONO_TYPE_I4:
result ^= *(gint32*)addr;
break;
case MONO_TYPE_PTR:
result ^= mono_aligned_addr_hash (*(gpointer*)addr);
break;
case MONO_TYPE_STRING: {
MonoString *s;
s = *(MonoString**)addr;
if (s != NULL)
result ^= mono_string_hash_internal (s);
break;
}
default:
if (!unhandled)
unhandled = g_newa (MonoClassField*, mono_class_num_fields (klass));
unhandled [count ++] = field;
}
}
if (unhandled) {
MonoArrayHandle fields_arr = mono_array_new_handle (mono_defaults.object_class, count, error);
return_val_if_nok (error, 0);
MONO_HANDLE_ASSIGN (fields, fields_arr);
MonoObjectHandle h = MONO_HANDLE_NEW (MonoObject, NULL);
for (int i = 0; i < count; ++i) {
MonoObject *o = mono_field_get_value_object_checked (unhandled [i], MONO_HANDLE_RAW (this_obj), error);
return_val_if_nok (error, 0);
MONO_HANDLE_ASSIGN_RAW (h, o);
mono_array_handle_setref (fields_arr, i, h);
}
} else {
MONO_HANDLE_ASSIGN (fields, NULL_HANDLE);
}
return result;
}
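
/*
 * Example (sketch): for struct S { int A; string B; } both fields are
 * handled in the switch above, so their values fold into the hash here,
 * "fields" is assigned NULL, and managed code never resorts to reflection
 * for this type.
 */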
MonoBoolean
ves_icall_System_ValueType_Equals (MonoObjectHandle this_obj, MonoObjectHandle that, MonoArrayHandleOut fields, MonoError *error)
{
MonoClass *klass;
MonoClassField **unhandled = NULL;
MonoClassField* field;
gpointer iter;
int count = 0;
MONO_CHECK_ARG_NULL_HANDLE (that, FALSE);
MONO_HANDLE_ASSIGN (fields, NULL_HANDLE);
if (mono_handle_vtable (this_obj) != mono_handle_vtable (that))
return FALSE;
klass = mono_handle_class (this_obj);
if (m_class_is_enumtype (klass) && mono_class_enum_basetype_internal (klass) && mono_class_enum_basetype_internal (klass)->type == MONO_TYPE_I4)
return *(gint32*)mono_handle_get_data_unsafe (this_obj) == *(gint32*)mono_handle_get_data_unsafe (that);
/*
* Do the comparison for fields of primitive type and return a result if
* possible. Otherwise, return the remaining fields in an array to the
* managed side. This way, we can avoid costly reflection operations in
* managed code.
*/
iter = NULL;
while ((field = mono_class_get_fields_internal (klass, &iter))) {
if (field->type->attrs & FIELD_ATTRIBUTE_STATIC)
continue;
if (mono_field_is_deleted (field))
continue;
guint8 *this_field = (guint8 *)MONO_HANDLE_RAW (this_obj) + field->offset;
guint8 *that_field = (guint8 *)MONO_HANDLE_RAW (that) + field->offset;
#define UNALIGNED_COMPARE(type) \
do { \
type left, right; \
memcpy (&left, this_field, sizeof (type)); \
memcpy (&right, that_field, sizeof (type)); \
if (left != right) \
return FALSE; \
} while (0)
/* FIXME: Add more types */
switch (field->type->type) {
case MONO_TYPE_U1:
case MONO_TYPE_I1:
case MONO_TYPE_BOOLEAN:
if (*this_field != *that_field)
return FALSE;
break;
case MONO_TYPE_U2:
case MONO_TYPE_I2:
case MONO_TYPE_CHAR:
#ifdef NO_UNALIGNED_ACCESS
if (G_UNLIKELY ((intptr_t) this_field & 1 || (intptr_t) that_field & 1))
UNALIGNED_COMPARE (gint16);
else
#endif
if (*(gint16 *) this_field != *(gint16 *) that_field)
return FALSE;
break;
case MONO_TYPE_U4:
case MONO_TYPE_I4:
#ifdef NO_UNALIGNED_ACCESS
if (G_UNLIKELY ((intptr_t) this_field & 3 || (intptr_t) that_field & 3))
UNALIGNED_COMPARE (gint32);
else
#endif
if (*(gint32 *) this_field != *(gint32 *) that_field)
return FALSE;
break;
case MONO_TYPE_U8:
case MONO_TYPE_I8:
#ifdef NO_UNALIGNED_ACCESS
if (G_UNLIKELY ((intptr_t) this_field & 7 || (intptr_t) that_field & 7))
UNALIGNED_COMPARE (gint64);
else
#endif
if (*(gint64 *) this_field != *(gint64 *) that_field)
return FALSE;
break;
case MONO_TYPE_R4: {
float d1, d2;
#ifdef NO_UNALIGNED_ACCESS
memcpy (&d1, this_field, sizeof (float));
memcpy (&d2, that_field, sizeof (float));
#else
d1 = *(float *) this_field;
d2 = *(float *) that_field;
#endif
if (d1 != d2 && !(mono_isnan (d1) && mono_isnan (d2)))
return FALSE;
break;
}
case MONO_TYPE_R8: {
double d1, d2;
#ifdef NO_UNALIGNED_ACCESS
memcpy (&d1, this_field, sizeof (double));
memcpy (&d2, that_field, sizeof (double));
#else
d1 = *(double *) this_field;
d2 = *(double *) that_field;
#endif
if (d1 != d2 && !(mono_isnan (d1) && mono_isnan (d2)))
return FALSE;
break;
}
case MONO_TYPE_PTR:
#ifdef NO_UNALIGNED_ACCESS
if (G_UNLIKELY ((intptr_t) this_field & 7 || (intptr_t) that_field & 7))
UNALIGNED_COMPARE (gpointer);
else
#endif
if (*(gpointer *) this_field != *(gpointer *) that_field)
return FALSE;
break;
case MONO_TYPE_STRING: {
MonoString *s1, *s2;
guint32 s1len, s2len;
s1 = *(MonoString**)this_field;
s2 = *(MonoString**)that_field;
if (s1 == s2)
break;
if ((s1 == NULL) || (s2 == NULL))
return FALSE;
s1len = mono_string_length_internal (s1);
s2len = mono_string_length_internal (s2);
if (s1len != s2len)
return FALSE;
if (memcmp (mono_string_chars_internal (s1), mono_string_chars_internal (s2), s1len * sizeof (gunichar2)) != 0)
return FALSE;
break;
}
default:
if (!unhandled)
unhandled = g_newa (MonoClassField*, mono_class_num_fields (klass));
unhandled [count ++] = field;
}
#undef UNALIGNED_COMPARE
if (m_class_is_enumtype (klass))
/* enums only have one non-static field */
break;
}
if (unhandled) {
MonoArrayHandle fields_arr = mono_array_new_handle (mono_defaults.object_class, count * 2, error);
return_val_if_nok (error, 0);
MONO_HANDLE_ASSIGN (fields, fields_arr);
MonoObjectHandle h = MONO_HANDLE_NEW (MonoObject, NULL);
for (int i = 0; i < count; ++i) {
MonoObject *o = mono_field_get_value_object_checked (unhandled [i], MONO_HANDLE_RAW (this_obj), error);
return_val_if_nok (error, FALSE);
MONO_HANDLE_ASSIGN_RAW (h, o);
mono_array_handle_setref (fields_arr, i * 2, h);
o = mono_field_get_value_object_checked (unhandled [i], MONO_HANDLE_RAW (that), error);
return_val_if_nok (error, FALSE);
MONO_HANDLE_ASSIGN_RAW (h, o);
mono_array_handle_setref (fields_arr, (i * 2) + 1, h);
}
return FALSE;
} else {
return TRUE;
}
}
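
/*
 * Note on the "fields" layout: when some field type is not handled above,
 * the returned array interleaves the values pairwise, e.g. for two
 * unhandled fields f0 and f1 it holds
 *   [ this.f0, that.f0, this.f1, that.f1 ]
 * and the managed caller compares each pair itself.
 */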
static gboolean
get_executing (MonoMethod *m, gint32 no, gint32 ilo, gboolean managed, gpointer data)
{
MonoMethod **dest = (MonoMethod **)data;
/* skip unmanaged frames */
if (!managed)
return FALSE;
if (!(*dest)) {
if (!strcmp (m_class_get_name_space (m->klass), "System.Reflection"))
return FALSE;
*dest = m;
return TRUE;
}
return FALSE;
}
static gboolean
in_corlib_name_space (MonoClass *klass, const char *name_space)
{
return m_class_get_image (klass) == mono_defaults.corlib &&
!strcmp (m_class_get_name_space (klass), name_space);
}
static gboolean
get_caller_no_reflection (MonoMethod *m, gint32 no, gint32 ilo, gboolean managed, gpointer data)
{
MonoMethod **dest = (MonoMethod **)data;
/* skip unmanaged frames */
if (!managed)
return FALSE;
if (m->wrapper_type != MONO_WRAPPER_NONE)
return FALSE;
if (m == *dest) {
*dest = NULL;
return FALSE;
}
if (in_corlib_name_space (m->klass, "System.Reflection"))
return FALSE;
if (!(*dest)) {
*dest = m;
return TRUE;
}
return FALSE;
}
static gboolean
get_caller_no_system_or_reflection (MonoMethod *m, gint32 no, gint32 ilo, gboolean managed, gpointer data)
{
MonoMethod **dest = (MonoMethod **)data;
/* skip unmanaged frames */
if (!managed)
return FALSE;
if (m->wrapper_type != MONO_WRAPPER_NONE)
return FALSE;
if (m == *dest) {
*dest = NULL;
return FALSE;
}
if (in_corlib_name_space (m->klass, "System.Reflection") || in_corlib_name_space (m->klass, "System"))
return FALSE;
if (!(*dest)) {
*dest = m;
return TRUE;
}
return FALSE;
}
/**
* mono_runtime_get_caller_no_system_or_reflection:
*
* Walk the stack of the current thread and find the first managed method that
* is not in the mscorlib System or System.Reflection namespace. This skips
* unmanaged callers and wrapper methods.
*
* \returns a pointer to the \c MonoMethod or NULL if we walked past all the
* callers.
*/
MonoMethod*
mono_runtime_get_caller_no_system_or_reflection (void)
{
MonoMethod *dest = NULL;
mono_stack_walk_no_il (get_caller_no_system_or_reflection, &dest);
return dest;
}
/*
* mono_runtime_get_caller_from_stack_mark:
*
* Walk the stack and return the assembly of the method referenced
* by the stack mark STACK_MARK.
*/
MonoAssembly*
mono_runtime_get_caller_from_stack_mark (MonoStackCrawlMark *stack_mark)
{
// FIXME: Use the stack mark
MonoMethod *dest = NULL;
mono_stack_walk_no_il (get_caller_no_system_or_reflection, &dest);
if (dest)
return m_class_get_image (dest->klass)->assembly;
else
return NULL;
}
static MonoReflectionType*
type_from_parsed_name (MonoTypeNameParse *info, MonoStackCrawlMark *stack_mark, MonoBoolean ignoreCase, MonoAssembly **caller_assembly, MonoError *error)
{
MonoMethod *m;
MonoType *type = NULL;
MonoAssembly *assembly = NULL;
gboolean type_resolve = FALSE;
MonoImage *rootimage = NULL;
MonoAssemblyLoadContext *alc = mono_alc_get_ambient ();
/*
* We must compute the calling assembly as type loading must happen under a metadata context.
	 * For example: the main assembly is a.exe and Type.GetType is called from dir/b.dll. Without
* the metadata context (basedir currently) set to dir/b.dll we won't be able to load a dir/c.dll.
*/
m = mono_method_get_last_managed ();
if (m && m_class_get_image (m->klass) != mono_defaults.corlib) {
/* Happens with inlining */
assembly = m_class_get_image (m->klass)->assembly;
} else {
assembly = mono_runtime_get_caller_from_stack_mark (stack_mark);
}
if (assembly) {
type_resolve = TRUE;
rootimage = assembly->image;
} else {
// FIXME: once wasm can use stack marks, consider turning all this into an assert
g_warning (G_STRLOC);
}
*caller_assembly = assembly;
if (info->assembly.name) {
MonoAssemblyByNameRequest req;
mono_assembly_request_prepare_byname (&req, alc);
req.requesting_assembly = assembly;
req.basedir = assembly ? assembly->basedir : NULL;
assembly = mono_assembly_request_byname (&info->assembly, &req, NULL);
}
if (assembly) {
/* When loading from the current assembly, AppDomain.TypeResolve will not be called yet */
type = mono_reflection_get_type_checked (alc, rootimage, assembly->image, info, ignoreCase, TRUE, &type_resolve, error);
goto_if_nok (error, fail);
}
// XXXX - aleksey -
// Say we're looking for System.Generic.Dict<int, Local>
// we FAIL the get type above, because S.G.Dict isn't in assembly->image. So we drop down here.
	// But then we FAIL AGAIN because now we pass null as the image and the rootimage and everything
// is messed up when we go to construct the Local as the type arg...
//
// By contrast, if we started with Mine<System.Generic.Dict<int, Local>> we'd go in with assembly->image
// as the root and then even the detour into generics would still not cause issues when we went to load Local.
if (!info->assembly.name && !type) {
/* try mscorlib */
type = mono_reflection_get_type_checked (alc, rootimage, NULL, info, ignoreCase, TRUE, &type_resolve, error);
goto_if_nok (error, fail);
}
if (assembly && !type && type_resolve) {
type_resolve = FALSE; /* This will invoke TypeResolve if not done in the first 'if' */
type = mono_reflection_get_type_checked (alc, rootimage, assembly->image, info, ignoreCase, TRUE, &type_resolve, error);
goto_if_nok (error, fail);
}
if (!type)
goto fail;
return mono_type_get_object_checked (type, error);
fail:
return NULL;
}
void
ves_icall_System_RuntimeTypeHandle_internal_from_name (char *name,
MonoStackCrawlMark *stack_mark,
MonoObjectHandleOnStack res,
MonoBoolean throwOnError,
MonoBoolean ignoreCase,
MonoError *error)
{
MonoTypeNameParse info;
gboolean free_info = FALSE;
MonoAssembly *caller_assembly;
free_info = TRUE;
if (!mono_reflection_parse_type_checked (name, &info, error))
goto leave;
/* mono_reflection_parse_type() mangles the string */
HANDLE_ON_STACK_SET (res, type_from_parsed_name (&info, (MonoStackCrawlMark*)stack_mark, ignoreCase, &caller_assembly, error));
goto_if_nok (error, leave);
if (!(*res)) {
if (throwOnError) {
char *tname = info.name_space ? g_strdup_printf ("%s.%s", info.name_space, info.name) : g_strdup (info.name);
char *aname;
if (info.assembly.name)
aname = mono_stringify_assembly_name (&info.assembly);
else if (caller_assembly)
aname = mono_stringify_assembly_name (mono_assembly_get_name_internal (caller_assembly));
else
aname = g_strdup ("");
mono_error_set_type_load_name (error, tname, aname, "");
}
goto leave;
}
leave:
if (free_info)
mono_reflection_free_type_info (&info);
if (!is_ok (error)) {
if (!throwOnError) {
mono_error_cleanup (error);
error_init (error);
}
}
}
MonoReflectionTypeHandle
ves_icall_System_Type_internal_from_handle (MonoType *handle, MonoError *error)
{
return mono_type_get_object_handle (handle, error);
}
MonoType*
ves_icall_Mono_RuntimeClassHandle_GetTypeFromClass (MonoClass *klass)
{
return m_class_get_byval_arg (klass);
}
void
ves_icall_Mono_RuntimeGPtrArrayHandle_GPtrArrayFree (GPtrArray *ptr_array)
{
g_ptr_array_free (ptr_array, TRUE);
}
void
ves_icall_Mono_SafeStringMarshal_GFree (void *c_str)
{
g_free (c_str);
}
char*
ves_icall_Mono_SafeStringMarshal_StringToUtf8 (MonoString *volatile* s)
{
ERROR_DECL (error);
char *result = mono_string_to_utf8_checked_internal (*s, error);
mono_error_set_pending_exception (error);
return result;
}
/* System.TypeCode */
typedef enum {
TYPECODE_EMPTY,
TYPECODE_OBJECT,
TYPECODE_DBNULL,
TYPECODE_BOOLEAN,
TYPECODE_CHAR,
TYPECODE_SBYTE,
TYPECODE_BYTE,
TYPECODE_INT16,
TYPECODE_UINT16,
TYPECODE_INT32,
TYPECODE_UINT32,
TYPECODE_INT64,
TYPECODE_UINT64,
TYPECODE_SINGLE,
TYPECODE_DOUBLE,
TYPECODE_DECIMAL,
TYPECODE_DATETIME,
TYPECODE_STRING = 18
} TypeCode;
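
/* These values mirror System.TypeCode; 17 is unused there, hence the
 * explicit TYPECODE_STRING = 18. */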
MonoBoolean
ves_icall_RuntimeTypeHandle_type_is_assignable_from (MonoQCallTypeHandle type_handle, MonoQCallTypeHandle c_handle, MonoError *error)
{
MonoType *type = type_handle.type;
MonoClass *klass = mono_class_from_mono_type_internal (type);
MonoType *ctype = c_handle.type;
MonoClass *klassc = mono_class_from_mono_type_internal (ctype);
if (m_type_is_byref (type) ^ m_type_is_byref (ctype))
return FALSE;
if (m_type_is_byref (type)) {
return mono_byref_type_is_assignable_from (type, ctype, FALSE);
}
gboolean result;
mono_class_is_assignable_from_checked (klass, klassc, &result, error);
return result;
}
MonoBoolean
ves_icall_RuntimeTypeHandle_is_subclass_of (MonoQCallTypeHandle child_handle, MonoQCallTypeHandle base_handle, MonoError *error)
{
MonoType *childType = child_handle.type;
MonoType *baseType = base_handle.type;
mono_bool result = FALSE;
MonoClass *childClass;
MonoClass *baseClass;
childClass = mono_class_from_mono_type_internal (childType);
baseClass = mono_class_from_mono_type_internal (baseType);
if (G_UNLIKELY (m_type_is_byref (childType)))
return !m_type_is_byref (baseType) && baseClass == mono_defaults.object_class;
if (G_UNLIKELY (m_type_is_byref (baseType)))
return FALSE;
if (childType == baseType)
/* .NET IsSubclassOf is not reflexive */
return FALSE;
if (G_UNLIKELY (is_generic_parameter (childType))) {
/* slow path: walk the type hierarchy looking at base types
* until we see baseType. If the current type is not a gparam,
* break out of the loop and use is_subclass_of.
*/
MonoClass *c = mono_generic_param_get_base_type (childClass);
result = FALSE;
while (c != NULL) {
if (c == baseClass)
return TRUE;
if (!is_generic_parameter (m_class_get_byval_arg (c)))
return mono_class_is_subclass_of_internal (c, baseClass, FALSE);
else
c = mono_generic_param_get_base_type (c);
}
return result;
} else {
return mono_class_is_subclass_of_internal (childClass, baseClass, FALSE);
}
}
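
/*
 * Example (sketch): typeof (string).IsSubclassOf (typeof (object)) is TRUE,
 * but typeof (object).IsSubclassOf (typeof (object)) is FALSE because the
 * childType == baseType early-out keeps the relation irreflexive.
 */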
guint32
ves_icall_RuntimeTypeHandle_IsInstanceOfType (MonoQCallTypeHandle type_handle, MonoObjectHandle obj, MonoError *error)
{
MonoType *type = type_handle.type;
MonoClass *klass = mono_class_from_mono_type_internal (type);
mono_class_init_checked (klass, error);
return_val_if_nok (error, FALSE);
MonoObjectHandle inst = mono_object_handle_isinst (obj, klass, error);
return_val_if_nok (error, FALSE);
return !MONO_HANDLE_IS_NULL (inst);
}
guint32
ves_icall_RuntimeTypeHandle_GetAttributes (MonoQCallTypeHandle type_handle)
{
MonoType *type = type_handle.type;
if (m_type_is_byref (type) || type->type == MONO_TYPE_PTR || type->type == MONO_TYPE_FNPTR)
return TYPE_ATTRIBUTE_PUBLIC;
MonoClass *klass = mono_class_from_mono_type_internal (type);
return mono_class_get_flags (klass);
}
guint32
ves_icall_RuntimeTypeHandle_GetMetadataToken (MonoQCallTypeHandle type_handle, MonoError *error)
{
MonoType *type = type_handle.type;
MonoClass *mc = mono_class_from_mono_type_internal (type);
if (!mono_class_init_internal (mc)) {
mono_error_set_for_class_failure (error, mc);
return 0;
}
return m_class_get_type_token (mc);
}
MonoReflectionMarshalAsAttributeHandle
ves_icall_System_Reflection_FieldInfo_get_marshal_info (MonoReflectionFieldHandle field_h, MonoError *error)
{
MonoClassField *field = MONO_HANDLE_GETVAL (field_h, field);
MonoClass *klass = m_field_get_parent (field);
MonoGenericClass *gklass = mono_class_try_get_generic_class (klass);
if (mono_class_is_gtd (klass) ||
(gklass && gklass->context.class_inst->is_open))
return MONO_HANDLE_CAST (MonoReflectionMarshalAsAttribute, NULL_HANDLE);
MonoType *ftype = mono_field_get_type_internal (field);
if (ftype && !(ftype->attrs & FIELD_ATTRIBUTE_HAS_FIELD_MARSHAL))
return MONO_HANDLE_CAST (MonoReflectionMarshalAsAttribute, NULL_HANDLE);
MonoMarshalType *info = mono_marshal_load_type_info (klass);
for (int i = 0; i < info->num_fields; ++i) {
if (info->fields [i].field == field) {
if (!info->fields [i].mspec)
return MONO_HANDLE_CAST (MonoReflectionMarshalAsAttribute, NULL_HANDLE);
else {
return mono_reflection_marshal_as_attribute_from_marshal_spec (klass, info->fields [i].mspec, error);
}
}
}
return MONO_HANDLE_CAST (MonoReflectionMarshalAsAttribute, NULL_HANDLE);
}
MonoReflectionFieldHandle
ves_icall_System_Reflection_FieldInfo_internal_from_handle_type (MonoClassField *handle, MonoType *type, MonoError *error)
{
MonoClass *klass;
g_assert (handle);
if (!type) {
klass = m_field_get_parent (handle);
} else {
klass = mono_class_from_mono_type_internal (type);
gboolean found = klass == m_field_get_parent (handle) || mono_class_has_parent (klass, m_field_get_parent (handle));
if (!found)
/* The managed code will throw the exception */
return MONO_HANDLE_CAST (MonoReflectionField, NULL_HANDLE);
}
return mono_field_get_object_handle (klass, handle, error);
}
MonoReflectionEventHandle
ves_icall_System_Reflection_EventInfo_internal_from_handle_type (MonoEvent *handle, MonoType *type, MonoError *error)
{
MonoClass *klass;
g_assert (handle);
if (!type) {
klass = handle->parent;
} else {
klass = mono_class_from_mono_type_internal (type);
gboolean found = klass == handle->parent || mono_class_has_parent (klass, handle->parent);
if (!found)
/* Managed code will throw an exception */
return MONO_HANDLE_CAST (MonoReflectionEvent, NULL_HANDLE);
}
return mono_event_get_object_handle (klass, handle, error);
}
MonoReflectionPropertyHandle
ves_icall_System_Reflection_RuntimePropertyInfo_internal_from_handle_type (MonoProperty *handle, MonoType *type, MonoError *error)
{
MonoClass *klass;
g_assert (handle);
if (!type) {
klass = handle->parent;
} else {
klass = mono_class_from_mono_type_internal (type);
gboolean found = klass == handle->parent || mono_class_has_parent (klass, handle->parent);
if (!found)
/* Managed code will throw an exception */
return MONO_HANDLE_CAST (MonoReflectionProperty, NULL_HANDLE);
}
return mono_property_get_object_handle (klass, handle, error);
}
MonoArrayHandle
ves_icall_System_Reflection_FieldInfo_GetTypeModifiers (MonoReflectionFieldHandle field_h, MonoBoolean optional, MonoError *error)
{
MonoClassField *field = MONO_HANDLE_GETVAL (field_h, field);
MonoType *type = mono_field_get_type_checked (field, error);
return_val_if_nok (error, NULL_HANDLE_ARRAY);
return type_array_from_modifiers (type, optional, error);
}
int
ves_icall_get_method_attributes (MonoMethod *method)
{
return method->flags;
}
void
ves_icall_get_method_info (MonoMethod *method, MonoMethodInfo *info, MonoError *error)
{
MonoMethodSignature* sig = mono_method_signature_checked (method, error);
return_if_nok (error);
MonoReflectionTypeHandle rt = mono_type_get_object_handle (m_class_get_byval_arg (method->klass), error);
return_if_nok (error);
MONO_STRUCT_SETREF_INTERNAL (info, parent, MONO_HANDLE_RAW (rt));
MONO_HANDLE_ASSIGN (rt, mono_type_get_object_handle (sig->ret, error));
return_if_nok (error);
MONO_STRUCT_SETREF_INTERNAL (info, ret, MONO_HANDLE_RAW (rt));
info->attrs = method->flags;
info->implattrs = method->iflags;
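	/* Managed System.Reflection.CallingConventions encoding: 1 == Standard,
	 * 2 == VarArgs, bit 5 == HasThis, bit 6 == ExplicitThis (see the shifts
	 * below). */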
guint32 callconv;
if (sig->call_convention == MONO_CALL_DEFAULT)
callconv = sig->sentinelpos >= 0 ? 2 : 1;
else {
if (sig->call_convention == MONO_CALL_VARARG || sig->sentinelpos >= 0)
callconv = 2;
else
callconv = 1;
}
callconv |= (sig->hasthis << 5) | (sig->explicit_this << 6);
info->callconv = callconv;
}
MonoArrayHandle
ves_icall_System_Reflection_MonoMethodInfo_get_parameter_info (MonoMethod *method, MonoReflectionMethodHandle member, MonoError *error)
{
MonoReflectionTypeHandle reftype = MONO_HANDLE_NEW (MonoReflectionType, NULL);
MONO_HANDLE_GET (reftype, member, reftype);
MonoClass *klass = NULL;
if (!MONO_HANDLE_IS_NULL (reftype))
klass = mono_class_from_mono_type_internal (MONO_HANDLE_GETVAL (reftype, type));
return mono_param_get_objects_internal (method, klass, error);
}
MonoReflectionMarshalAsAttributeHandle
ves_icall_System_MonoMethodInfo_get_retval_marshal (MonoMethod *method, MonoError *error)
{
MonoReflectionMarshalAsAttributeHandle res = MONO_HANDLE_NEW (MonoReflectionMarshalAsAttribute, NULL);
MonoMarshalSpec **mspecs = g_new (MonoMarshalSpec*, mono_method_signature_internal (method)->param_count + 1);
mono_method_get_marshal_info (method, mspecs);
if (mspecs [0]) {
MONO_HANDLE_ASSIGN (res, mono_reflection_marshal_as_attribute_from_marshal_spec (method->klass, mspecs [0], error));
goto_if_nok (error, leave);
}
leave:
for (int i = mono_method_signature_internal (method)->param_count; i >= 0; i--)
if (mspecs [i])
mono_metadata_free_marshal_spec (mspecs [i]);
g_free (mspecs);
return res;
}
gint32
ves_icall_RuntimeFieldInfo_GetFieldOffset (MonoReflectionFieldHandle field, MonoError *error)
{
MonoClassField *class_field = MONO_HANDLE_GETVAL (field, field);
mono_class_setup_fields (m_field_get_parent (class_field));
return class_field->offset - MONO_ABI_SIZEOF (MonoObject);
}
MonoReflectionTypeHandle
ves_icall_RuntimeFieldInfo_GetParentType (MonoReflectionFieldHandle field, MonoBoolean declaring, MonoError *error)
{
MonoClass *parent;
if (declaring) {
MonoClassField *f = MONO_HANDLE_GETVAL (field, field);
parent = m_field_get_parent (f);
} else {
parent = MONO_HANDLE_GETVAL (field, klass);
}
return mono_type_get_object_handle (m_class_get_byval_arg (parent), error);
}
MonoObjectHandle
ves_icall_RuntimeFieldInfo_GetValueInternal (MonoReflectionFieldHandle field_handle, MonoObjectHandle obj_handle, MonoError *error)
{
MonoReflectionField * const field = MONO_HANDLE_RAW (field_handle);
MonoClassField *cf = field->field;
MonoObject * const obj = MONO_HANDLE_RAW (obj_handle);
MonoObject *result;
result = mono_field_get_value_object_checked (cf, obj, error);
return MONO_HANDLE_NEW (MonoObject, result);
}
void
ves_icall_RuntimeFieldInfo_SetValueInternal (MonoReflectionFieldHandle field, MonoObjectHandle obj, MonoObjectHandle value, MonoError *error)
{
MonoClassField *cf = MONO_HANDLE_GETVAL (field, field);
MonoType *type = mono_field_get_type_checked (cf, error);
return_if_nok (error);
gboolean isref = FALSE;
MonoGCHandle value_gchandle = 0;
gchar *v = NULL;
if (!m_type_is_byref (type)) {
switch (type->type) {
case MONO_TYPE_U1:
case MONO_TYPE_I1:
case MONO_TYPE_BOOLEAN:
case MONO_TYPE_U2:
case MONO_TYPE_I2:
case MONO_TYPE_CHAR:
case MONO_TYPE_U:
case MONO_TYPE_I:
case MONO_TYPE_U4:
case MONO_TYPE_I4:
case MONO_TYPE_R4:
case MONO_TYPE_U8:
case MONO_TYPE_I8:
case MONO_TYPE_R8:
case MONO_TYPE_VALUETYPE:
case MONO_TYPE_PTR:
isref = FALSE;
if (!MONO_HANDLE_IS_NULL (value)) {
if (m_class_is_valuetype (mono_handle_class (value)))
v = (char*)mono_object_handle_pin_unbox (value, &value_gchandle);
else {
char* n = g_strdup_printf ("Object of type '%s' cannot be converted to type '%s'.", m_class_get_name (mono_handle_class (value)), m_class_get_name (mono_class_from_mono_type_internal (type)));
mono_error_set_argument (error, cf->name, n);
g_free (n);
return;
}
}
break;
case MONO_TYPE_STRING:
case MONO_TYPE_OBJECT:
case MONO_TYPE_CLASS:
case MONO_TYPE_ARRAY:
case MONO_TYPE_SZARRAY:
/* Do nothing */
isref = TRUE;
break;
case MONO_TYPE_GENERICINST: {
MonoGenericClass *gclass = type->data.generic_class;
g_assert (!gclass->context.class_inst->is_open);
if (mono_class_is_nullable (mono_class_from_mono_type_internal (type))) {
MonoClass *nklass = mono_class_from_mono_type_internal (type);
/*
* Convert the boxed vtype into a Nullable structure.
* This is complicated by the fact that Nullables have
* a variable structure.
*/
MonoObjectHandle nullable = mono_object_new_handle (nklass, error);
return_if_nok (error);
MonoGCHandle nullable_gchandle = 0;
guint8 *nval = (guint8*)mono_object_handle_pin_unbox (nullable, &nullable_gchandle);
mono_nullable_init_from_handle (nval, value, nklass);
isref = FALSE;
value_gchandle = nullable_gchandle;
v = (gchar*)nval;
}
else {
isref = !m_class_is_valuetype (gclass->container_class);
if (!isref && !MONO_HANDLE_IS_NULL (value)) {
v = (char*)mono_object_handle_pin_unbox (value, &value_gchandle);
};
}
break;
}
default:
g_error ("type 0x%x not handled in "
"ves_icall_FieldInfo_SetValueInternal", type->type);
return;
}
}
/* either value is a reference type, or it's a value type and we pinned
* it and v points to the payload. */
g_assert ((isref && v == NULL && value_gchandle == 0) ||
(!isref && v != NULL && value_gchandle != 0) ||
(!isref && v == NULL && value_gchandle == 0));
if (type->attrs & FIELD_ATTRIBUTE_STATIC) {
MonoVTable *vtable = mono_class_vtable_checked (m_field_get_parent (cf), error);
goto_if_nok (error, leave);
if (!vtable->initialized) {
if (!mono_runtime_class_init_full (vtable, error))
goto leave;
}
if (isref)
mono_field_static_set_value_internal (vtable, cf, MONO_HANDLE_RAW (value)); /* FIXME make mono_field_static_set_value work with handles for value */
else
mono_field_static_set_value_internal (vtable, cf, v);
} else {
if (isref)
MONO_HANDLE_SET_FIELD_REF (obj, cf, value);
else
mono_field_set_value_internal (MONO_HANDLE_RAW (obj), cf, v); /* FIXME: make mono_field_set_value take a handle for obj */
}
leave:
if (value_gchandle)
mono_gchandle_free_internal (value_gchandle);
}
static MonoObjectHandle
typed_reference_to_object (MonoTypedRef *tref, MonoError *error)
{
HANDLE_FUNCTION_ENTER ();
MonoObjectHandle result;
if (MONO_TYPE_IS_REFERENCE (tref->type)) {
MonoObject** objp = (MonoObject **)tref->value;
result = MONO_HANDLE_NEW (MonoObject, *objp);
} else if (mono_type_is_pointer (tref->type)) {
/* Boxed as UIntPtr */
result = mono_value_box_handle (mono_get_uintptr_class (), tref->value, error);
} else {
result = mono_value_box_handle (tref->klass, tref->value, error);
}
HANDLE_FUNCTION_RETURN_REF (MonoObject, result);
}
MonoObjectHandle
ves_icall_System_RuntimeFieldHandle_GetValueDirect (MonoReflectionFieldHandle field_h, MonoReflectionTypeHandle field_type_h, MonoTypedRef *obj, MonoReflectionTypeHandle context_type_h, MonoError *error)
{
MonoClassField *field = MONO_HANDLE_GETVAL (field_h, field);
MonoClass *klass = mono_class_from_mono_type_internal (field->type);
if (!MONO_TYPE_ISSTRUCT (m_class_get_byval_arg (m_field_get_parent (field)))) {
mono_error_set_not_implemented (error, "");
return MONO_HANDLE_NEW (MonoObject, NULL);
} else if (MONO_TYPE_IS_REFERENCE (field->type)) {
return MONO_HANDLE_NEW (MonoObject, *(MonoObject**)((guint8*)obj->value + field->offset - sizeof (MonoObject)));
} else {
return mono_value_box_handle (klass, (guint8*)obj->value + field->offset - sizeof (MonoObject), error);
}
}
void
ves_icall_System_RuntimeFieldHandle_SetValueDirect (MonoReflectionFieldHandle field_h, MonoReflectionTypeHandle field_type_h, MonoTypedRef *obj, MonoObjectHandle value_h, MonoReflectionTypeHandle context_type_h, MonoError *error)
{
MonoClassField *f = MONO_HANDLE_GETVAL (field_h, field);
g_assert (obj);
mono_class_setup_fields (m_field_get_parent (f));
if (!MONO_TYPE_ISSTRUCT (m_class_get_byval_arg (m_field_get_parent (f)))) {
MonoObjectHandle objHandle = typed_reference_to_object (obj, error);
return_if_nok (error);
ves_icall_RuntimeFieldInfo_SetValueInternal (field_h, objHandle, value_h, error);
} else if (MONO_TYPE_IS_REFERENCE (f->type)) {
mono_copy_value (f->type, (guint8*)obj->value + m_field_get_offset (f) - sizeof (MonoObject), MONO_HANDLE_RAW (value_h), FALSE);
} else {
MonoGCHandle gchandle = NULL;
g_assert (MONO_HANDLE_RAW (value_h));
mono_copy_value (f->type, (guint8*)obj->value + m_field_get_offset (f) - sizeof (MonoObject), mono_object_handle_pin_unbox (value_h, &gchandle), FALSE);
mono_gchandle_free_internal (gchandle);
}
}
MonoObjectHandle
ves_icall_RuntimeFieldInfo_GetRawConstantValue (MonoReflectionFieldHandle rfield, MonoError* error)
{
MonoObjectHandle o_handle = NULL_HANDLE_INIT;
MonoObject *o = NULL;
MonoClassField *field = MONO_HANDLE_GETVAL (rfield, field);
MonoClass *klass;
gchar *v;
MonoTypeEnum def_type;
const char *def_value;
MonoType *t;
MonoStringHandle string_handle = MONO_HANDLE_NEW (MonoString, NULL); // FIXME? Not always needed.
mono_class_init_internal (m_field_get_parent (field));
t = mono_field_get_type_checked (field, error);
goto_if_nok (error, return_null);
if (!(t->attrs & FIELD_ATTRIBUTE_HAS_DEFAULT))
goto invalid_operation;
if (image_is_dynamic (m_class_get_image (m_field_get_parent (field)))) {
MonoClass *klass = m_field_get_parent (field);
int fidx = field - m_class_get_fields (klass);
MonoFieldDefaultValue *def_values = mono_class_get_field_def_values (klass);
g_assert (def_values);
def_type = def_values [fidx].def_type;
def_value = def_values [fidx].data;
if (def_type == MONO_TYPE_END)
goto invalid_operation;
} else {
def_value = mono_class_get_field_default_value (field, &def_type);
/* FIXME, maybe we should try to raise TLE if field->parent is broken */
if (!def_value)
goto invalid_operation;
}
/*FIXME unify this with reflection.c:mono_get_object_from_blob*/
switch (def_type) {
case MONO_TYPE_U1:
case MONO_TYPE_I1:
case MONO_TYPE_BOOLEAN:
case MONO_TYPE_U2:
case MONO_TYPE_I2:
case MONO_TYPE_CHAR:
case MONO_TYPE_U:
case MONO_TYPE_I:
case MONO_TYPE_U4:
case MONO_TYPE_I4:
case MONO_TYPE_R4:
case MONO_TYPE_U8:
case MONO_TYPE_I8:
case MONO_TYPE_R8: {
MonoType *t;
/* boxed value type */
t = g_new0 (MonoType, 1);
t->type = def_type;
klass = mono_class_from_mono_type_internal (t);
g_free (t);
o = mono_object_new_checked (klass, error);
goto_if_nok (error, return_null);
o_handle = MONO_HANDLE_NEW (MonoObject, o);
v = ((gchar *) o) + sizeof (MonoObject);
(void)mono_get_constant_value_from_blob (def_type, def_value, v, string_handle, error);
goto_if_nok (error, return_null);
break;
}
case MONO_TYPE_STRING:
case MONO_TYPE_CLASS:
(void)mono_get_constant_value_from_blob (def_type, def_value, &o, string_handle, error);
goto_if_nok (error, return_null);
o_handle = MONO_HANDLE_NEW (MonoObject, o);
break;
default:
g_assert_not_reached ();
}
goto exit;
invalid_operation:
mono_error_set_invalid_operation (error, NULL);
// fall through
return_null:
o_handle = NULL_HANDLE;
// fall through
exit:
return o_handle;
}
MonoReflectionTypeHandle
ves_icall_RuntimeFieldInfo_ResolveType (MonoReflectionFieldHandle ref_field, MonoError *error)
{
MonoClassField *field = MONO_HANDLE_GETVAL (ref_field, field);
MonoType *type = mono_field_get_type_checked (field, error);
return_val_if_nok (error, MONO_HANDLE_CAST (MonoReflectionType, NULL_HANDLE));
return mono_type_get_object_handle (type, error);
}
void
ves_icall_RuntimePropertyInfo_get_property_info (MonoReflectionPropertyHandle property, MonoPropertyInfo *info, PInfo req_info, MonoError *error)
{
const MonoProperty *pproperty = MONO_HANDLE_GETVAL (property, property);
if ((req_info & PInfo_ReflectedType) != 0) {
MonoClass *klass = MONO_HANDLE_GETVAL (property, klass);
MonoReflectionTypeHandle rt = mono_type_get_object_handle (m_class_get_byval_arg (klass), error);
return_if_nok (error);
MONO_STRUCT_SETREF_INTERNAL (info, parent, MONO_HANDLE_RAW (rt));
}
if ((req_info & PInfo_DeclaringType) != 0) {
MonoReflectionTypeHandle rt = mono_type_get_object_handle (m_class_get_byval_arg (pproperty->parent), error);
return_if_nok (error);
MONO_STRUCT_SETREF_INTERNAL (info, declaring_type, MONO_HANDLE_RAW (rt));
}
if ((req_info & PInfo_Name) != 0) {
MonoStringHandle name = mono_string_new_handle (pproperty->name, error);
return_if_nok (error);
MONO_STRUCT_SETREF_INTERNAL (info, name, MONO_HANDLE_RAW (name));
}
if ((req_info & PInfo_Attributes) != 0)
info->attrs = pproperty->attrs;
if ((req_info & PInfo_GetMethod) != 0) {
MonoClass *property_klass = MONO_HANDLE_GETVAL (property, klass);
MonoReflectionMethodHandle rm;
if (pproperty->get &&
(((pproperty->get->flags & METHOD_ATTRIBUTE_MEMBER_ACCESS_MASK) != METHOD_ATTRIBUTE_PRIVATE) ||
pproperty->get->klass == property_klass)) {
rm = mono_method_get_object_handle (pproperty->get, property_klass, error);
return_if_nok (error);
} else {
rm = MONO_HANDLE_NEW (MonoReflectionMethod, NULL);
}
MONO_STRUCT_SETREF_INTERNAL (info, get, MONO_HANDLE_RAW (rm));
}
if ((req_info & PInfo_SetMethod) != 0) {
MonoClass *property_klass = MONO_HANDLE_GETVAL (property, klass);
MonoReflectionMethodHandle rm;
if (pproperty->set &&
(((pproperty->set->flags & METHOD_ATTRIBUTE_MEMBER_ACCESS_MASK) != METHOD_ATTRIBUTE_PRIVATE) ||
pproperty->set->klass == property_klass)) {
rm = mono_method_get_object_handle (pproperty->set, property_klass, error);
return_if_nok (error);
} else {
rm = MONO_HANDLE_NEW (MonoReflectionMethod, NULL);
}
MONO_STRUCT_SETREF_INTERNAL (info, set, MONO_HANDLE_RAW (rm));
}
/*
	 * There may be other methods defined for properties, though it seems
	 * they are not exposed in the reflection API.
*/
}
static gboolean
add_event_other_methods_to_array (MonoMethod *m, MonoArrayHandle dest, int i, MonoError *error)
{
HANDLE_FUNCTION_ENTER ();
MonoReflectionMethodHandle rm = mono_method_get_object_handle (m, NULL, error);
goto_if_nok (error, leave);
MONO_HANDLE_ARRAY_SETREF (dest, i, rm);
leave:
HANDLE_FUNCTION_RETURN_VAL (is_ok (error));
}
void
ves_icall_RuntimeEventInfo_get_event_info (MonoReflectionMonoEventHandle ref_event, MonoEventInfo *info, MonoError *error)
{
MonoClass *klass = MONO_HANDLE_GETVAL (ref_event, klass);
MonoEvent *event = MONO_HANDLE_GETVAL (ref_event, event);
MonoReflectionTypeHandle rt = mono_type_get_object_handle (m_class_get_byval_arg (klass), error);
return_if_nok (error);
MONO_STRUCT_SETREF_INTERNAL (info, reflected_type, MONO_HANDLE_RAW (rt));
rt = mono_type_get_object_handle (m_class_get_byval_arg (event->parent), error);
return_if_nok (error);
MONO_STRUCT_SETREF_INTERNAL (info, declaring_type, MONO_HANDLE_RAW (rt));
MonoStringHandle ev_name = mono_string_new_handle (event->name, error);
return_if_nok (error);
MONO_STRUCT_SETREF_INTERNAL (info, name, MONO_HANDLE_RAW (ev_name));
info->attrs = event->attrs;
MonoReflectionMethodHandle rm;
if (event->add) {
rm = mono_method_get_object_handle (event->add, klass, error);
return_if_nok (error);
} else {
rm = MONO_HANDLE_NEW (MonoReflectionMethod, NULL);
}
MONO_STRUCT_SETREF_INTERNAL (info, add_method, MONO_HANDLE_RAW (rm));
if (event->remove) {
rm = mono_method_get_object_handle (event->remove, klass, error);
return_if_nok (error);
} else {
rm = MONO_HANDLE_NEW (MonoReflectionMethod, NULL);
}
MONO_STRUCT_SETREF_INTERNAL (info, remove_method, MONO_HANDLE_RAW (rm));
if (event->raise) {
rm = mono_method_get_object_handle (event->raise, klass, error);
return_if_nok (error);
} else {
rm = MONO_HANDLE_NEW (MonoReflectionMethod, NULL);
}
MONO_STRUCT_SETREF_INTERNAL (info, raise_method, MONO_HANDLE_RAW (rm));
#ifndef MONO_SMALL_CONFIG
if (event->other) {
int i, n = 0;
while (event->other [n])
n++;
MonoArrayHandle info_arr = mono_array_new_handle (mono_defaults.method_info_class, n, error);
return_if_nok (error);
MONO_STRUCT_SETREF_INTERNAL (info, other_methods, MONO_HANDLE_RAW (info_arr));
for (i = 0; i < n; i++)
if (!add_event_other_methods_to_array (event->other [i], info_arr, i, error))
return;
}
#endif
}
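/*
* Recursively add the interfaces implemented by klass, and the interfaces
* they themselves implement, to the ifaces hash table.
*/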
static void
collect_interfaces (MonoClass *klass, GHashTable *ifaces, MonoError *error)
{
int i;
MonoClass *ic;
mono_class_setup_interfaces (klass, error);
return_if_nok (error);
int klass_interface_count = m_class_get_interface_count (klass);
MonoClass **klass_interfaces = m_class_get_interfaces (klass);
for (i = 0; i < klass_interface_count; i++) {
ic = klass_interfaces [i];
g_hash_table_insert (ifaces, ic, ic);
collect_interfaces (ic, ifaces, error);
return_if_nok (error);
}
}
typedef struct {
MonoArrayHandle iface_array;
MonoGenericContext *context;
MonoError *error;
int next_idx;
} FillIfaceArrayData;
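/*
* GHashTable foreach callback: convert one collected interface to a
* reflection object and append it to data->iface_array, inflating open
* generic interfaces with data->context first.
*/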
static void
fill_iface_array (gpointer key, gpointer value, gpointer user_data)
{
HANDLE_FUNCTION_ENTER ();
FillIfaceArrayData *data = (FillIfaceArrayData *)user_data;
MonoClass *ic = (MonoClass *)key;
MonoType *ret = m_class_get_byval_arg (ic), *inflated = NULL;
MonoError *error = data->error;
goto_if_nok (error, leave);
if (data->context && mono_class_is_ginst (ic) && mono_class_get_generic_class (ic)->context.class_inst->is_open) {
inflated = ret = mono_class_inflate_generic_type_checked (ret, data->context, error);
goto_if_nok (error, leave);
}
MonoReflectionTypeHandle rt;
rt = mono_type_get_object_handle (ret, error);
goto_if_nok (error, leave);
MONO_HANDLE_ARRAY_SETREF (data->iface_array, data->next_idx, rt);
data->next_idx++;
if (inflated)
mono_metadata_free_type (inflated);
leave:
HANDLE_FUNCTION_RETURN ();
}
static guint
get_interfaces_hash (gconstpointer v1)
{
MonoClass *k = (MonoClass*)v1;
return m_class_get_type_token (k);
}
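/*
* Return the transitive closure of the interfaces implemented by the type,
* collected over the whole parent chain. For open generic instances the
* interfaces of the generic type definition are collected and then inflated
* with the instantiation context.
*/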
void
ves_icall_RuntimeType_GetInterfaces (MonoQCallTypeHandle type_handle, MonoObjectHandleOnStack res, MonoError *error)
{
MonoType *type = type_handle.type;
MonoClass *klass = mono_class_from_mono_type_internal (type);
GHashTable *iface_hash = g_hash_table_new (get_interfaces_hash, NULL);
MonoGenericContext *context = NULL;
if (mono_class_is_ginst (klass) && mono_class_get_generic_class (klass)->context.class_inst->is_open) {
context = mono_class_get_context (klass);
klass = mono_class_get_generic_class (klass)->container_class;
}
for (MonoClass *parent = klass; parent; parent = m_class_get_parent (parent)) {
mono_class_setup_interfaces (parent, error);
goto_if_nok (error, fail);
collect_interfaces (parent, iface_hash, error);
goto_if_nok (error, fail);
}
MonoDomain *domain = mono_get_root_domain ();
int len;
len = g_hash_table_size (iface_hash);
if (len == 0) {
g_hash_table_destroy (iface_hash);
if (!domain->empty_types) {
domain->empty_types = mono_array_new_cached (mono_defaults.runtimetype_class, 0, error);
goto_if_nok (error, fail);
}
HANDLE_ON_STACK_SET (res, domain->empty_types);
return;
}
FillIfaceArrayData data;
data.iface_array = MONO_HANDLE_NEW (MonoArray, mono_array_new_cached (mono_defaults.runtimetype_class, len, error));
goto_if_nok (error, fail);
data.context = context;
data.error = error;
data.next_idx = 0;
g_hash_table_foreach (iface_hash, fill_iface_array, &data);
goto_if_nok (error, fail);
g_hash_table_destroy (iface_hash);
HANDLE_ON_STACK_SET (res, MONO_HANDLE_RAW (data.iface_array));
return;
fail:
g_hash_table_destroy (iface_hash);
}
static gboolean
method_is_reabstracted (MonoMethod *method)
{
/* only on interfaces */
/* method is marked "final abstract" */
/* FIXME: we need some other way to detect reabstracted methods. "final" is an incidental detail of the spec. */
return m_method_is_final (method) && m_method_is_abstract (method);
}
static gboolean
method_is_dim (MonoMethod *method)
{
/* only valid on interface methods */
/* method is marked "virtual" but not "virtual abstract" */
return m_method_is_virtual (method) && !m_method_is_abstract (method);
}
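/*
* Store one interface method and its resolved implementation into the
* parallel methods/targets arrays used by GetInterfaceMapData. The target
* entry is left NULL when no single implementation exists: default
* interface method conflicts, reabstracted interface methods, or a
* reabstracted default implementation on a non-abstract class.
*/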
static gboolean
set_interface_map_data_method_object (MonoMethod *method, MonoClass *iclass, int ioffset, MonoClass *klass, MonoArrayHandle targets, MonoArrayHandle methods, int i, MonoError *error)
{
HANDLE_FUNCTION_ENTER ();
MonoReflectionMethodHandle member = mono_method_get_object_handle (method, iclass, error);
goto_if_nok (error, leave);
MONO_HANDLE_ARRAY_SETREF (methods, i, member);
MonoMethod* foundMethod = m_class_get_vtable (klass) [i + ioffset];
if (mono_class_has_dim_conflicts (klass) && mono_class_is_interface (foundMethod->klass)) {
GSList* conflicts = mono_class_get_dim_conflicts (klass);
GSList* l;
MonoMethod* decl = method;
if (decl->is_inflated)
decl = ((MonoMethodInflated*)decl)->declaring;
gboolean in_conflict = FALSE;
for (l = conflicts; l; l = l->next) {
if (decl == l->data) {
in_conflict = TRUE;
break;
}
}
if (in_conflict) {
MONO_HANDLE_ARRAY_SETREF (targets, i, NULL_HANDLE);
goto leave;
}
}
/*
* If the interface method is reabstracted, and either the found implementation method is abstract, or the found
* implementation method is from another DIM (meaning neither klass nor any of its ancestor classes implemented
* the method), then say the target method is null.
*/
if (method_is_reabstracted (method) &&
(m_method_is_abstract (foundMethod) ||
(mono_class_is_interface (foundMethod->klass) && method_is_dim (foundMethod))))
MONO_HANDLE_ARRAY_SETREF (targets, i, NULL_HANDLE);
else if (mono_class_is_interface (foundMethod->klass) && method_is_reabstracted (foundMethod) && !m_class_is_abstract (klass)) {
/* if the method we found is a reabstracted DIM method, but the class isn't abstract, return NULL */
/*
* (C# doesn't seem to allow constructing such types, it requires the whole class to be abstract - in
* which case we are supposed to return the reabstracted interface method. But in IL we can make a
* non-abstract class with reabstracted interface methods - which is supposed to fail with an
* EntryPointNotFoundException at invoke time, but does not prevent the class from loading.)
*/
MONO_HANDLE_ARRAY_SETREF (targets, i, NULL_HANDLE);
} else {
MONO_HANDLE_ASSIGN (member, mono_method_get_object_handle (foundMethod, mono_class_is_interface (foundMethod->klass) ? foundMethod->klass : klass, error));
goto_if_nok (error, leave);
MONO_HANDLE_ARRAY_SETREF (targets, i, member);
}
leave:
HANDLE_FUNCTION_RETURN_VAL (is_ok (error));
}
void
ves_icall_RuntimeType_GetInterfaceMapData (MonoQCallTypeHandle type_handle, MonoQCallTypeHandle iface_handle, MonoArrayHandleOut targets, MonoArrayHandleOut methods, MonoError *error)
{
MonoType *type = type_handle.type;
MonoClass *klass = mono_class_from_mono_type_internal (type);
MonoType *iface = iface_handle.type;
MonoClass *iclass = mono_class_from_mono_type_internal (iface);
mono_class_init_checked (klass, error);
return_if_nok (error);
mono_class_init_checked (iclass, error);
return_if_nok (error);
mono_class_setup_vtable (klass);
gboolean variance_used;
int ioffset = mono_class_interface_offset_with_variance (klass, iclass, &variance_used);
if (ioffset == -1)
return;
MonoMethod* method;
int i = 0;
gpointer iter = NULL;
while ((method = mono_class_get_methods (iclass, &iter))) {
if (method->flags & METHOD_ATTRIBUTE_VIRTUAL)
i++;
}
MonoArrayHandle targets_arr = mono_array_new_handle (mono_defaults.method_info_class, i, error);
return_if_nok (error);
MONO_HANDLE_ASSIGN (targets, targets_arr);
MonoArrayHandle methods_arr = mono_array_new_handle (mono_defaults.method_info_class, i, error);
return_if_nok (error);
MONO_HANDLE_ASSIGN (methods, methods_arr);
i = 0;
iter = NULL;
while ((method = mono_class_get_methods (iclass, &iter))) {
if (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL))
continue;
if (!set_interface_map_data_method_object (method, iclass, ioffset, klass, targets, methods, i, error))
return;
i ++;
}
}
void
ves_icall_RuntimeType_GetPacking (MonoQCallTypeHandle type_handle, guint32 *packing, guint32 *size, MonoError *error)
{
MonoType *type = type_handle.type;
MonoClass *klass = mono_class_from_mono_type_internal (type);
mono_class_init_checked (klass, error);
return_if_nok (error);
if (image_is_dynamic (m_class_get_image (klass))) {
MonoGCHandle ref_info_handle = mono_class_get_ref_info_handle (klass);
g_assert (ref_info_handle);
MonoReflectionTypeBuilder *tb = (MonoReflectionTypeBuilder*)mono_gchandle_get_target_internal (ref_info_handle);
g_assert (tb);
*packing = tb->packing_size;
*size = tb->class_size;
} else {
mono_metadata_packing_from_typedef (m_class_get_image (klass), m_class_get_type_token (klass), packing, size);
}
}
void
ves_icall_RuntimeTypeHandle_GetElementType (MonoQCallTypeHandle type_handle, MonoObjectHandleOnStack res, MonoError *error)
{
MonoType *type = type_handle.type;
if (!m_type_is_byref (type) && type->type == MONO_TYPE_SZARRAY) {
HANDLE_ON_STACK_SET (res, mono_type_get_object_checked (m_class_get_byval_arg (type->data.klass), error));
return;
}
MonoClass *klass = mono_class_from_mono_type_internal (type);
mono_class_init_checked (klass, error);
return_if_nok (error);
// GetElementType should only return a type for:
// Array Pointer PassedByRef
if (m_type_is_byref (type))
HANDLE_ON_STACK_SET (res, mono_type_get_object_checked (m_class_get_byval_arg (klass), error));
else if (m_class_get_element_class (klass) && MONO_CLASS_IS_ARRAY (klass))
HANDLE_ON_STACK_SET (res, mono_type_get_object_checked (m_class_get_byval_arg (m_class_get_element_class (klass)), error));
else if (m_class_get_element_class (klass) && type->type == MONO_TYPE_PTR)
HANDLE_ON_STACK_SET (res, mono_type_get_object_checked (m_class_get_byval_arg (m_class_get_element_class (klass)), error));
else
HANDLE_ON_STACK_SET (res, NULL);
}
void
ves_icall_RuntimeTypeHandle_GetBaseType (MonoQCallTypeHandle type_handle, MonoObjectHandleOnStack res, MonoError *error)
{
MonoType *type = type_handle.type;
if (m_type_is_byref (type))
return;
MonoClass *klass = mono_class_from_mono_type_internal (type);
if (!m_class_get_parent (klass))
return;
HANDLE_ON_STACK_SET (res, mono_type_get_object_checked (m_class_get_byval_arg (m_class_get_parent (klass)), error));
}
guint32
ves_icall_RuntimeTypeHandle_GetCorElementType (MonoQCallTypeHandle type_handle)
{
MonoType *type = type_handle.type;
if (m_type_is_byref (type))
return MONO_TYPE_BYREF;
else
return (guint32)type->type;
}
MonoBoolean
ves_icall_RuntimeTypeHandle_HasReferences (MonoQCallTypeHandle type_handle, MonoError *error)
{
MonoType *type = type_handle.type;
MonoClass *klass;
klass = mono_class_from_mono_type_internal (type);
mono_class_init_internal (klass);
return m_class_has_references (klass);
}
MonoBoolean
ves_icall_RuntimeTypeHandle_IsByRefLike (MonoQCallTypeHandle type_handle, MonoError *error)
{
MonoType *type = type_handle.type;
/* .NET Core says byref types are not IsByRefLike */
if (m_type_is_byref (type))
return FALSE;
MonoClass *klass = mono_class_from_mono_type_internal (type);
return m_class_is_byreflike (klass);
}
MonoBoolean
ves_icall_RuntimeTypeHandle_IsComObject (MonoQCallTypeHandle type_handle, MonoError *error)
{
MonoType *type = type_handle.type;
MonoClass *klass = mono_class_from_mono_type_internal (type);
mono_class_init_checked (klass, error);
return_val_if_nok (error, FALSE);
return mono_class_is_com_object (klass);
}
guint32
ves_icall_reflection_get_token (MonoObjectHandle obj, MonoError *error)
{
return mono_reflection_get_token_checked (obj, error);
}
void
ves_icall_RuntimeTypeHandle_GetModule (MonoQCallTypeHandle type_handle, MonoObjectHandleOnStack res, MonoError *error)
{
MonoType *t = type_handle.type;
MonoClass *klass = mono_class_from_mono_type_internal (t);
MonoReflectionModuleHandle module;
module = mono_module_get_object_handle (m_class_get_image (klass), error);
return_if_nok (error);
HANDLE_ON_STACK_SET (res, MONO_HANDLE_RAW (module));
}
void
ves_icall_RuntimeTypeHandle_GetAssembly (MonoQCallTypeHandle type_handle, MonoObjectHandleOnStack res, MonoError *error)
{
MonoType *t = type_handle.type;
MonoClass *klass = mono_class_from_mono_type_internal (t);
MonoReflectionAssemblyHandle assembly;
assembly = mono_assembly_get_object_handle (m_class_get_image (klass)->assembly, error);
return_if_nok (error);
HANDLE_ON_STACK_SET (res, MONO_HANDLE_RAW (assembly));
}
void
ves_icall_RuntimeType_GetDeclaringType (MonoQCallTypeHandle type_handle, MonoObjectHandleOnStack res, MonoError *error)
{
MonoType *type = type_handle.type;
MonoClass *klass;
if (m_type_is_byref (type))
return;
if (type->type == MONO_TYPE_VAR) {
MonoGenericContainer *param = mono_type_get_generic_param_owner (type);
klass = param ? param->owner.klass : NULL;
} else if (type->type == MONO_TYPE_MVAR) {
MonoGenericContainer *param = mono_type_get_generic_param_owner (type);
klass = param ? param->owner.method->klass : NULL;
} else {
klass = m_class_get_nested_in (mono_class_from_mono_type_internal (type));
}
if (!klass)
return;
HANDLE_ON_STACK_SET (res, mono_type_get_object_checked (m_class_get_byval_arg (klass), error));
}
void
ves_icall_RuntimeType_GetName (MonoQCallTypeHandle type_handle, MonoObjectHandleOnStack res, MonoError *error)
{
MonoType *type = type_handle.type;
MonoClass *klass = mono_class_from_mono_type_internal (type);
// FIXME: this should be escaped in some scenarios with mono_identifier_escape_type_name_chars
// Determining exactly when to do so is fairly difficult, so for now we leave it unescaped to avoid regressions
const char *klass_name = m_class_get_name (klass);
if (m_type_is_byref (type)) {
char *n = g_strdup_printf ("%s&", klass_name);
HANDLE_ON_STACK_SET (res, mono_string_new_checked (n, error));
g_free (n);
} else {
HANDLE_ON_STACK_SET (res, mono_string_new_checked (klass_name, error));
}
}
void
ves_icall_RuntimeType_GetNamespace (MonoQCallTypeHandle type_handle, MonoObjectHandleOnStack res, MonoError *error)
{
MonoType *type = type_handle.type;
MonoClass *klass = mono_class_from_mono_type_internal (type);
MonoClass *klass_nested_in;
while ((klass_nested_in = m_class_get_nested_in (klass)))
klass = klass_nested_in;
if (m_class_get_name_space (klass) [0] == '\0')
return;
char *escaped = mono_identifier_escape_type_name_chars (m_class_get_name_space (klass));
HANDLE_ON_STACK_SET (res, mono_string_new_checked (escaped, error));
g_free (escaped);
}
gint32
ves_icall_RuntimeTypeHandle_GetArrayRank (MonoQCallTypeHandle type_handle, MonoError *error)
{
MonoType *type = type_handle.type;
if (type->type != MONO_TYPE_ARRAY && type->type != MONO_TYPE_SZARRAY) {
mono_error_set_argument (error, "type", "Type must be an array type");
return 0;
}
MonoClass *klass = mono_class_from_mono_type_internal (type);
return m_class_get_rank (klass);
}
static MonoArrayHandle
create_type_array (MonoBoolean runtimeTypeArray, int count, MonoError *error)
{
return mono_array_new_handle (runtimeTypeArray ? mono_defaults.runtimetype_class : mono_defaults.systemtype_class, count, error);
}
static gboolean
set_type_object_in_array (MonoType *type, MonoArrayHandle dest, int i, MonoError *error)
{
HANDLE_FUNCTION_ENTER ();
MonoReflectionTypeHandle rt = mono_type_get_object_handle (type, error);
goto_if_nok (error, leave);
MONO_HANDLE_ARRAY_SETREF (dest, i, rt);
leave:
HANDLE_FUNCTION_RETURN_VAL (is_ok (error));
}
void
ves_icall_RuntimeType_GetGenericArgumentsInternal (MonoQCallTypeHandle type_handle, MonoObjectHandleOnStack res_handle, MonoBoolean runtimeTypeArray, MonoError *error)
{
MonoType *type = type_handle.type;
MonoClass *klass = mono_class_from_mono_type_internal (type);
MonoArrayHandle res = MONO_HANDLE_NEW (MonoArray, NULL);
if (mono_class_is_gtd (klass)) {
MonoGenericContainer *container = mono_class_get_generic_container (klass);
MONO_HANDLE_ASSIGN (res, create_type_array (runtimeTypeArray, container->type_argc, error));
return_if_nok (error);
for (int i = 0; i < container->type_argc; ++i) {
MonoClass *pklass = mono_class_create_generic_parameter (mono_generic_container_get_param (container, i));
if (!set_type_object_in_array (m_class_get_byval_arg (pklass), res, i, error))
return;
}
} else if (mono_class_is_ginst (klass)) {
MonoGenericInst *inst = mono_class_get_generic_class (klass)->context.class_inst;
MONO_HANDLE_ASSIGN (res, create_type_array (runtimeTypeArray, inst->type_argc, error));
return_if_nok (error);
for (int i = 0; i < inst->type_argc; ++i) {
if (!set_type_object_in_array (inst->type_argv [i], res, i, error))
return;
}
}
HANDLE_ON_STACK_SET (res_handle, MONO_HANDLE_RAW (res));
}
MonoBoolean
ves_icall_RuntimeTypeHandle_IsGenericTypeDefinition (MonoQCallTypeHandle type_handle)
{
MonoType *type = type_handle.type;
if (m_type_is_byref (type))
return FALSE;
MonoClass *klass = mono_class_from_mono_type_internal (type);
return mono_class_is_gtd (klass);
}
void
ves_icall_RuntimeTypeHandle_GetGenericTypeDefinition_impl (MonoQCallTypeHandle type_handle, MonoObjectHandleOnStack res, MonoError *error)
{
MonoType *type = type_handle.type;
if (m_type_is_byref (type))
return;
MonoClass *klass;
klass = mono_class_from_mono_type_internal (type);
if (mono_class_is_gtd (klass)) {
HANDLE_ON_STACK_SET (res, NULL);
return;
}
if (mono_class_is_ginst (klass)) {
MonoClass *generic_class = mono_class_get_generic_class (klass)->container_class;
MonoGCHandle ref_info_handle = mono_class_get_ref_info_handle (generic_class);
if (m_class_was_typebuilder (generic_class) && ref_info_handle) {
MonoObjectHandle tb = mono_gchandle_get_target_handle (ref_info_handle);
g_assert (!MONO_HANDLE_IS_NULL (tb));
HANDLE_ON_STACK_SET (res, MONO_HANDLE_RAW (tb));
} else {
HANDLE_ON_STACK_SET (res, mono_type_get_object_checked (m_class_get_byval_arg (generic_class), error));
}
}
}
void
ves_icall_RuntimeType_MakeGenericType (MonoReflectionTypeHandle reftype, MonoArrayHandle type_array, MonoObjectHandleOnStack res, MonoError *error)
{
g_assert (IS_MONOTYPE_HANDLE (reftype));
MonoType *type = MONO_HANDLE_GETVAL (reftype, type);
mono_class_init_checked (mono_class_from_mono_type_internal (type), error);
return_if_nok (error);
int count = mono_array_handle_length (type_array);
MonoType **types = g_new0 (MonoType *, count);
MonoReflectionTypeHandle t = MONO_HANDLE_NEW (MonoReflectionType, NULL);
for (int i = 0; i < count; i++) {
MONO_HANDLE_ARRAY_GETREF (t, type_array, i);
types [i] = MONO_HANDLE_GETVAL (t, type);
}
MonoType *geninst = mono_reflection_bind_generic_parameters (reftype, count, types, error);
g_free (types);
if (!geninst)
return;
MonoClass *klass = mono_class_from_mono_type_internal (geninst);
/*we might inflate to the GTD*/
if (mono_class_is_ginst (klass) && !mono_verifier_class_is_valid_generic_instantiation (klass)) {
mono_error_set_argument (error, "typeArguments", "Invalid generic arguments");
return;
}
HANDLE_ON_STACK_SET (res, mono_type_get_object_checked (geninst, error));
}
MonoBoolean
ves_icall_RuntimeTypeHandle_HasInstantiation (MonoQCallTypeHandle type_handle)
{
MonoClass *klass;
MonoType *type = type_handle.type;
if (m_type_is_byref (type))
return FALSE;
klass = mono_class_from_mono_type_internal (type);
return mono_class_is_ginst (klass) || mono_class_is_gtd (klass);
}
gint32
ves_icall_RuntimeType_GetGenericParameterPosition (MonoQCallTypeHandle type_handle)
{
MonoType *type = type_handle.type;
if (is_generic_parameter (type))
return mono_type_get_generic_param_num (type);
return -1;
}
MonoGenericParamInfo *
ves_icall_RuntimeTypeHandle_GetGenericParameterInfo (MonoQCallTypeHandle type_handle, MonoError *error)
{
MonoType *type = type_handle.type;
return mono_generic_param_info (type->data.generic_param);
}
MonoReflectionMethodHandle
ves_icall_RuntimeType_GetCorrespondingInflatedMethod (MonoQCallTypeHandle type_handle,
MonoReflectionMethodHandle generic,
MonoError *error)
{
MonoType *type = type_handle.type;
MonoClass *klass = mono_class_from_mono_type_internal (type);
mono_class_init_checked (klass, error);
return_val_if_nok (error, MONO_HANDLE_CAST (MonoReflectionMethod, NULL_HANDLE));
MonoMethod *generic_method = MONO_HANDLE_GETVAL (generic, method);
MonoReflectionMethodHandle ret = MONO_HANDLE_CAST (MonoReflectionMethod, NULL_HANDLE);
MonoMethod *method;
gpointer iter = NULL;
while ((method = mono_class_get_methods (klass, &iter))) {
if (method->token == generic_method->token) {
ret = mono_method_get_object_handle (method, klass, error);
return_val_if_nok (error, MONO_HANDLE_CAST (MonoReflectionMethod, NULL_HANDLE));
}
}
return ret;
}
void
ves_icall_RuntimeType_GetDeclaringMethod (MonoQCallTypeHandle type_handle, MonoObjectHandleOnStack res, MonoError *error)
{
MonoType *type = type_handle.type;
if (m_type_is_byref (type) || (type->type != MONO_TYPE_MVAR && type->type != MONO_TYPE_VAR)) {
mono_error_set_invalid_operation (error, "DeclaringMethod can only be used on generic arguments");
return;
}
if (type->type == MONO_TYPE_VAR)
return;
MonoMethod *method;
method = mono_type_get_generic_param_owner (type)->owner.method;
g_assert (method);
HANDLE_ON_STACK_SET (res, mono_method_get_object_checked (method, method->klass, error));
}
void
ves_icall_RuntimeMethodInfo_GetPInvoke (MonoReflectionMethodHandle ref_method, int* flags, MonoStringHandleOut entry_point, MonoStringHandleOut dll_name, MonoError *error)
{
MonoMethod *method = MONO_HANDLE_GETVAL (ref_method, method);
MonoImage *image = m_class_get_image (method->klass);
MonoMethodPInvoke *piinfo = (MonoMethodPInvoke *)method;
MonoTableInfo *tables = image->tables;
MonoTableInfo *im = &tables [MONO_TABLE_IMPLMAP];
MonoTableInfo *mr = &tables [MONO_TABLE_MODULEREF];
guint32 im_cols [MONO_IMPLMAP_SIZE];
guint32 scope_token;
const char *import = NULL;
const char *scope = NULL;
if (image_is_dynamic (image)) {
MonoReflectionMethodAux *method_aux =
(MonoReflectionMethodAux *)g_hash_table_lookup (((MonoDynamicImage*)image)->method_aux_hash, method);
if (method_aux) {
import = method_aux->dllentry;
scope = method_aux->dll;
}
if (!import || !scope) {
mono_error_set_argument (error, "method", "System.Refleciton.Emit method with invalid pinvoke information");
return;
}
}
else {
if (piinfo->implmap_idx) {
mono_metadata_decode_row (im, piinfo->implmap_idx - 1, im_cols, MONO_IMPLMAP_SIZE);
piinfo->piflags = im_cols [MONO_IMPLMAP_FLAGS];
import = mono_metadata_string_heap (image, im_cols [MONO_IMPLMAP_NAME]);
scope_token = mono_metadata_decode_row_col (mr, im_cols [MONO_IMPLMAP_SCOPE] - 1, MONO_MODULEREF_NAME);
scope = mono_metadata_string_heap (image, scope_token);
}
}
*flags = piinfo->piflags;
MONO_HANDLE_ASSIGN (entry_point, mono_string_new_handle (import, error));
return_if_nok (error);
MONO_HANDLE_ASSIGN (dll_name, mono_string_new_handle (scope, error));
}
MonoReflectionMethodHandle
ves_icall_RuntimeMethodInfo_GetGenericMethodDefinition (MonoReflectionMethodHandle ref_method, MonoError *error)
{
MonoMethod *method = MONO_HANDLE_GETVAL (ref_method, method);
if (method->is_generic)
return ref_method;
if (!method->is_inflated)
return MONO_HANDLE_CAST (MonoReflectionMethod, NULL_HANDLE);
MonoMethodInflated *imethod = (MonoMethodInflated *) method;
MonoMethod *result = imethod->declaring;
/* Not a generic method. */
if (!result->is_generic)
return MONO_HANDLE_CAST (MonoReflectionMethod, NULL_HANDLE);
if (image_is_dynamic (m_class_get_image (method->klass))) {
MonoDynamicImage *image = (MonoDynamicImage*)m_class_get_image (method->klass);
/*
* FIXME: Why is this stuff needed at all? Why can't the code below work for
* the dynamic case as well?
*/
mono_image_lock ((MonoImage*)image);
MonoReflectionMethodHandle res = MONO_HANDLE_NEW (MonoReflectionMethod, (MonoReflectionMethod*)mono_g_hash_table_lookup (image->generic_def_objects, imethod));
mono_image_unlock ((MonoImage*)image);
if (!MONO_HANDLE_IS_NULL (res))
return res;
}
if (imethod->context.class_inst) {
MonoClass *klass = ((MonoMethod *) imethod)->klass;
/* Generic methods get the context of the GTD. */
if (mono_class_get_context (klass)) {
result = mono_class_inflate_generic_method_full_checked (result, klass, mono_class_get_context (klass), error);
return_val_if_nok (error, MONO_HANDLE_CAST (MonoReflectionMethod, NULL_HANDLE));
}
}
return mono_method_get_object_handle (result, NULL, error);
}
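/*
* Cached vtable slots of Stream's BeginRead/BeginWrite/EndRead/EndWrite.
* They allow HasOverriddenBeginEndRead/Write below to detect, with a plain
* vtable lookup, whether a Stream subclass overrides the async I/O methods.
*/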
static GENERATE_TRY_GET_CLASS_WITH_CACHE (stream, "System.IO", "Stream")
static int io_stream_begin_read_slot = -1;
static int io_stream_begin_write_slot = -1;
static int io_stream_end_read_slot = -1;
static int io_stream_end_write_slot = -1;
static gboolean io_stream_slots_set = FALSE;
static void
init_io_stream_slots (void)
{
MonoClass* klass = mono_class_try_get_stream_class ();
mono_class_setup_vtable (klass);
MonoMethod **klass_methods = m_class_get_methods (klass);
if (!klass_methods) {
mono_class_setup_methods (klass);
klass_methods = m_class_get_methods (klass);
}
int method_count = mono_class_get_method_count (klass);
int methods_found = 0;
for (int i = 0; i < method_count; i++) {
// find slots for Begin(End)Read and Begin(End)Write
MonoMethod* m = klass_methods [i];
if (m->slot == -1)
continue;
if (!strcmp (m->name, "BeginRead")) {
methods_found++;
io_stream_begin_read_slot = m->slot;
} else if (!strcmp (m->name, "BeginWrite")) {
methods_found++;
io_stream_begin_write_slot = m->slot;
} else if (!strcmp (m->name, "EndRead")) {
methods_found++;
io_stream_end_read_slot = m->slot;
} else if (!strcmp (m->name, "EndWrite")) {
methods_found++;
io_stream_end_write_slot = m->slot;
}
}
g_assert (methods_found <= 4); // some of them can be linked out
io_stream_slots_set = TRUE;
}
MonoBoolean
ves_icall_System_IO_Stream_HasOverriddenBeginEndRead (MonoObjectHandle stream, MonoError *error)
{
MonoClass* curr_klass = MONO_HANDLE_GET_CLASS (stream);
MonoClass* base_klass = mono_class_try_get_stream_class ();
if (!io_stream_slots_set)
init_io_stream_slots ();
// Slots can still be -1, which means the linker removed the methods from the base class (Stream).
// In that case we can safely assume the methods are not overridden; otherwise check the vtable.
MonoMethod **curr_klass_vtable = m_class_get_vtable (curr_klass);
gboolean begin_read_is_overridden = io_stream_begin_read_slot != -1 && curr_klass_vtable [io_stream_begin_read_slot]->klass != base_klass;
gboolean end_read_is_overridden = io_stream_end_read_slot != -1 && curr_klass_vtable [io_stream_end_read_slot]->klass != base_klass;
// return true if BeginRead or EndRead was overridden
return begin_read_is_overridden || end_read_is_overridden;
}
MonoBoolean
ves_icall_System_IO_Stream_HasOverriddenBeginEndWrite (MonoObjectHandle stream, MonoError *error)
{
MonoClass* curr_klass = MONO_HANDLE_GET_CLASS (stream);
MonoClass* base_klass = mono_class_try_get_stream_class ();
if (!io_stream_slots_set)
init_io_stream_slots ();
// Slots can still be -1, which means the linker removed the methods from the base class (Stream).
// In that case we can safely assume the methods are not overridden; otherwise check the vtable.
MonoMethod **curr_klass_vtable = m_class_get_vtable (curr_klass);
gboolean begin_write_is_overridden = io_stream_begin_write_slot != -1 && curr_klass_vtable [io_stream_begin_write_slot]->klass != base_klass;
gboolean end_write_is_overridden = io_stream_end_write_slot != -1 && curr_klass_vtable [io_stream_end_write_slot]->klass != base_klass;
// return true if BeginWrite or EndWrite was overridden
return begin_write_is_overridden || end_write_is_overridden;
}
MonoBoolean
ves_icall_RuntimeMethodInfo_get_IsGenericMethod (MonoReflectionMethodHandle ref_method, MonoError *error)
{
MonoMethod *method = MONO_HANDLE_GETVAL (ref_method, method);
return mono_method_signature_internal (method)->generic_param_count != 0;
}
MonoBoolean
ves_icall_RuntimeMethodInfo_get_IsGenericMethodDefinition (MonoReflectionMethodHandle ref_method, MonoError *error)
{
MonoMethod *method = MONO_HANDLE_GETVAL (ref_method, method);
return method->is_generic;
}
static gboolean
set_array_generic_argument_handle_inflated (MonoGenericInst *inst, int i, MonoArrayHandle arr, MonoError *error)
{
HANDLE_FUNCTION_ENTER ();
MonoReflectionTypeHandle rt = mono_type_get_object_handle (inst->type_argv [i], error);
goto_if_nok (error, leave);
MONO_HANDLE_ARRAY_SETREF (arr, i, rt);
leave:
HANDLE_FUNCTION_RETURN_VAL (is_ok (error));
}
static gboolean
set_array_generic_argument_handle_gparam (MonoGenericContainer *container, int i, MonoArrayHandle arr, MonoError *error)
{
HANDLE_FUNCTION_ENTER ();
MonoGenericParam *param = mono_generic_container_get_param (container, i);
MonoClass *pklass = mono_class_create_generic_parameter (param);
MonoReflectionTypeHandle rt = mono_type_get_object_handle (m_class_get_byval_arg (pklass), error);
goto_if_nok (error, leave);
MONO_HANDLE_ARRAY_SETREF (arr, i, rt);
leave:
HANDLE_FUNCTION_RETURN_VAL (is_ok (error));
}
MonoArrayHandle
ves_icall_RuntimeMethodInfo_GetGenericArguments (MonoReflectionMethodHandle ref_method, MonoError *error)
{
MonoMethod *method = MONO_HANDLE_GETVAL (ref_method, method);
if (method->is_inflated) {
MonoGenericInst *inst = mono_method_get_context (method)->method_inst;
if (inst) {
int count = inst->type_argc;
MonoArrayHandle res = mono_array_new_handle (mono_defaults.systemtype_class, count, error);
return_val_if_nok (error, NULL_HANDLE_ARRAY);
for (int i = 0; i < count; i++) {
if (!set_array_generic_argument_handle_inflated (inst, i, res, error))
break;
}
return_val_if_nok (error, NULL_HANDLE_ARRAY);
return res;
}
}
int count = mono_method_signature_internal (method)->generic_param_count;
MonoArrayHandle res = mono_array_new_handle (mono_defaults.systemtype_class, count, error);
return_val_if_nok (error, NULL_HANDLE_ARRAY);
MonoGenericContainer *container = mono_method_get_generic_container (method);
for (int i = 0; i < count; i++) {
if (!set_array_generic_argument_handle_gparam (container, i, res, error))
break;
}
return_val_if_nok (error, NULL_HANDLE_ARRAY);
return res;
}
MonoObjectHandle
ves_icall_InternalInvoke (MonoReflectionMethodHandle method_handle, MonoObjectHandle this_arg_handle,
MonoSpanOfObjects *params_span, MonoExceptionHandleOut exception_out, MonoError *error)
{
MonoReflectionMethod* const method = MONO_HANDLE_RAW (method_handle);
MonoObject* const this_arg = MONO_HANDLE_RAW (this_arg_handle);
g_assert (params_span != NULL);
/*
* Invoke from reflection is supposed to always be a virtual call (the API
* is stupid), mono_runtime_invoke_*() calls the provided method, allowing
* greater flexibility.
*/
MonoMethod *m = method->method;
MonoMethodSignature* const sig = mono_method_signature_internal (m);
int pcount = 0;
void *obj = this_arg;
MonoObject *result = NULL;
MonoArray *arr = NULL;
MonoException *exception = NULL;
*MONO_HANDLE_REF (exception_out) = NULL;
if (!(m->flags & METHOD_ATTRIBUTE_STATIC)) {
if (!mono_class_vtable_checked (m->klass, error)) {
mono_error_cleanup (error); /* FIXME does this make sense? */
error_init_reuse (error);
exception = mono_class_get_exception_for_failure (m->klass);
goto return_null;
}
if (this_arg) {
m = mono_object_get_virtual_method_internal (this_arg, m);
/* must pass the pointer to the value for valuetype methods */
if (m_class_is_valuetype (m->klass)) {
obj = mono_object_unbox_internal (this_arg);
// FIXMEcoop? Does obj need to be put into a handle?
}
} else if (strcmp (m->name, ".ctor") && !m->wrapper_type) {
exception = mono_exception_from_name_msg (mono_defaults.corlib, "System.Reflection", "TargetException", "Non-static method requires a target.");
goto return_null;
}
}
/* Array constructor */
if (m_class_get_rank (m->klass) && !strcmp (m->name, ".ctor")) {
int i;
pcount = mono_span_length (params_span);
uintptr_t * const lengths = g_newa (uintptr_t, pcount);
/* Note: the synthesized array .ctors have int32 as argument type */
for (i = 0; i < pcount; ++i)
lengths [i] = *(int32_t*) ((char*)mono_span_get (params_span, MonoObject*, i) + sizeof (MonoObject));
if (m_class_get_rank (m->klass) == 1 && sig->param_count == 2 && m_class_get_rank (m_class_get_element_class (m->klass))) {
/* This is a ctor for jagged arrays. MS creates an array of arrays. */
arr = mono_array_new_full_checked (m->klass, lengths, NULL, error);
goto_if_nok (error, return_null);
MonoArrayHandle subarray_handle = MONO_HANDLE_NEW (MonoArray, NULL);
for (i = 0; i < mono_array_length_internal (arr); ++i) {
MonoArray *subarray = mono_array_new_full_checked (m_class_get_element_class (m->klass), &lengths [1], NULL, error);
goto_if_nok (error, return_null);
MONO_HANDLE_ASSIGN_RAW (subarray_handle, subarray); // FIXME? Overkill?
mono_array_setref_fast (arr, i, subarray);
}
goto exit;
}
if (m_class_get_rank (m->klass) == pcount) {
/* Only lengths provided. */
arr = mono_array_new_full_checked (m->klass, lengths, NULL, error);
goto_if_nok (error, return_null);
goto exit;
} else {
g_assert (pcount == (m_class_get_rank (m->klass) * 2));
/* The arguments are lower-bound-length pairs */
intptr_t * const lower_bounds = (intptr_t *)g_alloca (sizeof (intptr_t) * pcount);
for (i = 0; i < pcount / 2; ++i) {
lower_bounds [i] = *(int32_t*) ((char*)mono_span_get (params_span, MonoObject*, (i * 2)) + sizeof (MonoObject));
lengths [i] = *(int32_t*) ((char*)mono_span_get (params_span, MonoObject*, (i * 2) + 1) + sizeof (MonoObject));
}
arr = mono_array_new_full_checked (m->klass, lengths, lower_bounds, error);
goto_if_nok (error, return_null);
goto exit;
}
}
result = mono_runtime_invoke_span_checked (m, obj, params_span, error);
goto exit;
return_null:
result = NULL;
arr = NULL;
exit:
if (exception) {
MONO_HANDLE_NEW (MonoException, exception); // FIXME? overkill?
mono_gc_wbarrier_generic_store_internal (MONO_HANDLE_REF (exception_out), (MonoObject*)exception);
}
g_assert (!result || !arr); // only one, or neither, should be set
return result ? MONO_HANDLE_NEW (MonoObject, result) : arr ? MONO_HANDLE_NEW (MonoObject, (MonoObject*)arr) : NULL_HANDLE;
}
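/*
* Read an enum value of the given underlying type from mem, widening it to
* a guint64.
*/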
static guint64
read_enum_value (const char *mem, int type)
{
switch (type) {
case MONO_TYPE_BOOLEAN:
case MONO_TYPE_U1:
return *(guint8*)mem;
case MONO_TYPE_I1:
return *(gint8*)mem;
case MONO_TYPE_CHAR:
case MONO_TYPE_U2:
return read16 (mem);
case MONO_TYPE_I2:
return (gint16) read16 (mem);
case MONO_TYPE_U4:
case MONO_TYPE_R4:
return read32 (mem);
case MONO_TYPE_I4:
return (gint32) read32 (mem);
case MONO_TYPE_U8:
case MONO_TYPE_I8:
case MONO_TYPE_R8:
return read64 (mem);
case MONO_TYPE_U:
case MONO_TYPE_I:
#if SIZEOF_REGISTER == 8
return read64 (mem);
#else
return read32 (mem);
#endif
default:
g_assert_not_reached ();
}
return 0;
}
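/*
* Store value into mem using the representation of the given underlying
* enum type.
*/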
static void
write_enum_value (void *mem, int type, guint64 value)
{
switch (type) {
case MONO_TYPE_U1:
case MONO_TYPE_I1:
case MONO_TYPE_BOOLEAN: {
guint8 *p = (guint8*)mem;
*p = value;
break;
}
case MONO_TYPE_U2:
case MONO_TYPE_I2:
case MONO_TYPE_CHAR: {
guint16 *p = (guint16 *)mem;
*p = value;
break;
}
case MONO_TYPE_U4:
case MONO_TYPE_I4:
case MONO_TYPE_R4: {
guint32 *p = (guint32 *)mem;
*p = value;
break;
}
case MONO_TYPE_U8:
case MONO_TYPE_I8:
case MONO_TYPE_R8: {
guint64 *p = (guint64 *)mem;
*p = value;
break;
}
case MONO_TYPE_U:
case MONO_TYPE_I: {
#if SIZEOF_REGISTER == 8
guint64 *p = (guint64 *)mem;
*p = value;
#else
guint32 *p = (guint32 *)mem;
*p = value;
#endif
break;
}
default:
g_assert_not_reached ();
}
return;
}
void
ves_icall_System_Enum_InternalBoxEnum (MonoQCallTypeHandle enum_handle, MonoObjectHandleOnStack res, guint64 value, MonoError *error)
{
MonoClass *enumc;
MonoObjectHandle resultHandle;
MonoType *etype;
enumc = mono_class_from_mono_type_internal (enum_handle.type);
mono_class_init_checked (enumc, error);
return_if_nok (error);
etype = mono_class_enum_basetype_internal (enumc);
resultHandle = mono_object_new_handle (enumc, error);
return_if_nok (error);
write_enum_value (mono_handle_unbox_unsafe (resultHandle), etype->type, value);
HANDLE_ON_STACK_SET (res, MONO_HANDLE_RAW (resultHandle));
}
void
ves_icall_System_Enum_InternalGetUnderlyingType (MonoQCallTypeHandle type_handle, MonoObjectHandleOnStack res, MonoError *error)
{
MonoType *etype;
MonoClass *klass;
klass = mono_class_from_mono_type_internal (type_handle.type);
mono_class_init_checked (klass, error);
return_if_nok (error);
etype = mono_class_enum_basetype_internal (klass);
if (!etype) {
mono_error_set_argument (error, "enumType", "Type provided must be an Enum.");
return;
}
HANDLE_ON_STACK_SET (res, mono_type_get_object_checked (etype, error));
}
int
ves_icall_System_Enum_InternalGetCorElementType (MonoQCallTypeHandle type_handle)
{
MonoClass *klass = mono_class_from_mono_type_internal (type_handle.type);
return (int)m_class_get_byval_arg (m_class_get_element_class (klass))->type;
}
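/*
* Append one enum field's name and raw constant value to the names/values
* arrays at index *j. The instance value__ field and deleted fields are
* skipped; *sorted tracks whether the values appear in ascending order.
*/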
static void
get_enum_field (MonoArrayHandle names, MonoArrayHandle values, int base_type, MonoClassField *field, guint* j, guint64 *previous_value, gboolean *sorted, MonoError *error)
{
HANDLE_FUNCTION_ENTER ();
guint64 field_value;
const char *p;
MonoTypeEnum def_type;
if (!(field->type->attrs & FIELD_ATTRIBUTE_STATIC))
goto leave;
if (strcmp ("value__", mono_field_get_name (field)) == 0)
goto leave;
if (mono_field_is_deleted (field))
goto leave;
MonoStringHandle name;
name = mono_string_new_handle (mono_field_get_name (field), error);
goto_if_nok (error, leave);
MONO_HANDLE_ARRAY_SETREF (names, *j, name);
p = mono_class_get_field_default_value (field, &def_type);
/* len = */ mono_metadata_decode_blob_size (p, &p);
field_value = read_enum_value (p, base_type);
MONO_HANDLE_ARRAY_SETVAL (values, guint64, *j, field_value);
if (*previous_value > field_value)
*sorted = FALSE;
*previous_value = field_value;
(*j)++;
leave:
HANDLE_FUNCTION_RETURN ();
}
MonoBoolean
ves_icall_System_Enum_GetEnumValuesAndNames (MonoQCallTypeHandle type_handle, MonoArrayHandleOut values, MonoArrayHandleOut names, MonoError *error)
{
MonoClass *enumc = mono_class_from_mono_type_internal (type_handle.type);
guint j = 0, nvalues;
gpointer iter;
MonoClassField *field;
int base_type;
guint64 previous_value = 0;
gboolean sorted = TRUE;
mono_class_init_checked (enumc, error);
return_val_if_nok (error, FALSE);
if (!m_class_is_enumtype (enumc)) {
mono_error_set_argument (error, NULL, "Type provided must be an Enum.");
return TRUE;
}
base_type = mono_class_enum_basetype_internal (enumc)->type;
nvalues = mono_class_num_fields (enumc) > 0 ? mono_class_num_fields (enumc) - 1 : 0;
MONO_HANDLE_ASSIGN (names, mono_array_new_handle (mono_defaults.string_class, nvalues, error));
return_val_if_nok (error, FALSE);
MONO_HANDLE_ASSIGN (values, mono_array_new_handle (mono_defaults.uint64_class, nvalues, error));
return_val_if_nok (error, FALSE);
iter = NULL;
while ((field = mono_class_get_fields_internal (enumc, &iter))) {
get_enum_field (names, values, base_type, field, &j, &previous_value, &sorted, error);
if (!is_ok (error))
break;
}
return_val_if_nok (error, FALSE);
return sorted || base_type == MONO_TYPE_R4 || base_type == MONO_TYPE_R8;
}
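/* These values mirror System.Reflection.BindingFlags */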
enum {
BFLAGS_IgnoreCase = 1,
BFLAGS_DeclaredOnly = 2,
BFLAGS_Instance = 4,
BFLAGS_Static = 8,
BFLAGS_Public = 0x10,
BFLAGS_NonPublic = 0x20,
BFLAGS_FlattenHierarchy = 0x40,
BFLAGS_InvokeMethod = 0x100,
BFLAGS_CreateInstance = 0x200,
BFLAGS_GetField = 0x400,
BFLAGS_SetField = 0x800,
BFLAGS_GetProperty = 0x1000,
BFLAGS_SetProperty = 0x2000,
BFLAGS_ExactBinding = 0x10000,
BFLAGS_SuppressChangeType = 0x20000,
BFLAGS_OptionalParamBinding = 0x40000
};
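/* This corresponds to the MemberListType enum on the managed side */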
enum {
MLISTTYPE_All = 0,
MLISTTYPE_CaseSensitive = 1,
MLISTTYPE_CaseInsensitive = 2,
MLISTTYPE_HandleToInfo = 3
};
GPtrArray*
ves_icall_RuntimeType_GetFields_native (MonoQCallTypeHandle type_handle, char *utf8_name, guint32 bflags, guint32 mlisttype, MonoError *error)
{
MonoType *type = type_handle.type;
if (m_type_is_byref (type))
return g_ptr_array_new ();
int (*compare_func) (const char *s1, const char *s2) = NULL;
compare_func = ((bflags & BFLAGS_IgnoreCase) || (mlisttype == MLISTTYPE_CaseInsensitive)) ? mono_utf8_strcasecmp : strcmp;
MonoClass *startklass, *klass;
klass = startklass = mono_class_from_mono_type_internal (type);
GPtrArray *ptr_array = g_ptr_array_sized_new (16);
handle_parent:
if (mono_class_has_failure (klass)) {
mono_error_set_for_class_failure (error, klass);
goto fail;
}
MonoClassField *field;
gpointer iter;
iter = NULL;
while ((field = mono_class_get_fields_lazy (klass, &iter))) {
guint32 flags = mono_field_get_flags (field);
int match = 0;
if (mono_field_is_deleted_with_flags (field, flags))
continue;
if ((flags & FIELD_ATTRIBUTE_FIELD_ACCESS_MASK) == FIELD_ATTRIBUTE_PUBLIC) {
if (bflags & BFLAGS_Public)
match++;
} else if ((klass == startklass) || (flags & FIELD_ATTRIBUTE_FIELD_ACCESS_MASK) != FIELD_ATTRIBUTE_PRIVATE) {
if (bflags & BFLAGS_NonPublic) {
match++;
}
}
if (!match)
continue;
match = 0;
if (flags & FIELD_ATTRIBUTE_STATIC) {
if (bflags & BFLAGS_Static)
if ((bflags & BFLAGS_FlattenHierarchy) || (klass == startklass))
match++;
} else {
if (bflags & BFLAGS_Instance)
match++;
}
if (!match)
continue;
if (((mlisttype != MLISTTYPE_All) && (utf8_name != NULL)) && compare_func (mono_field_get_name (field), utf8_name))
continue;
g_ptr_array_add (ptr_array, field);
}
if (!(bflags & BFLAGS_DeclaredOnly) && (klass = m_class_get_parent (klass)))
goto handle_parent;
return ptr_array;
fail:
g_ptr_array_free (ptr_array, TRUE);
return NULL;
}
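/*
* Whether a method matches BFLAGS_NonPublic: private methods match only
* when declared on the class the lookup started from, since they are
* hidden on parent classes.
*/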
static gboolean
method_nonpublic (MonoMethod* method, gboolean start_klass)
{
switch (method->flags & METHOD_ATTRIBUTE_MEMBER_ACCESS_MASK) {
case METHOD_ATTRIBUTE_ASSEM:
return TRUE;
case METHOD_ATTRIBUTE_PRIVATE:
return start_klass;
case METHOD_ATTRIBUTE_PUBLIC:
return FALSE;
default:
return TRUE;
}
}
GPtrArray*
mono_class_get_methods_by_name (MonoClass *klass, const char *name, guint32 bflags, guint32 mlisttype, gboolean allow_ctors, MonoError *error)
{
GPtrArray *array;
MonoClass *startklass;
MonoMethod *method;
gpointer iter;
int match, nslots;
/* FIXME: use MonoBitSet */
guint32 method_slots_default [8];
guint32 *method_slots = NULL;
int (*compare_func) (const char *s1, const char *s2) = NULL;
array = g_ptr_array_new ();
startklass = klass;
compare_func = ((bflags & BFLAGS_IgnoreCase) || (mlisttype == MLISTTYPE_CaseInsensitive)) ? mono_utf8_strcasecmp : strcmp;
/* An optimization for calls made from Delegate:CreateDelegate () */
if (m_class_is_delegate (klass) && klass != mono_defaults.delegate_class && klass != mono_defaults.multicastdelegate_class && name && !strcmp (name, "Invoke") && (bflags == (BFLAGS_Public | BFLAGS_Static | BFLAGS_Instance))) {
method = mono_get_delegate_invoke_internal (klass);
g_assert (method);
g_ptr_array_add (array, method);
return array;
}
mono_class_setup_methods (klass);
mono_class_setup_vtable (klass);
if (mono_class_has_failure (klass))
goto loader_error;
if (is_generic_parameter (m_class_get_byval_arg (klass)))
nslots = mono_class_get_vtable_size (m_class_get_parent (klass));
else
nslots = MONO_CLASS_IS_INTERFACE_INTERNAL (klass) ? mono_class_num_methods (klass) : mono_class_get_vtable_size (klass);
if (nslots >= sizeof (method_slots_default) * 8) {
method_slots = g_new0 (guint32, nslots / 32 + 1);
} else {
method_slots = method_slots_default;
memset (method_slots, 0, sizeof (method_slots_default));
}
handle_parent:
mono_class_setup_methods (klass);
mono_class_setup_vtable (klass);
if (mono_class_has_failure (klass))
goto loader_error;
iter = NULL;
while ((method = mono_class_get_methods (klass, &iter))) {
match = 0;
if (method->slot != -1) {
g_assert (method->slot < nslots);
if (method_slots [method->slot >> 5] & (1 << (method->slot & 0x1f)))
continue;
if (!(method->flags & METHOD_ATTRIBUTE_NEW_SLOT))
method_slots [method->slot >> 5] |= 1 << (method->slot & 0x1f);
}
if (!allow_ctors && method->name [0] == '.' && (strcmp (method->name, ".ctor") == 0 || strcmp (method->name, ".cctor") == 0))
continue;
if ((method->flags & METHOD_ATTRIBUTE_MEMBER_ACCESS_MASK) == METHOD_ATTRIBUTE_PUBLIC) {
if (bflags & BFLAGS_Public)
match++;
} else if ((bflags & BFLAGS_NonPublic) && method_nonpublic (method, (klass == startklass))) {
match++;
}
if (!match)
continue;
match = 0;
if (method->flags & METHOD_ATTRIBUTE_STATIC) {
if (bflags & BFLAGS_Static)
if ((bflags & BFLAGS_FlattenHierarchy) || (klass == startklass))
match++;
} else {
if (bflags & BFLAGS_Instance)
match++;
}
if (!match)
continue;
if ((mlisttype != MLISTTYPE_All) && (name != NULL)) {
if (compare_func (name, method->name))
continue;
}
match = 0;
g_ptr_array_add (array, method);
}
if (!(bflags & BFLAGS_DeclaredOnly) && (klass = m_class_get_parent (klass)))
goto handle_parent;
if (method_slots != method_slots_default)
g_free (method_slots);
return array;
loader_error:
if (method_slots != method_slots_default)
g_free (method_slots);
g_ptr_array_free (array, TRUE);
g_assert (mono_class_has_failure (klass));
mono_error_set_for_class_failure (error, klass);
return NULL;
}
GPtrArray*
ves_icall_RuntimeType_GetMethodsByName_native (MonoQCallTypeHandle type_handle, const char *mname, guint32 bflags, guint32 mlisttype, MonoError *error)
{
MonoType *type = type_handle.type;
MonoClass *klass = mono_class_from_mono_type_internal (type);
if (m_type_is_byref (type))
return g_ptr_array_new ();
return mono_class_get_methods_by_name (klass, mname, bflags, mlisttype, FALSE, error);
}
GPtrArray*
ves_icall_RuntimeType_GetConstructors_native (MonoQCallTypeHandle type_handle, guint32 bflags, MonoError *error)
{
MonoType *type = type_handle.type;
if (m_type_is_byref (type)) {
return g_ptr_array_new ();
}
MonoClass *startklass, *klass;
klass = startklass = mono_class_from_mono_type_internal (type);
mono_class_setup_methods (klass);
if (mono_class_has_failure (klass)) {
mono_error_set_for_class_failure (error, klass);
return NULL;
}
GPtrArray *res_array = g_ptr_array_sized_new (4); /* FIXME: guesstimating */
MonoMethod *method;
gpointer iter = NULL;
while ((method = mono_class_get_methods (klass, &iter))) {
int match = 0;
if (strcmp (method->name, ".ctor") && strcmp (method->name, ".cctor"))
continue;
if ((method->flags & METHOD_ATTRIBUTE_MEMBER_ACCESS_MASK) == METHOD_ATTRIBUTE_PUBLIC) {
if (bflags & BFLAGS_Public)
match++;
} else {
if (bflags & BFLAGS_NonPublic)
match++;
}
if (!match)
continue;
match = 0;
if (method->flags & METHOD_ATTRIBUTE_STATIC) {
if (bflags & BFLAGS_Static)
if ((bflags & BFLAGS_FlattenHierarchy) || (klass == startklass))
match++;
} else {
if (bflags & BFLAGS_Instance)
match++;
}
if (!match)
continue;
g_ptr_array_add (res_array, method);
}
return res_array;
}
static guint
property_hash (gconstpointer data)
{
MonoProperty *prop = (MonoProperty*)data;
return g_str_hash (prop->name);
}
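/*
* Return TRUE if two property accessors represent the same virtual method:
* either they share a vtable slot, or their signatures are equal (compared
* uninflated when both come from the same generic type definition).
*/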
static gboolean
property_accessor_override (MonoMethod *method1, MonoMethod *method2)
{
if (method1->slot != -1 && method1->slot == method2->slot)
return TRUE;
if (mono_class_get_generic_type_definition (method1->klass) == mono_class_get_generic_type_definition (method2->klass)) {
if (method1->is_inflated)
method1 = ((MonoMethodInflated*) method1)->declaring;
if (method2->is_inflated)
method2 = ((MonoMethodInflated*) method2)->declaring;
}
return mono_metadata_signature_equal (mono_method_signature_internal (method1), mono_method_signature_internal (method2));
}
static gboolean
property_equal (MonoProperty *prop1, MonoProperty *prop2)
{
// Properties are hide-by-name-and-signature
if (!g_str_equal (prop1->name, prop2->name))
return FALSE;
/* If we see a property in a generic method, we want to
compare the generic signatures, not the inflated signatures
because we might conflate two properties that were
distinct:
class Foo<T,U> {
public T this[T t] { getter { return t; } } // method 1
public U this[U u] { getter { return u; } } // method 2
}
If we see int Foo<int,int>::Item[int] we need to know if
the indexer came from method 1 or from method 2, and we
shouldn't conflate them. (Bugzilla 36283)
*/
if (prop1->get && prop2->get && !property_accessor_override (prop1->get, prop2->get))
return FALSE;
if (prop1->set && prop2->set && !property_accessor_override (prop1->set, prop2->set))
return FALSE;
return TRUE;
}
static gboolean
property_accessor_nonpublic (MonoMethod* accessor, gboolean start_klass)
{
if (!accessor)
return FALSE;
return method_nonpublic (accessor, start_klass);
}
GPtrArray*
ves_icall_RuntimeType_GetPropertiesByName_native (MonoQCallTypeHandle type_handle, gchar *propname, guint32 bflags, guint32 mlisttype, MonoError *error)
{
// Fetch non-public properties as well because they can hide public properties with the same name in base classes
bflags |= BFLAGS_NonPublic;
MonoType *type = type_handle.type;
if (m_type_is_byref (type))
return g_ptr_array_new ();
MonoClass *startklass, *klass;
klass = startklass = mono_class_from_mono_type_internal (type);
int (*compare_func) (const char *s1, const char *s2) = (mlisttype == MLISTTYPE_CaseInsensitive) ? mono_utf8_strcasecmp : strcmp;
GPtrArray *res_array = g_ptr_array_sized_new (8); /* This is the average for ASP.NET types */
GHashTable *properties = g_hash_table_new (property_hash, (GEqualFunc)property_equal);
handle_parent:
mono_class_setup_methods (klass);
mono_class_setup_vtable (klass);
if (mono_class_has_failure (klass)) {
mono_error_set_for_class_failure (error, klass);
goto loader_error;
}
MonoProperty *prop;
gpointer iter;
iter = NULL;
while ((prop = mono_class_get_properties (klass, &iter))) {
int match = 0;
MonoMethod *method = prop->get;
if (!method)
method = prop->set;
guint32 flags = 0;
if (method)
flags = method->flags;
if ((prop->get && ((prop->get->flags & METHOD_ATTRIBUTE_MEMBER_ACCESS_MASK) == METHOD_ATTRIBUTE_PUBLIC)) ||
(prop->set && ((prop->set->flags & METHOD_ATTRIBUTE_MEMBER_ACCESS_MASK) == METHOD_ATTRIBUTE_PUBLIC))) {
if (bflags & BFLAGS_Public)
match++;
} else if (bflags & BFLAGS_NonPublic) {
if (property_accessor_nonpublic(prop->get, startklass == klass) ||
property_accessor_nonpublic(prop->set, startklass == klass)) {
match++;
}
}
if (!match)
continue;
match = 0;
if (flags & METHOD_ATTRIBUTE_STATIC) {
if (bflags & BFLAGS_Static)
if ((bflags & BFLAGS_FlattenHierarchy) || (klass == startklass))
match++;
} else {
if (bflags & BFLAGS_Instance)
match++;
}
if (!match)
continue;
match = 0;
if ((mlisttype != MLISTTYPE_All) && (propname != NULL) && compare_func (propname, prop->name))
continue;
if (g_hash_table_lookup (properties, prop))
continue;
g_ptr_array_add (res_array, prop);
g_hash_table_insert (properties, prop, prop);
}
if (!(bflags & BFLAGS_DeclaredOnly) && (klass = m_class_get_parent (klass))) {
// BFLAGS_NonPublic should be excluded for base classes
bflags &= ~BFLAGS_NonPublic;
goto handle_parent;
}
g_hash_table_destroy (properties);
return res_array;
loader_error:
if (properties)
g_hash_table_destroy (properties);
g_ptr_array_free (res_array, TRUE);
return NULL;
}
static guint
event_hash (gconstpointer data)
{
MonoEvent *event = (MonoEvent*)data;
return g_str_hash (event->name);
}
static gboolean
event_equal (MonoEvent *event1, MonoEvent *event2)
{
// Events are hide-by-name
return g_str_equal (event1->name, event2->name);
}
GPtrArray*
ves_icall_RuntimeType_GetEvents_native (MonoQCallTypeHandle type_handle, char *utf8_name, guint32 mlisttype, MonoError *error)
{
MonoType *type = type_handle.type;
if (m_type_is_byref (type))
return g_ptr_array_new ();
int (*compare_func) (const char *s1, const char *s2) = (mlisttype == MLISTTYPE_CaseInsensitive) ? mono_utf8_strcasecmp : strcmp;
GPtrArray *res_array = g_ptr_array_sized_new (4);
MonoClass *startklass, *klass;
klass = startklass = mono_class_from_mono_type_internal (type);
GHashTable *events = g_hash_table_new (event_hash, (GEqualFunc)event_equal);
handle_parent:
mono_class_setup_methods (klass);
mono_class_setup_vtable (klass);
if (mono_class_has_failure (klass)) {
mono_error_set_for_class_failure (error, klass);
goto failure;
}
MonoEvent *event;
gpointer iter;
iter = NULL;
while ((event = mono_class_get_events (klass, &iter))) {
// Skip inherited private events and inherited events
// without add/remove/raise methods
if (klass != startklass)
{
MonoMethod *method = event->add;
if (!method)
method = event->remove;
if (!method)
method = event->raise;
if (!method)
continue;
if ((method->flags & METHOD_ATTRIBUTE_MEMBER_ACCESS_MASK) == METHOD_ATTRIBUTE_PRIVATE)
continue;
}
if ((mlisttype != MLISTTYPE_All) && (utf8_name != NULL) && compare_func (event->name, utf8_name))
continue;
if (g_hash_table_lookup (events, event))
continue;
g_ptr_array_add (res_array, event);
g_hash_table_insert (events, event, event);
}
if ((klass = m_class_get_parent (klass)))
goto handle_parent;
g_hash_table_destroy (events);
return res_array;
failure:
if (events != NULL)
g_hash_table_destroy (events);
g_ptr_array_free (res_array, TRUE);
return NULL;
}
GPtrArray *
ves_icall_RuntimeType_GetNestedTypes_native (MonoQCallTypeHandle type_handle, char *str, guint32 bflags, guint32 mlisttype, MonoError *error)
{
MonoType *type = type_handle.type;
if (m_type_is_byref (type))
return g_ptr_array_new ();
int (*compare_func) (const char *s1, const char *s2) = ((bflags & BFLAGS_IgnoreCase) || (mlisttype == MLISTTYPE_CaseInsensitive)) ? mono_utf8_strcasecmp : strcmp;
MonoClass *klass = mono_class_from_mono_type_internal (type);
/*
* If a nested type is generic, return its generic type definition.
* Note that this means that the return value is essentially the set
* of nested types of the generic type definition of @klass.
*
* A note in MSDN claims that a generic type definition can have
* nested types that aren't generic. In any case, the container of that
* nested type would be the generic type definition.
*/
if (mono_class_is_ginst (klass))
klass = mono_class_get_generic_class (klass)->container_class;
GPtrArray *res_array = g_ptr_array_new ();
MonoClass *nested;
gpointer iter = NULL;
while ((nested = mono_class_get_nested_types (klass, &iter))) {
int match = 0;
if ((mono_class_get_flags (nested) & TYPE_ATTRIBUTE_VISIBILITY_MASK) == TYPE_ATTRIBUTE_NESTED_PUBLIC) {
if (bflags & BFLAGS_Public)
match++;
} else {
if (bflags & BFLAGS_NonPublic)
match++;
}
if (!match)
continue;
if ((mlisttype != MLISTTYPE_All) && (str != NULL) && compare_func (m_class_get_name (nested), str))
continue;
g_ptr_array_add (res_array, m_class_get_byval_arg (nested));
}
return res_array;
}
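/*
* Helpers for InternalGetType below: probe a single module of a dynamic
* assembly (a ModuleBuilder or an already-loaded module) for the parsed
* type name.
*/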
static MonoType*
get_type_from_module_builder_module (MonoAssemblyLoadContext *alc, MonoArrayHandle modules, int i, MonoTypeNameParse *info, MonoBoolean ignoreCase, gboolean *type_resolve, MonoError *error)
{
HANDLE_FUNCTION_ENTER ();
MonoType *type = NULL;
MonoReflectionModuleBuilderHandle mb = MONO_HANDLE_NEW (MonoReflectionModuleBuilder, NULL);
MONO_HANDLE_ARRAY_GETREF (mb, modules, i);
MonoDynamicImage *dynamic_image = MONO_HANDLE_GETVAL (mb, dynamic_image);
type = mono_reflection_get_type_checked (alc, &dynamic_image->image, &dynamic_image->image, info, ignoreCase, FALSE, type_resolve, error);
HANDLE_FUNCTION_RETURN_VAL (type);
}
static MonoType*
get_type_from_module_builder_loaded_modules (MonoAssemblyLoadContext *alc, MonoArrayHandle loaded_modules, int i, MonoTypeNameParse *info, MonoBoolean ignoreCase, gboolean *type_resolve, MonoError *error)
{
HANDLE_FUNCTION_ENTER ();
MonoType *type = NULL;
MonoReflectionModuleHandle mod = MONO_HANDLE_NEW (MonoReflectionModule, NULL);
MONO_HANDLE_ARRAY_GETREF (mod, loaded_modules, i);
MonoImage *image = MONO_HANDLE_GETVAL (mod, image);
type = mono_reflection_get_type_checked (alc, image, image, info, ignoreCase, FALSE, type_resolve, error);
HANDLE_FUNCTION_RETURN_VAL (type);
}
MonoReflectionTypeHandle
ves_icall_System_Reflection_Assembly_InternalGetType (MonoReflectionAssemblyHandle assembly_h, MonoReflectionModuleHandle module, MonoStringHandle name, MonoBoolean throwOnError, MonoBoolean ignoreCase, MonoError *error)
{
ERROR_DECL (parse_error);
MonoTypeNameParse info;
gboolean type_resolve;
MonoAssemblyLoadContext *alc = mono_alc_get_ambient ();
/* On MS.NET, this does not fire a TypeResolve event */
type_resolve = TRUE;
char *str = mono_string_handle_to_utf8 (name, error);
goto_if_nok (error, fail);
/*g_print ("requested type %s in %s\n", str, assembly->assembly->aname.name);*/
if (!mono_reflection_parse_type_checked (str, &info, parse_error)) {
g_free (str);
mono_reflection_free_type_info (&info);
mono_error_cleanup (parse_error);
if (throwOnError) {
mono_error_set_argument (error, "typeName@0", "failed to parse the type");
goto fail;
}
/*g_print ("failed parse\n");*/
return MONO_HANDLE_CAST (MonoReflectionType, NULL_HANDLE);
}
if (info.assembly.name) {
g_free (str);
mono_reflection_free_type_info (&info);
if (throwOnError) {
mono_error_set_argument (error, NULL, "Type names passed to Assembly.GetType() must not specify an assembly.");
goto fail;
}
return MONO_HANDLE_CAST (MonoReflectionType, NULL_HANDLE);
}
MonoType *type;
type = NULL;
if (!MONO_HANDLE_IS_NULL (module)) {
MonoImage *image = MONO_HANDLE_GETVAL (module, image);
if (image) {
type = mono_reflection_get_type_checked (alc, image, image, &info, ignoreCase, FALSE, &type_resolve, error);
if (!is_ok (error)) {
g_free (str);
mono_reflection_free_type_info (&info);
goto fail;
}
}
}
else {
MonoAssembly *assembly = MONO_HANDLE_GETVAL (assembly_h, assembly);
if (assembly_is_dynamic (assembly)) {
/* Enumerate all modules */
MonoReflectionAssemblyBuilderHandle abuilder = MONO_HANDLE_NEW (MonoReflectionAssemblyBuilder, NULL);
MONO_HANDLE_ASSIGN (abuilder, assembly_h);
int i;
MonoArrayHandle modules = MONO_HANDLE_NEW (MonoArray, NULL);
MONO_HANDLE_GET (modules, abuilder, modules);
if (!MONO_HANDLE_IS_NULL (modules)) {
int n = mono_array_handle_length (modules);
for (i = 0; i < n; ++i) {
type = get_type_from_module_builder_module (alc, modules, i, &info, ignoreCase, &type_resolve, error);
if (!is_ok (error)) {
g_free (str);
mono_reflection_free_type_info (&info);
goto fail;
}
if (type)
break;
}
}
MonoArrayHandle loaded_modules = MONO_HANDLE_NEW (MonoArray, NULL);
MONO_HANDLE_GET (loaded_modules, abuilder, loaded_modules);
if (!type && !MONO_HANDLE_IS_NULL (loaded_modules)) {
int n = mono_array_handle_length (loaded_modules);
for (i = 0; i < n; ++i) {
type = get_type_from_module_builder_loaded_modules (alc, loaded_modules, i, &info, ignoreCase, &type_resolve, error);
if (!is_ok (error)) {
g_free (str);
mono_reflection_free_type_info (&info);
goto fail;
}
if (type)
break;
}
}
}
else {
type = mono_reflection_get_type_checked (alc, assembly->image, assembly->image, &info, ignoreCase, FALSE, &type_resolve, error);
if (!is_ok (error)) {
g_free (str);
mono_reflection_free_type_info (&info);
goto fail;
}
}
}
g_free (str);
mono_reflection_free_type_info (&info);
if (!type) {
if (throwOnError) {
ERROR_DECL (inner_error);
char *type_name = mono_string_handle_to_utf8 (name, inner_error);
mono_error_assert_ok (inner_error);
MonoAssembly *assembly = MONO_HANDLE_GETVAL (assembly_h, assembly);
char *assmname = mono_stringify_assembly_name (&assembly->aname);
mono_error_set_type_load_name (error, type_name, assmname, "%s", "");
goto fail;
}
return MONO_HANDLE_CAST (MonoReflectionType, NULL_HANDLE);
}
if (type->type == MONO_TYPE_CLASS) {
MonoClass *klass = mono_type_get_class_internal (type);
/* need to report exceptions ? */
if (throwOnError && mono_class_has_failure (klass)) {
/* report SecurityException (or others) that occurred when loading the assembly */
mono_error_set_for_class_failure (error, klass);
goto fail;
}
}
/* g_print ("got it\n"); */
return mono_type_get_object_handle (type, error);
fail:
g_assert (!is_ok (error));
return MONO_HANDLE_CAST (MonoReflectionType, NULL_HANDLE);
}
/* This corresponds to RuntimeAssembly.AssemblyInfoKind */
typedef enum {
ASSEMBLY_INFO_KIND_LOCATION = 1,
ASSEMBLY_INFO_KIND_CODEBASE = 2,
ASSEMBLY_INFO_KIND_FULLNAME = 3,
ASSEMBLY_INFO_KIND_VERSION = 4
} MonoAssemblyInfoKind;
void
ves_icall_System_Reflection_RuntimeAssembly_GetInfo (MonoQCallAssemblyHandle assembly_h, MonoObjectHandleOnStack res, guint32 int_kind, MonoError *error)
{
MonoAssembly *assembly = assembly_h.assembly;
MonoAssemblyInfoKind kind = (MonoAssemblyInfoKind)int_kind;
switch (kind) {
case ASSEMBLY_INFO_KIND_LOCATION: {
const char *image_name = m_image_get_filename (assembly->image);
HANDLE_ON_STACK_SET (res, mono_string_new_checked (image_name != NULL ? image_name : "", error));
break;
}
case ASSEMBLY_INFO_KIND_CODEBASE: {
/* return NULL for bundled assemblies in single-file scenarios */
const char* filename = m_image_get_filename (assembly->image);
if (!filename)
break;
gchar *absolute;
if (g_path_is_absolute (filename))
absolute = g_strdup (filename);
else
absolute = g_build_filename (assembly->basedir, filename, (const char*)NULL);
mono_icall_make_platform_path (absolute);
const gchar *prepend = mono_icall_get_file_path_prefix (absolute);
gchar *uri = g_strconcat (prepend, absolute, (const char*)NULL);
g_free (absolute);
if (uri) {
HANDLE_ON_STACK_SET (res, mono_string_new_checked (uri, error));
g_free (uri);
return_if_nok (error);
}
break;
}
case ASSEMBLY_INFO_KIND_FULLNAME: {
char *name = mono_stringify_assembly_name (&assembly->aname);
HANDLE_ON_STACK_SET (res, mono_string_new_checked (name, error));
g_free (name);
return_if_nok (error);
break;
}
case ASSEMBLY_INFO_KIND_VERSION: {
HANDLE_ON_STACK_SET (res, mono_string_new_checked (assembly->image->version, error));
return_if_nok (error);
break;
}
default:
g_assert_not_reached ();
}
}
void
ves_icall_System_Reflection_RuntimeAssembly_GetEntryPoint (MonoQCallAssemblyHandle assembly_h, MonoObjectHandleOnStack res, MonoError *error)
{
MonoAssembly *assembly = assembly_h.assembly;
MonoMethod *method;
guint32 token = mono_image_get_entry_point (assembly->image);
if (!token)
return;
method = mono_get_method_checked (assembly->image, token, NULL, NULL, error);
return_if_nok (error);
HANDLE_ON_STACK_SET (res, mono_method_get_object_checked (method, NULL, error));
}
void
ves_icall_System_Reflection_Assembly_GetManifestModuleInternal (MonoQCallAssemblyHandle assembly_h, MonoObjectHandleOnStack res, MonoError *error)
{
MonoAssembly *a = assembly_h.assembly;
HANDLE_ON_STACK_SET (res, MONO_HANDLE_RAW (mono_module_get_object_handle (a->image, error)));
}
static gboolean
add_manifest_resource_name_to_array (MonoImage *image, MonoTableInfo *table, int i, MonoArrayHandle dest, MonoError *error)
{
HANDLE_FUNCTION_ENTER ();
const char *val = mono_metadata_string_heap (image, mono_metadata_decode_row_col (table, i, MONO_MANIFEST_NAME));
MonoStringHandle str = mono_string_new_handle (val, error);
goto_if_nok (error, leave);
MONO_HANDLE_ARRAY_SETREF (dest, i, str);
leave:
HANDLE_FUNCTION_RETURN_VAL (is_ok (error));
}
void
ves_icall_System_Reflection_RuntimeAssembly_GetManifestResourceNames (MonoQCallAssemblyHandle assembly_h, MonoObjectHandleOnStack res, MonoError *error)
{
MonoAssembly *assembly = assembly_h.assembly;
MonoTableInfo *table = &assembly->image->tables [MONO_TABLE_MANIFESTRESOURCE];
/* FIXME: metadata-update */
int rows = table_info_get_rows (table);
MonoArrayHandle result = mono_array_new_handle (mono_defaults.string_class, rows, error);
return_if_nok (error);
for (int i = 0; i < rows; ++i) {
if (!add_manifest_resource_name_to_array (assembly->image, table, i, result, error))
return;
}
HANDLE_ON_STACK_SET (res, MONO_HANDLE_RAW (result));
}
static MonoAssemblyName*
create_referenced_assembly_name (MonoImage *image, int i, MonoError *error)
{
MonoAssemblyName *aname = g_new0 (MonoAssemblyName, 1);
mono_assembly_get_assemblyref_checked (image, i, aname, error);
return_val_if_nok (error, NULL);
aname->hash_alg = ASSEMBLY_HASH_SHA1 /* SHA1 (default) */;
/* name and culture are pointers into the image tables, but we need
* real malloc'd strings (so that we can g_free() them later from
* Mono.RuntimeMarshal.FreeAssemblyName) */
aname->name = g_strdup (aname->name);
aname->culture = g_strdup (aname->culture);
/* Don't need the hash value in managed */
aname->hash_value = NULL;
aname->hash_len = 0;
g_assert (aname->public_key == NULL);
/* note: this function doesn't return the codebase on purpose (i.e. it can
be used under partial trust as path information isn't present). */
return aname;
}
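/*
 * Returns a GPtrArray of heap-allocated MonoAssemblyName*s, one per
 * ASSEMBLYREF row; the managed caller releases every entry through
 * RuntimeMarshal.FreeAssemblyName below.
 */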
GPtrArray*
ves_icall_System_Reflection_Assembly_InternalGetReferencedAssemblies (MonoReflectionAssemblyHandle assembly_h, MonoError *error)
{
MonoAssembly *assembly = MONO_HANDLE_GETVAL (assembly_h, assembly);
MonoImage *image = assembly->image;
int count;
/* FIXME: metadata-update */
if (image_is_dynamic (assembly->image)) {
MonoDynamicTable *t = &(((MonoDynamicImage*) image)->tables [MONO_TABLE_ASSEMBLYREF]);
count = t->rows;
}
else {
MonoTableInfo *t = &image->tables [MONO_TABLE_ASSEMBLYREF];
count = table_info_get_rows (t);
}
GPtrArray *result = g_ptr_array_sized_new (count);
for (int i = 0; i < count; i++) {
MonoAssemblyName *aname = create_referenced_assembly_name (image, i, error);
if (!is_ok (error))
break;
g_ptr_array_add (result, aname);
}
return result;
}
/* move this to some file in mono/util/ */
static char *
g_concat_dir_and_file (const char *dir, const char *file)
{
g_return_val_if_fail (dir != NULL, NULL);
g_return_val_if_fail (file != NULL, NULL);
/*
* If the directory name doesn't have a / on the end, we need
* to add one so we get a proper path to the file
*/
if (dir [strlen(dir) - 1] != G_DIR_SEPARATOR)
return g_strconcat (dir, G_DIR_SEPARATOR_S, file, (const char*)NULL);
else
return g_strconcat (dir, file, (const char*)NULL);
}
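/*
 * Gives managed code a chance to satisfy a missing manifest resource by
 * invoking AssemblyLoadContext.OnResourceResolve; returns the resolving
 * assembly or a null handle. The method lookup is cached in a static
 * pointer, and nothing is invoked in no-exec mode (e.g. when the runtime
 * acts as an AOT compiler).
 */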
static MonoReflectionAssemblyHandle
try_resource_resolve_name (MonoReflectionAssemblyHandle assembly_handle, MonoStringHandle name_handle)
{
MonoObjectHandle ret;
ERROR_DECL (error);
HANDLE_FUNCTION_ENTER ();
if (mono_runtime_get_no_exec ())
goto return_null;
MONO_STATIC_POINTER_INIT (MonoMethod, resolve_method)
static gboolean inited;
if (!inited) {
MonoClass *alc_class = mono_class_get_assembly_load_context_class ();
g_assert (alc_class);
resolve_method = mono_class_get_method_from_name_checked (alc_class, "OnResourceResolve", -1, 0, error);
inited = TRUE;
}
mono_error_cleanup (error);
error_init_reuse (error);
MONO_STATIC_POINTER_INIT_END (MonoMethod, resolve_method)
if (!resolve_method)
goto return_null;
gpointer args [2];
args [0] = MONO_HANDLE_RAW (assembly_handle);
args [1] = MONO_HANDLE_RAW (name_handle);
ret = mono_runtime_try_invoke_handle (resolve_method, NULL_HANDLE, args, error);
goto_if_nok (error, return_null);
goto exit;
return_null:
ret = NULL_HANDLE;
exit:
HANDLE_FUNCTION_RETURN_REF (MonoReflectionAssembly, MONO_HANDLE_CAST (MonoReflectionAssembly, ret));
}
void *
ves_icall_System_Reflection_RuntimeAssembly_GetManifestResourceInternal (MonoQCallAssemblyHandle assembly_h, MonoStringHandle name, gint32 *size, MonoObjectHandleOnStack ref_module, MonoError *error)
{
MonoAssembly *assembly = assembly_h.assembly;
MonoTableInfo *table = &assembly->image->tables [MONO_TABLE_MANIFESTRESOURCE];
guint32 i;
guint32 cols [MONO_MANIFEST_SIZE];
guint32 impl, file_idx;
const char *val;
MonoImage *module;
char *n = mono_string_handle_to_utf8 (name, error);
return_val_if_nok (error, NULL);
/* FIXME: metadata update */
int rows = table_info_get_rows (table);
for (i = 0; i < rows; ++i) {
mono_metadata_decode_row (table, i, cols, MONO_MANIFEST_SIZE);
val = mono_metadata_string_heap (assembly->image, cols [MONO_MANIFEST_NAME]);
if (strcmp (val, n) == 0)
break;
}
g_free (n);
if (i == rows)
return NULL;
/* FIXME */
impl = cols [MONO_MANIFEST_IMPLEMENTATION];
if (impl) {
/*
* this code should only be called after obtaining the
* ResourceInfo and handling the other cases.
*/
g_assert ((impl & MONO_IMPLEMENTATION_MASK) == MONO_IMPLEMENTATION_FILE);
file_idx = impl >> MONO_IMPLEMENTATION_BITS;
module = mono_image_load_file_for_image_checked (assembly->image, file_idx, error);
if (!is_ok (error) || !module)
return NULL;
} else {
module = assembly->image;
}
MonoReflectionModuleHandle rm = mono_module_get_object_handle (module, error);
return_val_if_nok (error, NULL);
HANDLE_ON_STACK_SET (ref_module, MONO_HANDLE_RAW (rm));
return (void*)mono_image_get_resource (module, cols [MONO_MANIFEST_OFFSET], (guint32*)size);
}
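/*
 * Fills INFO for the named manifest resource: resources in the manifest get
 * RESOURCE_LOCATION_EMBEDDED | RESOURCE_LOCATION_IN_MANIFEST, FILE
 * implementations record the file name, and ASSEMBLYREF implementations
 * recurse into the referenced assembly and OR in
 * RESOURCE_LOCATION_ANOTHER_ASSEMBLY. Returns FALSE if the resource is not
 * found or an error was set.
 */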
static gboolean
get_manifest_resource_info_internal (MonoAssembly *assembly, MonoStringHandle name, MonoManifestResourceInfoHandle info, MonoError *error)
{
HANDLE_FUNCTION_ENTER ();
MonoTableInfo *table = &assembly->image->tables [MONO_TABLE_MANIFESTRESOURCE];
int i;
guint32 cols [MONO_MANIFEST_SIZE];
guint32 file_cols [MONO_FILE_SIZE];
const char *val;
char *n;
gboolean result = FALSE;
n = mono_string_handle_to_utf8 (name, error);
goto_if_nok (error, leave);
int rows = table_info_get_rows (table);
for (i = 0; i < rows; ++i) {
mono_metadata_decode_row (table, i, cols, MONO_MANIFEST_SIZE);
val = mono_metadata_string_heap (assembly->image, cols [MONO_MANIFEST_NAME]);
if (strcmp (val, n) == 0)
break;
}
g_free (n);
if (i == rows)
goto leave;
if (!cols [MONO_MANIFEST_IMPLEMENTATION]) {
MONO_HANDLE_SETVAL (info, location, guint32, RESOURCE_LOCATION_EMBEDDED | RESOURCE_LOCATION_IN_MANIFEST);
}
else {
switch (cols [MONO_MANIFEST_IMPLEMENTATION] & MONO_IMPLEMENTATION_MASK) {
case MONO_IMPLEMENTATION_FILE:
i = cols [MONO_MANIFEST_IMPLEMENTATION] >> MONO_IMPLEMENTATION_BITS;
table = &assembly->image->tables [MONO_TABLE_FILE];
mono_metadata_decode_row (table, i - 1, file_cols, MONO_FILE_SIZE);
val = mono_metadata_string_heap (assembly->image, file_cols [MONO_FILE_NAME]);
MONO_HANDLE_SET (info, filename, mono_string_new_handle (val, error));
if (file_cols [MONO_FILE_FLAGS] & FILE_CONTAINS_NO_METADATA)
MONO_HANDLE_SETVAL (info, location, guint32, 0);
else
MONO_HANDLE_SETVAL (info, location, guint32, RESOURCE_LOCATION_EMBEDDED);
break;
case MONO_IMPLEMENTATION_ASSEMBLYREF:
i = cols [MONO_MANIFEST_IMPLEMENTATION] >> MONO_IMPLEMENTATION_BITS;
mono_assembly_load_reference (assembly->image, i - 1);
if (assembly->image->references [i - 1] == REFERENCE_MISSING) {
mono_error_set_file_not_found (error, NULL, "Assembly %d referenced from assembly %s not found", i - 1, assembly->image->name);
goto leave;
}
MonoReflectionAssemblyHandle assm_obj;
assm_obj = mono_assembly_get_object_handle (assembly->image->references [i - 1], error);
goto_if_nok (error, leave);
MONO_HANDLE_SET (info, assembly, assm_obj);
/* Obtain info recursively */
get_manifest_resource_info_internal (MONO_HANDLE_GETVAL (assm_obj, assembly), name, info, error);
goto_if_nok (error, leave);
guint32 location;
location = MONO_HANDLE_GETVAL (info, location);
location |= RESOURCE_LOCATION_ANOTHER_ASSEMBLY;
MONO_HANDLE_SETVAL (info, location, guint32, location);
break;
case MONO_IMPLEMENTATION_EXP_TYPE:
g_assert_not_reached ();
break;
}
}
result = TRUE;
leave:
HANDLE_FUNCTION_RETURN_VAL (result);
}
MonoBoolean
ves_icall_System_Reflection_RuntimeAssembly_GetManifestResourceInfoInternal (MonoQCallAssemblyHandle assembly_h, MonoStringHandle name, MonoManifestResourceInfoHandle info_h, MonoError *error)
{
return get_manifest_resource_info_internal (assembly_h.assembly, name, info_h, error);
}
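/*
 * Helpers for GetModulesInternal below: the resulting Module[] starts with
 * the manifest module, followed by the loaded netmodules and then one entry
 * per FILE table row, where data-only files (FILE_CONTAINS_NO_METADATA) are
 * represented by a module-file object rather than a loaded image.
 */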
static gboolean
add_module_to_modules_array (MonoArrayHandle dest, int *dest_idx, MonoImage* module, MonoError *error)
{
HANDLE_FUNCTION_ENTER ();
if (module) {
MonoReflectionModuleHandle rm = mono_module_get_object_handle (module, error);
goto_if_nok (error, leave);
MONO_HANDLE_ARRAY_SETREF (dest, *dest_idx, rm);
++(*dest_idx);
}
leave:
HANDLE_FUNCTION_RETURN_VAL (is_ok (error));
}
static gboolean
add_file_to_modules_array (MonoArrayHandle dest, int dest_idx, MonoImage *image, MonoTableInfo *table, int table_idx, MonoError *error)
{
HANDLE_FUNCTION_ENTER ();
guint32 cols [MONO_FILE_SIZE];
mono_metadata_decode_row (table, table_idx, cols, MONO_FILE_SIZE);
if (cols [MONO_FILE_FLAGS] & FILE_CONTAINS_NO_METADATA) {
MonoReflectionModuleHandle rm = mono_module_file_get_object_handle (image, table_idx, error);
goto_if_nok (error, leave);
MONO_HANDLE_ARRAY_SETREF (dest, dest_idx, rm);
} else {
MonoImage *m = mono_image_load_file_for_image_checked (image, table_idx + 1, error);
goto_if_nok (error, leave);
if (!m) {
const char *filename = mono_metadata_string_heap (image, cols [MONO_FILE_NAME]);
mono_error_set_simple_file_not_found (error, filename);
goto leave;
}
MonoReflectionModuleHandle rm = mono_module_get_object_handle (m, error);
goto_if_nok (error, leave);
MONO_HANDLE_ARRAY_SETREF (dest, dest_idx, rm);
}
leave:
HANDLE_FUNCTION_RETURN_VAL (is_ok (error));
}
void
ves_icall_System_Reflection_RuntimeAssembly_GetModulesInternal (MonoQCallAssemblyHandle assembly_h, MonoObjectHandleOnStack res_h, MonoError *error)
{
MonoAssembly *assembly = assembly_h.assembly;
MonoClass *klass;
int i, j, file_count = 0;
MonoImage **modules;
guint32 module_count, real_module_count;
MonoTableInfo *table;
MonoImage *image = assembly->image;
g_assert (image != NULL);
g_assert (!assembly_is_dynamic (assembly));
table = &image->tables [MONO_TABLE_FILE];
file_count = table_info_get_rows (table);
modules = image->modules;
module_count = image->module_count;
real_module_count = 0;
for (i = 0; i < module_count; ++i)
if (modules [i])
real_module_count ++;
klass = mono_class_get_module_class ();
MonoArrayHandle res = mono_array_new_handle (klass, 1 + real_module_count + file_count, error);
return_if_nok (error);
MonoReflectionModuleHandle image_obj = mono_module_get_object_handle (image, error);
return_if_nok (error);
MONO_HANDLE_ARRAY_SETREF (res, 0, image_obj);
j = 1;
for (i = 0; i < module_count; ++i)
if (!add_module_to_modules_array (res, &j, modules[i], error))
return;
for (i = 0; i < file_count; ++i, ++j) {
if (!add_file_to_modules_array (res, j, image, table, i, error))
return;
}
HANDLE_ON_STACK_SET (res_h, MONO_HANDLE_RAW (res));
}
MonoReflectionMethodHandle
ves_icall_GetCurrentMethod (MonoError *error)
{
MonoMethod *m = mono_method_get_last_managed ();
if (!m) {
mono_error_set_not_supported (error, "Stack walks are not supported on this platform.");
return MONO_HANDLE_CAST (MonoReflectionMethod, NULL_HANDLE);
}
while (m->is_inflated)
m = ((MonoMethodInflated*)m)->declaring;
return mono_method_get_object_handle (m, NULL, error);
}
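/*
 * Maps METHOD onto KLASS: an inflated method is re-inflated with KLASS's
 * generic context; otherwise the method is located by index in
 * method->klass and the method at the same index in KLASS is returned,
 * which presumes the two classes share a method layout (e.g. a generic
 * type definition and its instantiations).
 */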
static MonoMethod*
mono_method_get_equivalent_method (MonoMethod *method, MonoClass *klass)
{
int offset = -1, i;
if (method->is_inflated && ((MonoMethodInflated*)method)->context.method_inst) {
ERROR_DECL (error);
MonoMethod *result;
MonoMethodInflated *inflated = (MonoMethodInflated*)method;
// method is inflated; inflate it on the other class
MonoGenericContext ctx;
ctx.method_inst = inflated->context.method_inst;
ctx.class_inst = inflated->context.class_inst;
if (mono_class_is_ginst (klass))
ctx.class_inst = mono_class_get_generic_class (klass)->context.class_inst;
else if (mono_class_is_gtd (klass))
ctx.class_inst = mono_class_get_generic_container (klass)->context.class_inst;
result = mono_class_inflate_generic_method_full_checked (inflated->declaring, klass, &ctx, error);
g_assert (is_ok (error)); /* FIXME don't swallow the error */
return result;
}
mono_class_setup_methods (method->klass);
if (mono_class_has_failure (method->klass))
return NULL;
int mcount = mono_class_get_method_count (method->klass);
MonoMethod **method_klass_methods = m_class_get_methods (method->klass);
for (i = 0; i < mcount; ++i) {
if (method_klass_methods [i] == method) {
offset = i;
break;
}
}
mono_class_setup_methods (klass);
if (mono_class_has_failure (klass))
return NULL;
g_assert (offset >= 0 && offset < mono_class_get_method_count (klass));
return m_class_get_methods (klass) [offset];
}
MonoReflectionMethodHandle
ves_icall_System_Reflection_RuntimeMethodInfo_GetMethodFromHandleInternalType_native (MonoMethod *method, MonoType *type, MonoBoolean generic_check, MonoError *error)
{
MonoClass *klass;
if (type && generic_check) {
klass = mono_class_from_mono_type_internal (type);
if (mono_class_get_generic_type_definition (method->klass) != mono_class_get_generic_type_definition (klass))
return MONO_HANDLE_CAST (MonoReflectionMethod, NULL_HANDLE);
if (method->klass != klass) {
method = mono_method_get_equivalent_method (method, klass);
if (!method)
return MONO_HANDLE_CAST (MonoReflectionMethod, NULL_HANDLE);
}
} else if (type)
klass = mono_class_from_mono_type_internal (type);
else
klass = method->klass;
return mono_method_get_object_handle (method, klass, error);
}
MonoReflectionMethodBodyHandle
ves_icall_System_Reflection_RuntimeMethodInfo_GetMethodBodyInternal (MonoMethod *method, MonoError *error)
{
return mono_method_body_get_object_handle (method, error);
}
MonoReflectionAssemblyHandle
ves_icall_System_Reflection_Assembly_GetExecutingAssembly (MonoStackCrawlMark *stack_mark, MonoError *error)
{
MonoAssembly *assembly;
assembly = mono_runtime_get_caller_from_stack_mark (stack_mark);
g_assert (assembly);
return mono_assembly_get_object_handle (assembly, error);
}
MonoReflectionAssemblyHandle
ves_icall_System_Reflection_Assembly_GetEntryAssembly (MonoError *error)
{
MonoAssembly *assembly = mono_runtime_get_entry_assembly ();
if (!assembly)
return MONO_HANDLE_CAST (MonoReflectionAssembly, NULL_HANDLE);
return mono_assembly_get_object_handle (assembly, error);
}
MonoReflectionAssemblyHandle
ves_icall_System_Reflection_Assembly_GetCallingAssembly (MonoError *error)
{
MonoMethod *m;
MonoMethod *dest;
dest = NULL;
mono_stack_walk_no_il (get_executing, &dest);
m = dest;
mono_stack_walk_no_il (get_caller_no_reflection, &dest);
if (!dest)
dest = m;
if (!m) {
mono_error_set_not_supported (error, "Stack walks are not supported on this platform.");
return MONO_HANDLE_CAST (MonoReflectionAssembly, NULL_HANDLE);
}
return mono_assembly_get_object_handle (m_class_get_image (dest->klass)->assembly, error);
}
void
ves_icall_System_RuntimeType_getFullName (MonoQCallTypeHandle type_handle, MonoObjectHandleOnStack res, MonoBoolean full_name,
MonoBoolean assembly_qualified, MonoError *error)
{
MonoType *type = type_handle.type;
MonoTypeNameFormat format;
gchar *name;
if (full_name)
format = assembly_qualified ?
MONO_TYPE_NAME_FORMAT_ASSEMBLY_QUALIFIED :
MONO_TYPE_NAME_FORMAT_FULL_NAME;
else
format = MONO_TYPE_NAME_FORMAT_REFLECTION;
name = mono_type_get_name_full (type, format);
if (!name)
return;
if (full_name && (type->type == MONO_TYPE_VAR || type->type == MONO_TYPE_MVAR)) {
g_free (name);
return;
}
HANDLE_ON_STACK_SET (res, mono_string_new_checked (name, error));
g_free (name);
}
MonoAssemblyName *
ves_icall_System_Reflection_AssemblyName_GetNativeName (MonoAssembly *mass)
{
return &mass->aname;
}
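/*
 * A TYPEDEF row is visible outside its module only if the type and every
 * enclosing type are public; walk outwards through the nesting chain until
 * a top-level type is reached.
 */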
static gboolean
mono_module_type_is_visible (MonoTableInfo *tdef, MonoImage *image, int type)
{
guint32 attrs, visibility;
do {
attrs = mono_metadata_decode_row_col (tdef, type - 1, MONO_TYPEDEF_FLAGS);
visibility = attrs & TYPE_ATTRIBUTE_VISIBILITY_MASK;
if (visibility != TYPE_ATTRIBUTE_PUBLIC && visibility != TYPE_ATTRIBUTE_NESTED_PUBLIC)
return FALSE;
} while ((type = mono_metadata_token_index (mono_metadata_nested_in_typedef (image, type))));
return TRUE;
}
static void
image_get_type (MonoImage *image, MonoTableInfo *tdef, int table_idx, int count, MonoArrayHandle res, MonoArrayHandle exceptions, MonoBoolean exportedOnly, MonoError *error)
{
HANDLE_FUNCTION_ENTER ();
ERROR_DECL (klass_error);
MonoClass *klass = mono_class_get_checked (image, table_idx | MONO_TOKEN_TYPE_DEF, klass_error);
if (klass) {
MonoReflectionTypeHandle rt = mono_type_get_object_handle (m_class_get_byval_arg (klass), error);
return_if_nok (error);
MONO_HANDLE_ARRAY_SETREF (res, count, rt);
} else {
MonoExceptionHandle ex = mono_error_convert_to_exception_handle (klass_error);
MONO_HANDLE_ARRAY_SETREF (exceptions, count, ex);
}
HANDLE_FUNCTION_RETURN ();
}
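/*
 * Builds a RuntimeType[] for the TYPEDEF rows of IMAGE (skipping row 1, the
 * special <Module> type) plus a parallel Exception[] whose i-th entry is
 * set exactly when loading the i-th type failed; callers surface non-empty
 * exception arrays as a ReflectionTypeLoadException.
 */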
static MonoArrayHandle
mono_module_get_types (MonoImage *image, MonoArrayHandleOut exceptions, MonoBoolean exportedOnly, MonoError *error)
{
/* FIXME: metadata-update */
MonoTableInfo *tdef = &image->tables [MONO_TABLE_TYPEDEF];
int rows = table_info_get_rows (tdef);
int i, count;
/* we start the count from 1 because we skip the special type <Module> */
if (exportedOnly) {
count = 0;
for (i = 1; i < rows; ++i) {
if (mono_module_type_is_visible (tdef, image, i + 1))
count++;
}
} else {
count = rows - 1;
}
MonoArrayHandle res = mono_array_new_handle (mono_defaults.runtimetype_class, count, error);
return_val_if_nok (error, NULL_HANDLE_ARRAY);
MONO_HANDLE_ASSIGN (exceptions, mono_array_new_handle (mono_defaults.exception_class, count, error));
return_val_if_nok (error, NULL_HANDLE_ARRAY);
count = 0;
for (i = 1; i < rows; ++i) {
if (!exportedOnly || mono_module_type_is_visible (tdef, image, i+1)) {
image_get_type (image, tdef, i + 1, count, res, exceptions, exportedOnly, error);
return_val_if_nok (error, NULL_HANDLE_ARRAY);
count++;
}
}
return res;
}
static void
append_module_types (MonoArrayHandleOut res, MonoArrayHandleOut exceptions, MonoImage *image, MonoBoolean exportedOnly, MonoError *error)
{
HANDLE_FUNCTION_ENTER ();
MonoArrayHandle ex2 = MONO_HANDLE_NEW (MonoArray, NULL);
MonoArrayHandle res2 = mono_module_get_types (image, ex2, exportedOnly, error);
goto_if_nok (error, leave);
/* Append the new types to the end of the array */
if (mono_array_handle_length (res2) > 0) {
guint32 len1, len2;
len1 = mono_array_handle_length (res);
len2 = mono_array_handle_length (res2);
MonoArrayHandle res3 = mono_array_new_handle (mono_defaults.runtimetype_class, len1 + len2, error);
goto_if_nok (error, leave);
mono_array_handle_memcpy_refs (res3, 0, res, 0, len1);
mono_array_handle_memcpy_refs (res3, len1, res2, 0, len2);
MONO_HANDLE_ASSIGN (res, res3);
MonoArrayHandle ex3 = mono_array_new_handle (mono_defaults.exception_class, len1 + len2, error);
goto_if_nok (error, leave);
mono_array_handle_memcpy_refs (ex3, 0, exceptions, 0, len1);
mono_array_handle_memcpy_refs (ex3, len1, ex2, 0, len2);
MONO_HANDLE_ASSIGN (exceptions, ex3);
}
leave:
HANDLE_FUNCTION_RETURN ();
}
static void
set_class_failure_in_array (MonoArrayHandle exl, int i, MonoClass *klass)
{
HANDLE_FUNCTION_ENTER ();
ERROR_DECL (unboxed_error);
mono_error_set_for_class_failure (unboxed_error, klass);
MonoExceptionHandle exc = MONO_HANDLE_NEW (MonoException, mono_error_convert_to_exception (unboxed_error));
MONO_HANDLE_ARRAY_SETREF (exl, i, exc);
HANDLE_FUNCTION_RETURN ();
}
void
ves_icall_System_Reflection_RuntimeAssembly_GetExportedTypes (MonoQCallAssemblyHandle assembly_handle, MonoObjectHandleOnStack res_h,
MonoError *error)
{
MonoArrayHandle exceptions = MONO_HANDLE_NEW(MonoArray, NULL);
MonoAssembly *assembly = assembly_handle.assembly;
int i;
g_assert (!assembly_is_dynamic (assembly));
MonoImage *image = assembly->image;
MonoTableInfo *table = &image->tables [MONO_TABLE_FILE];
MonoArrayHandle res = mono_module_get_types (image, exceptions, TRUE, error);
return_if_nok (error);
/* Append data from all modules in the assembly */
int rows = table_info_get_rows (table);
for (i = 0; i < rows; ++i) {
if (!(mono_metadata_decode_row_col (table, i, MONO_FILE_FLAGS) & FILE_CONTAINS_NO_METADATA)) {
MonoImage *loaded_image = mono_assembly_load_module_checked (image->assembly, i + 1, error);
return_if_nok (error);
if (loaded_image) {
append_module_types (res, exceptions, loaded_image, TRUE, error);
return_if_nok (error);
}
}
}
/* the ReflectionTypeLoadException must have all the types (Types property),
 * with NULL replacing the types which threw an exception. The
 * LoaderExceptions property must contain an exception for every NULL entry.
 */
int len = mono_array_handle_length (res);
int ex_count = 0;
GList *list = NULL;
MonoReflectionTypeHandle t = MONO_HANDLE_NEW (MonoReflectionType, NULL);
for (i = 0; i < len; i++) {
MONO_HANDLE_ARRAY_GETREF (t, res, i);
if (!MONO_HANDLE_IS_NULL (t)) {
MonoClass *klass = mono_type_get_class_internal (MONO_HANDLE_GETVAL (t, type));
if ((klass != NULL) && mono_class_has_failure (klass)) {
/* keep the class in the list */
list = g_list_append (list, klass);
/* and replace Type with NULL */
MONO_HANDLE_ARRAY_SETREF (res, i, NULL_HANDLE);
}
} else {
ex_count ++;
}
}
if (list || ex_count) {
GList *tmp = NULL;
int j, length = g_list_length (list) + ex_count;
MonoArrayHandle exl = mono_array_new_handle (mono_defaults.exception_class, length, error);
if (!is_ok (error)) {
g_list_free (list);
return;
}
/* Types for which mono_class_get_checked () succeeded */
MonoExceptionHandle exc = MONO_HANDLE_NEW (MonoException, NULL);
for (i = 0, tmp = list; tmp; i++, tmp = tmp->next) {
set_class_failure_in_array (exl, i, (MonoClass*)tmp->data);
}
/* Types for which mono_class_get_checked () failed */
for (j = 0; j < mono_array_handle_length (exceptions); ++j) {
MONO_HANDLE_ARRAY_GETREF (exc, exceptions, j);
if (!MONO_HANDLE_IS_NULL (exc)) {
g_assert (i < length);
MONO_HANDLE_ARRAY_SETREF (exl, i, exc);
i ++;
}
}
g_list_free (list);
list = NULL;
MONO_HANDLE_ASSIGN (exc, mono_get_exception_reflection_type_load_checked (res, exl, error));
return_if_nok (error);
mono_error_set_exception_handle (error, exc);
return;
}
HANDLE_ON_STACK_SET (res_h, MONO_HANDLE_RAW (res));
}
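/*
 * Resolves one EXPORTEDTYPE row flagged TYPE_ATTRIBUTE_FORWARDER in the
 * assembly it forwards to; a failure is recorded as a NULL type plus an
 * exception at the same index, mirroring GetExportedTypes above.
 */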
static void
get_top_level_forwarded_type (MonoImage *image, MonoTableInfo *table, int i, MonoArrayHandle types, MonoArrayHandle exceptions, int *aindex, int *exception_count)
{
ERROR_DECL (local_error);
guint32 cols [MONO_EXP_TYPE_SIZE];
MonoClass *klass;
MonoReflectionTypeHandle rt;
mono_metadata_decode_row (table, i, cols, MONO_EXP_TYPE_SIZE);
if (!(cols [MONO_EXP_TYPE_FLAGS] & TYPE_ATTRIBUTE_FORWARDER))
return;
guint32 impl = cols [MONO_EXP_TYPE_IMPLEMENTATION];
const char *name = mono_metadata_string_heap (image, cols [MONO_EXP_TYPE_NAME]);
const char *nspace = mono_metadata_string_heap (image, cols [MONO_EXP_TYPE_NAMESPACE]);
g_assert ((impl & MONO_IMPLEMENTATION_MASK) == MONO_IMPLEMENTATION_ASSEMBLYREF);
guint32 assembly_idx = impl >> MONO_IMPLEMENTATION_BITS;
mono_assembly_load_reference (image, assembly_idx - 1);
g_assert (image->references [assembly_idx - 1]);
HANDLE_FUNCTION_ENTER ();
if (image->references [assembly_idx - 1] == REFERENCE_MISSING) {
MonoExceptionHandle ex = MONO_HANDLE_NEW (MonoException, mono_get_exception_bad_image_format ("Invalid image"));
MONO_HANDLE_ARRAY_SETREF (types, *aindex, NULL_HANDLE);
MONO_HANDLE_ARRAY_SETREF (exceptions, *aindex, ex);
(*exception_count)++; (*aindex)++;
goto exit;
}
klass = mono_class_from_name_checked (image->references [assembly_idx - 1]->image, nspace, name, local_error);
if (!is_ok (local_error)) {
MonoExceptionHandle ex = mono_error_convert_to_exception_handle (local_error);
MONO_HANDLE_ARRAY_SETREF (types, *aindex, NULL_HANDLE);
MONO_HANDLE_ARRAY_SETREF (exceptions, *aindex, ex);
mono_error_cleanup (local_error);
(*exception_count)++; (*aindex)++;
goto exit;
}
rt = mono_type_get_object_handle (m_class_get_byval_arg (klass), local_error);
if (!is_ok (local_error)) {
MonoExceptionHandle ex = mono_error_convert_to_exception_handle (local_error);
MONO_HANDLE_ARRAY_SETREF (types, *aindex, NULL_HANDLE);
MONO_HANDLE_ARRAY_SETREF (exceptions, *aindex, ex);
mono_error_cleanup (local_error);
(*exception_count)++; (*aindex)++;
goto exit;
}
MONO_HANDLE_ARRAY_SETREF (types, *aindex, rt);
MONO_HANDLE_ARRAY_SETREF (exceptions, *aindex, NULL_HANDLE);
(*aindex)++;
exit:
HANDLE_FUNCTION_RETURN ();
}
void
ves_icall_System_Reflection_RuntimeAssembly_GetTopLevelForwardedTypes (MonoQCallAssemblyHandle assembly_h, MonoObjectHandleOnStack res,
MonoError *error)
{
MonoAssembly *assembly = assembly_h.assembly;
MonoImage *image = assembly->image;
int count = 0;
g_assert (!assembly_is_dynamic (assembly));
MonoTableInfo *table = &image->tables [MONO_TABLE_EXPORTEDTYPE];
int rows = table_info_get_rows (table);
for (int i = 0; i < rows; ++i) {
if (mono_metadata_decode_row_col (table, i, MONO_EXP_TYPE_FLAGS) & TYPE_ATTRIBUTE_FORWARDER)
count ++;
}
MonoArrayHandle types = mono_array_new_handle (mono_defaults.runtimetype_class, count, error);
return_if_nok (error);
MonoArrayHandle exceptions = mono_array_new_handle (mono_defaults.exception_class, count, error);
return_if_nok (error);
int aindex = 0;
int exception_count = 0;
for (int i = 0; i < rows; ++i)
get_top_level_forwarded_type (image, table, i, types, exceptions, &aindex, &exception_count);
if (exception_count > 0) {
MonoExceptionHandle exc = MONO_HANDLE_NEW (MonoException, NULL);
MONO_HANDLE_ASSIGN (exc, mono_get_exception_reflection_type_load_checked (types, exceptions, error));
return_if_nok (error);
mono_error_set_exception_handle (error, exc);
return;
}
HANDLE_ON_STACK_SET (res, MONO_HANDLE_RAW (types));
}
void
ves_icall_Mono_RuntimeMarshal_FreeAssemblyName (MonoAssemblyName *aname, MonoBoolean free_struct)
{
mono_assembly_name_free_internal (aname);
if (free_struct)
g_free (aname);
}
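/*
 * Applies a hot-reload (Edit and Continue) delta - metadata, IL and
 * optionally PDB bytes - to the base image of ASSM. Rejected while a
 * debugger is attached, except on WASM hosts where that check is skipped.
 */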
void
ves_icall_AssemblyExtensions_ApplyUpdate (MonoAssembly *assm,
gconstpointer dmeta_bytes, int32_t dmeta_len,
gconstpointer dil_bytes, int32_t dil_len,
gconstpointer dpdb_bytes, int32_t dpdb_len)
{
ERROR_DECL (error);
g_assert (assm);
g_assert (dmeta_len >= 0);
MonoImage *image_base = assm->image;
g_assert (image_base);
#ifndef HOST_WASM
if (mono_is_debugger_attached ()) {
mono_error_set_not_supported (error, "Cannot use System.Reflection.Metadata.MetadataUpdater.ApplyChanges while debugger is attached");
mono_error_set_pending_exception (error);
return;
}
#endif
mono_image_load_enc_delta (MONO_ENC_DELTA_API, image_base, dmeta_bytes, dmeta_len, dil_bytes, dil_len, dpdb_bytes, dpdb_len, error);
mono_error_set_pending_exception (error);
}
gint32 ves_icall_AssemblyExtensions_ApplyUpdateEnabled (gint32 just_component_check)
{
// if just_component_check is true, we only care whether the hot_reload component is enabled,
// not whether the environment is appropriately setup to apply updates.
return mono_metadata_update_available () && (just_component_check || mono_metadata_update_enabled (NULL));
}
MonoReflectionTypeHandle
ves_icall_System_Reflection_RuntimeModule_GetGlobalType (MonoImage *image, MonoError *error)
{
MonoClass *klass;
g_assert (image);
MonoReflectionTypeHandle ret = MONO_HANDLE_CAST (MonoReflectionType, NULL_HANDLE);
if (image_is_dynamic (image) && ((MonoDynamicImage*)image)->initial_image)
/* These images do not have a global type */
goto leave;
klass = mono_class_get_checked (image, 1 | MONO_TOKEN_TYPE_DEF, error);
goto_if_nok (error, leave);
ret = mono_type_get_object_handle (m_class_get_byval_arg (klass), error);
leave:
return ret;
}
void
ves_icall_System_Reflection_RuntimeModule_GetGuidInternal (MonoImage *image, MonoArrayHandle guid_h, MonoError *error)
{
g_assert (mono_array_handle_length (guid_h) == 16);
if (!image->metadata_only) {
g_assert (image->heap_guid.data);
g_assert (image->heap_guid.size >= 16);
MONO_ENTER_NO_SAFEPOINTS;
guint8 *data = (guint8*) mono_array_addr_with_size_internal (MONO_HANDLE_RAW (guid_h), 1, 0);
memcpy (data, (guint8*)image->heap_guid.data, 16);
MONO_EXIT_NO_SAFEPOINTS;
} else {
MONO_ENTER_NO_SAFEPOINTS;
guint8 *data = (guint8*) mono_array_addr_with_size_internal (MONO_HANDLE_RAW (guid_h), 1, 0);
memset (data, 0, 16);
MONO_EXIT_NO_SAFEPOINTS;
}
}
void
ves_icall_System_Reflection_RuntimeModule_GetPEKind (MonoImage *image, gint32 *pe_kind, gint32 *machine, MonoError *error)
{
if (image_is_dynamic (image)) {
MonoDynamicImage *dyn = (MonoDynamicImage*)image;
*pe_kind = dyn->pe_kind;
*machine = dyn->machine;
}
else {
*pe_kind = (image->image_info->cli_cli_header.ch_flags & 0x3);
*machine = image->image_info->cli_header.coff.coff_machine;
}
}
gint32
ves_icall_System_Reflection_RuntimeModule_GetMDStreamVersion (MonoImage *image, MonoError *error)
{
return (image->md_version_major << 16) | (image->md_version_minor);
}
MonoArrayHandle
ves_icall_System_Reflection_RuntimeModule_InternalGetTypes (MonoImage *image, MonoError *error)
{
if (!image) {
MonoArrayHandle arr = mono_array_new_handle (mono_defaults.runtimetype_class, 0, error);
return arr;
} else {
MonoArrayHandle exceptions = MONO_HANDLE_NEW (MonoArray, NULL);
MonoArrayHandle res = mono_module_get_types (image, exceptions, FALSE, error);
return_val_if_nok (error, MONO_HANDLE_CAST(MonoArray, NULL_HANDLE));
int n = mono_array_handle_length (exceptions);
MonoExceptionHandle ex = MONO_HANDLE_NEW (MonoException, NULL);
for (int i = 0; i < n; ++i) {
MONO_HANDLE_ARRAY_GETREF(ex, exceptions, i);
if (!MONO_HANDLE_IS_NULL (ex)) {
mono_error_set_exception_handle (error, ex);
return MONO_HANDLE_CAST(MonoArray, NULL_HANDLE);
}
}
return res;
}
}
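/*
 * Decides whether a MEMBERREF token refers to a method or a field by
 * inspecting the first byte of its signature blob: 0x6 is the ECMA-335
 * FIELD calling-convention marker, anything else is a method signature.
 * Dynamic images are probed through the dynamic token tables instead.
 */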
static gboolean
mono_memberref_is_method (MonoImage *image, guint32 token)
{
if (!image_is_dynamic (image)) {
int idx = mono_metadata_token_index (token);
if (idx <= 0 || mono_metadata_table_bounds_check (image, MONO_TABLE_MEMBERREF, idx)) {
return FALSE;
}
guint32 cols [MONO_MEMBERREF_SIZE];
const MonoTableInfo *table = &image->tables [MONO_TABLE_MEMBERREF];
mono_metadata_decode_row (table, idx - 1, cols, MONO_MEMBERREF_SIZE);
const char *sig = mono_metadata_blob_heap (image, cols [MONO_MEMBERREF_SIGNATURE]);
mono_metadata_decode_blob_size (sig, &sig);
return (*sig != 0x6);
} else {
ERROR_DECL (error);
MonoClass *handle_class;
if (!mono_lookup_dynamic_token_class (image, token, FALSE, &handle_class, NULL, error)) {
mono_error_cleanup (error); /* just probing, ignore error */
return FALSE;
}
return mono_defaults.methodhandle_class == handle_class;
}
}
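/*
 * Builds a temporary MonoGenericInst on the stack (g_alloca) from an array
 * of System.Type handles and returns the interned canonical instance, so
 * the alloca'd copy never escapes this function.
 */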
static MonoGenericInst *
get_generic_inst_from_array_handle (MonoArrayHandle type_args)
{
int type_argc = mono_array_handle_length (type_args);
int size = MONO_SIZEOF_GENERIC_INST + type_argc * sizeof (MonoType *);
MonoGenericInst *ginst = (MonoGenericInst *)g_alloca (size);
memset (ginst, 0, MONO_SIZEOF_GENERIC_INST);
ginst->type_argc = type_argc;
for (int i = 0; i < type_argc; i++) {
MONO_HANDLE_ARRAY_GETVAL (ginst->type_argv[i], type_args, MonoType*, i);
}
ginst->is_open = FALSE;
for (int i = 0; i < type_argc; i++) {
if (mono_class_is_open_constructed_type (ginst->type_argv[i])) {
ginst->is_open = TRUE;
break;
}
}
return mono_metadata_get_canonical_generic_inst (ginst);
}
static void
init_generic_context_from_args_handles (MonoGenericContext *context, MonoArrayHandle type_args, MonoArrayHandle method_args)
{
if (!MONO_HANDLE_IS_NULL (type_args)) {
context->class_inst = get_generic_inst_from_array_handle (type_args);
} else {
context->class_inst = NULL;
}
if (!MONO_HANDLE_IS_NULL (method_args)) {
context->method_inst = get_generic_inst_from_array_handle (method_args);
} else {
context->method_inst = NULL;
}
}
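/*
 * The module_resolve_*_token helpers back Module.ResolveType/Method/Field:
 * each validates the token's table and index, routes dynamic images
 * through the dynamic token lookup, and inflates the result with any
 * generic arguments supplied by the caller. *resolve_error explains a NULL
 * result.
 */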
static MonoType*
module_resolve_type_token (MonoImage *image, guint32 token, MonoArrayHandle type_args, MonoArrayHandle method_args, MonoResolveTokenError *resolve_error, MonoError *error)
{
HANDLE_FUNCTION_ENTER ();
MonoType *result = NULL;
MonoClass *klass;
int table = mono_metadata_token_table (token);
int index = mono_metadata_token_index (token);
MonoGenericContext context;
*resolve_error = ResolveTokenError_Other;
/* Validate token */
if ((table != MONO_TABLE_TYPEDEF) && (table != MONO_TABLE_TYPEREF) &&
(table != MONO_TABLE_TYPESPEC)) {
*resolve_error = ResolveTokenError_BadTable;
goto leave;
}
if (image_is_dynamic (image)) {
if ((table == MONO_TABLE_TYPEDEF) || (table == MONO_TABLE_TYPEREF)) {
ERROR_DECL (inner_error);
klass = (MonoClass *)mono_lookup_dynamic_token_class (image, token, FALSE, NULL, NULL, inner_error);
mono_error_cleanup (inner_error);
result = klass ? m_class_get_byval_arg (klass) : NULL;
goto leave;
}
init_generic_context_from_args_handles (&context, type_args, method_args);
ERROR_DECL (inner_error);
klass = (MonoClass *)mono_lookup_dynamic_token_class (image, token, FALSE, NULL, &context, inner_error);
mono_error_cleanup (inner_error);
result = klass ? m_class_get_byval_arg (klass) : NULL;
goto leave;
}
if ((index <= 0) || mono_metadata_table_bounds_check (image, table, index)) {
*resolve_error = ResolveTokenError_OutOfRange;
goto leave;
}
init_generic_context_from_args_handles (&context, type_args, method_args);
klass = mono_class_get_checked (image, token, error);
if (klass)
klass = mono_class_inflate_generic_class_checked (klass, &context, error);
goto_if_nok (error, leave);
if (klass)
result = m_class_get_byval_arg (klass);
leave:
HANDLE_FUNCTION_RETURN_VAL (result);
}
MonoType*
ves_icall_System_Reflection_RuntimeModule_ResolveTypeToken (MonoImage *image, guint32 token, MonoArrayHandle type_args, MonoArrayHandle method_args, MonoResolveTokenError *resolve_error, MonoError *error)
{
return module_resolve_type_token (image, token, type_args, method_args, resolve_error, error);
}
static MonoMethod*
module_resolve_method_token (MonoImage *image, guint32 token, MonoArrayHandle type_args, MonoArrayHandle method_args, MonoResolveTokenError *resolve_error, MonoError *error)
{
HANDLE_FUNCTION_ENTER ();
MonoMethod *method = NULL;
int table = mono_metadata_token_table (token);
int index = mono_metadata_token_index (token);
MonoGenericContext context;
*resolve_error = ResolveTokenError_Other;
/* Validate token */
if ((table != MONO_TABLE_METHOD) && (table != MONO_TABLE_METHODSPEC) &&
(table != MONO_TABLE_MEMBERREF)) {
*resolve_error = ResolveTokenError_BadTable;
goto leave;
}
if (image_is_dynamic (image)) {
if (table == MONO_TABLE_METHOD) {
ERROR_DECL (inner_error);
method = (MonoMethod *)mono_lookup_dynamic_token_class (image, token, FALSE, NULL, NULL, inner_error);
mono_error_cleanup (inner_error);
goto leave;
}
if ((table == MONO_TABLE_MEMBERREF) && !(mono_memberref_is_method (image, token))) {
*resolve_error = ResolveTokenError_BadTable;
goto leave;
}
init_generic_context_from_args_handles (&context, type_args, method_args);
ERROR_DECL (inner_error);
method = (MonoMethod *)mono_lookup_dynamic_token_class (image, token, FALSE, NULL, &context, inner_error);
mono_error_cleanup (inner_error);
goto leave;
}
if ((index <= 0) || mono_metadata_table_bounds_check (image, table, index)) {
*resolve_error = ResolveTokenError_OutOfRange;
goto leave;
}
if ((table == MONO_TABLE_MEMBERREF) && (!mono_memberref_is_method (image, token))) {
*resolve_error = ResolveTokenError_BadTable;
goto leave;
}
init_generic_context_from_args_handles (&context, type_args, method_args);
method = mono_get_method_checked (image, token, NULL, &context, error);
leave:
HANDLE_FUNCTION_RETURN_VAL (method);
}
MonoMethod*
ves_icall_System_Reflection_RuntimeModule_ResolveMethodToken (MonoImage *image, guint32 token, MonoArrayHandle type_args, MonoArrayHandle method_args, MonoResolveTokenError *resolve_error, MonoError *error)
{
return module_resolve_method_token (image, token, type_args, method_args, resolve_error, error);
}
MonoStringHandle
ves_icall_System_Reflection_RuntimeModule_ResolveStringToken (MonoImage *image, guint32 token, MonoResolveTokenError *resolve_error, MonoError *error)
{
int index = mono_metadata_token_index (token);
*resolve_error = ResolveTokenError_Other;
/* Validate token */
if (mono_metadata_token_code (token) != MONO_TOKEN_STRING) {
*resolve_error = ResolveTokenError_BadTable;
return NULL_HANDLE_STRING;
}
if (image_is_dynamic (image)) {
ERROR_DECL (ignore_inner_error);
// FIXME ignoring error
// FIXME Push MONO_HANDLE_NEW to lower layers.
MonoStringHandle result = MONO_HANDLE_NEW (MonoString, (MonoString*)mono_lookup_dynamic_token_class (image, token, FALSE, NULL, NULL, ignore_inner_error));
mono_error_cleanup (ignore_inner_error);
return result;
}
if ((index <= 0) || (index >= image->heap_us.size)) {
*resolve_error = ResolveTokenError_OutOfRange;
return NULL_HANDLE_STRING;
}
/* FIXME: What to do if the index points into the middle of a string ? */
return mono_ldstr_handle (image, index, error);
}
static MonoClassField*
module_resolve_field_token (MonoImage *image, guint32 token, MonoArrayHandle type_args, MonoArrayHandle method_args, MonoResolveTokenError *resolve_error, MonoError *error)
{
HANDLE_FUNCTION_ENTER ();
MonoClass *klass;
int table = mono_metadata_token_table (token);
int index = mono_metadata_token_index (token);
MonoGenericContext context;
MonoClassField *field = NULL;
*resolve_error = ResolveTokenError_Other;
/* Validate token */
if ((table != MONO_TABLE_FIELD) && (table != MONO_TABLE_MEMBERREF)) {
*resolve_error = ResolveTokenError_BadTable;
goto leave;
}
if (image_is_dynamic (image)) {
if (table == MONO_TABLE_FIELD) {
ERROR_DECL (inner_error);
field = (MonoClassField *)mono_lookup_dynamic_token_class (image, token, FALSE, NULL, NULL, inner_error);
mono_error_cleanup (inner_error);
goto leave;
}
if (mono_memberref_is_method (image, token)) {
*resolve_error = ResolveTokenError_BadTable;
goto leave;
}
init_generic_context_from_args_handles (&context, type_args, method_args);
ERROR_DECL (inner_error);
field = (MonoClassField *)mono_lookup_dynamic_token_class (image, token, FALSE, NULL, &context, inner_error);
mono_error_cleanup (inner_error);
goto leave;
}
if ((index <= 0) || mono_metadata_table_bounds_check (image, table, index)) {
*resolve_error = ResolveTokenError_OutOfRange;
goto leave;
}
if ((table == MONO_TABLE_MEMBERREF) && (mono_memberref_is_method (image, token))) {
*resolve_error = ResolveTokenError_BadTable;
goto leave;
}
init_generic_context_from_args_handles (&context, type_args, method_args);
field = mono_field_from_token_checked (image, token, &klass, &context, error);
leave:
HANDLE_FUNCTION_RETURN_VAL (field);
}
MonoClassField*
ves_icall_System_Reflection_RuntimeModule_ResolveFieldToken (MonoImage *image, guint32 token, MonoArrayHandle type_args, MonoArrayHandle method_args, MonoResolveTokenError *resolve_error, MonoError *error)
{
return module_resolve_field_token (image, token, type_args, method_args, resolve_error, error);
}
MonoObjectHandle
ves_icall_System_Reflection_RuntimeModule_ResolveMemberToken (MonoImage *image, guint32 token, MonoArrayHandle type_args, MonoArrayHandle method_args, MonoResolveTokenError *error, MonoError *merror)
{
int table = mono_metadata_token_table (token);
*error = ResolveTokenError_Other;
switch (table) {
case MONO_TABLE_TYPEDEF:
case MONO_TABLE_TYPEREF:
case MONO_TABLE_TYPESPEC: {
MonoType *t = module_resolve_type_token (image, token, type_args, method_args, error, merror);
if (t) {
return MONO_HANDLE_CAST (MonoObject, mono_type_get_object_handle (t, merror));
}
else
return NULL_HANDLE;
}
case MONO_TABLE_METHOD:
case MONO_TABLE_METHODSPEC: {
MonoMethod *m = module_resolve_method_token (image, token, type_args, method_args, error, merror);
if (m) {
return MONO_HANDLE_CAST (MonoObject, mono_method_get_object_handle (m, m->klass, merror));
} else
return NULL_HANDLE;
}
case MONO_TABLE_FIELD: {
MonoClassField *f = module_resolve_field_token (image, token, type_args, method_args, error, merror);
if (f) {
return MONO_HANDLE_CAST (MonoObject, mono_field_get_object_handle (m_field_get_parent (f), f, merror));
}
else
return NULL_HANDLE;
}
case MONO_TABLE_MEMBERREF:
if (mono_memberref_is_method (image, token)) {
MonoMethod *m = module_resolve_method_token (image, token, type_args, method_args, error, merror);
if (m) {
return MONO_HANDLE_CAST (MonoObject, mono_method_get_object_handle (m, m->klass, merror));
} else
return NULL_HANDLE;
}
else {
MonoClassField *f = module_resolve_field_token (image, token, type_args, method_args, error, merror);
if (f) {
return MONO_HANDLE_CAST (MonoObject, mono_field_get_object_handle (m_field_get_parent (f), f, merror));
}
else
return NULL_HANDLE;
}
break;
default:
*error = ResolveTokenError_BadTable;
}
return NULL_HANDLE;
}
MonoArrayHandle
ves_icall_System_Reflection_RuntimeModule_ResolveSignature (MonoImage *image, guint32 token, MonoResolveTokenError *resolve_error, MonoError *error)
{
int table = mono_metadata_token_table (token);
int idx = mono_metadata_token_index (token);
MonoTableInfo *tables = image->tables;
guint32 sig, len;
const char *ptr;
*resolve_error = ResolveTokenError_OutOfRange;
/* FIXME: Support other tables ? */
if (table != MONO_TABLE_STANDALONESIG)
return NULL_HANDLE_ARRAY;
if (image_is_dynamic (image))
return NULL_HANDLE_ARRAY;
if ((idx == 0) || mono_metadata_table_bounds_check (image, MONO_TABLE_STANDALONESIG, idx))
return NULL_HANDLE_ARRAY;
sig = mono_metadata_decode_row_col (&tables [MONO_TABLE_STANDALONESIG], idx - 1, 0);
ptr = mono_metadata_blob_heap (image, sig);
len = mono_metadata_decode_blob_size (ptr, &ptr);
MonoArrayHandle res = mono_array_new_handle (mono_defaults.byte_class, len, error);
return_val_if_nok (error, NULL_HANDLE_ARRAY);
// FIXME MONO_ENTER_NO_SAFEPOINTS instead of pin/gchandle.
MonoGCHandle h;
gpointer array_base = MONO_ARRAY_HANDLE_PIN (res, guint8, 0, &h);
memcpy (array_base, ptr, len);
mono_gchandle_free_internal (h);
return res;
}
static void
check_for_invalid_array_type (MonoType *type, MonoError *error)
{
gboolean allowed = TRUE;
char *name;
if (m_type_is_byref (type))
allowed = FALSE;
else if (type->type == MONO_TYPE_TYPEDBYREF)
allowed = FALSE;
MonoClass *klass = mono_class_from_mono_type_internal (type);
if (m_class_is_byreflike (klass))
allowed = FALSE;
if (allowed)
return;
name = mono_type_get_full_name (klass);
mono_error_set_type_load_name (error, name, g_strdup (""), "");
}
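/*
 * Deliberately a no-op: byref and pointer types may be constructed over any
 * element type here; the helper is presumably kept so the MakeByRefType and
 * MakePointerType paths below stay symmetric with the array check above.
 */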
static void
check_for_invalid_byref_or_pointer_type (MonoClass *klass, MonoError *error)
{
return;
}
void
ves_icall_RuntimeType_make_array_type (MonoQCallTypeHandle type_handle, int rank, MonoObjectHandleOnStack res, MonoError *error)
{
MonoType *type = type_handle.type;
check_for_invalid_array_type (type, error);
return_if_nok (error);
MonoClass *klass = mono_class_from_mono_type_internal (type);
MonoClass *aklass;
if (rank == 0) // single-dimension array
aklass = mono_class_create_array (klass, 1);
else
aklass = mono_class_create_bounded_array (klass, rank, TRUE);
if (mono_class_has_failure (aklass)) {
mono_error_set_for_class_failure (error, aklass);
return;
}
HANDLE_ON_STACK_SET (res, mono_type_get_object_checked (m_class_get_byval_arg (aklass), error));
}
void
ves_icall_RuntimeType_make_byref_type (MonoQCallTypeHandle type_handle, MonoObjectHandleOnStack res, MonoError *error)
{
MonoType *type = type_handle.type;
MonoClass *klass = mono_class_from_mono_type_internal (type);
mono_class_init_checked (klass, error);
return_if_nok (error);
check_for_invalid_byref_or_pointer_type (klass, error);
return_if_nok (error);
HANDLE_ON_STACK_SET (res, mono_type_get_object_checked (m_class_get_this_arg (klass), error));
}
void
ves_icall_RuntimeType_make_pointer_type (MonoQCallTypeHandle type_handle, MonoObjectHandleOnStack res, MonoError *error)
{
MonoType *type = type_handle.type;
MonoClass *klass = mono_class_from_mono_type_internal (type);
mono_class_init_checked (klass, error);
return_if_nok (error);
check_for_invalid_byref_or_pointer_type (klass, error);
return_if_nok (error);
MonoClass *pklass = mono_class_create_ptr (type);
HANDLE_ON_STACK_SET (res, mono_type_get_object_checked (m_class_get_byval_arg (pklass), error));
}
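/*
 * Backs Delegate.CreateDelegate (): checks that the requested type is a
 * MulticastDelegate subclass, rejects uninflated generic target methods,
 * devirtualizes METHOD against TARGET where necessary, and then runs the
 * ordinary delegate constructor path.
 */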
MonoObjectHandle
ves_icall_System_Delegate_CreateDelegate_internal (MonoQCallTypeHandle type_handle, MonoObjectHandle target,
MonoReflectionMethodHandle info, MonoBoolean throwOnBindFailure, MonoError *error)
{
MonoType *type = type_handle.type;
MonoClass *delegate_class = mono_class_from_mono_type_internal (type);
MonoMethod *method = MONO_HANDLE_GETVAL (info, method);
MonoMethodSignature *sig = mono_method_signature_internal (method);
mono_class_init_checked (delegate_class, error);
return_val_if_nok (error, NULL_HANDLE);
if (!(m_class_get_parent (delegate_class) == mono_defaults.multicastdelegate_class)) {
/* FIXME improve this exception message */
mono_error_set_execution_engine (error, "file %s: line %d (%s): assertion failed: (%s)", __FILE__, __LINE__,
__func__,
"delegate_class->parent == mono_defaults.multicastdelegate_class");
return NULL_HANDLE;
}
if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
if (!method->is_inflated) {
mono_error_set_argument (error, "method", " Cannot bind to the target method because its signature differs from that of the delegate type");
return NULL_HANDLE;
}
}
MonoObjectHandle delegate = mono_object_new_handle (delegate_class, error);
return_val_if_nok (error, NULL_HANDLE);
if (!method_is_dynamic (method) && (!MONO_HANDLE_IS_NULL (target) && method->flags & METHOD_ATTRIBUTE_VIRTUAL && method->klass != mono_handle_class (target))) {
method = mono_object_handle_get_virtual_method (target, method, error);
return_val_if_nok (error, NULL_HANDLE);
}
mono_delegate_ctor (delegate, target, NULL, method, error);
return_val_if_nok (error, NULL_HANDLE);
return delegate;
}
MonoMulticastDelegateHandle
ves_icall_System_Delegate_AllocDelegateLike_internal (MonoDelegateHandle delegate, MonoError *error)
{
MonoClass *klass = mono_handle_class (delegate);
g_assert (mono_class_has_parent (klass, mono_defaults.multicastdelegate_class));
MonoMulticastDelegateHandle ret = MONO_HANDLE_CAST (MonoMulticastDelegate, mono_object_new_handle (klass, error));
return_val_if_nok (error, MONO_HANDLE_CAST (MonoMulticastDelegate, NULL_HANDLE));
mono_get_runtime_callbacks ()->init_delegate (MONO_HANDLE_CAST (MonoDelegate, ret), NULL_HANDLE, NULL, NULL, error);
return ret;
}
MonoReflectionMethodHandle
ves_icall_System_Delegate_GetVirtualMethod_internal (MonoDelegateHandle delegate, MonoError *error)
{
MonoObjectHandle delegate_target = MONO_HANDLE_NEW_GET (MonoObject, delegate, target);
MonoMethod *m = mono_object_handle_get_virtual_method (delegate_target, MONO_HANDLE_GETVAL (delegate, method), error);
return_val_if_nok (error, MONO_HANDLE_CAST (MonoReflectionMethod, NULL_HANDLE));
return mono_method_get_object_handle (m, m->klass, error);
}
/* System.Buffer */
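/*
 * Byte length of an array for the System.Buffer icalls: the element count
 * (across all dimensions for multi-dimensional arrays) scaled by the
 * primitive element size, or -1 when the element type is not a primitive.
 */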
static gint32
mono_array_get_byte_length (MonoArrayHandle array)
{
int length;
MonoClass * const klass = mono_handle_class (array);
// This resembles mono_array_get_length, but adds the loop.
if (mono_handle_array_has_bounds (array)) {
length = 1;
const int klass_rank = m_class_get_rank (klass);
for (int i = 0; i < klass_rank; ++ i)
length *= MONO_HANDLE_GETVAL (array, bounds [i].length);
} else {
length = mono_array_handle_length (array);
}
switch (m_class_get_byval_arg (m_class_get_element_class (klass))->type) {
case MONO_TYPE_I1:
case MONO_TYPE_U1:
case MONO_TYPE_BOOLEAN:
return length;
case MONO_TYPE_I2:
case MONO_TYPE_U2:
case MONO_TYPE_CHAR:
return length << 1;
case MONO_TYPE_I4:
case MONO_TYPE_U4:
case MONO_TYPE_R4:
return length << 2;
case MONO_TYPE_I:
case MONO_TYPE_U:
return length * sizeof (gpointer);
case MONO_TYPE_I8:
case MONO_TYPE_U8:
case MONO_TYPE_R8:
return length << 3;
default:
return -1;
}
}
/* System.Environment */
MonoArrayHandle
ves_icall_System_Environment_GetCommandLineArgs (MonoError *error)
{
MonoArrayHandle result = mono_runtime_get_main_args_handle (error);
return result;
}
void
ves_icall_System_Environment_Exit (int result)
{
mono_environment_exitcode_set (result);
if (!mono_runtime_try_shutdown ())
mono_thread_exit ();
mono_runtime_quit_internal ();
/* we may need to do some cleanup here... */
exit (result);
}
void
ves_icall_System_Environment_FailFast (MonoStringHandle message, MonoExceptionHandle exception, MonoStringHandle errorSource, MonoError *error)
{
if (MONO_HANDLE_IS_NULL (message)) {
g_warning ("Process terminated.");
} else {
char *msg = mono_string_handle_to_utf8 (message, error);
g_warning ("Process terminated due to \"%s\"", msg);
g_free (msg);
}
if (!MONO_HANDLE_IS_NULL (exception)) {
mono_print_unhandled_exception_internal ((MonoObject *) MONO_HANDLE_RAW (exception));
}
// NOTE: While this does trigger WER on Windows it doesn't quite provide all the
// information in the error dump that CoreCLR would. On Windows 7+ we should call
// RaiseFailFastException directly instead of relying on the C runtime doing it
// for us and pass it as much information as possible. On Windows 8+ we can also
// use the __fastfail intrinsic.
abort ();
}
gint32
ves_icall_System_Environment_get_TickCount (void)
{
/* this will overflow after ~24 days */
return (gint32) (mono_msec_boottime () & 0xffffffff);
}
gint64
ves_icall_System_Environment_get_TickCount64 (void)
{
return mono_msec_boottime ();
}
gpointer
ves_icall_RuntimeMethodHandle_GetFunctionPointer (MonoMethod *method, MonoError *error)
{
return mono_method_get_unmanaged_wrapper_ftnptr_internal (method, FALSE, error);
}
void*
mono_method_get_unmanaged_wrapper_ftnptr_internal (MonoMethod *method, gboolean only_unmanaged_callers_only, MonoError *error)
{
/* WISH: we should do this in managed */
if (G_UNLIKELY (mono_method_has_unmanaged_callers_only_attribute (method))) {
method = mono_marshal_get_managed_wrapper (method, NULL, (MonoGCHandle)0, error);
return_val_if_nok (error, NULL);
} else {
g_assert (!only_unmanaged_callers_only);
}
return mono_get_runtime_callbacks ()->get_ftnptr (method, error);
}
MonoBoolean
ves_icall_System_Diagnostics_Debugger_IsAttached_internal (void)
{
return mono_is_debugger_attached ();
}
MonoBoolean
ves_icall_System_Diagnostics_Debugger_IsLogging (void)
{
return mono_get_runtime_callbacks ()->debug_log_is_enabled
&& mono_get_runtime_callbacks ()->debug_log_is_enabled ();
}
void
ves_icall_System_Diagnostics_Debugger_Log (int level, MonoString *volatile* category, MonoString *volatile* message)
{
if (mono_get_runtime_callbacks ()->debug_log)
mono_get_runtime_callbacks ()->debug_log (level, *category, *message);
}
/* Only used for value types */
MonoObjectHandle
ves_icall_System_RuntimeType_CreateInstanceInternal (MonoQCallTypeHandle type_handle, MonoError *error)
{
MonoType *type = type_handle.type;
MonoClass *klass = mono_class_from_mono_type_internal (type);
(void)klass;
mono_class_init_checked (klass, error);
return_val_if_nok (error, NULL_HANDLE);
if (mono_class_is_nullable (klass))
/* No arguments -> null */
return NULL_HANDLE;
return mono_object_new_handle (klass, error);
}
MonoReflectionMethodHandle
ves_icall_RuntimeMethodInfo_get_base_method (MonoReflectionMethodHandle m, MonoBoolean definition, MonoError *error)
{
MonoMethod *method = MONO_HANDLE_GETVAL (m, method);
MonoMethod *base = mono_method_get_base_method (method, definition, error);
return_val_if_nok (error, MONO_HANDLE_CAST (MonoReflectionMethod, NULL_HANDLE));
if (base == method) {
/* we want to short-circuit and return 'm' here. But we should
return the same method object that mono_method_get_object_handle,
below, would return. Since that call takes NULL for the reftype
argument, it will take base->klass as the reflected type for the
MonoMethod. So we need to check that m also has base->klass as
the reflected type. */
MonoReflectionTypeHandle orig_reftype = MONO_HANDLE_NEW_GET (MonoReflectionType, m, reftype);
MonoClass *orig_klass = mono_class_from_mono_type_internal (MONO_HANDLE_GETVAL (orig_reftype, type));
if (base->klass == orig_klass)
return m;
}
return mono_method_get_object_handle (base, NULL, error);
}
MonoStringHandle
ves_icall_RuntimeMethodInfo_get_name (MonoReflectionMethodHandle m, MonoError *error)
{
MonoMethod *method = MONO_HANDLE_GETVAL (m, method);
MonoStringHandle s = mono_string_new_handle (method->name, error);
return_val_if_nok (error, NULL_HANDLE_STRING);
MONO_HANDLE_SET (m, name, s);
return s;
}
void
ves_icall_System_ArgIterator_Setup (MonoArgIterator *iter, char* argsp, char* start)
{
iter->sig = *(MonoMethodSignature**)argsp;
g_assert (iter->sig->sentinelpos <= iter->sig->param_count);
g_assert (iter->sig->call_convention == MONO_CALL_VARARG);
iter->next_arg = 0;
/* FIXME: it's not documented what start is exactly... */
if (start) {
iter->args = start;
} else {
iter->args = argsp + sizeof (gpointer);
}
iter->num_args = iter->sig->param_count - iter->sig->sentinelpos;
/* g_print ("sig %p, param_count: %d, sent: %d\n", iter->sig, iter->sig->param_count, iter->sig->sentinelpos); */
}
void
ves_icall_System_ArgIterator_IntGetNextArg (MonoArgIterator *iter, MonoTypedRef *res)
{
guint32 i, arg_size;
gint32 align;
i = iter->sig->sentinelpos + iter->next_arg;
g_assert (i < iter->sig->param_count);
res->type = iter->sig->params [i];
res->klass = mono_class_from_mono_type_internal (res->type);
arg_size = mono_type_stack_size (res->type, &align);
#if defined(__arm__) || defined(__mips__)
iter->args = (guint8*)(((gsize)iter->args + (align) - 1) & ~(align - 1));
#endif
res->value = iter->args;
#if G_BYTE_ORDER != G_LITTLE_ENDIAN
if (arg_size <= sizeof (gpointer)) {
int dummy;
int padding = arg_size - mono_type_size (res->type, &dummy);
res->value = (guint8*)res->value + padding;
}
#endif
iter->args = (char*)iter->args + arg_size;
iter->next_arg++;
/* g_print ("returning arg %d, type 0x%02x of size %d at %p\n", i, res->type->type, arg_size, res->value); */
}
void
ves_icall_System_ArgIterator_IntGetNextArgWithType (MonoArgIterator *iter, MonoTypedRef *res, MonoType *type)
{
guint32 i, arg_size;
gint32 align;
i = iter->sig->sentinelpos + iter->next_arg;
g_assert (i < iter->sig->param_count);
while (i < iter->sig->param_count) {
if (!mono_metadata_type_equal (type, iter->sig->params [i])) {
/* skip a non-matching argument, advancing past its stack slot */
arg_size = mono_type_stack_size (iter->sig->params [i], &align);
iter->args = (char*)iter->args + arg_size;
i++;
iter->next_arg++;
continue;
}
res->type = iter->sig->params [i];
res->klass = mono_class_from_mono_type_internal (res->type);
/* FIXME: endianness issue... */
arg_size = mono_type_stack_size (res->type, &align);
#if defined(__arm__) || defined(__mips__)
iter->args = (guint8*)(((gsize)iter->args + (align) - 1) & ~(align - 1));
#endif
res->value = iter->args;
iter->args = (char*)iter->args + arg_size;
iter->next_arg++;
/* g_print ("returning arg %d, type 0x%02x of size %d at %p\n", i, res.type->type, arg_size, res.value); */
return;
}
/* g_print ("arg type 0x%02x not found\n", res.type->type); */
memset (res, 0, sizeof (MonoTypedRef));
}
MonoType*
ves_icall_System_ArgIterator_IntGetNextArgType (MonoArgIterator *iter)
{
gint i;
i = iter->sig->sentinelpos + iter->next_arg;
g_assert (i < iter->sig->param_count);
return iter->sig->params [i];
}
MonoObjectHandle
ves_icall_System_TypedReference_ToObject (MonoTypedRef* tref, MonoError *error)
{
return typed_reference_to_object (tref, error);
}
void
ves_icall_System_TypedReference_InternalMakeTypedReference (MonoTypedRef *res, MonoObjectHandle target, MonoArrayHandle fields, MonoReflectionTypeHandle last_field, MonoError *error)
{
MonoType *ftype = NULL;
int i;
memset (res, 0, sizeof (MonoTypedRef));
g_assert (mono_array_handle_length (fields) > 0);
(void)mono_handle_class (target);
int offset = 0;
for (i = 0; i < mono_array_handle_length (fields); ++i) {
MonoClassField *f;
MONO_HANDLE_ARRAY_GETVAL (f, fields, MonoClassField*, i);
g_assert (f);
if (i == 0)
offset = f->offset;
else
offset += f->offset - sizeof (MonoObject);
(void)mono_class_from_mono_type_internal (f->type);
ftype = f->type;
}
res->type = ftype;
res->klass = mono_class_from_mono_type_internal (ftype);
res->value = (guint8*)MONO_HANDLE_RAW (target) + offset;
}
void
ves_icall_System_Runtime_InteropServices_Marshal_Prelink (MonoReflectionMethodHandle method_h, MonoError *error)
{
MonoMethod *method = MONO_HANDLE_GETVAL (method_h, method);
if (!(method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL))
return;
mono_lookup_pinvoke_call_internal (method, error);
/* create the wrapper, too? */
}
int
ves_icall_Interop_Sys_DoubleToString(double value, char *format, char *buffer, int bufferLength)
{
#if defined(TARGET_ARM)
/* workaround for faulty vcmp.f64 implementation on some 32bit ARM CPUs */
guint64 bits = *(guint64 *) &value;
if (bits == 0x1) { /* 4.9406564584124654E-324 */
g_assert (!strcmp (format, "%.40e"));
return snprintf (buffer, bufferLength, "%s", "4.9406564584124654417656879286822137236506e-324");
} else if (bits == 0x4) { /* 2E-323 */
g_assert (!strcmp (format, "%.40e"));
return snprintf (buffer, bufferLength, "%s", "1.9762625833649861767062751714728854894602e-323");
}
#endif
return snprintf(buffer, bufferLength, format, value);
}
static gboolean
add_modifier_to_array (MonoType *type, MonoArrayHandle dest, int dest_idx, MonoError *error)
{
HANDLE_FUNCTION_ENTER ();
MonoClass *klass = mono_class_from_mono_type_internal (type);
MonoReflectionTypeHandle rt;
rt = mono_type_get_object_handle (m_class_get_byval_arg (klass), error);
goto_if_nok (error, leave);
MONO_HANDLE_ARRAY_SETREF (dest, dest_idx, rt);
leave:
HANDLE_FUNCTION_RETURN_VAL (is_ok (error));
}
/*
* We return NULL for no modifiers so the corlib code can return Type.EmptyTypes
* and avoid useless allocations.
*/
static MonoArrayHandle
type_array_from_modifiers (MonoType *type, int optional, MonoError *error)
{
int i, count = 0;
int cmod_count = mono_type_custom_modifier_count (type);
if (cmod_count == 0)
goto fail;
for (i = 0; i < cmod_count; ++i) {
gboolean required;
(void) mono_type_get_custom_modifier (type, i, &required, error);
goto_if_nok (error, fail);
if ((optional && !required) || (!optional && required))
count++;
}
if (!count)
goto fail;
MonoArrayHandle res;
res = mono_array_new_handle (mono_defaults.systemtype_class, count, error);
goto_if_nok (error, fail);
count = 0;
for (i = 0; i < cmod_count; ++i) {
gboolean required;
MonoType *cmod_type = mono_type_get_custom_modifier (type, i, &required, error);
goto_if_nok (error, fail);
if ((optional && !required) || (!optional && required)) {
if (!add_modifier_to_array (cmod_type, res, count, error))
goto fail;
count++;
}
}
return res;
fail:
return MONO_HANDLE_NEW (MonoArray, NULL);
}
MonoArrayHandle
ves_icall_RuntimeParameterInfo_GetTypeModifiers (MonoReflectionTypeHandle rt, MonoObjectHandle member, int pos, MonoBoolean optional, MonoError *error)
{
MonoType *type = MONO_HANDLE_GETVAL (rt, type);
MonoClass *member_class = mono_handle_class (member);
MonoMethod *method = NULL;
MonoMethodSignature *sig;
if (mono_class_is_reflection_method_or_constructor (member_class)) {
method = MONO_HANDLE_GETVAL (MONO_HANDLE_CAST (MonoReflectionMethod, member), method);
} else if (m_class_get_image (member_class) == mono_defaults.corlib && !strcmp ("RuntimePropertyInfo", m_class_get_name (member_class))) {
MonoProperty *prop = MONO_HANDLE_GETVAL (MONO_HANDLE_CAST (MonoReflectionProperty, member), property);
if (!(method = prop->get))
method = prop->set;
g_assert (method);
} else if (strcmp (m_class_get_name (member_class), "DynamicMethod") == 0 && strcmp (m_class_get_name_space (member_class), "System.Reflection.Emit") == 0) {
MonoArrayHandle params = MONO_HANDLE_NEW_GET (MonoArray, MONO_HANDLE_CAST (MonoReflectionDynamicMethod, member), parameters);
MonoReflectionTypeHandle t = MONO_HANDLE_NEW (MonoReflectionType, NULL);
MONO_HANDLE_ARRAY_GETREF (t, params, pos);
type = mono_reflection_type_handle_mono_type (t, error);
return type_array_from_modifiers (type, optional, error);
} else {
char *type_name = mono_type_get_full_name (member_class);
mono_error_set_not_supported (error, "Custom modifiers on a ParamInfo with member %s are not supported", type_name);
g_free (type_name);
return NULL_HANDLE_ARRAY;
}
sig = mono_method_signature_internal (method);
if (pos == -1)
type = sig->ret;
else
type = sig->params [pos];
return type_array_from_modifiers (type, optional, error);
}
static MonoType*
get_property_type (MonoProperty *prop)
{
MonoMethodSignature *sig;
if (prop->get) {
sig = mono_method_signature_internal (prop->get);
return sig->ret;
} else if (prop->set) {
sig = mono_method_signature_internal (prop->set);
return sig->params [sig->param_count - 1];
}
return NULL;
}
MonoArrayHandle
ves_icall_RuntimePropertyInfo_GetTypeModifiers (MonoReflectionPropertyHandle property, MonoBoolean optional, MonoError *error)
{
MonoProperty *prop = MONO_HANDLE_GETVAL (property, property);
MonoType *type = get_property_type (prop);
if (!type)
return NULL_HANDLE_ARRAY;
return type_array_from_modifiers (type, optional, error);
}
/*
* Construct a MonoType suited to decoding a constant blob object.
*
* @type is the target type which will be constructed
* @blob_type is the blob type, e.g. one that comes from the constant table
* @real_type is the expected constructed type.
*/
static void
mono_type_from_blob_type (MonoType *type, MonoTypeEnum blob_type, MonoType *real_type)
{
type->type = blob_type;
type->data.klass = NULL;
if (blob_type == MONO_TYPE_CLASS)
type->data.klass = mono_defaults.object_class;
else if (real_type->type == MONO_TYPE_VALUETYPE && m_class_is_enumtype (real_type->data.klass)) {
/* For enums, we need to use the base type */
type->type = MONO_TYPE_VALUETYPE;
type->data.klass = mono_class_from_mono_type_internal (real_type);
} else
type->data.klass = mono_class_from_mono_type_internal (real_type);
}
MonoObjectHandle
ves_icall_property_info_get_default_value (MonoReflectionPropertyHandle property_handle, MonoError* error)
{
MonoReflectionProperty* property = MONO_HANDLE_RAW (property_handle);
MonoType blob_type;
MonoProperty *prop = property->property;
MonoType *type = get_property_type (prop);
MonoTypeEnum def_type;
const char *def_value;
mono_class_init_internal (prop->parent);
if (!(prop->attrs & PROPERTY_ATTRIBUTE_HAS_DEFAULT)) {
mono_error_set_invalid_operation (error, NULL);
return NULL_HANDLE;
}
def_value = mono_class_get_property_default_value (prop, &def_type);
mono_type_from_blob_type (&blob_type, def_type, type);
return mono_get_object_from_blob (&blob_type, def_value, MONO_HANDLE_NEW (MonoString, NULL), error);
}
MonoBoolean
ves_icall_MonoCustomAttrs_IsDefinedInternal (MonoObjectHandle obj, MonoReflectionTypeHandle attr_type, MonoError *error)
{
MonoClass *attr_class = mono_class_from_mono_type_internal (MONO_HANDLE_GETVAL (attr_type, type));
mono_class_init_checked (attr_class, error);
return_val_if_nok (error, FALSE);
MonoCustomAttrInfo *cinfo = mono_reflection_get_custom_attrs_info_checked (obj, error);
return_val_if_nok (error, FALSE);
if (!cinfo)
return FALSE;
gboolean found = mono_custom_attrs_has_attr (cinfo, attr_class);
if (!cinfo->cached)
mono_custom_attrs_free (cinfo);
return found;
}
MonoArrayHandle
ves_icall_MonoCustomAttrs_GetCustomAttributesInternal (MonoObjectHandle obj, MonoReflectionTypeHandle attr_type, MonoBoolean pseudoattrs, MonoError *error)
{
MonoClass *attr_class;
if (MONO_HANDLE_IS_NULL (attr_type))
attr_class = NULL;
else
attr_class = mono_class_from_mono_type_internal (MONO_HANDLE_GETVAL (attr_type, type));
if (attr_class) {
mono_class_init_checked (attr_class, error);
return_val_if_nok (error, NULL_HANDLE_ARRAY);
}
return mono_reflection_get_custom_attrs_by_type_handle (obj, attr_class, error);
}
MonoArrayHandle
ves_icall_MonoCustomAttrs_GetCustomAttributesDataInternal (MonoObjectHandle obj, MonoError *error)
{
return mono_reflection_get_custom_attrs_data_checked (obj, error);
}
#ifndef DISABLE_COM
int
ves_icall_System_Runtime_InteropServices_Marshal_GetHRForException_WinRT(MonoExceptionHandle ex, MonoError *error)
{
mono_error_set_not_implemented (error, "System.Runtime.InteropServices.Marshal.GetHRForException_WinRT internal call is not implemented.");
return 0;
}
MonoObjectHandle
ves_icall_System_Runtime_InteropServices_Marshal_GetNativeActivationFactory(MonoObjectHandle type, MonoError *error)
{
mono_error_set_not_implemented (error, "System.Runtime.InteropServices.Marshal.GetNativeActivationFactory internal call is not implemented.");
return NULL_HANDLE;
}
void*
ves_icall_System_Runtime_InteropServices_Marshal_GetRawIUnknownForComObjectNoAddRef(MonoObjectHandle obj, MonoError *error)
{
mono_error_set_not_implemented (error, "System.Runtime.InteropServices.Marshal.GetRawIUnknownForComObjectNoAddRef internal call is not implemented.");
return NULL;
}
MonoObjectHandle
ves_icall_System_Runtime_InteropServices_WindowsRuntime_UnsafeNativeMethods_GetRestrictedErrorInfo(MonoError *error)
{
mono_error_set_not_implemented (error, "System.Runtime.InteropServices.WindowsRuntime.UnsafeNativeMethods.GetRestrictedErrorInfo internal call is not implemented.");
return NULL_HANDLE;
}
MonoBoolean
ves_icall_System_Runtime_InteropServices_WindowsRuntime_UnsafeNativeMethods_RoOriginateLanguageException (int ierr, MonoStringHandle message, void* languageException, MonoError *error)
{
mono_error_set_not_implemented (error, "System.Runtime.InteropServices.WindowsRuntime.UnsafeNativeMethods.RoOriginateLanguageException internal call is not implemented.");
return FALSE;
}
void
ves_icall_System_Runtime_InteropServices_WindowsRuntime_UnsafeNativeMethods_RoReportUnhandledError (MonoObjectHandle oerr, MonoError *error)
{
mono_error_set_not_implemented (error, "System.Runtime.InteropServices.WindowsRuntime.UnsafeNativeMethods.RoReportUnhandledError internal call is not implemented.");
}
int
ves_icall_System_Runtime_InteropServices_WindowsRuntime_UnsafeNativeMethods_WindowsCreateString(MonoStringHandle sourceString, int length, void** hstring, MonoError *error)
{
mono_error_set_not_implemented (error, "System.Runtime.InteropServices.WindowsRuntime.UnsafeNativeMethods.WindowsCreateString internal call is not implemented.");
return 0;
}
int
ves_icall_System_Runtime_InteropServices_WindowsRuntime_UnsafeNativeMethods_WindowsDeleteString(void* hstring, MonoError *error)
{
mono_error_set_not_implemented (error, "System.Runtime.InteropServices.WindowsRuntime.UnsafeNativeMethods.WindowsDeleteString internal call is not implemented.");
return 0;
}
mono_unichar2*
ves_icall_System_Runtime_InteropServices_WindowsRuntime_UnsafeNativeMethods_WindowsGetStringRawBuffer(void* hstring, unsigned* length, MonoError *error)
{
mono_error_set_not_implemented (error, "System.Runtime.InteropServices.WindowsRuntime.UnsafeNativeMethods.WindowsGetStringRawBuffer internal call is not implemented.");
return NULL;
}
#endif
static const MonoIcallTableCallbacks *icall_table;
static mono_mutex_t icall_mutex;
static GHashTable *icall_hash = NULL;
typedef struct _MonoIcallHashTableValue {
gconstpointer method;
guint32 flags;
} MonoIcallHashTableValue;
void
mono_install_icall_table_callbacks (const MonoIcallTableCallbacks *cb)
{
g_assert (cb->version == MONO_ICALL_TABLE_CALLBACKS_VERSION);
icall_table = cb;
}
void
mono_icall_init (void)
{
#ifndef DISABLE_ICALL_TABLES
mono_icall_table_init ();
#endif
icall_hash = g_hash_table_new_full (g_str_hash, g_str_equal, g_free, g_free);
mono_os_mutex_init (&icall_mutex);
}
static void
mono_icall_lock (void)
{
mono_locks_os_acquire (&icall_mutex, IcallLock);
}
static void
mono_icall_unlock (void)
{
mono_locks_os_release (&icall_mutex, IcallLock);
}
static void
add_internal_call_with_flags (const char *name, gconstpointer method, guint32 flags)
{
char *key = g_strdup (name);
MonoIcallHashTableValue *value = g_new (MonoIcallHashTableValue, 1);
if (key && value) {
value->method = method;
value->flags = flags;
mono_icall_lock ();
g_hash_table_insert (icall_hash, key, (gpointer)value);
mono_icall_unlock ();
}
}
/**
* mono_dangerous_add_internal_call_coop:
* \param name method specification to surface to the managed world
* \param method pointer to a C method to invoke when the method is called
*
* Similar to \c mono_dangerous_add_raw_internal_call.
*
*/
void
mono_dangerous_add_internal_call_coop (const char *name, gconstpointer method)
{
add_internal_call_with_flags (name, method, MONO_ICALL_FLAGS_COOPERATIVE);
}
/**
* mono_dangerous_add_internal_call_no_wrapper:
* \param name method specification to surface to the managed world
* \param method pointer to a C method to invoke when the method is called
*
* Similar to \c mono_dangerous_add_raw_internal_call but with more requirements for correct
* operation.
*
* The \p method must NOT:
*
* Run for an unbounded amount of time without calling the mono runtime.
* Additionally, the method must switch to GC Safe mode to perform all blocking
* operations: performing blocking I/O, taking locks, etc. The method can't throw or raise
exceptions or call other methods that will throw or raise exceptions, since the runtime won't
be able to detect exceptions and the unwinder won't be able to correctly find the last managed frame in the call stack.
This registration method is for icalls that need very low overhead and follow all the rules in their implementation.
*
*/
void
mono_dangerous_add_internal_call_no_wrapper (const char *name, gconstpointer method)
{
add_internal_call_with_flags (name, method, MONO_ICALL_FLAGS_NO_WRAPPER);
}
/**
* mono_add_internal_call:
* \param name method specification to surface to the managed world
* \param method pointer to a C method to invoke when the method is called
*
* This method surfaces the C function pointed by \p method as a method
* that has been surfaced in managed code with the method specified in
* \p name as an internal call.
*
* Internal calls are surfaced to all app domains loaded and they are
* accessible by a type with the specified name.
*
* You must provide a fully qualified type name, that is namespaces
* and type name, followed by a colon and the method name, with an
* optional signature to bind.
*
* For example, the following are all valid declarations:
*
* \c MyApp.Services.ScriptService:Accelerate
*
* \c MyApp.Services.ScriptService:Slowdown(int,bool)
*
* You specify the method parameters in cases where more than one method
* with the same name is surfaced to managed code. That way you can register
* different internal calls for different method overloads.
*
* The internal calls are invoked with no marshalling. This means that .NET
* types like \c System.String are exposed as \c MonoString* parameters. This is
* different than the way that strings are surfaced in P/Invoke.
*
* For more information on how the parameters are marshalled, see the
* <a href="http://www.mono-project.com/docs/advanced/embedding/">Mono Embedding</a>
* page.
*
* See the <a href="mono-api-methods.html#method-desc">Method Description</a>
* reference for more information on the format of method descriptions.
*/
void
mono_add_internal_call (const char *name, gconstpointer method)
{
add_internal_call_with_flags (name, method, MONO_ICALL_FLAGS_FOREIGN);
}
/**
* mono_dangerous_add_raw_internal_call:
* \param name method specification to surface to the managed world
* \param method pointer to a C method to invoke when the method is called
*
* Similar to \c mono_add_internal_call but with more requirements for correct
* operation.
*
* A thread running a dangerous raw internal call will avoid a thread state
* transition on entry and exit, but it must take responsibility for cooperating
* with the Mono runtime.
*
* The \p method must NOT:
*
* Run for an unbounded amount of time without calling the mono runtime.
* Additionally, the method must switch to GC Safe mode to perform all blocking
* operations: performing blocking I/O, taking locks, etc.
*
*/
void
mono_dangerous_add_raw_internal_call (const char *name, gconstpointer method)
{
add_internal_call_with_flags (name, method, MONO_ICALL_FLAGS_COOPERATIVE);
}
/**
* mono_add_internal_call_with_flags:
* \param name method specification to surface to the managed world
* \param method pointer to a C method to invoke when the method is called
* \param cooperative if \c TRUE, run icall in GC Unsafe (cooperatively suspended) mode,
* otherwise GC Safe (blocking)
*
* Like \c mono_add_internal_call, but if \p cooperative is \c TRUE the added
* icall promises that it will use the cooperative API to inform the runtime
* when it is running blocking operations, that it will not run for unbounded
* amounts of time without safepointing, and that it will not hold managed
* object references across suspend safepoints.
*
* If \p cooperative is \c FALSE, run the icall in GC Safe mode - the icall may
* block. The icall must obey the GC Safe rules, e.g. it must not touch
* unpinned managed memory.
*
*/
void
mono_add_internal_call_with_flags (const char *name, gconstpointer method, gboolean cooperative)
{
add_internal_call_with_flags (name, method, cooperative ? MONO_ICALL_FLAGS_COOPERATIVE : MONO_ICALL_FLAGS_FOREIGN);
}
void
mono_add_internal_call_internal (const char *name, gconstpointer method)
{
add_internal_call_with_flags (name, method, MONO_ICALL_FLAGS_COOPERATIVE);
}
/*
* we should probably export this as a helper (handle nested types).
* Returns the number of chars written to buf.
*/
static int
concat_class_name (char *buf, int bufsize, MonoClass *klass)
{
int nspacelen, cnamelen;
nspacelen = strlen (m_class_get_name_space (klass));
cnamelen = strlen (m_class_get_name (klass));
if (nspacelen + cnamelen + 2 > bufsize)
return 0;
if (nspacelen) {
memcpy (buf, m_class_get_name_space (klass), nspacelen);
buf [nspacelen ++] = '.';
}
memcpy (buf + nspacelen, m_class_get_name (klass), cnamelen);
buf [nspacelen + cnamelen] = 0;
return nspacelen + cnamelen;
}
static void
no_icall_table (void)
{
g_assert_not_reached ();
}
gboolean
mono_is_missing_icall_addr (gconstpointer addr)
{
return addr == NULL || addr == no_icall_table;
}
/*
* Returns either NULL or no_icall_table for missing icalls.
*/
gconstpointer
mono_lookup_internal_call_full_with_flags (MonoMethod *method, gboolean warn_on_missing, guint32 *flags)
{
char *sigstart = NULL;
char *tmpsig = NULL;
char mname [2048];
char *classname = NULL;
int typelen = 0, mlen, siglen;
gconstpointer res = NULL;
gboolean locked = FALSE;
g_assert (method != NULL);
if (method->is_inflated)
method = ((MonoMethodInflated *) method)->declaring;
if (m_class_get_nested_in (method->klass)) {
int pos = concat_class_name (mname, sizeof (mname)-2, m_class_get_nested_in (method->klass));
if (!pos)
goto exit;
mname [pos++] = '/';
mname [pos] = 0;
typelen = concat_class_name (mname+pos, sizeof (mname)-pos-1, method->klass);
if (!typelen)
goto exit;
typelen += pos;
} else {
typelen = concat_class_name (mname, sizeof (mname), method->klass);
if (!typelen)
goto exit;
}
classname = g_strdup (mname);
mname [typelen] = ':';
mname [typelen + 1] = ':';
mlen = strlen (method->name);
memcpy (mname + typelen + 2, method->name, mlen);
sigstart = mname + typelen + 2 + mlen;
*sigstart = 0;
tmpsig = mono_signature_get_desc (mono_method_signature_internal (method), TRUE);
siglen = strlen (tmpsig);
if (typelen + mlen + siglen + 6 > sizeof (mname))
goto exit;
sigstart [0] = '(';
memcpy (sigstart + 1, tmpsig, siglen);
sigstart [siglen + 1] = ')';
sigstart [siglen + 2] = 0;
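/* mname now holds the lookup key, e.g. "System.Environment::get_ProcessorCount()",
or "Outer/Nested::Method(int)" for nested types. */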
/* mono_marshal_get_native_wrapper () depends on this */
if (method->klass == mono_defaults.string_class && !strcmp (method->name, ".ctor")) {
res = (gconstpointer)ves_icall_System_String_ctor_RedirectToCreateString;
goto exit;
}
mono_icall_lock ();
locked = TRUE;
res = g_hash_table_lookup (icall_hash, mname);
if (res) {
MonoIcallHashTableValue *value = (MonoIcallHashTableValue *)res;
if (flags)
*flags = value->flags;
res = value->method;
goto exit;
}
/* try without signature */
*sigstart = 0;
res = g_hash_table_lookup (icall_hash, mname);
if (res) {
MonoIcallHashTableValue *value = (MonoIcallHashTableValue *)res;
if (flags)
*flags = value->flags;
res = value->method;
goto exit;
}
if (!icall_table) {
/* Fail only when the result is actually used */
res = (gconstpointer)no_icall_table;
goto exit;
} else {
gboolean uses_handles = FALSE;
g_assert (icall_table->lookup);
res = icall_table->lookup (method, classname, sigstart - mlen, sigstart, &uses_handles);
if (res && flags && uses_handles)
*flags = *flags | MONO_ICALL_FLAGS_USES_HANDLES;
mono_icall_unlock ();
locked = FALSE;
if (res)
goto exit;
if (warn_on_missing) {
g_warning ("cant resolve internal call to \"%s\" (tested without signature also)", mname);
g_print ("\nYour mono runtime and class libraries are out of sync.\n");
g_print ("The out of sync library is: %s\n", m_class_get_image (method->klass)->name);
g_print ("\nWhen you update one from git you need to update, compile and install\nthe other too.\n");
g_print ("Do not report this as a bug unless you're sure you have updated correctly:\nyou probably have a broken mono install.\n");
g_print ("If you see other errors or faults after this message they are probably related\n");
g_print ("and you need to fix your mono install first.\n");
}
res = NULL;
}
exit:
if (locked)
mono_icall_unlock ();
g_free (classname);
g_free (tmpsig);
return res;
}
/**
* mono_lookup_internal_call_full:
* \param method the method to look up
* \param warn_on_missing whether to warn if the method is not found
* \param uses_handles out argument set if the method needs handles around managed objects.
* \param foreign out argument set if the method is a foreign (non-cooperative) icall.
* \returns a pointer to the icall code for the given method. If
* \p uses_handles is not NULL, it will be set to TRUE if the method
* needs managed objects wrapped using the infrastructure in handle.h
*
* If the method is not found, warns (when \p warn_on_missing is TRUE) and returns NULL.
*/
gconstpointer
mono_lookup_internal_call_full (MonoMethod *method, gboolean warn_on_missing, mono_bool *uses_handles, mono_bool *foreign)
{
if (uses_handles)
*uses_handles = FALSE;
if (foreign)
*foreign = FALSE;
guint32 flags = MONO_ICALL_FLAGS_NONE;
gconstpointer addr = mono_lookup_internal_call_full_with_flags (method, warn_on_missing, &flags);
if (uses_handles && (flags & MONO_ICALL_FLAGS_USES_HANDLES))
*uses_handles = TRUE;
if (foreign && (flags & MONO_ICALL_FLAGS_FOREIGN))
*foreign = TRUE;
return addr;
}
/**
* mono_lookup_internal_call:
*/
gpointer
mono_lookup_internal_call (MonoMethod *method)
{
return (gpointer)mono_lookup_internal_call_full (method, TRUE, NULL, NULL);
}
/*
* mono_lookup_icall_symbol:
*
* Given the icall METHOD, returns its C symbol.
*/
const char*
mono_lookup_icall_symbol (MonoMethod *m)
{
if (!icall_table)
return NULL;
g_assert (icall_table->lookup_icall_symbol);
gpointer func;
func = (gpointer)mono_lookup_internal_call_full (m, FALSE, NULL, NULL);
if (!func)
return NULL;
return icall_table->lookup_icall_symbol (func);
}
#if defined(TARGET_WIN32) && defined(TARGET_X86)
/*
* Under Windows, the default pinvoke calling convention is STDCALL but
* we need CDECL.
*/
#define MONO_ICALL_SIGNATURE_CALL_CONVENTION MONO_CALL_C
#else
#define MONO_ICALL_SIGNATURE_CALL_CONVENTION 0
#endif
// Storage for these enums is pointer-sized as it gets replaced with MonoType*.
//
// mono_create_icall_signatures depends on this order. Handle with care.
typedef enum ICallSigType {
ICALL_SIG_TYPE_bool = 0x00,
ICALL_SIG_TYPE_boolean = ICALL_SIG_TYPE_bool,
ICALL_SIG_TYPE_double = 0x01,
ICALL_SIG_TYPE_float = 0x02,
ICALL_SIG_TYPE_int = 0x03,
ICALL_SIG_TYPE_int16 = 0x04,
ICALL_SIG_TYPE_int32 = ICALL_SIG_TYPE_int,
ICALL_SIG_TYPE_int8 = 0x05,
ICALL_SIG_TYPE_long = 0x06,
ICALL_SIG_TYPE_obj = 0x07,
ICALL_SIG_TYPE_object = ICALL_SIG_TYPE_obj,
ICALL_SIG_TYPE_ptr = 0x08,
ICALL_SIG_TYPE_ptrref = 0x09,
ICALL_SIG_TYPE_string = 0x0A,
ICALL_SIG_TYPE_uint16 = 0x0B,
ICALL_SIG_TYPE_uint32 = 0x0C,
ICALL_SIG_TYPE_uint8 = 0x0D,
ICALL_SIG_TYPE_ulong = 0x0E,
ICALL_SIG_TYPE_void = 0x0F,
ICALL_SIG_TYPE_sizet = 0x10
} ICallSigType;
#define ICALL_SIG_TYPES_1(a) ICALL_SIG_TYPE_ ## a,
#define ICALL_SIG_TYPES_2(a, b) ICALL_SIG_TYPES_1 (a ) ICALL_SIG_TYPES_1 (b)
#define ICALL_SIG_TYPES_3(a, b, c) ICALL_SIG_TYPES_2 (a, b ) ICALL_SIG_TYPES_1 (c)
#define ICALL_SIG_TYPES_4(a, b, c, d) ICALL_SIG_TYPES_3 (a, b, c ) ICALL_SIG_TYPES_1 (d)
#define ICALL_SIG_TYPES_5(a, b, c, d, e) ICALL_SIG_TYPES_4 (a, b, c, d ) ICALL_SIG_TYPES_1 (e)
#define ICALL_SIG_TYPES_6(a, b, c, d, e, f) ICALL_SIG_TYPES_5 (a, b, c, d, e) ICALL_SIG_TYPES_1 (f)
#define ICALL_SIG_TYPES_7(a, b, c, d, e, f, g) ICALL_SIG_TYPES_6 (a, b, c, d, e, f) ICALL_SIG_TYPES_1 (g)
#define ICALL_SIG_TYPES_8(a, b, c, d, e, f, g, h) ICALL_SIG_TYPES_7 (a, b, c, d, e, f, g) ICALL_SIG_TYPES_1 (h)
#define ICALL_SIG_TYPES(n, types) ICALL_SIG_TYPES_ ## n types
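// For example, ICALL_SIG_TYPES (2, (void, int)) expands to:
// ICALL_SIG_TYPE_void, ICALL_SIG_TYPE_int,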
// A scheme to make these const would be nice.
static struct {
#define ICALL_SIG(n, xtypes) \
struct { \
MonoMethodSignature sig; \
gsize types [n]; \
} ICALL_SIG_NAME (n, xtypes);
ICALL_SIGS
MonoMethodSignature end; // terminal zeroed element
} mono_icall_signatures = {
#undef ICALL_SIG
#define ICALL_SIG(n, types) { { \
0, /* ret */ \
n, /* param_count */ \
-1, /* sentinelpos */ \
0, /* generic_param_count */ \
MONO_ICALL_SIGNATURE_CALL_CONVENTION, \
0, /* hasthis */ \
0, /* explicit_this */ \
1, /* pinvoke */ \
0, /* is_inflated */ \
0, /* has_type_parameters */ \
}, /* possible gap here, depending on MONO_ZERO_LEN_ARRAY */ \
{ ICALL_SIG_TYPES (n, types) } }, /* params and ret */
ICALL_SIGS
};
#undef ICALL_SIG
#define ICALL_SIG(n, types) MonoMethodSignature * const ICALL_SIG_NAME (n, types) = &mono_icall_signatures.ICALL_SIG_NAME (n, types).sig;
ICALL_SIGS
#undef ICALL_SIG
void
mono_create_icall_signatures (void)
{
// Fixup the mostly statically initialized icall signatures.
// Each type slot currently holds a small ICallSigType index in pointer-sized
// storage; replace it in place with the corresponding MonoType*, i.e.
// x = m_class_get_byval_arg (x), via the lookup table below.
// The first slot is the return type: initialize ret with params [0] and
// params [i] with params [i + 1]. ptrref is special (byref int).
typedef MonoMethodSignature G_MAY_ALIAS MonoMethodSignature_a;
typedef gsize G_MAY_ALIAS gsize_a;
MonoType * const lookup [ ] = {
m_class_get_byval_arg (mono_defaults.boolean_class), // ICALL_SIG_TYPE_bool
m_class_get_byval_arg (mono_defaults.double_class), // ICALL_SIG_TYPE_double
m_class_get_byval_arg (mono_defaults.single_class), // ICALL_SIG_TYPE_float
m_class_get_byval_arg (mono_defaults.int32_class), // ICALL_SIG_TYPE_int
m_class_get_byval_arg (mono_defaults.int16_class), // ICALL_SIG_TYPE_int16
m_class_get_byval_arg (mono_defaults.sbyte_class), // ICALL_SIG_TYPE_int8
m_class_get_byval_arg (mono_defaults.int64_class), // ICALL_SIG_TYPE_long
m_class_get_byval_arg (mono_defaults.object_class), // ICALL_SIG_TYPE_obj
m_class_get_byval_arg (mono_defaults.int_class), // ICALL_SIG_TYPE_ptr
mono_class_get_byref_type (mono_defaults.int_class), // ICALL_SIG_TYPE_ptrref
m_class_get_byval_arg (mono_defaults.string_class), // ICALL_SIG_TYPE_string
m_class_get_byval_arg (mono_defaults.uint16_class), // ICALL_SIG_TYPE_uint16
m_class_get_byval_arg (mono_defaults.uint32_class), // ICALL_SIG_TYPE_uint32
m_class_get_byval_arg (mono_defaults.byte_class), // ICALL_SIG_TYPE_uint8
m_class_get_byval_arg (mono_defaults.uint64_class), // ICALL_SIG_TYPE_ulong
m_class_get_byval_arg (mono_defaults.void_class), // ICALL_SIG_TYPE_void
m_class_get_byval_arg (mono_defaults.int_class), // ICALL_SIG_TYPE_sizet
};
MonoMethodSignature_a *sig = (MonoMethodSignature*)&mono_icall_signatures;
int n;
while ((n = sig->param_count)) {
--sig->param_count; // remove ret
gsize_a *types = (gsize_a*)(sig + 1);
for (int i = 0; i < n; ++i) {
gsize index = *types++;
g_assert (index < G_N_ELEMENTS (lookup));
// Casts on next line are attempt to follow strict aliasing rules,
// to ensure reading from *types precedes writing
// to params [].
*(gsize*)(i ? &sig->params [i - 1] : &sig->ret) = (gsize)lookup [index];
}
sig = (MonoMethodSignature*)types;
}
}
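/*
* Worked example: a signature emitted as ICALL_SIG (2, (void, int)) starts out
* with param_count == 2 and types == { ICALL_SIG_TYPE_void, ICALL_SIG_TYPE_int }.
* The loop above decrements param_count to 1, sets ret to the void MonoType*
* and params [0] to the int32 MonoType*.
*/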
void
mono_register_jit_icall_info (MonoJitICallInfo *info, gconstpointer func, const char *name, MonoMethodSignature *sig, gboolean avoid_wrapper, const char *c_symbol)
{
// Duplicate initialization is allowed and racy, assuming it is equivalent.
info->name = name;
info->func = func;
info->sig = sig;
info->c_symbol = c_symbol;
// Fill in the wrapper ahead of time, to just be func, to avoid
// later initializing it to anything else; in other words, no wrapper.
if (avoid_wrapper) {
info->wrapper = func;
} else {
// Leave it alone in case of a race.
}
}
int
ves_icall_System_GC_GetCollectionCount (int generation)
{
return mono_gc_collection_count (generation);
}
int
ves_icall_System_GC_GetGeneration (MonoObjectHandle object, MonoError *error)
{
return mono_gc_get_generation (MONO_HANDLE_RAW (object));
}
int
ves_icall_System_GC_GetMaxGeneration (void)
{
return mono_gc_max_generation ();
}
gint64
ves_icall_System_GC_GetAllocatedBytesForCurrentThread (void)
{
return mono_gc_get_allocated_bytes_for_current_thread ();
}
guint64
ves_icall_System_GC_GetTotalAllocatedBytes (MonoBoolean precise, MonoError* error)
{
return mono_gc_get_total_allocated_bytes (precise);
}
void
ves_icall_System_GC_RecordPressure (gint64 value)
{
mono_gc_add_memory_pressure (value);
}
MonoBoolean
ves_icall_System_Threading_Thread_YieldInternal (void)
{
mono_threads_platform_yield ();
return TRUE;
}
gint32
ves_icall_System_Environment_get_ProcessorCount (void)
{
return mono_cpu_count ();
}
// Generate wrappers.
#define ICALL_TYPE(id,name,first) /* nothing */
#define ICALL(id,name,func) /* nothing */
#define NOHANDLES(inner) /* nothing */
#define MONO_HANDLE_REGISTER_ICALL(func, ret, nargs, argtypes) MONO_HANDLE_REGISTER_ICALL_IMPLEMENT (func, ret, nargs, argtypes)
// Some native functions are exposed via multiple managed names.
// Producing a wrapper for these results in duplicate wrappers with the same names,
// which fails to compile. Do not produce such duplicate wrappers. Alternatively,
// a one-line native function with a different name that calls the main one could be used,
// i.e. the wrapper would also have a different name.
#define HANDLES_REUSE_WRAPPER(...) /* nothing */
#define HANDLES(id, name, func, ret, nargs, argtypes) \
MONO_HANDLE_DECLARE (id, name, func, ret, nargs, argtypes); \
MONO_HANDLE_IMPLEMENT (id, name, func, ret, nargs, argtypes)
#include "metadata/icall-def.h"
#undef HANDLES
#undef HANDLES_REUSE_WRAPPER
#undef ICALL_TYPE
#undef ICALL
#undef NOHANDLES
#undef MONO_HANDLE_REGISTER_ICALL
| 1 |
dotnet/runtime | 66,007 | [monoapi] Add mono_method_get_unmanaged_callers_only_ftnptr | Like `RuntimeMethodHandle.GetFunctionPointer`, but callable from native code | lambdageek | 2022-03-01T15:38:09Z | 2022-03-03T15:38:37Z | faa272f860f83802a9cee65619e6e2e841b05433 | 1cfa6d6964d890f9673b25d2165532f9a43710a7 | [monoapi] Add mono_method_get_unmanaged_callers_only_ftnptr. Like `RuntimeMethodHandle.GetFunctionPointer`, but callable from native code | ./src/mono/mono/metadata/object-internals.h | /**
* \file
*/
#ifndef __MONO_OBJECT_INTERNALS_H__
#define __MONO_OBJECT_INTERNALS_H__
#include <mono/utils/mono-forward-internal.h>
#include <mono/metadata/object-forward.h>
#include <mono/metadata/handle-decl.h>
#include <mono/metadata/object.h>
#include <mono/metadata/threads.h>
#include <mono/metadata/reflection.h>
#include <mono/metadata/mempool.h>
#include <mono/metadata/class-internals.h>
#include <mono/metadata/threads-types.h>
#include <mono/metadata/handle.h>
#include <mono/metadata/abi-details.h>
#include "mono/utils/mono-compiler.h"
#include "mono/utils/mono-error.h"
#include "mono/utils/mono-error-internals.h"
#include "mono/utils/mono-machine.h"
#include "mono/utils/mono-stack-unwinding.h"
#include "mono/utils/mono-tls.h"
#include "mono/utils/mono-coop-mutex.h"
#include <mono/metadata/icalls.h>
/* Use this as MONO_CHECK_ARG (arg,expr,) in functions returning void */
#define MONO_CHECK_ARG(arg, expr, retval) do { \
if (G_UNLIKELY (!(expr))) \
{ \
if (0) { (void)(arg); } /* check if the name exists */ \
ERROR_DECL (error); \
mono_error_set_argument_format (error, #arg, "assertion `%s' failed", #expr); \
mono_error_set_pending_exception (error); \
return retval; \
} \
} while (0)
#define MONO_CHECK_ARG_NULL_NAMED(arg, argname, retval) do { \
if (G_UNLIKELY (!(arg))) \
{ \
ERROR_DECL (error); \
mono_error_set_argument_null (error, (argname), ""); \
mono_error_set_pending_exception (error); \
return retval; \
} \
} while (0)
/* Use this as MONO_CHECK_ARG_NULL (arg,) in functions returning void */
#define MONO_CHECK_ARG_NULL(arg, retval) do { \
if (G_UNLIKELY (!(arg))) \
{ \
mono_error_set_argument_null (error, #arg, ""); \
return retval; \
} \
} while (0)
/* Use this as MONO_CHECK_ARG_NULL_HANDLE (arg,) in functions returning void */
#define MONO_CHECK_ARG_NULL_HANDLE(arg, retval) do { \
if (G_UNLIKELY (MONO_HANDLE_IS_NULL (arg))) \
{ \
mono_error_set_argument_null (error, #arg, ""); \
return retval; \
} \
} while (0)
#define MONO_CHECK_ARG_NULL_HANDLE_NAMED(arg, argname, retval) do { \
if (G_UNLIKELY (MONO_HANDLE_IS_NULL (arg))) \
{ \
mono_error_set_argument_null (error, (argname), ""); \
return retval; \
} \
} while (0)
/* Use this as MONO_CHECK_NULL (arg,) in functions returning void */
#define MONO_CHECK_NULL(arg, retval) do { \
if (G_UNLIKELY (!(arg))) \
{ \
ERROR_DECL (error); \
mono_error_set_null_reference (error); \
mono_error_set_pending_exception (error); \
return retval; \
} \
} while (0)
MONO_COMPONENT_API MonoClass *
mono_class_create_array (MonoClass *element_class, uint32_t rank);
MonoArrayHandle
mono_array_new_specific_handle (MonoVTable *vtable, uintptr_t n, MonoError *error);
MonoArray*
mono_array_new_specific_checked (MonoVTable *vtable, uintptr_t n, MonoError *error);
/*
* Macros which cache.
* These should be used instead of the original versions.
*/
static inline MonoClass*
mono_array_class_get_cached_function (MonoClass *eclass, MonoClass **aclass)
{
MonoClass *a = *aclass;
if (a)
return a;
a = mono_class_create_array (eclass, 1);
g_assert (a);
if (a)
*aclass = a;
return *aclass;
}
// eclass should be a run-time constant
// If you get an error using this macro, you need to manually instantiate the MonoClass *foo ## _array cache.
// See for example object_class_array.
#define mono_array_class_get_cached(eclass) (mono_array_class_get_cached_function ((eclass), &(eclass ## _array)))
static inline MonoArray*
mono_array_new_cached_function (MonoClass *aclass, int size, MonoError *error)
{
MonoVTable *vtable = mono_class_vtable_checked (aclass, error);
MonoArray *arr = NULL;
if (is_ok (error))
arr = mono_array_new_specific_checked (vtable, size, error);
return arr;
}
// eclass should be a run-time constant
// If you get an error using this macro, you need to manually instantiate the MonoClass *foo ## _array cache.
// See for example object_class_array.
#define mono_array_new_cached(eclass, size, error) \
mono_array_new_cached_function (mono_array_class_get_cached (eclass), (size), (error))
static inline MonoArrayHandle
mono_array_new_cached_handle_function (MonoClass *aclass, int size, MonoError *error)
{
MonoVTable *vtable = mono_class_vtable_checked (aclass, error);
MonoArrayHandle arr = NULL_HANDLE_ARRAY;
if (is_ok (error))
arr = mono_array_new_specific_handle (vtable, size, error);
return arr;
}
// eclass should be a run-time constant
// If you get an error using this macro, you need to manually instantiate the MonoClass *foo ## _array cache.
// See for example object_class_array.
#define mono_array_new_cached_handle(eclass, size, error) \
mono_array_new_cached_handle_function (mono_array_class_get_cached (eclass), (size), (error))
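/*
* Usage sketch ("foo_class" and its "foo_class_array" cache slot are
* hypothetical; the cache variable must be declared manually, as noted above):
*
* static MonoClass *foo_class_array;
* ...
* MonoArray *arr = mono_array_new_cached (foo_class, 16, error);
* // expands to mono_array_new_cached_function (
* // mono_array_class_get_cached_function (foo_class, &foo_class_array), 16, error)
*/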
#ifdef MONO_BIG_ARRAYS
typedef uint64_t mono_array_size_t;
typedef int64_t mono_array_lower_bound_t;
#define MONO_ARRAY_MAX_INDEX G_MAXINT64
#define MONO_ARRAY_MAX_SIZE G_MAXUINT64
#else
typedef uint32_t mono_array_size_t;
typedef int32_t mono_array_lower_bound_t;
#define MONO_ARRAY_MAX_INDEX ((int32_t) 0x7fffffff)
#define MONO_ARRAY_MAX_SIZE ((uint32_t) 0xffffffff)
#endif
typedef struct {
mono_array_size_t length;
mono_array_lower_bound_t lower_bound;
} MonoArrayBounds;
struct _MonoArray {
MonoObject obj;
/* bounds is NULL for szarrays */
MonoArrayBounds *bounds;
/* total number of elements of the array */
mono_array_size_t max_length;
/* we use mono_64bitaligned_t to ensure proper alignment on platforms that need it */
mono_64bitaligned_t vector [MONO_ZERO_LEN_ARRAY];
};
/* match the layout of the managed definition of Span<T> */
#define MONO_DEFINE_SPAN_OF_T(name, type) \
typedef struct { \
type* _pointer; \
uint32_t _length; \
} name;
MONO_DEFINE_SPAN_OF_T (MonoSpanOfObjects, MonoObject*)
#define MONO_SIZEOF_MONO_ARRAY (MONO_STRUCT_OFFSET_CONSTANT (MonoArray, vector))
struct _MonoString {
MonoObject object;
int32_t length;
mono_unichar2 chars [MONO_ZERO_LEN_ARRAY];
};
#define MONO_SIZEOF_MONO_STRING (MONO_STRUCT_OFFSET (MonoString, chars))
#define mono_object_class(obj) (((MonoObject*)(obj))->vtable->klass)
#define mono_object_domain(obj) (((MonoObject*)(obj))->vtable->domain)
#define mono_string_chars_fast(s) ((mono_unichar2*)(s)->chars)
#define mono_string_length_fast(s) ((s)->length)
/**
* mono_array_length_internal:
* \param array a \c MonoArray*
* \returns the total number of elements in the array. This works for
* both vectors and multidimensional arrays.
*/
#define mono_array_length_internal(array) ((array)->max_length)
static inline
uintptr_t
mono_array_handle_length (MonoArrayHandle arr)
{
MONO_REQ_GC_UNSAFE_MODE;
return mono_array_length_internal (MONO_HANDLE_RAW (arr));
}
// Equivalent to mono_array_addr_with_size, except:
// 1. A macro instead of a function -- the types of size and index are open.
// 2. mono_array_addr_with_size could, but does not, do GC mode transitions.
#define mono_array_addr_with_size_fast(array,size,index) ( ((char*)(array)->vector) + (size) * (index) )
#define mono_array_addr_fast(array,type,index) ((type*)(void*) mono_array_addr_with_size_fast (array, sizeof (type), index))
#define mono_array_get_fast(array,type,index) ( *(type*)mono_array_addr_fast ((array), type, (index)) )
#define mono_array_set_fast(array,type,index,value) \
do { \
type *__p = (type *) mono_array_addr_fast ((array), type, (index)); \
*__p = (value); \
} while (0)
#define mono_array_setref_fast(array,index,value) \
do { \
void **__p = (void **) mono_array_addr_fast ((array), void*, (index)); \
mono_gc_wbarrier_set_arrayref_internal ((array), __p, (MonoObject*)(value)); \
/* *__p = (value);*/ \
} while (0)
#define mono_array_memcpy_refs_fast(dest,destidx,src,srcidx,count) \
do { \
void **__p = (void **) mono_array_addr_fast ((dest), void*, (destidx)); \
void **__s = mono_array_addr_fast ((src), void*, (srcidx)); \
mono_gc_wbarrier_arrayref_copy_internal (__p, __s, (count)); \
} while (0)
// _internal is like _fast, but preserves the preexisting fixed parameter types:
// int size
// uintptr_t idx
// in order to mimic the non-_internal accessors but without the GC mode transitions,
// or at least to avoid the runtime using the embedding API, whether or not it has GC mode transitions.
static inline char*
mono_array_addr_with_size_internal (MonoArray *array, int size, uintptr_t idx)
{
return mono_array_addr_with_size_fast (array, size, idx);
}
#define mono_array_addr_internal(array,type,index) ((type*)(void*) mono_array_addr_with_size_internal (array, sizeof (type), index))
#define mono_array_get_internal(array,type,index) ( *(type*)mono_array_addr_internal ((array), type, (index)) )
#define mono_array_set_internal(array,type,index,value) \
do { \
type *__p = (type *) mono_array_addr_internal ((array), type, (index)); \
*__p = (value); \
} while (0)
#define mono_array_setref_internal(array,index,value) \
do { \
void **__p = (void **) mono_array_addr_internal ((array), void*, (index)); \
mono_gc_wbarrier_set_arrayref_internal ((array), __p, (MonoObject*)(value)); \
/* *__p = (value);*/ \
} while (0)
#define mono_array_memcpy_refs_internal(dest,destidx,src,srcidx,count) \
do { \
void **__p = (void **) mono_array_addr_internal ((dest), void*, (destidx)); \
void **__s = mono_array_addr_internal ((src), void*, (srcidx)); \
mono_gc_wbarrier_arrayref_copy_internal (__p, __s, (count)); \
} while (0)
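/*
* Usage sketch for the _internal accessors:
*
* gint32 v = mono_array_get_internal (arr, gint32, i);
* mono_array_set_internal (arr, gint32, i, v + 1);
* mono_array_setref_internal (objs, i, (MonoObject*)str); // reference store, with write barrier
*/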
static inline gboolean
mono_handle_array_has_bounds (MonoArrayHandle arr)
{
return MONO_HANDLE_GETVAL (arr, bounds) != NULL;
}
static inline void
mono_handle_array_get_bounds_dim (MonoArrayHandle arr, gint32 dim, MonoArrayBounds *bounds)
{
*bounds = MONO_HANDLE_GETVAL (arr, bounds [dim]);
}
#define mono_span_length(span) (span->_length)
#define mono_span_get(span,type,idx) (type)(!span->_pointer ? (type)0 : span->_pointer[idx])
#define mono_span_addr(span,type,idx) (type*)(span->_pointer + idx)
#define mono_span_setref(span,index,value) \
do { \
void **__p = (void **) mono_span_addr ((span), void*, (index)); \
mono_gc_wbarrier_generic_store_internal (__p, (MonoObject*)(value)); \
/* *__p = (value);*/ \
} while (0)
static inline MonoSpanOfObjects
mono_span_create_from_object_array (MonoArray *arr) {
MonoSpanOfObjects span;
if (arr != NULL) {
span._length = (int32_t)mono_array_length_internal (arr);
span._pointer = mono_array_addr_fast (arr, MonoObject*, 0);
} else {
span._length = 0;
span._pointer = NULL;
}
return span;
}
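/*
* Usage sketch: the span macros take a pointer to the span struct.
*
* MonoSpanOfObjects span = mono_span_create_from_object_array (arr);
* MonoSpanOfObjects *sp = &span;
* int32_t n = mono_span_length (sp);
* MonoObject *first = n > 0 ? mono_span_get (sp, MonoObject*, 0) : NULL;
*/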
typedef struct {
MonoObject obj;
} MonoMarshalByRefObject;
TYPED_HANDLE_DECL (MonoMarshalByRefObject);
/* This is a copy of System.AppDomain */
struct _MonoAppDomain {
MonoMarshalByRefObject mbr;
};
/* Safely access System.AppDomain from native code */
TYPED_HANDLE_DECL (MonoAppDomain);
typedef struct _MonoStringBuilder MonoStringBuilder;
TYPED_HANDLE_DECL (MonoStringBuilder);
struct _MonoStringBuilder {
MonoObject object;
MonoArray *chunkChars;
MonoStringBuilder* chunkPrevious; // Link to the block logically before this block
int chunkLength; // The index in ChunkChars that represents the end of the block
int chunkOffset; // The logical offset (sum of all characters in previous blocks)
int maxCapacity;
};
static inline int
mono_string_builder_capacity (MonoStringBuilderHandle sbh)
{
MonoStringBuilder *sb = MONO_HANDLE_RAW (sbh);
return sb->chunkOffset + sb->chunkChars->max_length;
}
static inline int
mono_string_builder_string_length (MonoStringBuilderHandle sbh)
{
MonoStringBuilder *sb = MONO_HANDLE_RAW (sbh);
return sb->chunkOffset + sb->chunkLength;
}
typedef struct {
MonoType *type;
gpointer value;
MonoClass *klass;
} MonoTypedRef;
typedef struct {
gpointer args;
} MonoArgumentHandle;
typedef struct {
MonoMethodSignature *sig;
gpointer args;
gint32 next_arg;
gint32 num_args;
} MonoArgIterator;
struct _MonoException {
MonoObject object;
MonoString *class_name;
MonoString *message;
MonoObject *_data;
MonoObject *inner_ex;
MonoString *help_link;
/* Stores the IPs and the generic sharing infos
(vtable/MRGCTX) of the frames. */
MonoArray *trace_ips;
MonoString *stack_trace;
MonoString *remote_stack_trace;
gint32 remote_stack_index;
/* Dynamic methods referenced by the stack trace */
MonoArray *dynamic_methods;
gint32 hresult;
MonoString *source;
MonoObject *serialization_manager;
MonoObject *captured_traces;
MonoArray *native_trace_ips;
gint32 caught_in_unmanaged;
};
typedef struct {
MonoException base;
} MonoSystemException;
TYPED_HANDLE_DECL (MonoSystemException);
typedef struct {
MonoObject object;
MonoObject *async_state;
MonoObject *handle;
MonoObject *async_delegate;
gpointer *data;
MonoObject *object_data;
MonoBoolean sync_completed;
MonoBoolean completed;
MonoBoolean endinvoke_called;
MonoObject *async_callback;
MonoObject *execution_context;
MonoObject *original_context;
gint64 add_time;
} MonoAsyncResult;
TYPED_HANDLE_DECL (MonoAsyncResult);
typedef struct {
MonoMarshalByRefObject object;
gpointer handle;
} MonoWaitHandle;
TYPED_HANDLE_DECL (MonoWaitHandle);
/* System.Threading.StackCrawlMark */
/*
* This type is used to identify the method where execution has entered
* the BCL during stack walks. The outermost public method should
* define it like this:
* StackCrawlMark stackMark = StackCrawlMark.LookForMyCaller;
* and pass the stackMark as a byref argument down the call chain
* until it reaches an icall.
*/
typedef enum {
STACK_CRAWL_ME = 0,
STACK_CRAWL_CALLER = 1,
STACK_CRAWL_CALLERS_CALLER = 2,
STACK_CRAWL_THREAD = 3
} MonoStackCrawlMark;
/* MonoSafeHandle is in class-internals.h. */
/* Safely access System.Net.Sockets.SafeSocketHandle from native code */
TYPED_HANDLE_DECL (MonoSafeHandle);
/* This corresponds to System.Type */
struct _MonoReflectionType {
MonoObject object;
MonoType *type;
};
/* Safely access System.Type from native code */
TYPED_HANDLE_DECL (MonoReflectionType);
/* This corresponds to System.Runtime.CompilerServices.QCallTypeHandle */
struct _MonoQCallTypeHandle {
gpointer _ptr;
MonoType *type;
};
typedef struct _MonoQCallTypeHandle MonoQCallTypeHandle;
/* This corresponds to System.Runtime.CompilerServices.QCallAssembly */
struct _MonoQCallAssemblyHandle {
gpointer _ptr;
MonoAssembly *assembly;
};
typedef struct _MonoQCallAssemblyHandle MonoQCallAssemblyHandle;
typedef struct {
MonoObject object;
MonoReflectionType *class_to_proxy;
MonoObject *context;
MonoObject *unwrapped_server;
gint32 target_domain_id;
MonoString *target_uri;
MonoObject *object_identity;
MonoObject *obj_TP;
MonoObject *stub_data;
} MonoRealProxy;
/* Safely access System.Runtime.Remoting.Proxies.RealProxy from native code */
TYPED_HANDLE_DECL (MonoRealProxy);
typedef struct _MonoIUnknown MonoIUnknown;
typedef struct _MonoIUnknownVTable MonoIUnknownVTable;
/* STDCALL on windows, CDECL everywhere else to work with XPCOM and MainWin COM */
#ifdef HOST_WIN32
#define STDCALL __stdcall
#else
#define STDCALL
#endif
struct _MonoIUnknownVTable
{
int (STDCALL *QueryInterface)(MonoIUnknown *pUnk, gconstpointer riid, gpointer* ppv);
int (STDCALL *AddRef)(MonoIUnknown *pUnk);
int (STDCALL *Release)(MonoIUnknown *pUnk);
};
struct _MonoIUnknown
{
const MonoIUnknownVTable *vtable;
};
typedef struct {
MonoMarshalByRefObject object;
MonoIUnknown *iunknown;
GHashTable* itf_hash;
MonoObject *synchronization_context;
} MonoComObject;
TYPED_HANDLE_DECL (MonoComObject);
typedef struct {
MonoRealProxy real_proxy;
MonoComObject *com_object;
gint32 ref_count;
} MonoComInteropProxy;
TYPED_HANDLE_DECL (MonoComInteropProxy);
typedef struct {
MonoObject object;
MonoRealProxy *rp;
MonoRemoteClass *remote_class;
MonoBoolean custom_type_info;
} MonoTransparentProxy;
/* Safely access System.Runtime.Remoting.Proxies.TransparentProxy from native code */
TYPED_HANDLE_DECL (MonoTransparentProxy);
typedef struct {
MonoObject obj;
MonoReflectionMethod *method;
MonoArray *args;
MonoArray *names;
MonoArray *arg_types;
MonoObject *ctx;
MonoObject *rval;
MonoObject *exc;
MonoAsyncResult *async_result;
guint32 call_type;
} MonoMethodMessage;
TYPED_HANDLE_DECL (MonoMethodMessage);
/* Keep in sync with the System.MonoAsyncCall */
typedef struct {
MonoObject object;
MonoMethodMessage *msg;
MonoMethod *cb_method;
MonoDelegate *cb_target;
MonoObject *state;
MonoObject *res;
MonoArray *out_args;
} MonoAsyncCall;
TYPED_HANDLE_DECL (MonoAsyncCall);
typedef struct {
MonoObject obj;
MonoArray *frames;
MonoArray *captured_traces;
MonoBoolean debug_info;
} MonoStackTrace;
TYPED_HANDLE_DECL (MonoStackTrace);
typedef struct {
MonoObject obj;
gint32 il_offset;
gint32 native_offset;
gint64 method_address;
gint32 method_index;
MonoReflectionMethod *method;
MonoString *filename;
gint32 line;
gint32 column;
MonoString *internal_method_name;
} MonoStackFrame;
TYPED_HANDLE_DECL (MonoStackFrame);
typedef enum {
MONO_THREAD_FLAG_DONT_MANAGE = 1, // Don't wait for or abort this thread
MONO_THREAD_FLAG_NAME_SET = 2, // Thread name set from managed code
MONO_THREAD_FLAG_CLEANUP_FROM_NATIVE = 4, // Thread initialized in native so clean up in native
} MonoThreadFlags;
struct _MonoThreadInfo;
typedef struct MonoThreadName {
char* volatile chars; // null check outside of lock
gint32 free; // bool
gint32 length;
} MonoThreadName;
void
mono_gstring_append_thread_name (GString*, MonoInternalThread*);
struct _MonoInternalThread {
MonoObject obj;
volatile int lock_thread_id; /* to be used as the pre-shifted thread id in thin locks */
MonoThreadHandle *handle;
gpointer native_handle;
MonoThreadName name;
guint32 state; /* must be accessed while longlived->synch_cs is locked */
MonoException *abort_exc;
MonoGCHandle abort_state_handle;
guint64 tid; /* This is accessed as a gsize in the code (so it can hold a 64bit pointer on systems that need it), but needs to reserve 64 bits of space on all machines as it corresponds to a field in managed code */
gsize debugger_thread; // FIXME switch to bool as soon as CI testing with corlib version bump works
gpointer *static_data;
struct _MonoThreadInfo *thread_info;
/* This is modified using atomic ops, so keep it a gint32 */
gint32 __interruption_requested;
/* data that must live as long as this managed object is not finalized
* or as long as the underlying thread is attached, whichever is
* longer */
MonoLongLivedThreadData *longlived;
MonoBoolean threadpool_thread;
guint8 apartment_state;
gint32 managed_id;
guint32 small_id;
MonoThreadManageCallback manage_callback;
gsize flags;
gpointer thread_pinning_ref;
gint32 priority;
GPtrArray *owned_mutexes;
MonoOSEvent *suspended;
gint32 self_suspended; // TRUE | FALSE
gsize thread_state;
/* Points to self, set when starting up/attaching */
struct _MonoInternalThread *internal_thread;
MonoException *pending_exception;
/* This is used only to check that we are in sync between the representation
* of MonoInternalThread in native and InternalThread in managed
*
* DO NOT RENAME! DO NOT ADD FIELDS AFTER! */
gpointer last;
};
typedef struct {
guint32 state;
MonoObject *additional;
} MonoStreamingContext;
typedef struct {
MonoObject object;
guint32 intType;
} MonoInterfaceTypeAttribute;
typedef struct {
MonoObject object;
guint32 intType;
} MonoClassInterfaceAttribute;
/* Safely access System.Delegate from native code */
TYPED_HANDLE_DECL (MonoDelegate);
typedef void (*InterpJitInfoFunc) (MonoJitInfo *ji, gpointer user_data);
/*
* Callbacks supplied by the runtime and called by the modules in metadata/
* This interface is easier to extend than adding a new function type +
* a new 'install' function for every callback.
*/
typedef struct {
gpointer (*create_ftnptr) (gpointer addr);
gpointer (*get_addr_from_ftnptr) (gpointer descr);
char* (*get_runtime_build_info) (void);
const char* (*get_runtime_build_version) (void);
gpointer (*get_vtable_trampoline) (MonoVTable *vtable, int slot_index);
gpointer (*get_imt_trampoline) (MonoVTable *vtable, int imt_slot_index);
gboolean (*imt_entry_inited) (MonoVTable *vtable, int imt_slot_index);
void (*set_cast_details) (MonoClass *from, MonoClass *to);
void (*debug_log) (int level, MonoString *category, MonoString *message);
gboolean (*debug_log_is_enabled) (void);
void (*init_delegate) (MonoDelegateHandle delegate, MonoObjectHandle target, gpointer addr, MonoMethod *method, MonoError *error);
MonoObject* (*runtime_invoke) (MonoMethod *method, void *obj, void **params, MonoObject **exc, MonoError *error);
void* (*compile_method) (MonoMethod *method, MonoError *error);
gpointer (*create_jit_trampoline) (MonoMethod *method, MonoError *error);
/* used to free a dynamic method */
void (*free_method) (MonoMethod *method);
gpointer (*create_delegate_trampoline) (MonoClass *klass);
GHashTable *(*get_weak_field_indexes) (MonoImage *image);
gboolean (*is_interpreter_enabled) (void);
void (*init_mem_manager)(MonoMemoryManager*);
void (*free_mem_manager)(MonoMemoryManager*);
void (*metadata_update_published) (MonoAssemblyLoadContext *alc, uint32_t generation);
void (*get_jit_stats)(gint64 *methods_compiled, gint64 *cil_code_size_bytes, gint64 *native_code_size_bytes, gint64 *jit_time);
void (*get_exception_stats)(guint32 *exception_count);
// Same as compile_method, but returns a MonoFtnDesc in llvmonly mode
gpointer (*get_ftnptr)(MonoMethod *method, MonoError *error);
void (*interp_jit_info_foreach)(InterpJitInfoFunc func, gpointer user_data);
gboolean (*interp_sufficient_stack)(gsize size);
} MonoRuntimeCallbacks;
typedef gboolean (*MonoInternalStackWalk) (MonoStackFrameInfo *frame, MonoContext *ctx, gpointer data);
typedef gboolean (*MonoInternalExceptionFrameWalk) (MonoMethod *method, gpointer ip, size_t native_offset, gboolean managed, gpointer user_data);
typedef struct {
void (*mono_walk_stack_with_ctx) (MonoInternalStackWalk func, MonoContext *ctx, MonoUnwindOptions options, void *user_data);
void (*mono_walk_stack_with_state) (MonoInternalStackWalk func, MonoThreadUnwindState *state, MonoUnwindOptions options, void *user_data);
void (*mono_raise_exception) (MonoException *ex);
void (*mono_raise_exception_with_ctx) (MonoException *ex, MonoContext *ctx);
gboolean (*mono_exception_walk_trace) (MonoException *ex, MonoInternalExceptionFrameWalk func, gpointer user_data);
gboolean (*mono_install_handler_block_guard) (MonoThreadUnwindState *unwind_state);
void (*mono_uninstall_current_handler_block_guard) (void);
gboolean (*mono_current_thread_has_handle_block_guard) (void);
gboolean (*mono_above_abort_threshold) (void);
void (*mono_clear_abort_threshold) (void);
void (*mono_reraise_exception) (MonoException *ex);
} MonoRuntimeExceptionHandlingCallbacks;
MONO_COLD void mono_set_pending_exception (MonoException *exc);
void
mono_delegate_ctor (MonoObjectHandle this_obj, MonoObjectHandle target, gpointer addr, MonoMethod *method, MonoError *error);
MonoMethod *
mono_get_delegate_invoke_checked (MonoClass *klass, MonoError *error);
MonoMethod *
mono_get_delegate_begin_invoke_checked (MonoClass *klass, MonoError *error);
MonoMethod *
mono_get_delegate_end_invoke_checked (MonoClass *klass, MonoError *error);
void
mono_runtime_free_method (MonoMethod *method);
void
mono_install_callbacks (MonoRuntimeCallbacks *cbs);
MONO_COMPONENT_API
MonoRuntimeCallbacks*
mono_get_runtime_callbacks (void);
void
mono_install_eh_callbacks (MonoRuntimeExceptionHandlingCallbacks *cbs);
MONO_COMPONENT_API
MonoRuntimeExceptionHandlingCallbacks *
mono_get_eh_callbacks (void);
void
mono_raise_exception_deprecated (MonoException *ex);
void
mono_reraise_exception_deprecated (MonoException *ex);
void
mono_raise_exception_with_context (MonoException *ex, MonoContext *ctx);
void
mono_type_initialization_init (void);
int
mono_thread_kill (MonoInternalThread *thread, int signal);
MonoNativeTlsKey
mono_thread_get_tls_key (void);
gint32
mono_thread_get_tls_offset (void);
MonoNativeTlsKey
mono_domain_get_tls_key (void);
gint32
mono_domain_get_tls_offset (void);
/* Reflection and Reflection.Emit support */
/*
* Handling System.Type objects:
*
* Fields defined as System.Type in managed code should be defined as MonoObject*
* in unmanaged structures, and the monotype_cast () function should be used for
* casting them to MonoReflectionType* to avoid crashes/security issues when
* encountering instances of user defined subclasses of System.Type.
*/
#define IS_MONOTYPE(obj) (!(obj) || (m_class_get_image (mono_object_class ((obj))) == mono_defaults.corlib && ((MonoReflectionType*)(obj))->type != NULL))
#define IS_MONOTYPE_HANDLE(obj) IS_MONOTYPE (MONO_HANDLE_RAW (obj))
/* This should be used for accessing members of Type[] arrays */
#define mono_type_array_get(arr,index) monotype_cast (mono_array_get_internal ((arr), gpointer, (index)))
/*
* Cast an object to MonoReflectionType, making sure it is a System.MonoType or
* a subclass of it.
*/
static inline MonoReflectionType*
monotype_cast (MonoObject *obj)
{
g_assert (IS_MONOTYPE (obj));
return (MonoReflectionType*)obj;
}
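/* Illustrative sketch (not part of the original header): a native struct
 * mirroring a managed field declared as System.Type, accessed through
 * monotype_cast (). `MyMirror` and `my_get_target_type` are hypothetical;
 * Type[] elements would go through mono_type_array_get () instead. */
#if 0
typedef struct {
	MonoObject object;
	MonoObject *target_type; /* managed field declared as System.Type */
} MyMirror;

static MonoReflectionType*
my_get_target_type (MyMirror *obj)
{
	/* Asserts the value really is a System.MonoType (or a subclass). */
	return monotype_cast (obj->target_type);
}
#endif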
/*
* The following structure must match the C# implementation in our corlib.
*/
struct _MonoReflectionMethod {
MonoObject object;
MonoMethod *method;
MonoString *name;
MonoReflectionType *reftype;
};
/* Safely access System.Reflection.MonoMethod from native code */
TYPED_HANDLE_DECL (MonoReflectionMethod);
struct _MonoDelegate {
MonoObject object;
/* The compiled code of the target method */
gpointer method_ptr;
/* The invoke code */
gpointer invoke_impl;
MonoObject *target;
MonoMethod *method;
gpointer delegate_trampoline;
/* Extra argument passed to the target method in llvmonly mode */
gpointer extra_arg;
/*
* If non-NULL, this points to a memory location which stores the address of
* the compiled code of the method, or NULL if it is not yet compiled.
*/
guint8 **method_code;
gpointer interp_method;
/* Interp method that is executed when invoking the delegate */
gpointer interp_invoke_impl;
MonoReflectionMethod *method_info;
MonoReflectionMethod *original_method_info;
MonoObject *data;
MonoBoolean method_is_virtual;
};
typedef struct _MonoMulticastDelegate MonoMulticastDelegate;
struct _MonoMulticastDelegate {
MonoDelegate delegate;
MonoArray *delegates;
};
/* Safely access System.MulticastDelegate from native code */
TYPED_HANDLE_DECL (MonoMulticastDelegate);
struct _MonoReflectionField {
MonoObject object;
MonoClass *klass;
MonoClassField *field;
MonoString *name;
MonoReflectionType *type;
guint32 attrs;
};
/* Safely access System.Reflection.MonoField from native code */
TYPED_HANDLE_DECL (MonoReflectionField);
struct _MonoReflectionProperty {
MonoObject object;
MonoClass *klass;
MonoProperty *property;
};
/* Safely access System.Reflection.MonoProperty from native code */
TYPED_HANDLE_DECL (MonoReflectionProperty);
/*This is System.EventInfo*/
struct _MonoReflectionEvent {
MonoObject object;
};
/* Safely access System.Reflection.EventInfo from native code */
TYPED_HANDLE_DECL (MonoReflectionEvent);
typedef struct {
MonoReflectionEvent object;
MonoClass *klass;
MonoEvent *event;
} MonoReflectionMonoEvent;
/* Safely access System.Reflection.MonoEvent from native code */
TYPED_HANDLE_DECL (MonoReflectionMonoEvent);
typedef struct {
MonoObject object;
} MonoReflectionParameter;
/* Safely access System.Reflection.ParameterInfo from native code */
TYPED_HANDLE_DECL (MonoReflectionParameter);
struct _MonoReflectionMethodBody {
MonoObject object;
};
/* Safely access System.Reflection.MethodBody from native code */
TYPED_HANDLE_DECL (MonoReflectionMethodBody);
/* System.RuntimeAssembly */
struct _MonoReflectionAssembly {
MonoObject object;
MonoAssembly *assembly;
};
typedef struct {
MonoReflectionType *utype;
MonoArray *values;
MonoArray *names;
} MonoEnumInfo;
typedef struct {
MonoReflectionType *parent;
MonoReflectionType *ret;
guint32 attrs;
guint32 implattrs;
guint32 callconv;
} MonoMethodInfo;
typedef struct {
MonoReflectionType *parent;
MonoReflectionType *declaring_type;
MonoString *name;
MonoReflectionMethod *get;
MonoReflectionMethod *set;
guint32 attrs;
} MonoPropertyInfo;
typedef struct {
MonoReflectionType *declaring_type;
MonoReflectionType *reflected_type;
MonoString *name;
MonoReflectionMethod *add_method;
MonoReflectionMethod *remove_method;
MonoReflectionMethod *raise_method;
guint32 attrs;
MonoArray *other_methods;
} MonoEventInfo;
typedef struct {
MonoObject *member;
gint32 code_pos;
} MonoReflectionILTokenInfo;
typedef struct {
MonoObject object;
MonoArray *code;
gint32 code_len;
gint32 max_stack;
gint32 cur_stack;
MonoArray *locals;
MonoArray *ex_handlers;
gint32 num_token_fixups;
MonoArray *token_fixups;
} MonoReflectionILGen;
typedef struct {
MonoArray *handlers;
gint32 start;
gint32 len;
gint32 label;
} MonoILExceptionInfo;
typedef struct {
MonoObject *extype;
gint32 type;
gint32 start;
gint32 len;
gint32 filter_offset;
} MonoILExceptionBlock;
typedef struct {
MonoObject object;
MonoObject *catch_type;
gint32 filter_offset;
gint32 flags;
gint32 try_offset;
gint32 try_length;
gint32 handler_offset;
gint32 handler_length;
} MonoReflectionExceptionHandlingClause;
/* Safely access System.Reflection.ExceptionHandlingClause from native code */
TYPED_HANDLE_DECL (MonoReflectionExceptionHandlingClause);
typedef struct {
MonoObject object;
MonoReflectionType *local_type;
MonoBoolean is_pinned;
guint16 local_index;
} MonoReflectionLocalVariableInfo;
/* Safely access System.Reflection.LocalVariableInfo from native code */
TYPED_HANDLE_DECL (MonoReflectionLocalVariableInfo);
typedef struct {
/*
* Must have the same layout as MonoReflectionLocalVariableInfo, since
* LocalBuilder inherits from it under net 2.0.
*/
MonoObject object;
MonoObject *type;
MonoBoolean is_pinned;
guint16 local_index;
MonoString *name;
} MonoReflectionLocalBuilder;
typedef struct {
MonoObject object;
gint32 count;
gint32 type;
gint32 eltype;
MonoString *guid;
MonoString *mcookie;
MonoString *marshaltype;
MonoObject *marshaltyperef;
gint32 param_num;
MonoBoolean has_size;
} MonoReflectionMarshal;
typedef struct {
MonoObject object;
MonoObject* methodb;
MonoString *name;
MonoArray *cattrs;
MonoReflectionMarshal *marshal_info;
guint32 attrs;
int position;
guint32 table_idx;
MonoObject *def_value;
} MonoReflectionParamBuilder;
typedef struct {
MonoObject object;
MonoMethod *mhandle;
MonoReflectionILGen *ilgen;
MonoArray *parameters;
guint32 attrs;
guint32 iattrs;
guint32 table_idx;
guint32 call_conv;
MonoObject *type;
MonoArray *pinfo;
MonoArray *cattrs;
MonoBoolean init_locals;
MonoArray *param_modreq;
MonoArray *param_modopt;
} MonoReflectionCtorBuilder;
/* Safely access System.Reflection.Emit.ConstructorBuilder from native code */
TYPED_HANDLE_DECL (MonoReflectionCtorBuilder);
typedef struct {
MonoObject object;
MonoMethod *mhandle;
MonoObject *rtype;
MonoArray *parameters;
guint32 attrs;
guint32 iattrs;
MonoString *name;
guint32 table_idx;
MonoArray *code;
MonoReflectionILGen *ilgen;
MonoObject *type;
MonoArray *pinfo;
MonoArray *cattrs;
MonoArray *override_methods;
MonoString *dll;
MonoString *dllentry;
guint32 charset;
guint32 extra_flags;
guint32 native_cc;
guint32 call_conv;
MonoBoolean init_locals;
MonoGenericContainer *generic_container;
MonoArray *generic_params;
MonoArray *return_modreq;
MonoArray *return_modopt;
MonoArray *param_modreq;
MonoArray *param_modopt;
} MonoReflectionMethodBuilder;
/* Safely access System.Reflection.Emit.MethodBuilder from native code */
TYPED_HANDLE_DECL (MonoReflectionMethodBuilder);
typedef struct {
MonoObject object;
MonoMethod *mhandle;
MonoReflectionType *parent;
MonoReflectionType *ret;
MonoArray *parameters;
MonoString *name;
guint32 table_idx;
guint32 call_conv;
} MonoReflectionArrayMethod;
/* Safely access System.Reflection.Emit.MonoArrayMethod from native code */
TYPED_HANDLE_DECL (MonoReflectionArrayMethod);
typedef struct {
MonoReflectionAssembly assembly;
MonoDynamicAssembly *dynamic_assembly;
MonoArray *modules;
MonoString *name;
MonoArray *cattrs;
MonoString *version;
MonoString *culture;
MonoArray *public_key_token;
MonoArray *loaded_modules;
guint32 access;
} MonoReflectionAssemblyBuilder;
/* Safely access System.Reflection.Emit.AssemblyBuilder from native code */
TYPED_HANDLE_DECL (MonoReflectionAssemblyBuilder);
typedef struct {
MonoObject object;
guint32 attrs;
MonoObject *type;
MonoString *name;
MonoObject *def_value;
gint32 offset;
MonoReflectionType *typeb;
MonoArray *rva_data;
MonoArray *cattrs;
MonoReflectionMarshal *marshal_info;
MonoClassField *handle;
MonoArray *modreq;
MonoArray *modopt;
} MonoReflectionFieldBuilder;
/* Safely access System.Reflection.Emit.FieldBuilder from native code */
TYPED_HANDLE_DECL (MonoReflectionFieldBuilder);
typedef struct {
MonoObject object;
guint32 attrs;
MonoString *name;
MonoObject *type;
MonoArray *parameters;
MonoArray *cattrs;
MonoObject *def_value;
MonoReflectionMethodBuilder *set_method;
MonoReflectionMethodBuilder *get_method;
gint32 table_idx;
MonoObject *type_builder;
MonoArray *returnModReq;
MonoArray *returnModOpt;
MonoArray *paramModReq;
MonoArray *paramModOpt;
guint32 call_conv;
} MonoReflectionPropertyBuilder;
/* System.RuntimeModule */
struct _MonoReflectionModule {
MonoObject obj;
MonoImage *image;
MonoReflectionAssembly *assembly;
MonoString *fqname;
MonoString *name;
MonoString *scopename;
MonoBoolean is_resource;
guint32 token;
};
/* Safely access System.Reflection.Module from native code */
TYPED_HANDLE_DECL (MonoReflectionModule);
typedef struct {
MonoReflectionModule module;
MonoDynamicImage *dynamic_image;
gint32 num_types;
MonoArray *types;
MonoArray *cattrs;
guint32 table_idx;
MonoReflectionAssemblyBuilder *assemblyb;
gboolean is_main;
MonoArray *resources;
GHashTable *unparented_classes;
MonoArray *table_indexes;
} MonoReflectionModuleBuilder;
/* Safely access System.Reflection.Emit.ModuleBuilder from native code */
TYPED_HANDLE_DECL (MonoReflectionModuleBuilder);
typedef enum {
MonoTypeBuilderNew = 0,
MonoTypeBuilderEntered = 1,
MonoTypeBuilderFinished = 2
} MonoTypeBuilderState;
struct _MonoReflectionTypeBuilder {
MonoReflectionType type;
MonoString *name;
MonoString *nspace;
MonoObject *parent;
MonoReflectionType *nesting_type;
MonoArray *interfaces;
gint32 num_methods;
MonoArray *methods;
MonoArray *ctors;
MonoArray *properties;
gint32 num_fields;
MonoArray *fields;
MonoArray *events;
MonoArray *cattrs;
MonoArray *subtypes;
guint32 attrs;
guint32 table_idx;
MonoReflectionModuleBuilder *module;
gint32 class_size;
gint32 packing_size;
MonoGenericContainer *generic_container;
MonoArray *generic_params;
MonoReflectionType *created;
gint32 is_byreflike_set;
gint32 state;
};
typedef struct {
MonoReflectionType type;
MonoReflectionType *element_type;
gint32 rank;
} MonoReflectionArrayType;
/* Safely access System.Reflection.Emit.ArrayType (in DerivedTypes.cs) from native code */
TYPED_HANDLE_DECL (MonoReflectionArrayType);
typedef struct {
MonoReflectionType type;
MonoReflectionType *element_type;
} MonoReflectionDerivedType;
/* Safely access System.Reflection.Emit.SymbolType and subclasses (in DerivedTypes.cs) from native code */
TYPED_HANDLE_DECL (MonoReflectionDerivedType);
typedef struct {
MonoReflectionType type;
MonoReflectionTypeBuilder *tbuilder;
MonoReflectionMethodBuilder *mbuilder;
MonoString *name;
guint32 index;
MonoReflectionType *base_type;
MonoArray *iface_constraints;
MonoArray *cattrs;
guint32 attrs;
} MonoReflectionGenericParam;
/* Safely access System.Reflection.Emit.GenericTypeParameterBuilder from native code */
TYPED_HANDLE_DECL (MonoReflectionGenericParam);
typedef struct {
MonoReflectionType type;
MonoReflectionTypeBuilder *tb;
} MonoReflectionEnumBuilder;
/* Safely access System.Reflection.Emit.EnumBuilder from native code */
TYPED_HANDLE_DECL (MonoReflectionEnumBuilder);
typedef struct _MonoReflectionGenericClass MonoReflectionGenericClass;
struct _MonoReflectionGenericClass {
MonoReflectionType type;
MonoReflectionType *generic_type; /*Can be either a MonoType or a TypeBuilder*/
MonoArray *type_arguments;
};
/* Safely access System.Reflection.Emit.TypeBuilderInstantiation from native code */
TYPED_HANDLE_DECL (MonoReflectionGenericClass);
typedef struct {
MonoObject obj;
MonoString *name;
MonoReflectionType *type;
MonoReflectionTypeBuilder *typeb;
MonoArray *cattrs;
MonoReflectionMethodBuilder *add_method;
MonoReflectionMethodBuilder *remove_method;
MonoReflectionMethodBuilder *raise_method;
MonoArray *other_methods;
guint32 attrs;
guint32 table_idx;
} MonoReflectionEventBuilder;
typedef struct {
MonoObject obj;
MonoReflectionMethod *ctor;
MonoArray *data;
} MonoReflectionCustomAttr;
TYPED_HANDLE_DECL (MonoReflectionCustomAttr);
typedef struct {
MonoObject object;
guint32 utype;
gint32 safe_array_subtype;
MonoReflectionType *marshal_safe_array_user_defined_subtype;
gint32 IidParameterIndex;
guint32 array_subtype;
gint16 size_param_index;
gint32 size_const;
MonoString *marshal_type;
MonoReflectionType *marshal_type_ref;
MonoString *marshal_cookie;
} MonoReflectionMarshalAsAttribute;
/* Safely access System.Runtime.InteropServices.MarshalAsAttribute */
TYPED_HANDLE_DECL (MonoReflectionMarshalAsAttribute);
typedef struct {
MonoObject object;
gint32 call_conv;
gint32 charset;
MonoBoolean best_fit_mapping;
MonoBoolean throw_on_unmappable;
MonoBoolean set_last_error;
} MonoReflectionUnmanagedFunctionPointerAttribute;
typedef struct {
MonoObject object;
MonoString *guid;
} MonoReflectionGuidAttribute;
typedef struct {
MonoObject object;
MonoMethod *mhandle;
MonoString *name;
MonoReflectionType *rtype;
MonoArray *parameters;
guint32 attrs;
guint32 call_conv;
MonoReflectionModule *module;
MonoBoolean skip_visibility;
MonoBoolean init_locals;
MonoReflectionILGen *ilgen;
gint32 nrefs;
MonoArray *refs;
GSList *referenced_by;
MonoReflectionType *owner;
} MonoReflectionDynamicMethod;
/* Safely access System.Reflection.Emit.DynamicMethod from native code */
TYPED_HANDLE_DECL (MonoReflectionDynamicMethod);
typedef struct {
MonoObject object;
MonoReflectionModuleBuilder *module;
MonoArray *arguments;
guint32 type;
MonoReflectionType *return_type;
guint32 call_conv;
guint32 unmanaged_call_conv;
MonoArray *modreqs;
MonoArray *modopts;
} MonoReflectionSigHelper;
/* Safely access System.Reflection.Emit.SignatureHelper from native code */
TYPED_HANDLE_DECL (MonoReflectionSigHelper);
typedef struct {
MonoObject object;
MonoBoolean visible;
} MonoReflectionComVisibleAttribute;
typedef struct {
MonoObject object;
MonoReflectionType *type;
} MonoReflectionComDefaultInterfaceAttribute;
enum {
RESOURCE_LOCATION_EMBEDDED = 1,
RESOURCE_LOCATION_ANOTHER_ASSEMBLY = 2,
RESOURCE_LOCATION_IN_MANIFEST = 4
};
typedef struct {
MonoObject object;
MonoReflectionAssembly *assembly;
MonoString *filename;
guint32 location;
} MonoManifestResourceInfo;
/* Safely access System.Reflection.ManifestResourceInfo from native code */
TYPED_HANDLE_DECL (MonoManifestResourceInfo);
/* A boxed IntPtr */
typedef struct {
MonoObject object;
gpointer m_value;
} MonoIntPtr;
/* Keep in sync with System.GenericParameterAttributes */
typedef enum {
GENERIC_PARAMETER_ATTRIBUTE_NON_VARIANT = 0,
GENERIC_PARAMETER_ATTRIBUTE_COVARIANT = 1,
GENERIC_PARAMETER_ATTRIBUTE_CONTRAVARIANT = 2,
GENERIC_PARAMETER_ATTRIBUTE_VARIANCE_MASK = 3,
GENERIC_PARAMETER_ATTRIBUTE_NO_SPECIAL_CONSTRAINT = 0,
GENERIC_PARAMETER_ATTRIBUTE_REFERENCE_TYPE_CONSTRAINT = 4,
GENERIC_PARAMETER_ATTRIBUTE_VALUE_TYPE_CONSTRAINT = 8,
GENERIC_PARAMETER_ATTRIBUTE_CONSTRUCTOR_CONSTRAINT = 16,
GENERIC_PARAMETER_ATTRIBUTE_SPECIAL_CONSTRAINTS_MASK = 28
} GenericParameterAttributes;
typedef struct {
MonoType *type;
MonoClassField *field;
MonoProperty *prop;
} CattrNamedArg;
// Keep in sync with System.Runtime.Loader.AssemblyLoadContext.InternalState
typedef enum {
ALIVE = 0,
UNLOADING = 1
} MonoManagedAssemblyLoadContextInternalState;
/* All MonoInternalThread instances should be pinned, so it's safe to use the raw ptr. However
* for uniformity, icall wrapping will make handles anyway. So this is the method for getting the payload.
*/
static inline MonoInternalThread*
mono_internal_thread_handle_ptr (MonoInternalThreadHandle h)
{
/* The SUPPRESS here prevents a Centrinel warning due to merely seeing this
* function definition. Callees will still get a warning unless we
* attach a suppress attribute to the declaration.
*/
return MONO_HANDLE_SUPPRESS (MONO_HANDLE_RAW (h));
}
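/* Illustrative sketch (hypothetical helper, not part of this header): the
 * raw pointer can be used directly because MonoInternalThread instances
 * are pinned. */
#if 0
static guint64
my_thread_tid (MonoInternalThreadHandle h)
{
	MonoInternalThread *thread = mono_internal_thread_handle_ptr (h);
	return thread->tid;
}
#endif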
guint32 mono_image_insert_string (MonoReflectionModuleBuilderHandle module, MonoStringHandle str, MonoError *error);
guint32 mono_image_create_token (MonoDynamicImage *assembly, MonoObjectHandle obj, gboolean create_methodspec, gboolean register_token, MonoError *error);
void mono_dynamic_image_free (MonoDynamicImage *image);
void mono_dynamic_image_free_image (MonoDynamicImage *image);
void mono_dynamic_image_release_gc_roots (MonoDynamicImage *image);
void mono_reflection_setup_internal_class (MonoReflectionTypeBuilder *tb);
void mono_reflection_get_dynamic_overrides (MonoClass *klass, MonoMethod ***overrides, int *num_overrides, MonoError *error);
void mono_reflection_destroy_dynamic_method (MonoReflectionDynamicMethod *mb);
ICALL_EXPORT
void
ves_icall_SymbolType_create_unmanaged_type (MonoReflectionType *type);
void mono_reflection_register_with_runtime (MonoReflectionType *type);
MonoMethodSignature * mono_reflection_lookup_signature (MonoImage *image, MonoMethod *method, guint32 token, MonoError *error);
MonoArrayHandle mono_param_get_objects_internal (MonoMethod *method, MonoClass *refclass, MonoError *error);
MonoClass*
mono_class_bind_generic_parameters (MonoClass *klass, int type_argc, MonoType **types, gboolean is_dynamic);
MonoType*
mono_reflection_bind_generic_parameters (MonoReflectionTypeHandle type, int type_argc, MonoType **types, MonoError *error);
void
mono_reflection_generic_class_initialize (MonoReflectionGenericClass *type, MonoArray *fields);
ICALL_EXPORT
MonoReflectionEvent *
ves_icall_TypeBuilder_get_event_info (MonoReflectionTypeBuilder *tb, MonoReflectionEventBuilder *eb);
MonoReflectionMarshalAsAttributeHandle
mono_reflection_marshal_as_attribute_from_marshal_spec (MonoClass *klass, MonoMarshalSpec *spec, MonoError *error);
gpointer
mono_reflection_lookup_dynamic_token (MonoImage *image, guint32 token, gboolean valid_token, MonoClass **handle_class, MonoGenericContext *context, MonoError *error);
gboolean
mono_reflection_call_is_assignable_to (MonoClass *klass, MonoClass *oklass, MonoError *error);
gboolean
mono_get_constant_value_from_blob (MonoTypeEnum type, const char *blob, void *value, MonoStringHandleOut string_handle, MonoError *error);
gboolean
mono_metadata_read_constant_value (const char *blob, MonoTypeEnum type, void *value, MonoError *error);
char*
mono_string_from_blob (const char *str, MonoError *error);
void
mono_release_type_locks (MonoInternalThread *thread);
/**
* mono_string_handle_length:
* \param s \c MonoString
* \returns the length in characters of the string
*/
#ifdef ENABLE_CHECKED_BUILD_GC
int
mono_string_handle_length (MonoStringHandle s);
#else
#define mono_string_handle_length(s) (MONO_HANDLE_GETVAL ((s), length))
#endif
char *
mono_string_handle_to_utf8 (MonoStringHandle s, MonoError *error);
char *
mono_string_to_utf8_image (MonoImage *image, MonoStringHandle s, MonoError *error);
MonoArrayHandle
mono_array_clone_in_domain (MonoArrayHandle array, MonoError *error);
MonoArray*
mono_array_clone_checked (MonoArray *array, MonoError *error);
void
mono_array_full_copy (MonoArray *src, MonoArray *dest);
void
mono_array_full_copy_unchecked_size (MonoArray *src, MonoArray *dest, MonoClass *klass, uintptr_t size);
gboolean
mono_array_calc_byte_len (MonoClass *klass, uintptr_t len, uintptr_t *res);
MonoArray*
mono_array_new_checked (MonoClass *eclass, uintptr_t n, MonoError *error);
MONO_COMPONENT_API MonoArray*
mono_array_new_full_checked (MonoClass *array_class, uintptr_t *lengths, intptr_t *lower_bounds, MonoError *error);
MonoArray*
mono_array_new_jagged_checked (MonoClass *klass, int n, uintptr_t *lengths, MonoError *error);
ICALL_EXPORT
MonoArray*
ves_icall_array_new_specific (MonoVTable *vtable, uintptr_t n);
gpointer
mono_create_ftnptr (gpointer addr);
gpointer
mono_get_addr_from_ftnptr (gpointer descr);
MONO_COMPONENT_API void
mono_nullable_init (guint8 *buf, MonoObject *value, MonoClass *klass);
void
mono_nullable_init_from_handle (guint8 *buf, MonoObjectHandle value, MonoClass *klass);
void
mono_nullable_init_unboxed (guint8 *buf, gpointer value, MonoClass *klass);
MONO_COMPONENT_API MonoObject *
mono_value_box_checked (MonoClass *klass, void* val, MonoError *error);
MonoObjectHandle
mono_value_box_handle (MonoClass *klass, gpointer val, MonoError *error);
MONO_COMPONENT_API MonoObject*
mono_nullable_box (gpointer buf, MonoClass *klass, MonoError *error);
MonoObjectHandle
mono_nullable_box_handle (gpointer buf, MonoClass *klass, MonoError *error);
// A code size optimization (source and object) equivalent to MONO_HANDLE_NEW (MonoObject, NULL);
MonoObjectHandle
mono_new_null (void);
#ifdef MONO_SMALL_CONFIG
#define MONO_IMT_SIZE 9
#else
#define MONO_IMT_SIZE 19
#endif
typedef union {
int vtable_slot;
gpointer target_code;
} MonoImtItemValue;
typedef struct _MonoImtBuilderEntry {
gpointer key;
struct _MonoImtBuilderEntry *next;
MonoImtItemValue value;
int children;
guint8 has_target_code : 1;
} MonoImtBuilderEntry;
typedef struct _MonoIMTCheckItem MonoIMTCheckItem;
struct _MonoIMTCheckItem {
gpointer key;
int check_target_idx;
MonoImtItemValue value;
guint8 *jmp_code;
guint8 *code_target;
guint8 is_equals;
guint8 compare_done;
guint8 chunk_size;
guint8 short_branch;
guint8 has_target_code;
};
typedef gpointer (*MonoImtTrampolineBuilder) (MonoVTable *vtable, MonoIMTCheckItem **imt_entries, int count, gpointer fail_trunk);
void
mono_install_imt_trampoline_builder (MonoImtTrampolineBuilder func);
void
mono_set_always_build_imt_trampolines (gboolean value);
void
mono_vtable_build_imt_slot (MonoVTable* vtable, int imt_slot);
guint32
mono_method_get_imt_slot (MonoMethod *method);
void
mono_method_add_generic_virtual_invocation (MonoVTable *vtable,
gpointer *vtable_slot,
MonoMethod *method, gpointer code);
void
mono_unhandled_exception_checked (MonoObjectHandle exc, MonoError *error);
void
mono_first_chance_exception_checked (MonoObjectHandle exc, MonoError *error);
void
mono_first_chance_exception_internal (MonoObject *exc_raw);
MonoVTable *
mono_class_try_get_vtable (MonoClass *klass);
gboolean
mono_runtime_run_module_cctor (MonoImage *image, MonoError *error);
MONO_COMPONENT_API gboolean
mono_runtime_class_init_full (MonoVTable *vtable, MonoError *error);
void
mono_method_clear_object (MonoMethod *method);
gsize*
mono_class_compute_bitmap (MonoClass *klass, gsize *bitmap, int size, int offset, int *max_set, gboolean static_fields);
gboolean
mono_class_is_reflection_method_or_constructor (MonoClass *klass);
MonoObjectHandle
mono_get_object_from_blob (MonoType *type, const char *blob, MonoStringHandleOut string_handle, MonoError *error);
gboolean
mono_class_has_ref_info (MonoClass *klass);
MonoReflectionTypeBuilder*
mono_class_get_ref_info_raw (MonoClass *klass);
void
mono_class_set_ref_info (MonoClass *klass, MonoObjectHandle obj);
void
mono_class_free_ref_info (MonoClass *klass);
MONO_COMPONENT_API MonoObject *
mono_object_new_pinned (MonoClass *klass, MonoError *error);
MonoObjectHandle
mono_object_new_pinned_handle (MonoClass *klass, MonoError *error);
MonoObject *
mono_object_new_specific_checked (MonoVTable *vtable, MonoError *error);
ICALL_EXPORT
MonoObject *
ves_icall_object_new (MonoClass *klass);
ICALL_EXPORT
MonoObject *
ves_icall_object_new_specific (MonoVTable *vtable);
MonoObject *
mono_object_new_alloc_specific_checked (MonoVTable *vtable, MonoError *error);
void
mono_field_get_value_internal (MonoObject *obj, MonoClassField *field, void *value);
MONO_COMPONENT_API void
mono_field_static_get_value_checked (MonoVTable *vt, MonoClassField *field, void *value, MonoStringHandleOut string_handle, MonoError *error);
MONO_COMPONENT_API void
mono_field_static_get_value_for_thread (MonoInternalThread *thread, MonoVTable *vt, MonoClassField *field, void *value, MonoStringHandleOut string_handle, MonoError *error);
guint8*
mono_static_field_get_addr (MonoVTable *vt, MonoClassField *field);
MonoMethod*
mono_object_handle_get_virtual_method (MonoObjectHandle obj, MonoMethod *method, MonoError *error);
/* exported, used by the debugger */
MONO_API void *
mono_vtable_get_static_field_data (MonoVTable *vt);
MonoObject *
mono_field_get_value_object_checked (MonoClassField *field, MonoObject *obj, MonoError *error);
MonoObjectHandle
mono_static_field_get_value_handle (MonoClassField *field, MonoError *error);
MONO_COMPONENT_API gpointer
mono_special_static_field_get_offset (MonoClassField *field, MonoError *error);
gboolean
mono_property_set_value_handle (MonoProperty *prop, MonoObjectHandle obj, void **params, MonoError *error);
MonoObject*
mono_property_get_value_checked (MonoProperty *prop, void *obj, void **params, MonoError *error);
MonoString*
mono_object_try_to_string (MonoObject *obj, MonoObject **exc, MonoError *error);
char *
mono_string_to_utf8_ignore (MonoString *s);
gboolean
mono_monitor_is_il_fastpath_wrapper (MonoMethod *method);
MonoStringHandle
mono_string_is_interned_lookup (MonoStringHandle str, gboolean insert, MonoError *error);
/**
* mono_string_intern_checked:
* \param str String to intern
* \param error set on error.
* Interns the string passed.
* \returns The interned string. On failure returns NULL and sets \p error
*/
#define mono_string_intern_checked(str, error) (mono_string_is_interned_lookup ((str), TRUE, (error)))
/**
* mono_string_is_interned_internal:
 * \param str String to probe
 * \param error set on error.
 * \returns Whether the string has been interned.
*/
#define mono_string_is_interned_internal(str, error) (mono_string_is_interned_lookup ((str), FALSE, (error)))
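/* Illustrative sketch (hypothetical helper): probing the intern table
 * without inserting into it. */
#if 0
static gboolean
my_is_interned (MonoStringHandle str, MonoError *error)
{
	/* FALSE lookup: returns a NULL handle when the string is not interned. */
	MonoStringHandle res = mono_string_is_interned_internal (str, error);
	return is_ok (error) && !MONO_HANDLE_IS_NULL (res);
}
#endif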
char *
mono_exception_handle_get_native_backtrace (MonoExceptionHandle exc);
char *
mono_exception_get_managed_backtrace (MonoException *exc);
gboolean
mono_exception_try_get_managed_backtrace (MonoException *exc, const char *prefix, char **result);
void
mono_copy_value (MonoType *type, void *dest, void *value, int deref_pointer);
void
mono_error_raise_exception_deprecated (MonoError *target_error);
gboolean
mono_error_set_pending_exception_slow (MonoError *error);
static inline gboolean
mono_error_set_pending_exception (MonoError *error)
{
return is_ok (error) ? FALSE : mono_error_set_pending_exception_slow (error);
}
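/* Illustrative icall pattern (hypothetical function names): convert a failed
 * MonoError into a pending managed exception at the icall boundary. */
#if 0
ICALL_EXPORT MonoObject*
ves_icall_My_Frobnicate (MonoObject *obj)
{
	ERROR_DECL (error);
	MonoObject *result = my_frobnicate_checked (obj, error); /* hypothetical */
	if (mono_error_set_pending_exception (error))
		return NULL; /* the exception is thrown on return to managed code */
	return result;
}
#endif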
MonoArray *
mono_glist_to_array (GList *list, MonoClass *eclass, MonoError *error);
MONO_COMPONENT_API MonoObject *
mono_object_new_checked (MonoClass *klass, MonoError *error);
MonoObjectHandle
mono_object_new_handle (MonoClass *klass, MonoError *error);
// This function skips handling of remoting and COM.
// "alloc" means "less": the object is only allocated, with less initialization.
MonoObjectHandle
mono_object_new_alloc_by_vtable (MonoVTable *vtable, MonoError *error);
MonoObject*
mono_object_new_mature (MonoVTable *vtable, MonoError *error);
MonoObjectHandle
mono_object_new_handle_mature (MonoVTable *vtable, MonoError *error);
MonoObject *
mono_object_clone_checked (MonoObject *obj, MonoError *error);
MonoObjectHandle
mono_object_clone_handle (MonoObjectHandle obj, MonoError *error);
MONO_COMPONENT_API MonoObject *
mono_object_isinst_checked (MonoObject *obj, MonoClass *klass, MonoError *error);
MonoObjectHandle
mono_object_handle_isinst (MonoObjectHandle obj, MonoClass *klass, MonoError *error);
MonoObjectHandle
mono_object_handle_isinst_mbyref (MonoObjectHandle obj, MonoClass *klass, MonoError *error);
gboolean
mono_object_handle_isinst_mbyref_raw (MonoObjectHandle obj, MonoClass *klass, MonoError *error);
MonoStringHandle
mono_string_new_size_handle (gint32 len, MonoError *error);
MonoString*
mono_string_new_len_checked (const char *text, guint length, MonoError *error);
MonoString *
mono_string_new_size_checked (gint32 len, MonoError *error);
MONO_COMPONENT_API MonoString*
mono_ldstr_checked (MonoImage *image, uint32_t str_index, MonoError *error);
MonoStringHandle
mono_ldstr_handle (MonoImage *image, uint32_t str_index, MonoError *error);
MONO_PROFILER_API MonoString*
mono_string_new_checked (const char *text, MonoError *merror);
MonoString*
mono_string_new_wtf8_len_checked (const char *text, guint length, MonoError *error);
MonoString *
mono_string_new_utf16_checked (const gunichar2 *text, gint32 len, MonoError *error);
MonoStringHandle
mono_string_new_utf16_handle (const gunichar2 *text, gint32 len, MonoError *error);
MonoStringHandle
mono_string_new_utf8_len (const char *text, guint length, MonoError *error);
MonoString *
mono_string_from_utf16_checked (const mono_unichar2 *data, MonoError *error);
MonoString *
mono_string_from_utf32_checked (const mono_unichar4 *data, MonoError *error);
char*
mono_ldstr_utf8 (MonoImage *image, guint32 idx, MonoError *error);
MONO_COMPONENT_API
char*
mono_utf16_to_utf8 (const mono_unichar2 *s, gsize slength, MonoError *error);
char*
mono_utf16_to_utf8len (const mono_unichar2 *s, gsize slength, gsize *utf8_length, MonoError *error);
gboolean
mono_runtime_object_init_checked (MonoObject *this_obj, MonoError *error);
MONO_PROFILER_API MonoObject*
mono_runtime_try_invoke (MonoMethod *method, void *obj, void **params, MonoObject **exc, MonoError *error);
// The exc parameter is deliberately missing, and so far this has proven to reduce code duplication.
// In particular, if an exception is returned from an underlying call that otherwise succeeded,
// it is set into the MonoError with mono_error_set_exception_instance.
// The result is that the caller need only check the MonoError.
MONO_COMPONENT_API MonoObjectHandle
mono_runtime_try_invoke_handle (MonoMethod *method, MonoObjectHandle obj, void **params, MonoError* error);
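/* Illustrative sketch (hypothetical wrapper): a thrown exception is folded
 * into `error`, so the caller performs a single check. NULL params suits a
 * parameterless method. */
#if 0
static MonoObjectHandle
my_invoke (MonoMethod *method, MonoObjectHandle obj, MonoError *error)
{
	MonoObjectHandle res = mono_runtime_try_invoke_handle (method, obj, NULL, error);
	if (!is_ok (error))
		return mono_new_null (); /* error holds the thrown exception, if any */
	return res;
}
#endif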
MONO_COMPONENT_API MonoObject*
mono_runtime_invoke_checked (MonoMethod *method, void *obj, void **params, MonoError *error);
MonoObjectHandle
mono_runtime_invoke_handle (MonoMethod *method, MonoObjectHandle obj, void **params, MonoError* error);
void
mono_runtime_invoke_handle_void (MonoMethod *method, MonoObjectHandle obj, void **params, MonoError* error);
MonoObject*
mono_runtime_try_invoke_array (MonoMethod *method, void *obj, MonoArray *params,
MonoObject **exc, MonoError *error);
MonoObject*
mono_runtime_invoke_span_checked (MonoMethod *method, void *obj, MonoSpanOfObjects *params,
MonoError *error);
void*
mono_compile_method_checked (MonoMethod *method, MonoError *error);
MonoObject*
mono_runtime_delegate_try_invoke (MonoObject *delegate, void **params,
MonoObject **exc, MonoError *error);
MonoObject*
mono_runtime_delegate_invoke_checked (MonoObject *delegate, void **params,
MonoError *error);
MonoArrayHandle
mono_runtime_get_main_args_handle (MonoError *error);
int
mono_runtime_run_main_checked (MonoMethod *method, int argc, char* argv[],
MonoError *error);
int
mono_runtime_try_run_main (MonoMethod *method, int argc, char* argv[],
MonoObject **exc);
int
mono_runtime_exec_main_checked (MonoMethod *method, MonoArray *args, MonoError *error);
int
mono_runtime_try_exec_main (MonoMethod *method, MonoArray *args, MonoObject **exc);
MonoAssembly*
mono_try_assembly_resolve_handle (MonoAssemblyLoadContext *alc, MonoStringHandle fname, MonoAssembly *requesting, MonoError *error);
gboolean
mono_runtime_object_init_handle (MonoObjectHandle this_obj, MonoError *error);
/* GC write barriers support */
void
mono_gc_wbarrier_object_copy_handle (MonoObjectHandle obj, MonoObjectHandle src);
MonoMethod*
mono_class_get_virtual_method (MonoClass *klass, MonoMethod *method, MonoError *error);
MonoStringHandle
mono_string_empty_handle (void);
/*
* mono_object_get_data:
*
* Return a pointer to the beginning of data inside a MonoObject.
*/
static inline gpointer
mono_object_get_data (MonoObject *o)
{
return (guint8*)o + MONO_ABI_SIZEOF (MonoObject);
}
#define mono_handle_get_data_unsafe(handle) ((gpointer)((guint8*)MONO_HANDLE_RAW (handle) + MONO_ABI_SIZEOF (MonoObject)))
MONO_COMPONENT_API gpointer
mono_vtype_get_field_addr (gpointer vtype, MonoClassField *field);
#define MONO_OBJECT_SETREF_INTERNAL(obj,fieldname,value) do { \
mono_gc_wbarrier_set_field_internal ((MonoObject*)(obj), &((obj)->fieldname), (MonoObject*)value); \
/*(obj)->fieldname = (value);*/ \
} while (0)
/* This should be used if 's' can reside on the heap */
#define MONO_STRUCT_SETREF_INTERNAL(s,field,value) do { \
mono_gc_wbarrier_generic_store_internal (&((s)->field), (MonoObject*)(value)); \
} while (0)
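/* Illustrative sketch (hypothetical struct and helper): storing an object
 * reference into a heap object must go through the write barrier macros. */
#if 0
typedef struct {
	MonoObject object;
	MonoObject *payload;
} MyBox;

static void
my_store (MyBox *box, MonoObject *val)
{
	MONO_OBJECT_SETREF_INTERNAL (box, payload, val);
}
#endif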
static inline gunichar2*
mono_string_chars_internal (MonoString *s)
{
MONO_REQ_GC_UNSAFE_MODE;
return s->chars;
}
static inline int
mono_string_length_internal (MonoString *s)
{
MONO_REQ_GC_UNSAFE_MODE;
return s->length;
}
MonoString*
mono_string_empty_internal (MonoDomain *domain);
char *
mono_string_to_utf8len (MonoStringHandle s, gsize *utf8len, MonoError *error);
MONO_COMPONENT_API char*
mono_string_to_utf8_checked_internal (MonoString *string_obj, MonoError *error);
mono_bool
mono_string_equal_internal (MonoString *s1, MonoString *s2);
unsigned
mono_string_hash_internal (MonoString *s);
MONO_COMPONENT_API int
mono_object_hash_internal (MonoObject* obj);
ICALL_EXPORT
void
mono_value_copy_internal (void* dest, const void* src, MonoClass *klass);
void
mono_value_copy_array_internal (MonoArray *dest, int dest_idx, const void* src, int count);
MONO_PROFILER_API MonoVTable* mono_object_get_vtable_internal (MonoObject *obj);
MonoDomain*
mono_object_get_domain_internal (MonoObject *obj);
static inline gpointer
mono_object_unbox_internal (MonoObject *obj)
{
/* add assert for valuetypes? */
g_assert (m_class_is_valuetype (mono_object_class (obj)));
return mono_object_get_data (obj);
}
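/* Illustrative sketch (hypothetical helper): reading a boxed System.Int32;
 * `boxed` is assumed to really wrap a gint32. */
#if 0
static gint32
my_unbox_int32 (MonoObject *boxed)
{
	return *(gint32*) mono_object_unbox_internal (boxed);
}
#endif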
ICALL_EXPORT
void
mono_monitor_exit_internal (MonoObject *obj);
MONO_PROFILER_API unsigned mono_object_get_size_internal (MonoObject *o);
MONO_PROFILER_API MonoDomain* mono_vtable_domain_internal (MonoVTable *vtable);
MONO_PROFILER_API MonoClass* mono_vtable_class_internal (MonoVTable *vtable);
MONO_COMPONENT_API MonoMethod*
mono_object_get_virtual_method_internal (MonoObject *obj, MonoMethod *method);
MonoMethod*
mono_get_delegate_invoke_internal (MonoClass *klass);
MonoMethod*
mono_get_delegate_begin_invoke_internal (MonoClass *klass);
MonoMethod*
mono_get_delegate_end_invoke_internal (MonoClass *klass);
void
mono_unhandled_exception_internal (MonoObject *exc);
void
mono_print_unhandled_exception_internal (MonoObject *exc);
void
mono_raise_exception_internal (MonoException *ex);
void
mono_field_set_value_internal (MonoObject *obj, MonoClassField *field, void *value);
MONO_COMPONENT_API void
mono_field_static_set_value_internal (MonoVTable *vt, MonoClassField *field, void *value);
void
mono_field_get_value_internal (MonoObject *obj, MonoClassField *field, void *value);
MonoMethod* mono_get_context_capture_method (void);
guint8*
mono_runtime_get_aotid_arr (void);
/* GC handles support
*
* A handle can be created to refer to a managed object and either prevent it
* from being garbage collected or moved or to be able to know if it has been
* collected or not (weak references).
* mono_gchandle_new () is used to prevent an object from being garbage collected
* until mono_gchandle_free() is called. Use a TRUE value for the pinned argument to
 * prevent the object from being moved (this should be avoided as much as possible,
 * and used only for short periods of time, or performance will suffer).
* To create a weakref use mono_gchandle_new_weakref (): track_resurrection should
* usually be false (see the GC docs for more details).
* mono_gchandle_get_target () can be used to get the object referenced by both kinds
* of handle: for a weakref handle, if an object has been collected, it will return NULL.
*/
MonoGCHandle
mono_gchandle_new_internal (MonoObject *obj, mono_bool pinned);
MONO_COMPONENT_API MonoGCHandle
mono_gchandle_new_weakref_internal (MonoObject *obj, mono_bool track_resurrection);
MONO_COMPONENT_API
MonoObject*
mono_gchandle_get_target_internal (MonoGCHandle gchandle);
MONO_COMPONENT_API void mono_gchandle_free_internal (MonoGCHandle gchandle);
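/* Illustrative sketch (hypothetical helper) of the weakref handle lifecycle
 * described above. */
#if 0
static gboolean
my_is_alive (MonoObject *obj)
{
	MonoGCHandle gch = mono_gchandle_new_weakref_internal (obj, FALSE);
	MonoObject *target = mono_gchandle_get_target_internal (gch);
	gboolean alive = target != NULL; /* NULL once the object is collected */
	mono_gchandle_free_internal (gch);
	return alive;
}
#endif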
/* Reference queue support
*
* A reference queue is used to get notifications of when objects are collected.
* Call mono_gc_reference_queue_new to create a new queue and pass the callback that
* will be invoked when registered objects are collected.
* Call mono_gc_reference_queue_add to register a pair of objects and data within a queue.
* The callback will be triggered once an object is both unreachable and finalized.
*/
MonoReferenceQueue*
mono_gc_reference_queue_new_internal (mono_reference_queue_callback callback);
void
mono_gc_reference_queue_free_internal (MonoReferenceQueue *queue);
mono_bool
mono_gc_reference_queue_add_internal (MonoReferenceQueue *queue, MonoObject *obj, void *user_data);
#define mono_gc_reference_queue_add_handle(queue, obj, user_data) \
(mono_gc_reference_queue_add_internal ((queue), MONO_HANDLE_RAW (MONO_HANDLE_CAST (MonoObject, obj)), (user_data)))
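/* Illustrative sketch (hypothetical callback and helper) of the reference
 * queue pattern described above. */
#if 0
static void
my_cleanup (void *user_data)
{
	/* Invoked once the registered object is both unreachable and finalized. */
	g_free (user_data);
}

static void
my_track (MonoObject *obj)
{
	MonoReferenceQueue *q = mono_gc_reference_queue_new_internal (my_cleanup);
	mono_gc_reference_queue_add_internal (q, obj, g_strdup ("state"));
}
#endif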
/* GC write barriers support */
void
mono_gc_wbarrier_set_field_internal (MonoObject *obj, void* field_ptr, MonoObject* value);
void
mono_gc_wbarrier_set_arrayref_internal (MonoArray *arr, void* slot_ptr, MonoObject* value);
void
mono_gc_wbarrier_arrayref_copy_internal (void* dest_ptr, const void* src_ptr, int count);
MONO_COMPONENT_API void
mono_gc_wbarrier_generic_store_internal (void volatile* ptr, MonoObject* value);
void
mono_gc_wbarrier_generic_store_atomic_internal (void *ptr, MonoObject *value);
ICALL_EXPORT
void
mono_gc_wbarrier_generic_nostore_internal (void* ptr);
void
mono_gc_wbarrier_value_copy_internal (void* dest, const void* src, int count, MonoClass *klass);
void
mono_gc_wbarrier_object_copy_internal (MonoObject* obj, MonoObject *src);
MONO_COMPONENT_API char *
mono_runtime_get_managed_cmd_line (void);
char *
mono_runtime_get_cmd_line (int argc, char **argv);
#ifdef HOST_WASM
int
mono_string_instance_is_interned (MonoString *str);
#endif
#endif /* __MONO_OBJECT_INTERNALS_H__ */
| /**
* \file
*/
#ifndef __MONO_OBJECT_INTERNALS_H__
#define __MONO_OBJECT_INTERNALS_H__
#include <mono/utils/mono-forward-internal.h>
#include <mono/metadata/object-forward.h>
#include <mono/metadata/handle-decl.h>
#include <mono/metadata/object.h>
#include <mono/metadata/threads.h>
#include <mono/metadata/reflection.h>
#include <mono/metadata/mempool.h>
#include <mono/metadata/class-internals.h>
#include <mono/metadata/threads-types.h>
#include <mono/metadata/handle.h>
#include <mono/metadata/abi-details.h>
#include "mono/utils/mono-compiler.h"
#include "mono/utils/mono-error.h"
#include "mono/utils/mono-error-internals.h"
#include "mono/utils/mono-machine.h"
#include "mono/utils/mono-stack-unwinding.h"
#include "mono/utils/mono-tls.h"
#include "mono/utils/mono-coop-mutex.h"
#include <mono/metadata/icalls.h>
/* Use this as MONO_CHECK_ARG (arg,expr,) in functions returning void */
#define MONO_CHECK_ARG(arg, expr, retval) do { \
if (G_UNLIKELY (!(expr))) \
{ \
if (0) { (void)(arg); } /* check if the name exists */ \
ERROR_DECL (error); \
mono_error_set_argument_format (error, #arg, "assertion `%s' failed", #expr); \
mono_error_set_pending_exception (error); \
return retval; \
} \
} while (0)
#define MONO_CHECK_ARG_NULL_NAMED(arg, argname, retval) do { \
if (G_UNLIKELY (!(arg))) \
{ \
ERROR_DECL (error); \
mono_error_set_argument_null (error, (argname), ""); \
mono_error_set_pending_exception (error); \
return retval; \
} \
} while (0)
/* Use this as MONO_CHECK_ARG_NULL (arg,) in functions returning void */
#define MONO_CHECK_ARG_NULL(arg, retval) do { \
if (G_UNLIKELY (!(arg))) \
{ \
mono_error_set_argument_null (error, #arg, ""); \
return retval; \
} \
} while (0)
/* Use this as MONO_CHECK_ARG_NULL_HANDLE (arg,) in functions returning void */
#define MONO_CHECK_ARG_NULL_HANDLE(arg, retval) do { \
if (G_UNLIKELY (MONO_HANDLE_IS_NULL (arg))) \
{ \
mono_error_set_argument_null (error, #arg, ""); \
return retval; \
} \
} while (0)
#define MONO_CHECK_ARG_NULL_HANDLE_NAMED(arg, argname, retval) do { \
if (G_UNLIKELY (MONO_HANDLE_IS_NULL (arg))) \
{ \
mono_error_set_argument_null (error, (argname), ""); \
return retval; \
} \
} while (0)
/* Use this as MONO_CHECK_NULL (arg,) in functions returning void */
#define MONO_CHECK_NULL(arg, retval) do { \
if (G_UNLIKELY (!(arg))) \
{ \
ERROR_DECL (error); \
mono_error_set_null_reference (error); \
mono_error_set_pending_exception (error); \
return retval; \
} \
} while (0)
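/* Illustrative sketch (hypothetical icall): argument validation with the
 * macros above; the trailing argument is the value returned on failure. */
#if 0
ICALL_EXPORT gint32
ves_icall_My_Length (MonoString *str)
{
	/* Sets a pending ArgumentNullException and returns 0 when str is NULL. */
	MONO_CHECK_ARG_NULL_NAMED (str, "str", 0);
	return mono_string_length_internal (str);
}
#endif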
MONO_COMPONENT_API MonoClass *
mono_class_create_array (MonoClass *element_class, uint32_t rank);
MonoArrayHandle
mono_array_new_specific_handle (MonoVTable *vtable, uintptr_t n, MonoError *error);
MonoArray*
mono_array_new_specific_checked (MonoVTable *vtable, uintptr_t n, MonoError *error);
/*
* Macros which cache.
* These should be used instead of the original versions.
*/
static inline MonoClass*
mono_array_class_get_cached_function (MonoClass *eclass, MonoClass **aclass)
{
MonoClass *a = *aclass;
if (a)
return a;
a = mono_class_create_array (eclass, 1);
g_assert (a);
if (a)
*aclass = a;
return *aclass;
}
// eclass should be a run-time constant
// If you get an error using this macro, you need to manually instantiate the MonoClass *foo ## _array cache.
// See for example object_class_array.
#define mono_array_class_get_cached(eclass) (mono_array_class_get_cached_function ((eclass), &(eclass ## _array)))
static inline MonoArray*
mono_array_new_cached_function (MonoClass *aclass, int size, MonoError *error)
{
MonoVTable *vtable = mono_class_vtable_checked (aclass, error);
MonoArray *arr = NULL;
if (is_ok (error))
arr = mono_array_new_specific_checked (vtable, size, error);
return arr;
}
// eclass should be a run-time constant
// If you get an error using this macro, you need to manually instantiate the MonoClass *foo ## _array cache.
// See for example object_class_array.
#define mono_array_new_cached(eclass, size, error) \
mono_array_new_cached_function (mono_array_class_get_cached (eclass), (size), (error))
static inline MonoArrayHandle
mono_array_new_cached_handle_function (MonoClass *aclass, int size, MonoError *error)
{
MonoVTable *vtable = mono_class_vtable_checked (aclass, error);
MonoArrayHandle arr = NULL_HANDLE_ARRAY;
if (is_ok (error))
arr = mono_array_new_specific_handle (vtable, size, error);
return arr;
}
// eclass should be a run-time constant
// If you get an error using this macro, you need to manually instantiate the MonoClass *foo ## _array cache.
// See for example object_class_array.
#define mono_array_new_cached_handle(eclass, size, error) \
mono_array_new_cached_handle_function (mono_array_class_get_cached (eclass), (size), (error))
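/* Illustrative sketch: instantiating the `eclass ## _array` cache slot that
 * the caching macros expect, following the object_class_array example
 * mentioned above. The helper name is hypothetical. */
#if 0
static MonoClass *object_class_array; /* cache slot consumed by the macro */
#define object_class mono_defaults.object_class

static MonoArray*
my_new_object_array (int n, MonoError *error)
{
	return mono_array_new_cached (object_class, n, error);
}
#endif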
#ifdef MONO_BIG_ARRAYS
typedef uint64_t mono_array_size_t;
typedef int64_t mono_array_lower_bound_t;
#define MONO_ARRAY_MAX_INDEX G_MAXINT64
#define MONO_ARRAY_MAX_SIZE G_MAXUINT64
#else
typedef uint32_t mono_array_size_t;
typedef int32_t mono_array_lower_bound_t;
#define MONO_ARRAY_MAX_INDEX ((int32_t) 0x7fffffff)
#define MONO_ARRAY_MAX_SIZE ((uint32_t) 0xffffffff)
#endif
typedef struct {
mono_array_size_t length;
mono_array_lower_bound_t lower_bound;
} MonoArrayBounds;
struct _MonoArray {
MonoObject obj;
/* bounds is NULL for szarrays */
MonoArrayBounds *bounds;
/* total number of elements of the array */
mono_array_size_t max_length;
/* we use mono_64bitaligned_t to ensure proper alignment on platforms that need it */
mono_64bitaligned_t vector [MONO_ZERO_LEN_ARRAY];
};
/* match the layout of the managed definition of Span<T> */
#define MONO_DEFINE_SPAN_OF_T(name, type) \
typedef struct { \
type* _pointer; \
uint32_t _length; \
} name;
MONO_DEFINE_SPAN_OF_T (MonoSpanOfObjects, MonoObject*)
#define MONO_SIZEOF_MONO_ARRAY (MONO_STRUCT_OFFSET_CONSTANT (MonoArray, vector))
struct _MonoString {
MonoObject object;
int32_t length;
mono_unichar2 chars [MONO_ZERO_LEN_ARRAY];
};
#define MONO_SIZEOF_MONO_STRING (MONO_STRUCT_OFFSET (MonoString, chars))
#define mono_object_class(obj) (((MonoObject*)(obj))->vtable->klass)
#define mono_object_domain(obj) (((MonoObject*)(obj))->vtable->domain)
#define mono_string_chars_fast(s) ((mono_unichar2*)(s)->chars)
#define mono_string_length_fast(s) ((s)->length)
/**
* mono_array_length_internal:
* \param array a \c MonoArray*
* \returns the total number of elements in the array. This works for
* both vectors and multidimensional arrays.
*/
#define mono_array_length_internal(array) ((array)->max_length)
static inline
uintptr_t
mono_array_handle_length (MonoArrayHandle arr)
{
MONO_REQ_GC_UNSAFE_MODE;
return mono_array_length_internal (MONO_HANDLE_RAW (arr));
}
// Equivalent to mono_array_addr_with_size, except:
// 1. A macro instead of a function -- the types of size and index are open.
// 2. mono_array_addr_with_size could, but does not, do GC mode transitions.
#define mono_array_addr_with_size_fast(array,size,index) ( ((char*)(array)->vector) + (size) * (index) )
#define mono_array_addr_fast(array,type,index) ((type*)(void*) mono_array_addr_with_size_fast (array, sizeof (type), index))
#define mono_array_get_fast(array,type,index) ( *(type*)mono_array_addr_fast ((array), type, (index)) )
#define mono_array_set_fast(array,type,index,value) \
do { \
type *__p = (type *) mono_array_addr_fast ((array), type, (index)); \
*__p = (value); \
} while (0)
#define mono_array_setref_fast(array,index,value) \
do { \
void **__p = (void **) mono_array_addr_fast ((array), void*, (index)); \
mono_gc_wbarrier_set_arrayref_internal ((array), __p, (MonoObject*)(value)); \
/* *__p = (value);*/ \
} while (0)
#define mono_array_memcpy_refs_fast(dest,destidx,src,srcidx,count) \
do { \
void **__p = (void **) mono_array_addr_fast ((dest), void*, (destidx)); \
void **__s = mono_array_addr_fast ((src), void*, (srcidx)); \
mono_gc_wbarrier_arrayref_copy_internal (__p, __s, (count)); \
} while (0)
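/* Illustrative sketch (hypothetical helper): element access with the _fast
 * macros; the caller must already be in GC unsafe mode. */
#if 0
static void
my_bump_and_store (MonoArray *ints, MonoArray *objs, MonoObject *val)
{
	gint32 first = mono_array_get_fast (ints, gint32, 0);
	mono_array_set_fast (ints, gint32, 0, first + 1);
	/* reference elements must use the write-barrier variant */
	mono_array_setref_fast (objs, 0, val);
}
#endif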
// _internal is like _fast, but preserves the preexisting closed types of the parameters:
//   int size
//   uintptr_t idx
// This mimics the non-_internal (embedding API) versions without their GC mode transitions,
// or at least keeps the runtime off the embedding API, whether or not it has GC mode transitions.
static inline char*
mono_array_addr_with_size_internal (MonoArray *array, int size, uintptr_t idx)
{
return mono_array_addr_with_size_fast (array, size, idx);
}
#define mono_array_addr_internal(array,type,index) ((type*)(void*) mono_array_addr_with_size_internal (array, sizeof (type), index))
#define mono_array_get_internal(array,type,index) ( *(type*)mono_array_addr_internal ((array), type, (index)) )
#define mono_array_set_internal(array,type,index,value) \
do { \
type *__p = (type *) mono_array_addr_internal ((array), type, (index)); \
*__p = (value); \
} while (0)
#define mono_array_setref_internal(array,index,value) \
do { \
void **__p = (void **) mono_array_addr_internal ((array), void*, (index)); \
mono_gc_wbarrier_set_arrayref_internal ((array), __p, (MonoObject*)(value)); \
/* *__p = (value);*/ \
} while (0)
#define mono_array_memcpy_refs_internal(dest,destidx,src,srcidx,count) \
do { \
void **__p = (void **) mono_array_addr_internal ((dest), void*, (destidx)); \
void **__s = mono_array_addr_internal ((src), void*, (srcidx)); \
mono_gc_wbarrier_arrayref_copy_internal (__p, __s, (count)); \
} while (0)
static inline gboolean
mono_handle_array_has_bounds (MonoArrayHandle arr)
{
return MONO_HANDLE_GETVAL (arr, bounds) != NULL;
}
static inline void
mono_handle_array_get_bounds_dim (MonoArrayHandle arr, gint32 dim, MonoArrayBounds *bounds)
{
*bounds = MONO_HANDLE_GETVAL (arr, bounds [dim]);
}
#define mono_span_length(span) (span->_length)
#define mono_span_get(span,type,idx) (type)(!span->_pointer ? (type)0 : span->_pointer[idx])
#define mono_span_addr(span,type,idx) (type*)(span->_pointer + idx)
#define mono_span_setref(span,index,value) \
do { \
void **__p = (void **) mono_span_addr ((span), void*, (index)); \
mono_gc_wbarrier_generic_store_internal (__p, (MonoObject*)(value)); \
/* *__p = (value);*/ \
} while (0)
static inline MonoSpanOfObjects
mono_span_create_from_object_array (MonoArray *arr) {
MonoSpanOfObjects span;
if (arr != NULL) {
span._length = (int32_t)mono_array_length_internal (arr);
span._pointer = mono_array_addr_fast (arr, MonoObject*, 0);
} else {
span._length = 0;
span._pointer = NULL;
}
return span;
}
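/* Illustrative sketch (hypothetical helper): wrapping an object[] in a span
 * and reading it through the span macros, which take a span pointer. */
#if 0
static MonoObject*
my_first_or_null (MonoArray *arr)
{
	MonoSpanOfObjects span = mono_span_create_from_object_array (arr);
	MonoSpanOfObjects *sp = &span;
	if (mono_span_length (sp) == 0)
		return NULL;
	return mono_span_get (sp, MonoObject*, 0);
}
#endif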
typedef struct {
MonoObject obj;
} MonoMarshalByRefObject;
TYPED_HANDLE_DECL (MonoMarshalByRefObject);
/* This is a copy of System.AppDomain */
struct _MonoAppDomain {
MonoMarshalByRefObject mbr;
};
/* Safely access System.AppDomain from native code */
TYPED_HANDLE_DECL (MonoAppDomain);
typedef struct _MonoStringBuilder MonoStringBuilder;
TYPED_HANDLE_DECL (MonoStringBuilder);
struct _MonoStringBuilder {
MonoObject object;
MonoArray *chunkChars;
MonoStringBuilder* chunkPrevious; // Link to the block logically before this block
	int chunkLength; // The index in chunkChars that represents the end of the block
	int chunkOffset; // The logical offset (sum of all characters in previous blocks)
int maxCapacity;
};
static inline int
mono_string_builder_capacity (MonoStringBuilderHandle sbh)
{
MonoStringBuilder *sb = MONO_HANDLE_RAW (sbh);
return sb->chunkOffset + sb->chunkChars->max_length;
}
static inline int
mono_string_builder_string_length (MonoStringBuilderHandle sbh)
{
MonoStringBuilder *sb = MONO_HANDLE_RAW (sbh);
return sb->chunkOffset + sb->chunkLength;
}
typedef struct {
MonoType *type;
gpointer value;
MonoClass *klass;
} MonoTypedRef;
typedef struct {
gpointer args;
} MonoArgumentHandle;
typedef struct {
MonoMethodSignature *sig;
gpointer args;
gint32 next_arg;
gint32 num_args;
} MonoArgIterator;
struct _MonoException {
MonoObject object;
MonoString *class_name;
MonoString *message;
MonoObject *_data;
MonoObject *inner_ex;
MonoString *help_link;
/* Stores the IPs and the generic sharing infos
(vtable/MRGCTX) of the frames. */
MonoArray *trace_ips;
MonoString *stack_trace;
MonoString *remote_stack_trace;
gint32 remote_stack_index;
/* Dynamic methods referenced by the stack trace */
MonoArray *dynamic_methods;
gint32 hresult;
MonoString *source;
MonoObject *serialization_manager;
MonoObject *captured_traces;
MonoArray *native_trace_ips;
gint32 caught_in_unmanaged;
};
typedef struct {
MonoException base;
} MonoSystemException;
TYPED_HANDLE_DECL (MonoSystemException);
typedef struct {
MonoObject object;
MonoObject *async_state;
MonoObject *handle;
MonoObject *async_delegate;
gpointer *data;
MonoObject *object_data;
MonoBoolean sync_completed;
MonoBoolean completed;
MonoBoolean endinvoke_called;
MonoObject *async_callback;
MonoObject *execution_context;
MonoObject *original_context;
gint64 add_time;
} MonoAsyncResult;
TYPED_HANDLE_DECL (MonoAsyncResult);
typedef struct {
MonoMarshalByRefObject object;
gpointer handle;
} MonoWaitHandle;
TYPED_HANDLE_DECL (MonoWaitHandle);
/* System.Threading.StackCrawlMark */
/*
* This type is used to identify the method where execution has entered
* the BCL during stack walks. The outermost public method should
* define it like this:
* StackCrawlMark stackMark = StackCrawlMark.LookForMyCaller;
* and pass the stackMark as a byref argument down the call chain
* until it reaches an icall.
*/
typedef enum {
STACK_CRAWL_ME = 0,
STACK_CRAWL_CALLER = 1,
STACK_CRAWL_CALLERS_CALLER = 2,
STACK_CRAWL_THREAD = 3
} MonoStackCrawlMark;
/* MonoSafeHandle is in class-internals.h. */
/* Safely access System.Net.Sockets.SafeSocketHandle from native code */
TYPED_HANDLE_DECL (MonoSafeHandle);
/* This corresponds to System.Type */
struct _MonoReflectionType {
MonoObject object;
MonoType *type;
};
/* Safely access System.Type from native code */
TYPED_HANDLE_DECL (MonoReflectionType);
/* This corresponds to System.Runtime.CompilerServices.QCallTypeHandle */
struct _MonoQCallTypeHandle {
gpointer _ptr;
MonoType *type;
};
typedef struct _MonoQCallTypeHandle MonoQCallTypeHandle;
/* This corresponds to System.Runtime.CompilerServices.QCallAssembly */
struct _MonoQCallAssemblyHandle {
gpointer _ptr;
MonoAssembly *assembly;
};
typedef struct _MonoQCallAssemblyHandle MonoQCallAssemblyHandle;
typedef struct {
MonoObject object;
MonoReflectionType *class_to_proxy;
MonoObject *context;
MonoObject *unwrapped_server;
gint32 target_domain_id;
MonoString *target_uri;
MonoObject *object_identity;
MonoObject *obj_TP;
MonoObject *stub_data;
} MonoRealProxy;
/* Safely access System.Runtime.Remoting.Proxies.RealProxy from native code */
TYPED_HANDLE_DECL (MonoRealProxy);
typedef struct _MonoIUnknown MonoIUnknown;
typedef struct _MonoIUnknownVTable MonoIUnknownVTable;
/* STDCALL on Windows, CDECL everywhere else to work with XPCOM and MainWin COM */
#ifdef HOST_WIN32
#define STDCALL __stdcall
#else
#define STDCALL
#endif
struct _MonoIUnknownVTable
{
int (STDCALL *QueryInterface)(MonoIUnknown *pUnk, gconstpointer riid, gpointer* ppv);
int (STDCALL *AddRef)(MonoIUnknown *pUnk);
int (STDCALL *Release)(MonoIUnknown *pUnk);
};
struct _MonoIUnknown
{
const MonoIUnknownVTable *vtable;
};
typedef struct {
MonoMarshalByRefObject object;
MonoIUnknown *iunknown;
GHashTable* itf_hash;
MonoObject *synchronization_context;
} MonoComObject;
TYPED_HANDLE_DECL (MonoComObject);
typedef struct {
MonoRealProxy real_proxy;
MonoComObject *com_object;
gint32 ref_count;
} MonoComInteropProxy;
TYPED_HANDLE_DECL (MonoComInteropProxy);
typedef struct {
MonoObject object;
MonoRealProxy *rp;
MonoRemoteClass *remote_class;
MonoBoolean custom_type_info;
} MonoTransparentProxy;
/* Safely access System.Runtime.Remoting.Proxies.TransparentProxy from native code */
TYPED_HANDLE_DECL (MonoTransparentProxy);
typedef struct {
MonoObject obj;
MonoReflectionMethod *method;
MonoArray *args;
MonoArray *names;
MonoArray *arg_types;
MonoObject *ctx;
MonoObject *rval;
MonoObject *exc;
MonoAsyncResult *async_result;
guint32 call_type;
} MonoMethodMessage;
TYPED_HANDLE_DECL (MonoMethodMessage);
/* Keep in sync with the System.MonoAsyncCall */
typedef struct {
MonoObject object;
MonoMethodMessage *msg;
MonoMethod *cb_method;
MonoDelegate *cb_target;
MonoObject *state;
MonoObject *res;
MonoArray *out_args;
} MonoAsyncCall;
TYPED_HANDLE_DECL (MonoAsyncCall);
typedef struct {
MonoObject obj;
MonoArray *frames;
MonoArray *captured_traces;
MonoBoolean debug_info;
} MonoStackTrace;
TYPED_HANDLE_DECL (MonoStackTrace);
typedef struct {
MonoObject obj;
gint32 il_offset;
gint32 native_offset;
gint64 method_address;
gint32 method_index;
MonoReflectionMethod *method;
MonoString *filename;
gint32 line;
gint32 column;
MonoString *internal_method_name;
} MonoStackFrame;
TYPED_HANDLE_DECL (MonoStackFrame);
typedef enum {
MONO_THREAD_FLAG_DONT_MANAGE = 1, // Don't wait for or abort this thread
MONO_THREAD_FLAG_NAME_SET = 2, // Thread name set from managed code
MONO_THREAD_FLAG_CLEANUP_FROM_NATIVE = 4, // Thread initialized in native so clean up in native
} MonoThreadFlags;
struct _MonoThreadInfo;
typedef struct MonoThreadName {
char* volatile chars; // null check outside of lock
gint32 free; // bool
gint32 length;
} MonoThreadName;
void
mono_gstring_append_thread_name (GString*, MonoInternalThread*);
struct _MonoInternalThread {
MonoObject obj;
volatile int lock_thread_id; /* to be used as the pre-shifted thread id in thin locks */
MonoThreadHandle *handle;
gpointer native_handle;
MonoThreadName name;
guint32 state; /* must be accessed while longlived->synch_cs is locked */
MonoException *abort_exc;
MonoGCHandle abort_state_handle;
guint64 tid; /* This is accessed as a gsize in the code (so it can hold a 64bit pointer on systems that need it), but needs to reserve 64 bits of space on all machines as it corresponds to a field in managed code */
gsize debugger_thread; // FIXME switch to bool as soon as CI testing with corlib version bump works
gpointer *static_data;
struct _MonoThreadInfo *thread_info;
/* This is modified using atomic ops, so keep it a gint32 */
gint32 __interruption_requested;
/* data that must live as long as this managed object is not finalized
* or as long as the underlying thread is attached, whichever is
* longer */
MonoLongLivedThreadData *longlived;
MonoBoolean threadpool_thread;
guint8 apartment_state;
gint32 managed_id;
guint32 small_id;
MonoThreadManageCallback manage_callback;
gsize flags;
gpointer thread_pinning_ref;
gint32 priority;
GPtrArray *owned_mutexes;
MonoOSEvent *suspended;
gint32 self_suspended; // TRUE | FALSE
gsize thread_state;
/* Points to self, set when starting up/attaching */
struct _MonoInternalThread *internal_thread;
MonoException *pending_exception;
/* This is used only to check that we are in sync between the representation
* of MonoInternalThread in native and InternalThread in managed
*
* DO NOT RENAME! DO NOT ADD FIELDS AFTER! */
gpointer last;
};
typedef struct {
guint32 state;
MonoObject *additional;
} MonoStreamingContext;
typedef struct {
MonoObject object;
guint32 intType;
} MonoInterfaceTypeAttribute;
typedef struct {
MonoObject object;
guint32 intType;
} MonoClassInterfaceAttribute;
/* Safely access System.Delegate from native code */
TYPED_HANDLE_DECL (MonoDelegate);
typedef void (*InterpJitInfoFunc) (MonoJitInfo *ji, gpointer user_data);
/*
* Callbacks supplied by the runtime and called by the modules in metadata/
* This interface is easier to extend than adding a new function type +
* a new 'install' function for every callback.
*/
typedef struct {
gpointer (*create_ftnptr) (gpointer addr);
gpointer (*get_addr_from_ftnptr) (gpointer descr);
char* (*get_runtime_build_info) (void);
const char* (*get_runtime_build_version) (void);
gpointer (*get_vtable_trampoline) (MonoVTable *vtable, int slot_index);
gpointer (*get_imt_trampoline) (MonoVTable *vtable, int imt_slot_index);
gboolean (*imt_entry_inited) (MonoVTable *vtable, int imt_slot_index);
void (*set_cast_details) (MonoClass *from, MonoClass *to);
void (*debug_log) (int level, MonoString *category, MonoString *message);
gboolean (*debug_log_is_enabled) (void);
void (*init_delegate) (MonoDelegateHandle delegate, MonoObjectHandle target, gpointer addr, MonoMethod *method, MonoError *error);
MonoObject* (*runtime_invoke) (MonoMethod *method, void *obj, void **params, MonoObject **exc, MonoError *error);
void* (*compile_method) (MonoMethod *method, MonoError *error);
gpointer (*create_jit_trampoline) (MonoMethod *method, MonoError *error);
/* used to free a dynamic method */
void (*free_method) (MonoMethod *method);
gpointer (*create_delegate_trampoline) (MonoClass *klass);
GHashTable *(*get_weak_field_indexes) (MonoImage *image);
gboolean (*is_interpreter_enabled) (void);
void (*init_mem_manager)(MonoMemoryManager*);
void (*free_mem_manager)(MonoMemoryManager*);
void (*metadata_update_published) (MonoAssemblyLoadContext *alc, uint32_t generation);
void (*get_jit_stats)(gint64 *methods_compiled, gint64 *cil_code_size_bytes, gint64 *native_code_size_bytes, gint64 *jit_time);
void (*get_exception_stats)(guint32 *exception_count);
// Same as compile_method, but returns a MonoFtnDesc in llvmonly mode
gpointer (*get_ftnptr)(MonoMethod *method, MonoError *error);
void (*interp_jit_info_foreach)(InterpJitInfoFunc func, gpointer user_data);
gboolean (*interp_sufficient_stack)(gsize size);
} MonoRuntimeCallbacks;
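/*
 * Illustrative sketch (not part of this header): an embedder-side module fills
 * in the subset of callbacks it implements and installs them with
 * mono_install_callbacks (declared below). The struct field and installer are
 * real; "my_compile_method" is a hypothetical implementation.
 *
 *   static void* my_compile_method (MonoMethod *method, MonoError *error);
 *
 *   MonoRuntimeCallbacks cbs;
 *   memset (&cbs, 0, sizeof (cbs));
 *   cbs.compile_method = my_compile_method;
 *   mono_install_callbacks (&cbs);
 */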
typedef gboolean (*MonoInternalStackWalk) (MonoStackFrameInfo *frame, MonoContext *ctx, gpointer data);
typedef gboolean (*MonoInternalExceptionFrameWalk) (MonoMethod *method, gpointer ip, size_t native_offset, gboolean managed, gpointer user_data);
typedef struct {
void (*mono_walk_stack_with_ctx) (MonoInternalStackWalk func, MonoContext *ctx, MonoUnwindOptions options, void *user_data);
void (*mono_walk_stack_with_state) (MonoInternalStackWalk func, MonoThreadUnwindState *state, MonoUnwindOptions options, void *user_data);
void (*mono_raise_exception) (MonoException *ex);
void (*mono_raise_exception_with_ctx) (MonoException *ex, MonoContext *ctx);
gboolean (*mono_exception_walk_trace) (MonoException *ex, MonoInternalExceptionFrameWalk func, gpointer user_data);
gboolean (*mono_install_handler_block_guard) (MonoThreadUnwindState *unwind_state);
void (*mono_uninstall_current_handler_block_guard) (void);
gboolean (*mono_current_thread_has_handle_block_guard) (void);
gboolean (*mono_above_abort_threshold) (void);
void (*mono_clear_abort_threshold) (void);
void (*mono_reraise_exception) (MonoException *ex);
} MonoRuntimeExceptionHandlingCallbacks;
MONO_COLD void mono_set_pending_exception (MonoException *exc);
void
mono_delegate_ctor (MonoObjectHandle this_obj, MonoObjectHandle target, gpointer addr, MonoMethod *method, MonoError *error);
MonoMethod *
mono_get_delegate_invoke_checked (MonoClass *klass, MonoError *error);
MonoMethod *
mono_get_delegate_begin_invoke_checked (MonoClass *klass, MonoError *error);
MonoMethod *
mono_get_delegate_end_invoke_checked (MonoClass *klass, MonoError *error);
void
mono_runtime_free_method (MonoMethod *method);
void
mono_install_callbacks (MonoRuntimeCallbacks *cbs);
MONO_COMPONENT_API
MonoRuntimeCallbacks*
mono_get_runtime_callbacks (void);
void
mono_install_eh_callbacks (MonoRuntimeExceptionHandlingCallbacks *cbs);
MONO_COMPONENT_API
MonoRuntimeExceptionHandlingCallbacks *
mono_get_eh_callbacks (void);
void
mono_raise_exception_deprecated (MonoException *ex);
void
mono_reraise_exception_deprecated (MonoException *ex);
void
mono_raise_exception_with_context (MonoException *ex, MonoContext *ctx);
void
mono_type_initialization_init (void);
int
mono_thread_kill (MonoInternalThread *thread, int signal);
MonoNativeTlsKey
mono_thread_get_tls_key (void);
gint32
mono_thread_get_tls_offset (void);
MonoNativeTlsKey
mono_domain_get_tls_key (void);
gint32
mono_domain_get_tls_offset (void);
/* Reflection and Reflection.Emit support */
/*
* Handling System.Type objects:
*
* Fields defined as System.Type in managed code should be defined as MonoObject*
* in unmanaged structures, and the monotype_cast () function should be used for
* casting them to MonoReflectionType* to avoid crashes/security issues when
* encountering instances of user defined subclasses of System.Type.
*/
#define IS_MONOTYPE(obj) (!(obj) || (m_class_get_image (mono_object_class ((obj))) == mono_defaults.corlib && ((MonoReflectionType*)(obj))->type != NULL))
#define IS_MONOTYPE_HANDLE(obj) IS_MONOTYPE (MONO_HANDLE_RAW (obj))
/* This should be used for accessing members of Type[] arrays */
#define mono_type_array_get(arr,index) monotype_cast (mono_array_get_internal ((arr), gpointer, (index)))
/*
* Cast an object to MonoReflectionType, making sure it is a System.MonoType or
* a subclass of it.
*/
static inline MonoReflectionType*
monotype_cast (MonoObject *obj)
{
g_assert (IS_MONOTYPE (obj));
return (MonoReflectionType*)obj;
}
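/*
 * Usage sketch (assumes a hypothetical "type_array" MonoArray* holding
 * System.Type elements and an index "i"): mono_type_array_get applies
 * monotype_cast for you, so the result is safe to treat as MonoReflectionType.
 *
 *   MonoReflectionType *rt = mono_type_array_get (type_array, i);
 *   MonoType *t = rt->type;
 */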
/*
* The following structure must match the C# implementation in our corlib.
*/
struct _MonoReflectionMethod {
MonoObject object;
MonoMethod *method;
MonoString *name;
MonoReflectionType *reftype;
};
/* Safely access System.Reflection.MonoMethod from native code */
TYPED_HANDLE_DECL (MonoReflectionMethod);
struct _MonoDelegate {
MonoObject object;
/* The compiled code of the target method */
gpointer method_ptr;
/* The invoke code */
gpointer invoke_impl;
MonoObject *target;
MonoMethod *method;
gpointer delegate_trampoline;
/* Extra argument passed to the target method in llvmonly mode */
gpointer extra_arg;
/*
* If non-NULL, this points to a memory location which stores the address of
* the compiled code of the method, or NULL if it is not yet compiled.
*/
guint8 **method_code;
gpointer interp_method;
/* Interp method that is executed when invoking the delegate */
gpointer interp_invoke_impl;
MonoReflectionMethod *method_info;
MonoReflectionMethod *original_method_info;
MonoObject *data;
MonoBoolean method_is_virtual;
};
typedef struct _MonoMulticastDelegate MonoMulticastDelegate;
struct _MonoMulticastDelegate {
MonoDelegate delegate;
MonoArray *delegates;
};
/* Safely access System.MulticastDelegate from native code */
TYPED_HANDLE_DECL (MonoMulticastDelegate);
struct _MonoReflectionField {
MonoObject object;
MonoClass *klass;
MonoClassField *field;
MonoString *name;
MonoReflectionType *type;
guint32 attrs;
};
/* Safely access System.Reflection.MonoField from native code */
TYPED_HANDLE_DECL (MonoReflectionField);
struct _MonoReflectionProperty {
MonoObject object;
MonoClass *klass;
MonoProperty *property;
};
/* Safely access System.Reflection.MonoProperty from native code */
TYPED_HANDLE_DECL (MonoReflectionProperty);
/*This is System.EventInfo*/
struct _MonoReflectionEvent {
MonoObject object;
};
/* Safely access System.Reflection.EventInfo from native code */
TYPED_HANDLE_DECL (MonoReflectionEvent);
typedef struct {
MonoReflectionEvent object;
MonoClass *klass;
MonoEvent *event;
} MonoReflectionMonoEvent;
/* Safely access System.Reflection.MonoEvent from native code */
TYPED_HANDLE_DECL (MonoReflectionMonoEvent);
typedef struct {
MonoObject object;
} MonoReflectionParameter;
/* Safely access System.Reflection.ParameterInfo from native code */
TYPED_HANDLE_DECL (MonoReflectionParameter);
struct _MonoReflectionMethodBody {
MonoObject object;
};
/* Safely access System.Reflection.MethodBody from native code */
TYPED_HANDLE_DECL (MonoReflectionMethodBody);
/* System.RuntimeAssembly */
struct _MonoReflectionAssembly {
MonoObject object;
MonoAssembly *assembly;
};
typedef struct {
MonoReflectionType *utype;
MonoArray *values;
MonoArray *names;
} MonoEnumInfo;
typedef struct {
MonoReflectionType *parent;
MonoReflectionType *ret;
guint32 attrs;
guint32 implattrs;
guint32 callconv;
} MonoMethodInfo;
typedef struct {
MonoReflectionType *parent;
MonoReflectionType *declaring_type;
MonoString *name;
MonoReflectionMethod *get;
MonoReflectionMethod *set;
guint32 attrs;
} MonoPropertyInfo;
typedef struct {
MonoReflectionType *declaring_type;
MonoReflectionType *reflected_type;
MonoString *name;
MonoReflectionMethod *add_method;
MonoReflectionMethod *remove_method;
MonoReflectionMethod *raise_method;
guint32 attrs;
MonoArray *other_methods;
} MonoEventInfo;
typedef struct {
MonoObject *member;
gint32 code_pos;
} MonoReflectionILTokenInfo;
typedef struct {
MonoObject object;
MonoArray *code;
gint32 code_len;
gint32 max_stack;
gint32 cur_stack;
MonoArray *locals;
MonoArray *ex_handlers;
gint32 num_token_fixups;
MonoArray *token_fixups;
} MonoReflectionILGen;
typedef struct {
MonoArray *handlers;
gint32 start;
gint32 len;
gint32 label;
} MonoILExceptionInfo;
typedef struct {
MonoObject *extype;
gint32 type;
gint32 start;
gint32 len;
gint32 filter_offset;
} MonoILExceptionBlock;
typedef struct {
MonoObject object;
MonoObject *catch_type;
gint32 filter_offset;
gint32 flags;
gint32 try_offset;
gint32 try_length;
gint32 handler_offset;
gint32 handler_length;
} MonoReflectionExceptionHandlingClause;
/* Safely access System.Reflection.ExceptionHandlingClause from native code */
TYPED_HANDLE_DECL (MonoReflectionExceptionHandlingClause);
typedef struct {
MonoObject object;
MonoReflectionType *local_type;
MonoBoolean is_pinned;
guint16 local_index;
} MonoReflectionLocalVariableInfo;
/* Safely access System.Reflection.LocalVariableInfo from native code */
TYPED_HANDLE_DECL (MonoReflectionLocalVariableInfo);
typedef struct {
/*
* Must have the same layout as MonoReflectionLocalVariableInfo, since
 * LocalBuilder inherits from it under .NET 2.0.
*/
MonoObject object;
MonoObject *type;
MonoBoolean is_pinned;
guint16 local_index;
MonoString *name;
} MonoReflectionLocalBuilder;
typedef struct {
MonoObject object;
gint32 count;
gint32 type;
gint32 eltype;
MonoString *guid;
MonoString *mcookie;
MonoString *marshaltype;
MonoObject *marshaltyperef;
gint32 param_num;
MonoBoolean has_size;
} MonoReflectionMarshal;
typedef struct {
MonoObject object;
MonoObject* methodb;
MonoString *name;
MonoArray *cattrs;
MonoReflectionMarshal *marshal_info;
guint32 attrs;
int position;
guint32 table_idx;
MonoObject *def_value;
} MonoReflectionParamBuilder;
typedef struct {
MonoObject object;
MonoMethod *mhandle;
MonoReflectionILGen *ilgen;
MonoArray *parameters;
guint32 attrs;
guint32 iattrs;
guint32 table_idx;
guint32 call_conv;
MonoObject *type;
MonoArray *pinfo;
MonoArray *cattrs;
MonoBoolean init_locals;
MonoArray *param_modreq;
MonoArray *param_modopt;
} MonoReflectionCtorBuilder;
/* Safely access System.Reflection.Emit.ConstructorBuilder from native code */
TYPED_HANDLE_DECL (MonoReflectionCtorBuilder);
typedef struct {
MonoObject object;
MonoMethod *mhandle;
MonoObject *rtype;
MonoArray *parameters;
guint32 attrs;
guint32 iattrs;
MonoString *name;
guint32 table_idx;
MonoArray *code;
MonoReflectionILGen *ilgen;
MonoObject *type;
MonoArray *pinfo;
MonoArray *cattrs;
MonoArray *override_methods;
MonoString *dll;
MonoString *dllentry;
guint32 charset;
guint32 extra_flags;
guint32 native_cc;
guint32 call_conv;
MonoBoolean init_locals;
MonoGenericContainer *generic_container;
MonoArray *generic_params;
MonoArray *return_modreq;
MonoArray *return_modopt;
MonoArray *param_modreq;
MonoArray *param_modopt;
} MonoReflectionMethodBuilder;
/* Safely access System.Reflection.Emit.MethodBuilder from native code */
TYPED_HANDLE_DECL (MonoReflectionMethodBuilder);
typedef struct {
MonoObject object;
MonoMethod *mhandle;
MonoReflectionType *parent;
MonoReflectionType *ret;
MonoArray *parameters;
MonoString *name;
guint32 table_idx;
guint32 call_conv;
} MonoReflectionArrayMethod;
/* Safely access System.Reflection.Emit.MonoArrayMethod from native code */
TYPED_HANDLE_DECL (MonoReflectionArrayMethod);
typedef struct {
MonoReflectionAssembly assembly;
MonoDynamicAssembly *dynamic_assembly;
MonoArray *modules;
MonoString *name;
MonoArray *cattrs;
MonoString *version;
MonoString *culture;
MonoArray *public_key_token;
MonoArray *loaded_modules;
guint32 access;
} MonoReflectionAssemblyBuilder;
/* Safely access System.Reflection.Emit.AssemblyBuilder from native code */
TYPED_HANDLE_DECL (MonoReflectionAssemblyBuilder);
typedef struct {
MonoObject object;
guint32 attrs;
MonoObject *type;
MonoString *name;
MonoObject *def_value;
gint32 offset;
MonoReflectionType *typeb;
MonoArray *rva_data;
MonoArray *cattrs;
MonoReflectionMarshal *marshal_info;
MonoClassField *handle;
MonoArray *modreq;
MonoArray *modopt;
} MonoReflectionFieldBuilder;
/* Safely access System.Reflection.Emit.FieldBuilder from native code */
TYPED_HANDLE_DECL (MonoReflectionFieldBuilder);
typedef struct {
MonoObject object;
guint32 attrs;
MonoString *name;
MonoObject *type;
MonoArray *parameters;
MonoArray *cattrs;
MonoObject *def_value;
MonoReflectionMethodBuilder *set_method;
MonoReflectionMethodBuilder *get_method;
gint32 table_idx;
MonoObject *type_builder;
MonoArray *returnModReq;
MonoArray *returnModOpt;
MonoArray *paramModReq;
MonoArray *paramModOpt;
guint32 call_conv;
} MonoReflectionPropertyBuilder;
/* System.RuntimeModule */
struct _MonoReflectionModule {
MonoObject obj;
MonoImage *image;
MonoReflectionAssembly *assembly;
MonoString *fqname;
MonoString *name;
MonoString *scopename;
MonoBoolean is_resource;
guint32 token;
};
/* Safely access System.Reflection.Module from native code */
TYPED_HANDLE_DECL (MonoReflectionModule);
typedef struct {
MonoReflectionModule module;
MonoDynamicImage *dynamic_image;
gint32 num_types;
MonoArray *types;
MonoArray *cattrs;
guint32 table_idx;
MonoReflectionAssemblyBuilder *assemblyb;
gboolean is_main;
MonoArray *resources;
GHashTable *unparented_classes;
MonoArray *table_indexes;
} MonoReflectionModuleBuilder;
/* Safely access System.Reflection.Emit.ModuleBuilder from native code */
TYPED_HANDLE_DECL (MonoReflectionModuleBuilder);
typedef enum {
MonoTypeBuilderNew = 0,
MonoTypeBuilderEntered = 1,
MonoTypeBuilderFinished = 2
} MonoTypeBuilderState;
struct _MonoReflectionTypeBuilder {
MonoReflectionType type;
MonoString *name;
MonoString *nspace;
MonoObject *parent;
MonoReflectionType *nesting_type;
MonoArray *interfaces;
gint32 num_methods;
MonoArray *methods;
MonoArray *ctors;
MonoArray *properties;
gint32 num_fields;
MonoArray *fields;
MonoArray *events;
MonoArray *cattrs;
MonoArray *subtypes;
guint32 attrs;
guint32 table_idx;
MonoReflectionModuleBuilder *module;
gint32 class_size;
gint32 packing_size;
MonoGenericContainer *generic_container;
MonoArray *generic_params;
MonoReflectionType *created;
gint32 is_byreflike_set;
gint32 state;
};
typedef struct {
MonoReflectionType type;
MonoReflectionType *element_type;
gint32 rank;
} MonoReflectionArrayType;
/* Safely access System.Reflection.Emit.ArrayType (in DerivedTypes.cs) from native code */
TYPED_HANDLE_DECL (MonoReflectionArrayType);
typedef struct {
MonoReflectionType type;
MonoReflectionType *element_type;
} MonoReflectionDerivedType;
/* Safely access System.Reflection.Emit.SymbolType and subclasses (in DerivedTypes.cs) from native code */
TYPED_HANDLE_DECL (MonoReflectionDerivedType);
typedef struct {
MonoReflectionType type;
MonoReflectionTypeBuilder *tbuilder;
MonoReflectionMethodBuilder *mbuilder;
MonoString *name;
guint32 index;
MonoReflectionType *base_type;
MonoArray *iface_constraints;
MonoArray *cattrs;
guint32 attrs;
} MonoReflectionGenericParam;
/* Safely access System.Reflection.Emit.GenericTypeParameterBuilder from native code */
TYPED_HANDLE_DECL (MonoReflectionGenericParam);
typedef struct {
MonoReflectionType type;
MonoReflectionTypeBuilder *tb;
} MonoReflectionEnumBuilder;
/* Safely access System.Reflection.Emit.EnumBuilder from native code */
TYPED_HANDLE_DECL (MonoReflectionEnumBuilder);
typedef struct _MonoReflectionGenericClass MonoReflectionGenericClass;
struct _MonoReflectionGenericClass {
MonoReflectionType type;
MonoReflectionType *generic_type; /*Can be either a MonoType or a TypeBuilder*/
MonoArray *type_arguments;
};
/* Safely access System.Reflection.Emit.TypeBuilderInstantiation from native code */
TYPED_HANDLE_DECL (MonoReflectionGenericClass);
typedef struct {
MonoObject obj;
MonoString *name;
MonoReflectionType *type;
MonoReflectionTypeBuilder *typeb;
MonoArray *cattrs;
MonoReflectionMethodBuilder *add_method;
MonoReflectionMethodBuilder *remove_method;
MonoReflectionMethodBuilder *raise_method;
MonoArray *other_methods;
guint32 attrs;
guint32 table_idx;
} MonoReflectionEventBuilder;
typedef struct {
MonoObject obj;
MonoReflectionMethod *ctor;
MonoArray *data;
} MonoReflectionCustomAttr;
TYPED_HANDLE_DECL (MonoReflectionCustomAttr);
typedef struct {
MonoObject object;
guint32 utype;
gint32 safe_array_subtype;
MonoReflectionType *marshal_safe_array_user_defined_subtype;
gint32 IidParameterIndex;
guint32 array_subtype;
gint16 size_param_index;
gint32 size_const;
MonoString *marshal_type;
MonoReflectionType *marshal_type_ref;
MonoString *marshal_cookie;
} MonoReflectionMarshalAsAttribute;
/* Safely access System.Runtime.InteropServices.MarshalAsAttribute */
TYPED_HANDLE_DECL (MonoReflectionMarshalAsAttribute);
typedef struct {
MonoObject object;
gint32 call_conv;
gint32 charset;
MonoBoolean best_fit_mapping;
MonoBoolean throw_on_unmappable;
MonoBoolean set_last_error;
} MonoReflectionUnmanagedFunctionPointerAttribute;
typedef struct {
MonoObject object;
MonoString *guid;
} MonoReflectionGuidAttribute;
typedef struct {
MonoObject object;
MonoMethod *mhandle;
MonoString *name;
MonoReflectionType *rtype;
MonoArray *parameters;
guint32 attrs;
guint32 call_conv;
MonoReflectionModule *module;
MonoBoolean skip_visibility;
MonoBoolean init_locals;
MonoReflectionILGen *ilgen;
gint32 nrefs;
MonoArray *refs;
GSList *referenced_by;
MonoReflectionType *owner;
} MonoReflectionDynamicMethod;
/* Safely access System.Reflection.Emit.DynamicMethod from native code */
TYPED_HANDLE_DECL (MonoReflectionDynamicMethod);
typedef struct {
MonoObject object;
MonoReflectionModuleBuilder *module;
MonoArray *arguments;
guint32 type;
MonoReflectionType *return_type;
guint32 call_conv;
guint32 unmanaged_call_conv;
MonoArray *modreqs;
MonoArray *modopts;
} MonoReflectionSigHelper;
/* Safely access System.Reflection.Emit.SignatureHelper from native code */
TYPED_HANDLE_DECL (MonoReflectionSigHelper);
typedef struct {
MonoObject object;
MonoBoolean visible;
} MonoReflectionComVisibleAttribute;
typedef struct {
MonoObject object;
MonoReflectionType *type;
} MonoReflectionComDefaultInterfaceAttribute;
enum {
RESOURCE_LOCATION_EMBEDDED = 1,
RESOURCE_LOCATION_ANOTHER_ASSEMBLY = 2,
RESOURCE_LOCATION_IN_MANIFEST = 4
};
typedef struct {
MonoObject object;
MonoReflectionAssembly *assembly;
MonoString *filename;
guint32 location;
} MonoManifestResourceInfo;
/* Safely access System.Reflection.ManifestResourceInfo from native code */
TYPED_HANDLE_DECL (MonoManifestResourceInfo);
/* A boxed IntPtr */
typedef struct {
MonoObject object;
gpointer m_value;
} MonoIntPtr;
/* Keep in sync with System.GenericParameterAttributes */
typedef enum {
GENERIC_PARAMETER_ATTRIBUTE_NON_VARIANT = 0,
GENERIC_PARAMETER_ATTRIBUTE_COVARIANT = 1,
GENERIC_PARAMETER_ATTRIBUTE_CONTRAVARIANT = 2,
GENERIC_PARAMETER_ATTRIBUTE_VARIANCE_MASK = 3,
GENERIC_PARAMETER_ATTRIBUTE_NO_SPECIAL_CONSTRAINT = 0,
GENERIC_PARAMETER_ATTRIBUTE_REFERENCE_TYPE_CONSTRAINT = 4,
GENERIC_PARAMETER_ATTRIBUTE_VALUE_TYPE_CONSTRAINT = 8,
GENERIC_PARAMETER_ATTRIBUTE_CONSTRUCTOR_CONSTRAINT = 16,
GENERIC_PARAMETER_ATTRIBUTE_SPECIAL_CONSTRAINTS_MASK = 28
} GenericParameterAttributes;
typedef struct {
MonoType *type;
MonoClassField *field;
MonoProperty *prop;
} CattrNamedArg;
// Keep in sync with System.Runtime.Loader.AssemblyLoadContext.InternalState
typedef enum {
ALIVE = 0,
UNLOADING = 1
} MonoManagedAssemblyLoadContextInternalState;
/* All MonoInternalThread instances should be pinned, so it's safe to use the raw ptr. However
 * for uniformity, icall wrapping will create handles anyway, so this is the helper for getting the payload.
*/
static inline MonoInternalThread*
mono_internal_thread_handle_ptr (MonoInternalThreadHandle h)
{
/* The SUPPRESS here prevents a Centrinel warning due to merely seeing this
 * function definition. Callers will still get a warning unless we
* attach a suppress attribute to the declaration.
*/
return MONO_HANDLE_SUPPRESS (MONO_HANDLE_RAW (h));
}
guint32 mono_image_insert_string (MonoReflectionModuleBuilderHandle module, MonoStringHandle str, MonoError *error);
guint32 mono_image_create_token (MonoDynamicImage *assembly, MonoObjectHandle obj, gboolean create_methodspec, gboolean register_token, MonoError *error);
void mono_dynamic_image_free (MonoDynamicImage *image);
void mono_dynamic_image_free_image (MonoDynamicImage *image);
void mono_dynamic_image_release_gc_roots (MonoDynamicImage *image);
void mono_reflection_setup_internal_class (MonoReflectionTypeBuilder *tb);
void mono_reflection_get_dynamic_overrides (MonoClass *klass, MonoMethod ***overrides, int *num_overrides, MonoError *error);
void mono_reflection_destroy_dynamic_method (MonoReflectionDynamicMethod *mb);
ICALL_EXPORT
void
ves_icall_SymbolType_create_unmanaged_type (MonoReflectionType *type);
void mono_reflection_register_with_runtime (MonoReflectionType *type);
MonoMethodSignature * mono_reflection_lookup_signature (MonoImage *image, MonoMethod *method, guint32 token, MonoError *error);
MonoArrayHandle mono_param_get_objects_internal (MonoMethod *method, MonoClass *refclass, MonoError *error);
MonoClass*
mono_class_bind_generic_parameters (MonoClass *klass, int type_argc, MonoType **types, gboolean is_dynamic);
MonoType*
mono_reflection_bind_generic_parameters (MonoReflectionTypeHandle type, int type_argc, MonoType **types, MonoError *error);
void
mono_reflection_generic_class_initialize (MonoReflectionGenericClass *type, MonoArray *fields);
ICALL_EXPORT
MonoReflectionEvent *
ves_icall_TypeBuilder_get_event_info (MonoReflectionTypeBuilder *tb, MonoReflectionEventBuilder *eb);
MonoReflectionMarshalAsAttributeHandle
mono_reflection_marshal_as_attribute_from_marshal_spec (MonoClass *klass, MonoMarshalSpec *spec, MonoError *error);
gpointer
mono_reflection_lookup_dynamic_token (MonoImage *image, guint32 token, gboolean valid_token, MonoClass **handle_class, MonoGenericContext *context, MonoError *error);
gboolean
mono_reflection_call_is_assignable_to (MonoClass *klass, MonoClass *oklass, MonoError *error);
gboolean
mono_get_constant_value_from_blob (MonoTypeEnum type, const char *blob, void *value, MonoStringHandleOut string_handle, MonoError *error);
gboolean
mono_metadata_read_constant_value (const char *blob, MonoTypeEnum type, void *value, MonoError *error);
char*
mono_string_from_blob (const char *str, MonoError *error);
void
mono_release_type_locks (MonoInternalThread *thread);
/**
* mono_string_handle_length:
* \param s \c MonoString
* \returns the length in characters of the string
*/
#ifdef ENABLE_CHECKED_BUILD_GC
int
mono_string_handle_length (MonoStringHandle s);
#else
#define mono_string_handle_length(s) (MONO_HANDLE_GETVAL ((s), length))
#endif
char *
mono_string_handle_to_utf8 (MonoStringHandle s, MonoError *error);
char *
mono_string_to_utf8_image (MonoImage *image, MonoStringHandle s, MonoError *error);
MonoArrayHandle
mono_array_clone_in_domain (MonoArrayHandle array, MonoError *error);
MonoArray*
mono_array_clone_checked (MonoArray *array, MonoError *error);
void
mono_array_full_copy (MonoArray *src, MonoArray *dest);
void
mono_array_full_copy_unchecked_size (MonoArray *src, MonoArray *dest, MonoClass *klass, uintptr_t size);
gboolean
mono_array_calc_byte_len (MonoClass *klass, uintptr_t len, uintptr_t *res);
MonoArray*
mono_array_new_checked (MonoClass *eclass, uintptr_t n, MonoError *error);
MONO_COMPONENT_API MonoArray*
mono_array_new_full_checked (MonoClass *array_class, uintptr_t *lengths, intptr_t *lower_bounds, MonoError *error);
MonoArray*
mono_array_new_jagged_checked (MonoClass *klass, int n, uintptr_t *lengths, MonoError *error);
ICALL_EXPORT
MonoArray*
ves_icall_array_new_specific (MonoVTable *vtable, uintptr_t n);
gpointer
mono_create_ftnptr (gpointer addr);
gpointer
mono_get_addr_from_ftnptr (gpointer descr);
MONO_COMPONENT_API void
mono_nullable_init (guint8 *buf, MonoObject *value, MonoClass *klass);
void
mono_nullable_init_from_handle (guint8 *buf, MonoObjectHandle value, MonoClass *klass);
void
mono_nullable_init_unboxed (guint8 *buf, gpointer value, MonoClass *klass);
MONO_COMPONENT_API MonoObject *
mono_value_box_checked (MonoClass *klass, void* val, MonoError *error);
MonoObjectHandle
mono_value_box_handle (MonoClass *klass, gpointer val, MonoError *error);
MONO_COMPONENT_API MonoObject*
mono_nullable_box (gpointer buf, MonoClass *klass, MonoError *error);
MonoObjectHandle
mono_nullable_box_handle (gpointer buf, MonoClass *klass, MonoError *error);
// A code size optimization (source and object) equivalent to MONO_HANDLE_NEW (MonoObject, NULL);
MonoObjectHandle
mono_new_null (void);
#ifdef MONO_SMALL_CONFIG
#define MONO_IMT_SIZE 9
#else
#define MONO_IMT_SIZE 19
#endif
typedef union {
int vtable_slot;
gpointer target_code;
} MonoImtItemValue;
typedef struct _MonoImtBuilderEntry {
gpointer key;
struct _MonoImtBuilderEntry *next;
MonoImtItemValue value;
int children;
guint8 has_target_code : 1;
} MonoImtBuilderEntry;
typedef struct _MonoIMTCheckItem MonoIMTCheckItem;
struct _MonoIMTCheckItem {
gpointer key;
int check_target_idx;
MonoImtItemValue value;
guint8 *jmp_code;
guint8 *code_target;
guint8 is_equals;
guint8 compare_done;
guint8 chunk_size;
guint8 short_branch;
guint8 has_target_code;
};
typedef gpointer (*MonoImtTrampolineBuilder) (MonoVTable *vtable, MonoIMTCheckItem **imt_entries, int count, gpointer fail_tramp);
void
mono_install_imt_trampoline_builder (MonoImtTrampolineBuilder func);
void
mono_set_always_build_imt_trampolines (gboolean value);
void
mono_vtable_build_imt_slot (MonoVTable* vtable, int imt_slot);
guint32
mono_method_get_imt_slot (MonoMethod *method);
void
mono_method_add_generic_virtual_invocation (MonoVTable *vtable,
gpointer *vtable_slot,
MonoMethod *method, gpointer code);
void
mono_unhandled_exception_checked (MonoObjectHandle exc, MonoError *error);
void
mono_first_chance_exception_checked (MonoObjectHandle exc, MonoError *error);
void
mono_first_chance_exception_internal (MonoObject *exc_raw);
MonoVTable *
mono_class_try_get_vtable (MonoClass *klass);
gboolean
mono_runtime_run_module_cctor (MonoImage *image, MonoError *error);
MONO_COMPONENT_API gboolean
mono_runtime_class_init_full (MonoVTable *vtable, MonoError *error);
void
mono_method_clear_object (MonoMethod *method);
gsize*
mono_class_compute_bitmap (MonoClass *klass, gsize *bitmap, int size, int offset, int *max_set, gboolean static_fields);
gboolean
mono_class_is_reflection_method_or_constructor (MonoClass *klass);
MonoObjectHandle
mono_get_object_from_blob (MonoType *type, const char *blob, MonoStringHandleOut string_handle, MonoError *error);
gboolean
mono_class_has_ref_info (MonoClass *klass);
MonoReflectionTypeBuilder*
mono_class_get_ref_info_raw (MonoClass *klass);
void
mono_class_set_ref_info (MonoClass *klass, MonoObjectHandle obj);
void
mono_class_free_ref_info (MonoClass *klass);
MONO_COMPONENT_API MonoObject *
mono_object_new_pinned (MonoClass *klass, MonoError *error);
MonoObjectHandle
mono_object_new_pinned_handle (MonoClass *klass, MonoError *error);
MonoObject *
mono_object_new_specific_checked (MonoVTable *vtable, MonoError *error);
ICALL_EXPORT
MonoObject *
ves_icall_object_new (MonoClass *klass);
ICALL_EXPORT
MonoObject *
ves_icall_object_new_specific (MonoVTable *vtable);
MonoObject *
mono_object_new_alloc_specific_checked (MonoVTable *vtable, MonoError *error);
void
mono_field_get_value_internal (MonoObject *obj, MonoClassField *field, void *value);
MONO_COMPONENT_API void
mono_field_static_get_value_checked (MonoVTable *vt, MonoClassField *field, void *value, MonoStringHandleOut string_handle, MonoError *error);
MONO_COMPONENT_API void
mono_field_static_get_value_for_thread (MonoInternalThread *thread, MonoVTable *vt, MonoClassField *field, void *value, MonoStringHandleOut string_handle, MonoError *error);
guint8*
mono_static_field_get_addr (MonoVTable *vt, MonoClassField *field);
MonoMethod*
mono_object_handle_get_virtual_method (MonoObjectHandle obj, MonoMethod *method, MonoError *error);
/* exported, used by the debugger */
MONO_API void *
mono_vtable_get_static_field_data (MonoVTable *vt);
MonoObject *
mono_field_get_value_object_checked (MonoClassField *field, MonoObject *obj, MonoError *error);
MonoObjectHandle
mono_static_field_get_value_handle (MonoClassField *field, MonoError *error);
MONO_COMPONENT_API gpointer
mono_special_static_field_get_offset (MonoClassField *field, MonoError *error);
gboolean
mono_property_set_value_handle (MonoProperty *prop, MonoObjectHandle obj, void **params, MonoError *error);
MonoObject*
mono_property_get_value_checked (MonoProperty *prop, void *obj, void **params, MonoError *error);
MonoString*
mono_object_try_to_string (MonoObject *obj, MonoObject **exc, MonoError *error);
char *
mono_string_to_utf8_ignore (MonoString *s);
gboolean
mono_monitor_is_il_fastpath_wrapper (MonoMethod *method);
MonoStringHandle
mono_string_is_interned_lookup (MonoStringHandle str, gboolean insert, MonoError *error);
/**
* mono_string_intern_checked:
* \param str String to intern
* \param error set on error.
* Interns the string passed.
* \returns The interned string. On failure returns NULL and sets \p error
*/
#define mono_string_intern_checked(str, error) (mono_string_is_interned_lookup ((str), TRUE, (error)))
/**
* mono_string_is_interned_internal:
 * \param str String to probe
* \returns Whether the string has been interned.
*/
#define mono_string_is_interned_internal(str, error) (mono_string_is_interned_lookup ((str), FALSE, (error)))
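/*
 * Usage sketch ("str" and "error" are hypothetical locals): interning returns
 * the canonical handle for the string, or NULL on failure with "error" set.
 *
 *   MonoStringHandle interned = mono_string_intern_checked (str, error);
 */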
char *
mono_exception_handle_get_native_backtrace (MonoExceptionHandle exc);
char *
mono_exception_get_managed_backtrace (MonoException *exc);
gboolean
mono_exception_try_get_managed_backtrace (MonoException *exc, const char *prefix, char **result);
void
mono_copy_value (MonoType *type, void *dest, void *value, int deref_pointer);
void
mono_error_raise_exception_deprecated (MonoError *target_error);
gboolean
mono_error_set_pending_exception_slow (MonoError *error);
static inline gboolean
mono_error_set_pending_exception (MonoError *error)
{
return is_ok (error) ? FALSE : mono_error_set_pending_exception_slow (error);
}
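/*
 * Typical usage sketch (an assumed pattern, not a mandated one): an icall
 * turns a failed MonoError into a pending managed exception and bails out.
 *
 *   MonoObject *res = mono_runtime_invoke_checked (method, obj, params, error);
 *   if (mono_error_set_pending_exception (error))
 *       return NULL;
 *   return res;
 */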
MonoArray *
mono_glist_to_array (GList *list, MonoClass *eclass, MonoError *error);
MONO_COMPONENT_API MonoObject *
mono_object_new_checked (MonoClass *klass, MonoError *error);
MonoObjectHandle
mono_object_new_handle (MonoClass *klass, MonoError *error);
// This function skips handling of remoting and COM.
// "alloc" means "less".
MonoObjectHandle
mono_object_new_alloc_by_vtable (MonoVTable *vtable, MonoError *error);
MonoObject*
mono_object_new_mature (MonoVTable *vtable, MonoError *error);
MonoObjectHandle
mono_object_new_handle_mature (MonoVTable *vtable, MonoError *error);
MonoObject *
mono_object_clone_checked (MonoObject *obj, MonoError *error);
MonoObjectHandle
mono_object_clone_handle (MonoObjectHandle obj, MonoError *error);
MONO_COMPONENT_API MonoObject *
mono_object_isinst_checked (MonoObject *obj, MonoClass *klass, MonoError *error);
MonoObjectHandle
mono_object_handle_isinst (MonoObjectHandle obj, MonoClass *klass, MonoError *error);
MonoObjectHandle
mono_object_handle_isinst_mbyref (MonoObjectHandle obj, MonoClass *klass, MonoError *error);
gboolean
mono_object_handle_isinst_mbyref_raw (MonoObjectHandle obj, MonoClass *klass, MonoError *error);
MonoStringHandle
mono_string_new_size_handle (gint32 len, MonoError *error);
MonoString*
mono_string_new_len_checked (const char *text, guint length, MonoError *error);
MonoString *
mono_string_new_size_checked (gint32 len, MonoError *error);
MONO_COMPONENT_API MonoString*
mono_ldstr_checked (MonoImage *image, uint32_t str_index, MonoError *error);
MonoStringHandle
mono_ldstr_handle (MonoImage *image, uint32_t str_index, MonoError *error);
MONO_PROFILER_API MonoString*
mono_string_new_checked (const char *text, MonoError *merror);
MonoString*
mono_string_new_wtf8_len_checked (const char *text, guint length, MonoError *error);
MonoString *
mono_string_new_utf16_checked (const gunichar2 *text, gint32 len, MonoError *error);
MonoStringHandle
mono_string_new_utf16_handle (const gunichar2 *text, gint32 len, MonoError *error);
MonoStringHandle
mono_string_new_utf8_len (const char *text, guint length, MonoError *error);
MonoString *
mono_string_from_utf16_checked (const mono_unichar2 *data, MonoError *error);
MonoString *
mono_string_from_utf32_checked (const mono_unichar4 *data, MonoError *error);
char*
mono_ldstr_utf8 (MonoImage *image, guint32 idx, MonoError *error);
MONO_COMPONENT_API
char*
mono_utf16_to_utf8 (const mono_unichar2 *s, gsize slength, MonoError *error);
char*
mono_utf16_to_utf8len (const mono_unichar2 *s, gsize slength, gsize *utf8_length, MonoError *error);
gboolean
mono_runtime_object_init_checked (MonoObject *this_obj, MonoError *error);
MONO_PROFILER_API MonoObject*
mono_runtime_try_invoke (MonoMethod *method, void *obj, void **params, MonoObject **exc, MonoError *error);
// The exc parameter is deliberately missing and so far this has proven to reduce code duplication.
// In particular, if an exception is returned from an underlying call that otherwise
// succeeded, it is set into the MonoError with mono_error_set_exception_instance.
// The result is that the caller need only check the MonoError.
MONO_COMPONENT_API MonoObjectHandle
mono_runtime_try_invoke_handle (MonoMethod *method, MonoObjectHandle obj, void **params, MonoError* error);
MONO_COMPONENT_API MonoObject*
mono_runtime_invoke_checked (MonoMethod *method, void *obj, void **params, MonoError *error);
MonoObjectHandle
mono_runtime_invoke_handle (MonoMethod *method, MonoObjectHandle obj, void **params, MonoError* error);
void
mono_runtime_invoke_handle_void (MonoMethod *method, MonoObjectHandle obj, void **params, MonoError* error);
MonoObject*
mono_runtime_try_invoke_array (MonoMethod *method, void *obj, MonoArray *params,
MonoObject **exc, MonoError *error);
MonoObject*
mono_runtime_invoke_span_checked (MonoMethod *method, void *obj, MonoSpanOfObjects *params,
MonoError *error);
void*
mono_compile_method_checked (MonoMethod *method, MonoError *error);
MonoObject*
mono_runtime_delegate_try_invoke (MonoObject *delegate, void **params,
MonoObject **exc, MonoError *error);
MonoObject*
mono_runtime_delegate_invoke_checked (MonoObject *delegate, void **params,
MonoError *error);
MonoArrayHandle
mono_runtime_get_main_args_handle (MonoError *error);
int
mono_runtime_run_main_checked (MonoMethod *method, int argc, char* argv[],
MonoError *error);
int
mono_runtime_try_run_main (MonoMethod *method, int argc, char* argv[],
MonoObject **exc);
int
mono_runtime_exec_main_checked (MonoMethod *method, MonoArray *args, MonoError *error);
int
mono_runtime_try_exec_main (MonoMethod *method, MonoArray *args, MonoObject **exc);
MonoAssembly*
mono_try_assembly_resolve_handle (MonoAssemblyLoadContext *alc, MonoStringHandle fname, MonoAssembly *requesting, MonoError *error);
gboolean
mono_runtime_object_init_handle (MonoObjectHandle this_obj, MonoError *error);
/* GC write barriers support */
void
mono_gc_wbarrier_object_copy_handle (MonoObjectHandle obj, MonoObjectHandle src);
MonoMethod*
mono_class_get_virtual_method (MonoClass *klass, MonoMethod *method, MonoError *error);
MonoStringHandle
mono_string_empty_handle (void);
/*
* mono_object_get_data:
*
* Return a pointer to the beginning of data inside a MonoObject.
*/
static inline gpointer
mono_object_get_data (MonoObject *o)
{
return (guint8*)o + MONO_ABI_SIZEOF (MonoObject);
}
#define mono_handle_get_data_unsafe(handle) ((gpointer)((guint8*)MONO_HANDLE_RAW (handle) + MONO_ABI_SIZEOF (MonoObject)))
MONO_COMPONENT_API gpointer
mono_vtype_get_field_addr (gpointer vtype, MonoClassField *field);
#define MONO_OBJECT_SETREF_INTERNAL(obj,fieldname,value) do { \
mono_gc_wbarrier_set_field_internal ((MonoObject*)(obj), &((obj)->fieldname), (MonoObject*)value); \
/*(obj)->fieldname = (value);*/ \
} while (0)
/* This should be used if 's' can reside on the heap */
#define MONO_STRUCT_SETREF_INTERNAL(s,field,value) do { \
mono_gc_wbarrier_generic_store_internal (&((s)->field), (MonoObject*)(value)); \
} while (0)
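/*
 * Illustrative sketch: managed references stored into heap objects must go
 * through these barrier macros so the GC observes the store. "del" (a
 * MonoDelegate*) and "new_target" (a MonoObject*) are hypothetical locals.
 *
 *   MONO_OBJECT_SETREF_INTERNAL (del, target, new_target);
 */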
static inline gunichar2*
mono_string_chars_internal (MonoString *s)
{
MONO_REQ_GC_UNSAFE_MODE;
return s->chars;
}
static inline int
mono_string_length_internal (MonoString *s)
{
MONO_REQ_GC_UNSAFE_MODE;
return s->length;
}
MonoString*
mono_string_empty_internal (MonoDomain *domain);
char *
mono_string_to_utf8len (MonoStringHandle s, gsize *utf8len, MonoError *error);
MONO_COMPONENT_API char*
mono_string_to_utf8_checked_internal (MonoString *string_obj, MonoError *error);
mono_bool
mono_string_equal_internal (MonoString *s1, MonoString *s2);
unsigned
mono_string_hash_internal (MonoString *s);
MONO_COMPONENT_API int
mono_object_hash_internal (MonoObject* obj);
ICALL_EXPORT
void
mono_value_copy_internal (void* dest, const void* src, MonoClass *klass);
void
mono_value_copy_array_internal (MonoArray *dest, int dest_idx, const void* src, int count);
MONO_PROFILER_API MonoVTable* mono_object_get_vtable_internal (MonoObject *obj);
MonoDomain*
mono_object_get_domain_internal (MonoObject *obj);
static inline gpointer
mono_object_unbox_internal (MonoObject *obj)
{
/* add assert for valuetypes? */
g_assert (m_class_is_valuetype (mono_object_class (obj)));
return mono_object_get_data (obj);
}
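/*
 * Example sketch ("boxed" is a hypothetical MonoObject* wrapping a
 * System.Int32): unboxing yields a pointer to the value-type payload.
 *
 *   gint32 value = *(gint32 *) mono_object_unbox_internal (boxed);
 */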
ICALL_EXPORT
void
mono_monitor_exit_internal (MonoObject *obj);
MONO_PROFILER_API unsigned mono_object_get_size_internal (MonoObject *o);
MONO_PROFILER_API MonoDomain* mono_vtable_domain_internal (MonoVTable *vtable);
MONO_PROFILER_API MonoClass* mono_vtable_class_internal (MonoVTable *vtable);
MONO_COMPONENT_API MonoMethod*
mono_object_get_virtual_method_internal (MonoObject *obj, MonoMethod *method);
MonoMethod*
mono_get_delegate_invoke_internal (MonoClass *klass);
MonoMethod*
mono_get_delegate_begin_invoke_internal (MonoClass *klass);
MonoMethod*
mono_get_delegate_end_invoke_internal (MonoClass *klass);
void
mono_unhandled_exception_internal (MonoObject *exc);
void
mono_print_unhandled_exception_internal (MonoObject *exc);
void
mono_raise_exception_internal (MonoException *ex);
void
mono_field_set_value_internal (MonoObject *obj, MonoClassField *field, void *value);
MONO_COMPONENT_API void
mono_field_static_set_value_internal (MonoVTable *vt, MonoClassField *field, void *value);
void
mono_field_get_value_internal (MonoObject *obj, MonoClassField *field, void *value);
MonoMethod* mono_get_context_capture_method (void);
guint8*
mono_runtime_get_aotid_arr (void);
/* GC handles support
*
* A handle can be created to refer to a managed object and either prevent it
* from being garbage collected or moved or to be able to know if it has been
* collected or not (weak references).
* mono_gchandle_new () is used to prevent an object from being garbage collected
 * until mono_gchandle_free () is called. Use a TRUE value for the pinned argument to
 * also prevent the object from being moved (pinning should be avoided as much as
 * possible and used only for short periods of time, or performance will suffer).
* To create a weakref use mono_gchandle_new_weakref (): track_resurrection should
* usually be false (see the GC docs for more details).
* mono_gchandle_get_target () can be used to get the object referenced by both kinds
* of handle: for a weakref handle, if an object has been collected, it will return NULL.
*/
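/*
 * Illustrative sketch of the handle API declared below ("obj" is a hypothetical
 * MonoObject*): a strong handle keeps the object alive until freed, while a
 * weakref handle's target becomes NULL once the object has been collected.
 *
 *   MonoGCHandle strong = mono_gchandle_new_internal (obj, FALSE);
 *   MonoGCHandle weak = mono_gchandle_new_weakref_internal (obj, FALSE);
 *   MonoObject *target = mono_gchandle_get_target_internal (weak);
 *   mono_gchandle_free_internal (strong);
 *   mono_gchandle_free_internal (weak);
 */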
MonoGCHandle
mono_gchandle_new_internal (MonoObject *obj, mono_bool pinned);
MONO_COMPONENT_API MonoGCHandle
mono_gchandle_new_weakref_internal (MonoObject *obj, mono_bool track_resurrection);
MONO_COMPONENT_API
MonoObject*
mono_gchandle_get_target_internal (MonoGCHandle gchandle);
MONO_COMPONENT_API void mono_gchandle_free_internal (MonoGCHandle gchandle);
/* Reference queue support
*
* A reference queue is used to get notifications of when objects are collected.
* Call mono_gc_reference_queue_new to create a new queue and pass the callback that
* will be invoked when registered objects are collected.
* Call mono_gc_reference_queue_add to register a pair of objects and data within a queue.
* The callback will be triggered once an object is both unreachable and finalized.
*/
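/*
 * Usage sketch ("on_collected" is a hypothetical mono_reference_queue_callback,
 * invoked with the user_data of each registered object once it is both
 * unreachable and finalized):
 *
 *   MonoReferenceQueue *q = mono_gc_reference_queue_new_internal (on_collected);
 *   mono_gc_reference_queue_add_internal (q, obj, user_data);
 *   ...
 *   mono_gc_reference_queue_free_internal (q);
 */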
MonoReferenceQueue*
mono_gc_reference_queue_new_internal (mono_reference_queue_callback callback);
void
mono_gc_reference_queue_free_internal (MonoReferenceQueue *queue);
mono_bool
mono_gc_reference_queue_add_internal (MonoReferenceQueue *queue, MonoObject *obj, void *user_data);
#define mono_gc_reference_queue_add_handle(queue, obj, user_data) \
(mono_gc_reference_queue_add_internal ((queue), MONO_HANDLE_RAW (MONO_HANDLE_CAST (MonoObject, obj)), (user_data)))
/* GC write barriers support */
void
mono_gc_wbarrier_set_field_internal (MonoObject *obj, void* field_ptr, MonoObject* value);
void
mono_gc_wbarrier_set_arrayref_internal (MonoArray *arr, void* slot_ptr, MonoObject* value);
void
mono_gc_wbarrier_arrayref_copy_internal (void* dest_ptr, const void* src_ptr, int count);
MONO_COMPONENT_API void
mono_gc_wbarrier_generic_store_internal (void volatile* ptr, MonoObject* value);
void
mono_gc_wbarrier_generic_store_atomic_internal (void *ptr, MonoObject *value);
ICALL_EXPORT
void
mono_gc_wbarrier_generic_nostore_internal (void* ptr);
void
mono_gc_wbarrier_value_copy_internal (void* dest, const void* src, int count, MonoClass *klass);
void
mono_gc_wbarrier_object_copy_internal (MonoObject* obj, MonoObject *src);
MONO_COMPONENT_API char *
mono_runtime_get_managed_cmd_line (void);
char *
mono_runtime_get_cmd_line (int argc, char **argv);
#ifdef HOST_WASM
int
mono_string_instance_is_interned (MonoString *str);
#endif
gpointer
mono_method_get_unmanaged_wrapper_ftnptr_internal (MonoMethod *method, gboolean only_unmanaged_callers_only, MonoError *error);
#endif /* __MONO_OBJECT_INTERNALS_H__ */
| 1 |
dotnet/runtime | 66,007 | [monoapi] Add mono_method_get_unmanaged_callers_only_ftnptr | Like `RuntimeMethodHandle.GetFunctionPointer`, but callable from native code | lambdageek | 2022-03-01T15:38:09Z | 2022-03-03T15:38:37Z | faa272f860f83802a9cee65619e6e2e841b05433 | 1cfa6d6964d890f9673b25d2165532f9a43710a7 | [monoapi] Add mono_method_get_unmanaged_callers_only_ftnptr. Like `RuntimeMethodHandle.GetFunctionPointer`, but callable from native code | ./src/native/public/mono/metadata/details/mono-private-unstable-functions.h | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//
/**
*
* Private unstable APIs.
*
* WARNING: The declarations and behavior of functions in this header are NOT STABLE and can be modified or removed at
* any time.
*
*/
// This file does not have ifdef guards, it is meant to be included multiple times with different definitions of MONO_API_FUNCTION
#ifndef MONO_API_FUNCTION
#error "MONO_API_FUNCTION(ret,name,args) macro not defined before including function declaration header"
#endif
MONO_API_FUNCTION(MONO_RT_EXTERNAL_ONLY MonoAssembly *, mono_assembly_load_full_alc, (MonoAssemblyLoadContextGCHandle alc_gchandle, MonoAssemblyName *aname, const char *basedir, MonoImageOpenStatus *status))
MONO_API_FUNCTION(MONO_RT_EXTERNAL_ONLY MonoImage *, mono_image_open_from_data_alc, (MonoAssemblyLoadContextGCHandle alc_gchandle, char *data, uint32_t data_len, mono_bool need_copy, MonoImageOpenStatus *status, const char *name))
MONO_API_FUNCTION(MONO_RT_EXTERNAL_ONLY void, mono_install_assembly_preload_hook_v3, (MonoAssemblyPreLoadFuncV3 func, void *user_data, mono_bool append))
// This can point at NULL before the default ALC is initialized
MONO_API_FUNCTION(MONO_RT_EXTERNAL_ONLY MonoAssemblyLoadContextGCHandle, mono_alc_get_default_gchandle, (void))
MONO_API_FUNCTION(void, mono_register_bundled_satellite_assemblies, (const MonoBundledSatelliteAssembly **assemblies))
MONO_API_FUNCTION(MonoBundledSatelliteAssembly *, mono_create_new_bundled_satellite_assembly, (const char *name, const char *culture, const unsigned char *data, unsigned int size))
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//
/**
*
* Private unstable APIs.
*
* WARNING: The declarations and behavior of functions in this header are NOT STABLE and can be modified or removed at
* any time.
*
*/
// This file does not have ifdef guards, it is meant to be included multiple times with different definitions of MONO_API_FUNCTION
#ifndef MONO_API_FUNCTION
#error "MONO_API_FUNCTION(ret,name,args) macro not defined before including function declaration header"
#endif
MONO_API_FUNCTION(MONO_RT_EXTERNAL_ONLY MonoAssembly *, mono_assembly_load_full_alc, (MonoAssemblyLoadContextGCHandle alc_gchandle, MonoAssemblyName *aname, const char *basedir, MonoImageOpenStatus *status))
MONO_API_FUNCTION(MONO_RT_EXTERNAL_ONLY MonoImage *, mono_image_open_from_data_alc, (MonoAssemblyLoadContextGCHandle alc_gchandle, char *data, uint32_t data_len, mono_bool need_copy, MonoImageOpenStatus *status, const char *name))
MONO_API_FUNCTION(MONO_RT_EXTERNAL_ONLY void, mono_install_assembly_preload_hook_v3, (MonoAssemblyPreLoadFuncV3 func, void *user_data, mono_bool append))
// This can point at NULL before the default ALC is initialized
MONO_API_FUNCTION(MONO_RT_EXTERNAL_ONLY MonoAssemblyLoadContextGCHandle, mono_alc_get_default_gchandle, (void))
MONO_API_FUNCTION(void, mono_register_bundled_satellite_assemblies, (const MonoBundledSatelliteAssembly **assemblies))
MONO_API_FUNCTION(MonoBundledSatelliteAssembly *, mono_create_new_bundled_satellite_assembly, (const char *name, const char *culture, const unsigned char *data, unsigned int size))
MONO_API_FUNCTION(MONO_RT_EXTERNAL_ONLY void*, mono_method_get_unmanaged_callers_only_ftnptr, (MonoMethod *method, MonoError *error))
| 1 |
dotnet/runtime | 66,007 | [monoapi] Add mono_method_get_unmanaged_callers_only_ftnptr | Like `RuntimeMethodHandle.GetFunctionPointer`, but callable from native code | lambdageek | 2022-03-01T15:38:09Z | 2022-03-03T15:38:37Z | faa272f860f83802a9cee65619e6e2e841b05433 | 1cfa6d6964d890f9673b25d2165532f9a43710a7 | [monoapi] Add mono_method_get_unmanaged_callers_only_ftnptr. Like `RuntimeMethodHandle.GetFunctionPointer`, but callable from native code | ./src/coreclr/vm/eventpipeadapter.h | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
#ifndef __EVENTPIPE_ADAPTER_H__
#define __EVENTPIPE_ADAPTER_H__
#if defined(FEATURE_PERFTRACING)
#include <eventpipe/ep.h>
#include <eventpipe/ep-provider.h>
#include <eventpipe/ep-config.h>
#include <eventpipe/ep-event.h>
#include <eventpipe/ep-event-instance.h>
#include <eventpipe/ep-session.h>
#include <eventpipe/ep-session-provider.h>
#include <eventpipe/ep-metadata-generator.h>
#include <eventpipe/ep-event-payload.h>
#include <eventpipe/ep-buffer-manager.h>
class EventPipeProviderConfigurationAdapter final
{
public:
EventPipeProviderConfigurationAdapter(const COR_PRF_EVENTPIPE_PROVIDER_CONFIG *providerConfigs, uint32_t providerConfigsLen)
{
STATIC_CONTRACT_NOTHROW;
// This static_assert will fail because EventPipeProviderConfiguration uses char8_t strings rather than char16_t strings.
// This method takes the COR_PRF variant and converts to char8_t strings, so it should be fine.
// Leaving the assert commented out here for posterity.
//
// static_assert(offsetof(EventPipeProviderConfiguration, provider_name) == offsetof(COR_PRF_EVENTPIPE_PROVIDER_CONFIG, providerName)
// && offsetof(EventPipeProviderConfiguration, keywords) == offsetof(COR_PRF_EVENTPIPE_PROVIDER_CONFIG, keywords)
// && offsetof(EventPipeProviderConfiguration, logging_level) == offsetof(COR_PRF_EVENTPIPE_PROVIDER_CONFIG, loggingLevel)
// && offsetof(EventPipeProviderConfiguration, filter_data) == offsetof(COR_PRF_EVENTPIPE_PROVIDER_CONFIG, filterData)
// && sizeof(EventPipeProviderConfiguration) == sizeof(COR_PRF_EVENTPIPE_PROVIDER_CONFIG),
// "Layouts of EventPipeProviderConfiguration type and COR_PRF_EVENTPIPE_PROVIDER_CONFIG type do not match!");
m_providerConfigs = new (nothrow) EventPipeProviderConfiguration[providerConfigsLen];
m_providerConfigsLen = providerConfigsLen;
if (m_providerConfigs) {
for (uint32_t i = 0; i < providerConfigsLen; ++i) {
ep_provider_config_init (
&m_providerConfigs[i],
ep_rt_utf16_to_utf8_string (reinterpret_cast<const ep_char16_t *>(providerConfigs[i].providerName), -1),
providerConfigs[i].keywords,
static_cast<EventPipeEventLevel>(providerConfigs[i].loggingLevel),
ep_rt_utf16_to_utf8_string (reinterpret_cast<const ep_char16_t *>(providerConfigs[i].filterData), -1));
}
}
}
~EventPipeProviderConfigurationAdapter()
{
STATIC_CONTRACT_NOTHROW;
if (m_providerConfigs) {
for (uint32_t i = 0; i < m_providerConfigsLen; ++i) {
ep_rt_utf8_string_free ((ep_char8_t *)ep_provider_config_get_provider_name (&m_providerConfigs[i]));
ep_rt_utf8_string_free ((ep_char8_t *)ep_provider_config_get_filter_data (&m_providerConfigs[i]));
}
delete [] m_providerConfigs;
}
}
inline const EventPipeProviderConfiguration * GetProviderConfigs() const
{
STATIC_CONTRACT_NOTHROW;
return m_providerConfigs;
}
inline uint32_t GetProviderConfigsLen() const
{
STATIC_CONTRACT_NOTHROW;
return m_providerConfigsLen;
}
private:
EventPipeProviderConfiguration *m_providerConfigs;
uint32_t m_providerConfigsLen;
};
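// Illustrative sketch (hypothetical profiler-facing caller; "configs" and
// "numConfigs" are assumptions): the adapter owns UTF-8 copies of the provider
// strings for its lifetime, and a NULL GetProviderConfigs() signals that the
// constructor's allocation failed.
//
//     EventPipeProviderConfigurationAdapter adapter(configs, numConfigs);
//     if (adapter.GetProviderConfigs() == NULL)
//         return E_OUTOFMEMORY;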
class EventPipeParameterDescAdapter final
{
public:
EventPipeParameterDescAdapter(COR_PRF_EVENTPIPE_PARAM_DESC *params, uint32_t paramsLen)
{
STATIC_CONTRACT_NOTHROW;
#ifdef EP_INLINE_GETTER_SETTER
static_assert(offsetof(EventPipeParameterDesc, type) == offsetof(COR_PRF_EVENTPIPE_PARAM_DESC, type)
&& offsetof(EventPipeParameterDesc, element_type) == offsetof(COR_PRF_EVENTPIPE_PARAM_DESC, elementType)
&& offsetof(EventPipeParameterDesc, name) == offsetof(COR_PRF_EVENTPIPE_PARAM_DESC, name)
&& sizeof(EventPipeParameterDesc) == sizeof(COR_PRF_EVENTPIPE_PARAM_DESC),
"Layouts of EventPipeParameterDesc type and COR_PRF_EVENTPIPE_PARAM_DESC type do not match!");
#endif
m_params = reinterpret_cast<EventPipeParameterDesc *>(params);
m_paramsLen = paramsLen;
}
inline const EventPipeParameterDesc * GetParams() const
{
STATIC_CONTRACT_NOTHROW;
return m_params;
}
inline uint32_t GetParamsLen() const
{
STATIC_CONTRACT_NOTHROW;
return m_paramsLen;
}
private:
EventPipeParameterDesc *m_params;
uint32_t m_paramsLen;
};
class EventDataAdapter final
{
public:
EventDataAdapter(COR_PRF_EVENT_DATA *data, uint32_t dataLen)
{
STATIC_CONTRACT_NOTHROW;
#ifdef EP_INLINE_GETTER_SETTER
static_assert(offsetof(EventData, ptr) == offsetof(COR_PRF_EVENT_DATA, ptr)
&& offsetof(EventData, size) == offsetof(COR_PRF_EVENT_DATA, size)
&& sizeof(EventData) == sizeof(COR_PRF_EVENT_DATA),
"Layouts of EventData type and COR_PRF_EVENT_DATA type do not match!");
#endif
m_data = reinterpret_cast<EventData *>(data);
m_dataLen = dataLen;
}
inline const EventData * GetData() const
{
STATIC_CONTRACT_NOTHROW;
return m_data;
}
inline uint32_t GetDataLen() const
{
STATIC_CONTRACT_NOTHROW;
return m_dataLen;
}
private:
EventData *m_data;
uint32_t m_dataLen;
};
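// Illustrative sketch (hypothetical caller; "corProfData" and "count" are
// assumptions): the adapter reinterprets the profiler's COR_PRF_EVENT_DATA
// array as EventPipe's EventData without copying, relying on the layout
// static_assert above.
//
//     EventDataAdapter adapter(corProfData, count);
//     const EventData *data = adapter.GetData();
//     uint32_t dataLen = adapter.GetDataLen();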
class EventPipeAdapter final
{
public:
static inline void Initialize()
{
CONTRACTL
{
NOTHROW;
}
CONTRACTL_END;
ep_init();
}
static inline void FinishInitialize()
{
CONTRACTL
{
NOTHROW;
}
CONTRACTL_END;
ep_finish_init();
}
static inline void Shutdown()
{
CONTRACTL
{
NOTHROW;
GC_TRIGGERS;
MODE_ANY;
}
CONTRACTL_END;
ep_shutdown();
}
static inline bool Enabled()
{
STATIC_CONTRACT_NOTHROW;
return ep_enabled();
}
static inline EventPipeSessionID Enable(
LPCWSTR outputPath,
uint32_t circularBufferSizeInMB,
const EventPipeProviderConfigurationAdapter &providerConfigs,
EventPipeSessionType sessionType,
EventPipeSerializationFormat format,
const bool rundownRequested,
IpcStream *const stream,
EventPipeSessionSynchronousCallback callback,
void *callbackAdditionalData)
{
CONTRACTL
{
NOTHROW;
GC_TRIGGERS;
MODE_PREEMPTIVE;
}
CONTRACTL_END;
ep_char8_t *outputPathUTF8 = NULL;
if (outputPath)
outputPathUTF8 = ep_rt_utf16_to_utf8_string (reinterpret_cast<const ep_char16_t *>(outputPath), -1);
EventPipeSessionID result = ep_enable (
outputPathUTF8,
circularBufferSizeInMB,
providerConfigs.GetProviderConfigs(),
providerConfigs.GetProviderConfigsLen(),
sessionType,
format,
rundownRequested,
stream,
callback,
callbackAdditionalData);
ep_rt_utf8_string_free (outputPathUTF8);
return result;
}
static inline void Disable(EventPipeSessionID id)
{
CONTRACTL
{
NOTHROW;
GC_TRIGGERS;
MODE_ANY;
}
CONTRACTL_END;
ep_disable(id);
}
static inline void StartStreaming(EventPipeSessionID id)
{
CONTRACTL
{
NOTHROW;
GC_TRIGGERS;
MODE_ANY;
}
CONTRACTL_END;
ep_start_streaming(id);
}
static inline EventPipeSession * GetSession(EventPipeSessionID id)
{
STATIC_CONTRACT_NOTHROW;
return ep_get_session(id);
}
static inline HANDLE GetWaitHandle(EventPipeSessionID id)
{
STATIC_CONTRACT_NOTHROW;
return reinterpret_cast<HANDLE>(ep_get_wait_handle(id));
}
static inline FILETIME GetSessionStartTime(EventPipeSession *session)
{
STATIC_CONTRACT_NOTHROW;
FILETIME fileTime;
LARGE_INTEGER largeValue;
_ASSERTE(session != NULL);
largeValue.QuadPart = ep_session_get_session_start_time(session);
fileTime.dwLowDateTime = largeValue.u.LowPart;
fileTime.dwHighDateTime = largeValue.u.HighPart;
return fileTime;
}
static inline LONGLONG GetSessionStartTimestamp(EventPipeSession *session)
{
STATIC_CONTRACT_NOTHROW;
_ASSERTE(session != NULL);
return ep_session_get_session_start_timestamp(session);
}
static inline void AddProviderToSession(EventPipeSessionProvider *provider, EventPipeSession *session)
{
CONTRACTL
{
NOTHROW;
GC_TRIGGERS;
MODE_PREEMPTIVE;
}
CONTRACTL_END;
ep_add_provider_to_session (provider, session);
}
static inline EventPipeProvider * CreateProvider(const SString &providerName, EventPipeCallback callback)
{
CONTRACTL
{
NOTHROW;
GC_TRIGGERS;
MODE_ANY;
}
CONTRACTL_END;
ep_char8_t *providerNameUTF8 = ep_rt_utf16_to_utf8_string(reinterpret_cast<const ep_char16_t *>(providerName.GetUnicode ()), -1);
EventPipeProvider * provider = ep_create_provider (providerNameUTF8, callback, NULL, NULL);
ep_rt_utf8_string_free (providerNameUTF8);
return provider;
}
static inline void DeleteProvider (EventPipeProvider * provider)
{
CONTRACTL
{
NOTHROW;
GC_TRIGGERS;
MODE_ANY;
}
CONTRACTL_END;
ep_delete_provider (provider);
}
static inline EventPipeProvider * GetProvider (LPCWSTR providerName)
{
CONTRACTL
{
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
}
CONTRACTL_END;
if (!providerName)
return NULL;
ep_char8_t *providerNameUTF8 = ep_rt_utf16_to_utf8_string(reinterpret_cast<const ep_char16_t *>(providerName), -1);
EventPipeProvider * provider = ep_get_provider (providerNameUTF8);
ep_rt_utf8_string_free(providerNameUTF8);
return provider;
}
static EventPipeSessionProvider * CreateSessionProvider(const EventPipeProviderConfigurationAdapter &providerConfig)
{
CONTRACTL
{
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
}
CONTRACTL_END;
_ASSERTE (providerConfig.GetProviderConfigs() != NULL && providerConfig.GetProviderConfigsLen() == 1);
const EventPipeProviderConfiguration *config = providerConfig.GetProviderConfigs();
if (!config)
return NULL;
return ep_session_provider_alloc (
ep_provider_config_get_provider_name (&config[0]),
ep_provider_config_get_keywords (&config[0]),
(EventPipeEventLevel)ep_provider_config_get_logging_level (&config[0]),
ep_provider_config_get_filter_data (&config[0]));
}
static HRESULT GetProviderName(const EventPipeProvider *provider, ULONG numNameChars, ULONG *numNameCharsOut, LPWSTR name)
{
CONTRACTL
{
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
}
CONTRACTL_END;
_ASSERTE(provider != NULL);
HRESULT hr = S_OK;
const ep_char16_t *providerName = ep_provider_get_provider_name_utf16 (provider);
if (providerName) {
uint32_t numProviderNameChars = (uint32_t)(ep_rt_utf16_string_len (providerName) + 1);
if (numNameCharsOut)
*numNameCharsOut = numProviderNameChars;
if (numProviderNameChars >= numNameChars)
hr = HRESULT_FROM_WIN32(ERROR_INSUFFICIENT_BUFFER);
else if (name)
memcpy (name, providerName, numProviderNameChars * sizeof (ep_char16_t));
}
return hr;
}
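    // Illustrative sketch, not part of the runtime: the usual call pattern for
    // GetProviderName above. A fixed-size buffer is tried first; on
    // ERROR_INSUFFICIENT_BUFFER, "needed" reports the required character count
    // (including the terminator), so a caller would retry with at least
    // needed + 1 characters.
    static inline HRESULT GetProviderNameSketch(const EventPipeProvider *provider)
    {
        WCHAR name[64];
        ULONG needed = 0;
        HRESULT hr = GetProviderName(provider, 64, &needed, name);
        // On failure, allocate a buffer of (needed + 1) WCHARs and call again.
        return hr;
    }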
static EventPipeEvent * AddEvent(
EventPipeProvider *provider,
uint32_t eventID,
LPCWSTR eventName,
int64_t keywords,
uint32_t eventVersion,
EventPipeEventLevel level,
uint8_t opcode,
        const EventPipeParameterDescAdapter &params,
bool needStack)
{
CONTRACTL
{
NOTHROW;
GC_TRIGGERS;
MODE_ANY;
}
CONTRACTL_END;
size_t metadataLen = 0;
EventPipeEvent *realEvent = NULL;
uint8_t *metadata = ep_metadata_generator_generate_event_metadata (
eventID,
reinterpret_cast<const ep_char16_t *>(eventName),
keywords,
eventVersion,
level,
opcode,
(EventPipeParameterDesc *)params.GetParams(),
params.GetParamsLen(),
&metadataLen);
if (metadata) {
realEvent = ep_provider_add_event(
provider,
eventID,
keywords,
eventVersion,
level,
needStack,
metadata,
(uint32_t)metadataLen);
ep_rt_byte_array_free(metadata);
}
return realEvent;
}
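    // Illustrative sketch, not part of the runtime: defining a one-parameter event
    // through the metadata-generating AddEvent overload above. The numeric type
    // code (9, assumed to be the TypeCode for Int32), the IDs, and all names are
    // placeholder values, and the W() wide-literal macro is assumed available.
    static inline EventPipeEvent * AddSampleEventSketch(EventPipeProvider *provider)
    {
        COR_PRF_EVENTPIPE_PARAM_DESC param = { 9 /* Int32 */, 0, W("RequestId") };
        EventPipeParameterDescAdapter params(&param, 1);
        return AddEvent(
            provider,
            100 /* eventID */,
            W("SampleEvent"),
            0 /* keywords */,
            1 /* eventVersion */,
            (EventPipeEventLevel)4 /* Informational */,
            0 /* opcode */,
            params,
            false /* needStack */);
    }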
static inline EventPipeEvent * AddEvent(
EventPipeProvider *provider,
uint32_t eventID,
int64_t keywords,
uint32_t eventVersion,
EventPipeEventLevel level,
bool needStack,
BYTE *metadata = NULL,
uint32_t metadataLen = 0)
{
CONTRACTL
{
NOTHROW;
GC_TRIGGERS;
MODE_ANY;
}
CONTRACTL_END;
return ep_provider_add_event(provider, eventID, keywords, eventVersion, level, needStack, metadata, metadataLen);
}
static inline void WriteEvent(
EventPipeEvent *ep_event,
BYTE *data,
uint32_t dataLen,
LPCGUID activityId,
LPCGUID relatedActivityId)
{
CONTRACTL
{
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
}
CONTRACTL_END;
ep_write_event(
ep_event,
data,
dataLen,
reinterpret_cast<const uint8_t*>(activityId),
reinterpret_cast<const uint8_t*>(relatedActivityId));
}
static inline void WriteEvent(
EventPipeEvent *ep_event,
EventData *data,
uint32_t dataLen,
LPCGUID activityId,
LPCGUID relatedActivityId)
{
CONTRACTL
{
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
}
CONTRACTL_END;
ep_write_event_2(
ep_event,
data,
dataLen,
reinterpret_cast<const uint8_t*>(activityId),
reinterpret_cast<const uint8_t*>(relatedActivityId));
}
static inline void WriteEvent(
EventPipeEvent *ep_event,
EventDataAdapter &data,
LPCGUID activityId,
LPCGUID relatedActivityId)
{
WriteEvent(
ep_event,
(EventData*)data.GetData(),
data.GetDataLen(),
activityId,
relatedActivityId);
}
static inline bool EventIsEnabled (const EventPipeEvent *epEvent)
{
STATIC_CONTRACT_NOTHROW;
return ep_event_is_enabled(epEvent);
}
static inline EventPipeEventInstance * GetNextEvent (EventPipeSessionID id)
{
CONTRACTL
{
NOTHROW;
GC_TRIGGERS;
MODE_PREEMPTIVE;
}
CONTRACTL_END;
return ep_get_next_event(id);
}
static inline EventPipeProvider * GetEventProvider (EventPipeEventInstance *eventInstance)
{
STATIC_CONTRACT_NOTHROW;
return ep_event_get_provider(ep_event_instance_get_ep_event(eventInstance));
}
static inline uint32_t GetEventID (EventPipeEventInstance *eventInstance)
{
STATIC_CONTRACT_NOTHROW;
return ep_event_get_event_id(ep_event_instance_get_ep_event(eventInstance));
}
static inline uint64_t GetEventThreadID (EventPipeEventInstance *eventInstance)
{
STATIC_CONTRACT_NOTHROW;
return ep_event_instance_get_thread_id(eventInstance);
}
static inline int64_t GetEventTimestamp (EventPipeEventInstance *eventInstance)
{
STATIC_CONTRACT_NOTHROW;
return ep_event_instance_get_timestamp(eventInstance);
}
static inline LPCGUID GetEventActivityID (EventPipeEventInstance *eventInstance)
{
STATIC_CONTRACT_NOTHROW;
        static_assert(sizeof(GUID) == EP_ACTIVITY_ID_SIZE, "Size mismatch: sizeof(GUID) should be equal to EP_ACTIVITY_ID_SIZE");
return reinterpret_cast<LPCGUID>(ep_event_instance_get_activity_id_cref(eventInstance));
}
static inline LPCGUID GetEventRelativeActivityID (EventPipeEventInstance *eventInstance)
{
STATIC_CONTRACT_NOTHROW;
        static_assert(sizeof(GUID) == EP_ACTIVITY_ID_SIZE, "Size mismatch: sizeof(GUID) should be equal to EP_ACTIVITY_ID_SIZE");
return reinterpret_cast<LPCGUID>(ep_event_instance_get_related_activity_id_cref(eventInstance));
}
static inline const BYTE * GetEventData (EventPipeEventInstance *eventInstance)
{
STATIC_CONTRACT_NOTHROW;
return ep_event_instance_get_data(eventInstance);
}
static inline uint32_t GetEventDataLen (EventPipeEventInstance *eventInstance)
{
STATIC_CONTRACT_NOTHROW;
return ep_event_instance_get_data_len(eventInstance);
}
static inline void ResumeSession (EventPipeSession *session)
{
STATIC_CONTRACT_NOTHROW;
ep_session_resume (session);
}
static inline void PauseSession (EventPipeSession *session)
{
STATIC_CONTRACT_NOTHROW;
ep_session_pause (session);
}
};
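// Illustrative end-to-end sketch, not part of the runtime: the typical order of
// adapter calls for a provider created elsewhere (for example via
// EventPipeAdapter::CreateProvider). Event ID, keywords, and version are
// placeholder values.
static inline void EventPipeAdapterLifecycleSketch(EventPipeProvider *provider)
{
    EventPipeEvent *ev = EventPipeAdapter::AddEvent(
        provider, 1 /* eventID */, 0 /* keywords */, 1 /* eventVersion */,
        (EventPipeEventLevel)4 /* Informational */, false /* needStack */);
    if (ev != NULL && EventPipeAdapter::EventIsEnabled(ev))
    {
        uint32_t payload = 42;
        EventPipeAdapter::WriteEvent(ev, (BYTE*)&payload, (uint32_t)sizeof(payload), NULL, NULL);
    }
    EventPipeAdapter::DeleteProvider(provider);
}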
#endif // FEATURE_PERFTRACING
#endif // __EVENTPIPE_ADAPTER_H__
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
#ifndef __EVENTPIPE_ADAPTER_H__
#define __EVENTPIPE_ADAPTER_H__
#if defined(FEATURE_PERFTRACING)
#include <eventpipe/ep.h>
#include <eventpipe/ep-provider.h>
#include <eventpipe/ep-config.h>
#include <eventpipe/ep-event.h>
#include <eventpipe/ep-event-instance.h>
#include <eventpipe/ep-session.h>
#include <eventpipe/ep-session-provider.h>
#include <eventpipe/ep-metadata-generator.h>
#include <eventpipe/ep-event-payload.h>
#include <eventpipe/ep-buffer-manager.h>
class EventPipeProviderConfigurationAdapter final
{
public:
EventPipeProviderConfigurationAdapter(const COR_PRF_EVENTPIPE_PROVIDER_CONFIG *providerConfigs, uint32_t providerConfigsLen)
{
STATIC_CONTRACT_NOTHROW;
// This static_assert will fail because EventPipeProviderConfiguration uses char8_t strings rather than char16_t strings.
// This method takes the COR_PRF variant and converts to char8_t strings, so it should be fine.
// Leaving the assert commented out here for posterity.
//
// static_assert(offsetof(EventPipeProviderConfiguration, provider_name) == offsetof(COR_PRF_EVENTPIPE_PROVIDER_CONFIG, providerName)
// && offsetof(EventPipeProviderConfiguration, keywords) == offsetof(COR_PRF_EVENTPIPE_PROVIDER_CONFIG, keywords)
// && offsetof(EventPipeProviderConfiguration, logging_level) == offsetof(COR_PRF_EVENTPIPE_PROVIDER_CONFIG, loggingLevel)
// && offsetof(EventPipeProviderConfiguration, filter_data) == offsetof(COR_PRF_EVENTPIPE_PROVIDER_CONFIG, filterData)
// && sizeof(EventPipeProviderConfiguration) == sizeof(COR_PRF_EVENTPIPE_PROVIDER_CONFIG),
// "Layouts of EventPipeProviderConfiguration type and COR_PRF_EVENTPIPE_PROVIDER_CONFIG type do not match!");
m_providerConfigs = new (nothrow) EventPipeProviderConfiguration[providerConfigsLen];
m_providerConfigsLen = providerConfigsLen;
if (m_providerConfigs) {
for (uint32_t i = 0; i < providerConfigsLen; ++i) {
ep_provider_config_init (
&m_providerConfigs[i],
ep_rt_utf16_to_utf8_string (reinterpret_cast<const ep_char16_t *>(providerConfigs[i].providerName), -1),
providerConfigs[i].keywords,
static_cast<EventPipeEventLevel>(providerConfigs[i].loggingLevel),
ep_rt_utf16_to_utf8_string (reinterpret_cast<const ep_char16_t *>(providerConfigs[i].filterData), -1));
}
}
}
~EventPipeProviderConfigurationAdapter()
{
STATIC_CONTRACT_NOTHROW;
if (m_providerConfigs) {
for (uint32_t i = 0; i < m_providerConfigsLen; ++i) {
ep_rt_utf8_string_free ((ep_char8_t *)ep_provider_config_get_provider_name (&m_providerConfigs[i]));
ep_rt_utf8_string_free ((ep_char8_t *)ep_provider_config_get_filter_data (&m_providerConfigs[i]));
}
delete [] m_providerConfigs;
}
}
inline const EventPipeProviderConfiguration * GetProviderConfigs() const
{
STATIC_CONTRACT_NOTHROW;
return m_providerConfigs;
}
inline uint32_t GetProviderConfigsLen() const
{
STATIC_CONTRACT_NOTHROW;
return m_providerConfigsLen;
}
private:
EventPipeProviderConfiguration *m_providerConfigs;
uint32_t m_providerConfigsLen;
};
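// Illustrative sketch, not part of the runtime: building a single profiler-style
// provider configuration and viewing it through the adapter above. The provider
// name and keyword mask are placeholders, and the W() wide-literal macro is
// assumed available.
static inline uint32_t ProviderConfigAdapterSketch()
{
    COR_PRF_EVENTPIPE_PROVIDER_CONFIG config = {};
    config.providerName = W("MyCompany-MyProvider");
    config.keywords = 0x1;
    config.loggingLevel = 4; // Informational
    config.filterData = NULL;
    EventPipeProviderConfigurationAdapter adapter(&config, 1);
    // The adapter owns UTF-8 copies of the strings until it goes out of scope.
    return adapter.GetProviderConfigsLen();
}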
class EventPipeParameterDescAdapter final
{
public:
EventPipeParameterDescAdapter(COR_PRF_EVENTPIPE_PARAM_DESC *params, uint32_t paramsLen)
{
STATIC_CONTRACT_NOTHROW;
#ifdef EP_INLINE_GETTER_SETTER
static_assert(offsetof(EventPipeParameterDesc, type) == offsetof(COR_PRF_EVENTPIPE_PARAM_DESC, type)
&& offsetof(EventPipeParameterDesc, element_type) == offsetof(COR_PRF_EVENTPIPE_PARAM_DESC, elementType)
&& offsetof(EventPipeParameterDesc, name) == offsetof(COR_PRF_EVENTPIPE_PARAM_DESC, name)
&& sizeof(EventPipeParameterDesc) == sizeof(COR_PRF_EVENTPIPE_PARAM_DESC),
"Layouts of EventPipeParameterDesc type and COR_PRF_EVENTPIPE_PARAM_DESC type do not match!");
#endif
m_params = reinterpret_cast<EventPipeParameterDesc *>(params);
m_paramsLen = paramsLen;
}
inline const EventPipeParameterDesc * GetParams() const
{
STATIC_CONTRACT_NOTHROW;
return m_params;
}
inline uint32_t GetParamsLen() const
{
STATIC_CONTRACT_NOTHROW;
return m_paramsLen;
}
private:
EventPipeParameterDesc *m_params;
uint32_t m_paramsLen;
};
class EventDataAdapter final
{
public:
EventDataAdapter(COR_PRF_EVENT_DATA *data, uint32_t dataLen)
{
STATIC_CONTRACT_NOTHROW;
#ifdef EP_INLINE_GETTER_SETTER
static_assert(offsetof(EventData, ptr) == offsetof(COR_PRF_EVENT_DATA, ptr)
&& offsetof(EventData, size) == offsetof(COR_PRF_EVENT_DATA, size)
&& sizeof(EventData) == sizeof(COR_PRF_EVENT_DATA),
"Layouts of EventData type and COR_PRF_EVENT_DATA type do not match!");
#endif
m_data = reinterpret_cast<EventData *>(data);
m_dataLen = dataLen;
}
inline const EventData * GetData() const
{
STATIC_CONTRACT_NOTHROW;
return m_data;
}
inline uint32_t GetDataLen() const
{
STATIC_CONTRACT_NOTHROW;
return m_dataLen;
}
private:
EventData *m_data;
uint32_t m_dataLen;
};
class EventPipeAdapter final
{
public:
static inline void Initialize()
{
CONTRACTL
{
NOTHROW;
}
CONTRACTL_END;
ep_init();
}
static inline void FinishInitialize()
{
CONTRACTL
{
NOTHROW;
}
CONTRACTL_END;
ep_finish_init();
}
static inline void Shutdown()
{
CONTRACTL
{
NOTHROW;
GC_TRIGGERS;
MODE_ANY;
}
CONTRACTL_END;
ep_shutdown();
}
static inline bool Enabled()
{
STATIC_CONTRACT_NOTHROW;
return ep_enabled();
}
static inline EventPipeSessionID Enable(
LPCWSTR outputPath,
uint32_t circularBufferSizeInMB,
const EventPipeProviderConfigurationAdapter &providerConfigs,
EventPipeSessionType sessionType,
EventPipeSerializationFormat format,
const bool rundownRequested,
IpcStream *const stream,
EventPipeSessionSynchronousCallback callback,
void *callbackAdditionalData)
{
CONTRACTL
{
NOTHROW;
GC_TRIGGERS;
MODE_PREEMPTIVE;
}
CONTRACTL_END;
ep_char8_t *outputPathUTF8 = NULL;
if (outputPath)
outputPathUTF8 = ep_rt_utf16_to_utf8_string (reinterpret_cast<const ep_char16_t *>(outputPath), -1);
EventPipeSessionID result = ep_enable (
outputPathUTF8,
circularBufferSizeInMB,
providerConfigs.GetProviderConfigs(),
providerConfigs.GetProviderConfigsLen(),
sessionType,
format,
rundownRequested,
stream,
callback,
callbackAdditionalData);
ep_rt_utf8_string_free (outputPathUTF8);
return result;
}
static inline void Disable(EventPipeSessionID id)
{
CONTRACTL
{
NOTHROW;
GC_TRIGGERS;
MODE_ANY;
}
CONTRACTL_END;
ep_disable(id);
}
static inline void StartStreaming(EventPipeSessionID id)
{
CONTRACTL
{
NOTHROW;
GC_TRIGGERS;
MODE_ANY;
}
CONTRACTL_END;
ep_start_streaming(id);
}
static inline EventPipeSession * GetSession(EventPipeSessionID id)
{
STATIC_CONTRACT_NOTHROW;
return ep_get_session(id);
}
static inline HANDLE GetWaitHandle(EventPipeSessionID id)
{
STATIC_CONTRACT_NOTHROW;
return reinterpret_cast<HANDLE>(ep_get_wait_handle(id));
}
static inline FILETIME GetSessionStartTime(EventPipeSession *session)
{
STATIC_CONTRACT_NOTHROW;
FILETIME fileTime;
LARGE_INTEGER largeValue;
_ASSERTE(session != NULL);
largeValue.QuadPart = ep_session_get_session_start_time(session);
fileTime.dwLowDateTime = largeValue.u.LowPart;
fileTime.dwHighDateTime = largeValue.u.HighPart;
return fileTime;
}
static inline LONGLONG GetSessionStartTimestamp(EventPipeSession *session)
{
STATIC_CONTRACT_NOTHROW;
_ASSERTE(session != NULL);
return ep_session_get_session_start_timestamp(session);
}
static inline void AddProviderToSession(EventPipeSessionProvider *provider, EventPipeSession *session)
{
CONTRACTL
{
NOTHROW;
GC_TRIGGERS;
MODE_PREEMPTIVE;
}
CONTRACTL_END;
ep_add_provider_to_session (provider, session);
}
static inline EventPipeProvider * CreateProvider(const SString &providerName, EventPipeCallback callback)
{
CONTRACTL
{
NOTHROW;
GC_TRIGGERS;
MODE_ANY;
}
CONTRACTL_END;
ep_char8_t *providerNameUTF8 = ep_rt_utf16_to_utf8_string(reinterpret_cast<const ep_char16_t *>(providerName.GetUnicode ()), -1);
EventPipeProvider * provider = ep_create_provider (providerNameUTF8, callback, NULL, NULL);
ep_rt_utf8_string_free (providerNameUTF8);
return provider;
}
static inline void DeleteProvider (EventPipeProvider * provider)
{
CONTRACTL
{
NOTHROW;
GC_TRIGGERS;
MODE_ANY;
}
CONTRACTL_END;
ep_delete_provider (provider);
}
static inline EventPipeProvider * GetProvider (LPCWSTR providerName)
{
CONTRACTL
{
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
}
CONTRACTL_END;
if (!providerName)
return NULL;
ep_char8_t *providerNameUTF8 = ep_rt_utf16_to_utf8_string(reinterpret_cast<const ep_char16_t *>(providerName), -1);
EventPipeProvider * provider = ep_get_provider (providerNameUTF8);
ep_rt_utf8_string_free(providerNameUTF8);
return provider;
}
static EventPipeSessionProvider * CreateSessionProvider(const EventPipeProviderConfigurationAdapter &providerConfig)
{
CONTRACTL
{
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
}
CONTRACTL_END;
_ASSERTE (providerConfig.GetProviderConfigs() != NULL && providerConfig.GetProviderConfigsLen() == 1);
const EventPipeProviderConfiguration *config = providerConfig.GetProviderConfigs();
if (!config)
return NULL;
return ep_session_provider_alloc (
ep_provider_config_get_provider_name (&config[0]),
ep_provider_config_get_keywords (&config[0]),
(EventPipeEventLevel)ep_provider_config_get_logging_level (&config[0]),
ep_provider_config_get_filter_data (&config[0]));
}
static HRESULT GetProviderName(const EventPipeProvider *provider, ULONG numNameChars, ULONG *numNameCharsOut, LPWSTR name)
{
CONTRACTL
{
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
}
CONTRACTL_END;
_ASSERTE(provider != NULL);
HRESULT hr = S_OK;
const ep_char16_t *providerName = ep_provider_get_provider_name_utf16 (provider);
if (providerName) {
uint32_t numProviderNameChars = (uint32_t)(ep_rt_utf16_string_len (providerName) + 1);
if (numNameCharsOut)
*numNameCharsOut = numProviderNameChars;
if (numProviderNameChars >= numNameChars)
hr = HRESULT_FROM_WIN32(ERROR_INSUFFICIENT_BUFFER);
else if (name)
memcpy (name, providerName, numProviderNameChars * sizeof (ep_char16_t));
}
return hr;
}
static EventPipeEvent * AddEvent(
EventPipeProvider *provider,
uint32_t eventID,
LPCWSTR eventName,
int64_t keywords,
uint32_t eventVersion,
EventPipeEventLevel level,
uint8_t opcode,
        const EventPipeParameterDescAdapter &params,
bool needStack)
{
CONTRACTL
{
NOTHROW;
GC_TRIGGERS;
MODE_ANY;
}
CONTRACTL_END;
size_t metadataLen = 0;
EventPipeEvent *realEvent = NULL;
uint8_t *metadata = ep_metadata_generator_generate_event_metadata (
eventID,
reinterpret_cast<const ep_char16_t *>(eventName),
keywords,
eventVersion,
level,
opcode,
(EventPipeParameterDesc *)params.GetParams(),
params.GetParamsLen(),
&metadataLen);
if (metadata) {
realEvent = ep_provider_add_event(
provider,
eventID,
keywords,
eventVersion,
level,
needStack,
metadata,
(uint32_t)metadataLen);
ep_rt_byte_array_free(metadata);
}
return realEvent;
}
static inline EventPipeEvent * AddEvent(
EventPipeProvider *provider,
uint32_t eventID,
int64_t keywords,
uint32_t eventVersion,
EventPipeEventLevel level,
bool needStack,
BYTE *metadata = NULL,
uint32_t metadataLen = 0)
{
CONTRACTL
{
NOTHROW;
GC_TRIGGERS;
MODE_ANY;
}
CONTRACTL_END;
return ep_provider_add_event(provider, eventID, keywords, eventVersion, level, needStack, metadata, metadataLen);
}
static inline void WriteEvent(
EventPipeEvent *ep_event,
BYTE *data,
uint32_t dataLen,
LPCGUID activityId,
LPCGUID relatedActivityId)
{
CONTRACTL
{
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
}
CONTRACTL_END;
ep_write_event(
ep_event,
data,
dataLen,
reinterpret_cast<const uint8_t*>(activityId),
reinterpret_cast<const uint8_t*>(relatedActivityId));
}
static inline void WriteEvent(
EventPipeEvent *ep_event,
EventData *data,
uint32_t dataLen,
LPCGUID activityId,
LPCGUID relatedActivityId)
{
CONTRACTL
{
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
}
CONTRACTL_END;
ep_write_event_2(
ep_event,
data,
dataLen,
reinterpret_cast<const uint8_t*>(activityId),
reinterpret_cast<const uint8_t*>(relatedActivityId));
}
static inline void WriteEvent(
EventPipeEvent *ep_event,
EventDataAdapter &data,
LPCGUID activityId,
LPCGUID relatedActivityId)
{
WriteEvent(
ep_event,
(EventData*)data.GetData(),
data.GetDataLen(),
activityId,
relatedActivityId);
}
static inline bool EventIsEnabled (const EventPipeEvent *epEvent)
{
STATIC_CONTRACT_NOTHROW;
return ep_event_is_enabled(epEvent);
}
static inline EventPipeEventInstance * GetNextEvent (EventPipeSessionID id)
{
CONTRACTL
{
NOTHROW;
GC_TRIGGERS;
MODE_PREEMPTIVE;
}
CONTRACTL_END;
return ep_get_next_event(id);
}
static inline EventPipeProvider * GetEventProvider (EventPipeEventInstance *eventInstance)
{
STATIC_CONTRACT_NOTHROW;
return ep_event_get_provider(ep_event_instance_get_ep_event(eventInstance));
}
static inline uint32_t GetEventID (EventPipeEventInstance *eventInstance)
{
STATIC_CONTRACT_NOTHROW;
return ep_event_get_event_id(ep_event_instance_get_ep_event(eventInstance));
}
static inline uint64_t GetEventThreadID (EventPipeEventInstance *eventInstance)
{
STATIC_CONTRACT_NOTHROW;
return ep_event_instance_get_thread_id(eventInstance);
}
static inline int64_t GetEventTimestamp (EventPipeEventInstance *eventInstance)
{
STATIC_CONTRACT_NOTHROW;
return ep_event_instance_get_timestamp(eventInstance);
}
static inline LPCGUID GetEventActivityID (EventPipeEventInstance *eventInstance)
{
STATIC_CONTRACT_NOTHROW;
        static_assert(sizeof(GUID) == EP_ACTIVITY_ID_SIZE, "Size mismatch: sizeof(GUID) should be equal to EP_ACTIVITY_ID_SIZE");
return reinterpret_cast<LPCGUID>(ep_event_instance_get_activity_id_cref(eventInstance));
}
static inline LPCGUID GetEventRelativeActivityID (EventPipeEventInstance *eventInstance)
{
STATIC_CONTRACT_NOTHROW;
        static_assert(sizeof(GUID) == EP_ACTIVITY_ID_SIZE, "Size mismatch: sizeof(GUID) should be equal to EP_ACTIVITY_ID_SIZE");
return reinterpret_cast<LPCGUID>(ep_event_instance_get_related_activity_id_cref(eventInstance));
}
static inline const BYTE * GetEventData (EventPipeEventInstance *eventInstance)
{
STATIC_CONTRACT_NOTHROW;
return ep_event_instance_get_data(eventInstance);
}
static inline uint32_t GetEventDataLen (EventPipeEventInstance *eventInstance)
{
STATIC_CONTRACT_NOTHROW;
return ep_event_instance_get_data_len(eventInstance);
}
static inline void ResumeSession (EventPipeSession *session)
{
STATIC_CONTRACT_NOTHROW;
ep_session_resume (session);
}
static inline void PauseSession (EventPipeSession *session)
{
STATIC_CONTRACT_NOTHROW;
ep_session_pause (session);
}
};
#endif // FEATURE_PERFTRACING
#endif // __EVENTPIPE_ADAPTER_H__
| -1 |
dotnet/runtime | 66,007 | [monoapi] Add mono_method_get_unmanaged_callers_only_ftnptr | Like `RuntimeMethodHandle.GetFunctionPointer`, but callable from native code | lambdageek | 2022-03-01T15:38:09Z | 2022-03-03T15:38:37Z | faa272f860f83802a9cee65619e6e2e841b05433 | 1cfa6d6964d890f9673b25d2165532f9a43710a7 | [monoapi] Add mono_method_get_unmanaged_callers_only_ftnptr. Like `RuntimeMethodHandle.GetFunctionPointer`, but callable from native code | ./src/coreclr/jit/ssabuilder.h | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
#pragma once
#pragma warning(disable : 4503) // 'identifier' : decorated name length exceeded, name was truncated
#include "compiler.h"
#include "ssarenamestate.h"
typedef int LclVarNum;
// Pair of a local var name eg: V01 and Ssa number; eg: V01_01
typedef std::pair<LclVarNum, int> SsaVarName;
class SsaBuilder
{
private:
inline void EndPhase(Phases phase)
{
m_pCompiler->EndPhase(phase);
}
bool IncludeInSsa(unsigned lclNum);
public:
// Constructor
SsaBuilder(Compiler* pCompiler);
// Requires stmt nodes to be already sequenced in evaluation order. Analyzes the graph
// for introduction of phi-nodes as GT_PHI tree nodes at the beginning of each block.
// Each GT_LCL_VAR is given its ssa number through its GetSsaNum() field in the node.
// Each GT_PHI node will be under a GT_ASG node with the LHS set to the local node and
// the RHS to the GT_PHI itself. The inputs to the PHI are represented as a linked list
// of GT_PHI_ARG nodes. Each use or def is denoted by the corresponding GT_LCL_VAR
// tree. For example, to get all uses of a particular variable fully defined by its
    // lclNum and ssaNum, one would use m_uses and look up all the uses. Similarly, the
    // single def of an SSA variable can be looked up using the m_defs member.
void Build();
private:
// Ensures that the basic block graph has a root for the dominator graph, by ensuring
// that there is a first block that is not in a try region (adding an empty block for that purpose
// if necessary). Eventually should move to Compiler.
void SetupBBRoot();
// Requires "postOrder" to be an array of size "count". Requires "count" to at least
// be the size of the flow graph. Sorts the current compiler's flow-graph and places
// the blocks in post order (i.e., a node's children first) in the array. Returns the
// number of nodes visited while sorting the graph. In other words, valid entries in
// the output array.
int TopologicalSort(BasicBlock** postOrder, int count);
// Requires "postOrder" to hold the blocks of the flowgraph in topologically sorted
// order. Requires count to be the valid entries in the "postOrder" array. Computes
// each block's immediate dominator and records it in the BasicBlock in bbIDom.
void ComputeImmediateDom(BasicBlock** postOrder, int count);
// Compute flow graph dominance frontiers.
void ComputeDominanceFrontiers(BasicBlock** postOrder, int count, BlkToBlkVectorMap* mapDF);
// Compute the iterated dominance frontier for the specified block.
void ComputeIteratedDominanceFrontier(BasicBlock* b, const BlkToBlkVectorMap* mapDF, BlkVector* bIDF);
// Insert a new GT_PHI statement.
void InsertPhi(BasicBlock* block, unsigned lclNum);
// Add a new GT_PHI_ARG node to an existing GT_PHI node
void AddPhiArg(
BasicBlock* block, Statement* stmt, GenTreePhi* phi, unsigned lclNum, unsigned ssaNum, BasicBlock* pred);
// Requires "postOrder" to hold the blocks of the flowgraph in topologically sorted order. Requires
// count to be the valid entries in the "postOrder" array. Inserts GT_PHI nodes at the beginning
// of basic blocks that require them like so:
// GT_ASG(GT_LCL_VAR, GT_PHI(GT_PHI_ARG(ssaNum, Block*), GT_PHI_ARG(ssaNum, Block*), ...));
void InsertPhiFunctions(BasicBlock** postOrder, int count);
// Rename all definitions and uses within the compiled method.
void RenameVariables();
// Rename all definitions and uses within a block.
void BlockRenameVariables(BasicBlock* block);
// Rename a local or memory definition generated by a GT_ASG node.
void RenameDef(GenTreeOp* asgNode, BasicBlock* block);
// Rename a use of a local variable.
void RenameLclUse(GenTreeLclVarCommon* lclNode);
// Assumes that "block" contains a definition for local var "lclNum", with SSA number "ssaNum".
    // If "block" is within one or more try blocks,
// and the local variable is live at the start of the corresponding handlers,
// add this SSA number "ssaNum" to the argument list of the phi for the variable in the start
// block of those handlers.
void AddDefToHandlerPhis(BasicBlock* block, unsigned lclNum, unsigned ssaNum);
// Same as above, for memory.
void AddMemoryDefToHandlerPhis(MemoryKind memoryKind, BasicBlock* block, unsigned ssaNum);
// Add GT_PHI_ARG nodes to the GT_PHI nodes within block's successors.
void AddPhiArgsToSuccessors(BasicBlock* block);
#ifdef DEBUG
void Print(BasicBlock** postOrder, int count);
#endif
private:
Compiler* m_pCompiler;
CompAllocator m_allocator;
// Bit vector used by TopologicalSort and ComputeImmediateDom to track already visited blocks.
BitVecTraits m_visitedTraits;
BitVec m_visited;
SsaRenameState m_renameStack;
};
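// Illustrative sketch, not part of the JIT: a minimal driver for the builder,
// assuming "compiler" already has its statement nodes sequenced in evaluation
// order as Build() requires.
inline void RunSsaBuilderSketch(Compiler* compiler)
{
    SsaBuilder builder(compiler);
    builder.Build(); // inserts GT_PHI nodes and assigns SSA numbers to locals
}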
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
#pragma once
#pragma warning(disable : 4503) // 'identifier' : decorated name length exceeded, name was truncated
#include "compiler.h"
#include "ssarenamestate.h"
typedef int LclVarNum;
// Pair of a local var name eg: V01 and Ssa number; eg: V01_01
typedef std::pair<LclVarNum, int> SsaVarName;
class SsaBuilder
{
private:
inline void EndPhase(Phases phase)
{
m_pCompiler->EndPhase(phase);
}
bool IncludeInSsa(unsigned lclNum);
public:
// Constructor
SsaBuilder(Compiler* pCompiler);
// Requires stmt nodes to be already sequenced in evaluation order. Analyzes the graph
// for introduction of phi-nodes as GT_PHI tree nodes at the beginning of each block.
// Each GT_LCL_VAR is given its ssa number through its GetSsaNum() field in the node.
// Each GT_PHI node will be under a GT_ASG node with the LHS set to the local node and
// the RHS to the GT_PHI itself. The inputs to the PHI are represented as a linked list
// of GT_PHI_ARG nodes. Each use or def is denoted by the corresponding GT_LCL_VAR
// tree. For example, to get all uses of a particular variable fully defined by its
    // lclNum and ssaNum, one would use m_uses and look up all the uses. Similarly, the
    // single def of an SSA variable can be looked up using the m_defs member.
void Build();
private:
// Ensures that the basic block graph has a root for the dominator graph, by ensuring
// that there is a first block that is not in a try region (adding an empty block for that purpose
// if necessary). Eventually should move to Compiler.
void SetupBBRoot();
// Requires "postOrder" to be an array of size "count". Requires "count" to at least
// be the size of the flow graph. Sorts the current compiler's flow-graph and places
// the blocks in post order (i.e., a node's children first) in the array. Returns the
// number of nodes visited while sorting the graph. In other words, valid entries in
// the output array.
int TopologicalSort(BasicBlock** postOrder, int count);
// Requires "postOrder" to hold the blocks of the flowgraph in topologically sorted
// order. Requires count to be the valid entries in the "postOrder" array. Computes
// each block's immediate dominator and records it in the BasicBlock in bbIDom.
void ComputeImmediateDom(BasicBlock** postOrder, int count);
// Compute flow graph dominance frontiers.
void ComputeDominanceFrontiers(BasicBlock** postOrder, int count, BlkToBlkVectorMap* mapDF);
// Compute the iterated dominance frontier for the specified block.
void ComputeIteratedDominanceFrontier(BasicBlock* b, const BlkToBlkVectorMap* mapDF, BlkVector* bIDF);
// Insert a new GT_PHI statement.
void InsertPhi(BasicBlock* block, unsigned lclNum);
// Add a new GT_PHI_ARG node to an existing GT_PHI node
void AddPhiArg(
BasicBlock* block, Statement* stmt, GenTreePhi* phi, unsigned lclNum, unsigned ssaNum, BasicBlock* pred);
// Requires "postOrder" to hold the blocks of the flowgraph in topologically sorted order. Requires
// count to be the valid entries in the "postOrder" array. Inserts GT_PHI nodes at the beginning
// of basic blocks that require them like so:
// GT_ASG(GT_LCL_VAR, GT_PHI(GT_PHI_ARG(ssaNum, Block*), GT_PHI_ARG(ssaNum, Block*), ...));
void InsertPhiFunctions(BasicBlock** postOrder, int count);
// Rename all definitions and uses within the compiled method.
void RenameVariables();
// Rename all definitions and uses within a block.
void BlockRenameVariables(BasicBlock* block);
// Rename a local or memory definition generated by a GT_ASG node.
void RenameDef(GenTreeOp* asgNode, BasicBlock* block);
// Rename a use of a local variable.
void RenameLclUse(GenTreeLclVarCommon* lclNode);
// Assumes that "block" contains a definition for local var "lclNum", with SSA number "ssaNum".
    // If "block" is within one or more try blocks,
// and the local variable is live at the start of the corresponding handlers,
// add this SSA number "ssaNum" to the argument list of the phi for the variable in the start
// block of those handlers.
void AddDefToHandlerPhis(BasicBlock* block, unsigned lclNum, unsigned ssaNum);
// Same as above, for memory.
void AddMemoryDefToHandlerPhis(MemoryKind memoryKind, BasicBlock* block, unsigned ssaNum);
// Add GT_PHI_ARG nodes to the GT_PHI nodes within block's successors.
void AddPhiArgsToSuccessors(BasicBlock* block);
#ifdef DEBUG
void Print(BasicBlock** postOrder, int count);
#endif
private:
Compiler* m_pCompiler;
CompAllocator m_allocator;
// Bit vector used by TopologicalSort and ComputeImmediateDom to track already visited blocks.
BitVecTraits m_visitedTraits;
BitVec m_visited;
SsaRenameState m_renameStack;
};
| -1 |
dotnet/runtime | 66,007 | [monoapi] Add mono_method_get_unmanaged_callers_only_ftnptr | Like `RuntimeMethodHandle.GetFunctionPointer`, but callable from native code | lambdageek | 2022-03-01T15:38:09Z | 2022-03-03T15:38:37Z | faa272f860f83802a9cee65619e6e2e841b05433 | 1cfa6d6964d890f9673b25d2165532f9a43710a7 | [monoapi] Add mono_method_get_unmanaged_callers_only_ftnptr. Like `RuntimeMethodHandle.GetFunctionPointer`, but callable from native code | ./src/coreclr/vm/debuginfostore.h | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// DebugInfoStore
#ifndef __DebugInfoStore_H_
#define __DebugInfoStore_H_
// Debugging information is described in CorInfo.h
#include "corinfo.h"
#include "nibblestream.h"
//-----------------------------------------------------------------------------
// Information to request Debug info.
//-----------------------------------------------------------------------------
class DebugInfoRequest
{
public:
#ifdef _DEBUG
// Must initialize via an Init*() function, not just a ctor.
// In debug, ctor sets fields to values that will cause asserts if not initialized.
DebugInfoRequest()
{
SUPPORTS_DAC;
m_pMD = NULL;
m_addrStart = NULL;
}
#endif
// Eventually we may have many ways to initialize a request.
// Init given a method desc and starting address for a native code blob.
void InitFromStartingAddr(MethodDesc * pDesc, PCODE addrCode);
MethodDesc * GetMD() const { LIMITED_METHOD_DAC_CONTRACT; return m_pMD; }
PCODE GetStartAddress() const { LIMITED_METHOD_DAC_CONTRACT; return m_addrStart; }
protected:
MethodDesc * m_pMD;
PCODE m_addrStart;
};
//-----------------------------------------------------------------------------
// A Debug-Info Store abstracts the storage of debugging information
//-----------------------------------------------------------------------------
// We pass the IDS an allocator which it uses to hand the data back.
// pData is data the allocator may use for 'new'.
// Eg, perhaps we have multiple heaps (eg, loader-heaps per appdomain).
typedef BYTE* (*FP_IDS_NEW)(void * pData, size_t cBytes);
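// Illustrative sketch, not part of the runtime: the simplest possible FP_IDS_NEW
// allocator. It ignores pData and uses the CRT heap; real callers typically route
// pData to a loader heap or other domain-specific allocator instead.
inline BYTE* SimpleDebugInfoNewSketch(void * pData, size_t cBytes)
{
    (void)pData; // unused in this trivial version
    return new (nothrow) BYTE[cBytes];
}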
//-----------------------------------------------------------------------------
// Utility routines used for compression
// Note that the compression is just an implementation detail of the stores,
// and so these are just utility routines exposed to the stores.
//-----------------------------------------------------------------------------
class CompressDebugInfo
{
public:
// Compress incoming data and write it to the provided NibbleWriter.
static void CompressBoundaries(
IN ULONG32 cMap,
IN ICorDebugInfo::OffsetMapping *pMap,
IN OUT NibbleWriter * pWriter
);
static void CompressVars(
IN ULONG32 cVars,
IN ICorDebugInfo::NativeVarInfo *vars,
IN OUT NibbleWriter * pBuffer
);
    // Stores the result into an SBuffer (used by NGen) or a LoaderHeap (used by the JIT)
static PTR_BYTE CompressBoundariesAndVars(
IN ICorDebugInfo::OffsetMapping * pOffsetMapping,
IN ULONG iOffsetMapping,
IN ICorDebugInfo::NativeVarInfo * pNativeVarInfo,
IN ULONG iNativeVarInfo,
IN PatchpointInfo * patchpointInfo,
IN OUT SBuffer * pDebugInfoBuffer,
IN LoaderHeap * pLoaderHeap
);
public:
// Uncompress data supplied by Compress functions.
static void RestoreBoundariesAndVars(
IN FP_IDS_NEW fpNew, IN void * pNewData,
IN PTR_BYTE pDebugInfo,
OUT ULONG32 * pcMap, // number of entries in ppMap
OUT ICorDebugInfo::OffsetMapping **ppMap, // pointer to newly allocated array
OUT ULONG32 *pcVars,
OUT ICorDebugInfo::NativeVarInfo **ppVars,
BOOL hasFlagByte
);
#ifdef FEATURE_ON_STACK_REPLACEMENT
static PatchpointInfo * RestorePatchpointInfo(
IN PTR_BYTE pDebugInfo
);
#endif
#ifdef DACCESS_COMPILE
static void EnumMemoryRegions(CLRDataEnumMemoryFlags flags, PTR_BYTE pDebugInfo, BOOL hasFlagByte);
#endif
};
//-----------------------------------------------------------------------------
// Debug-Info manager. This is a process-wide store; there should be only one
// instance of it, and it delegates to sub-stores as needed.
//-----------------------------------------------------------------------------
class DebugInfoManager
{
public:
static BOOL GetBoundariesAndVars(
const DebugInfoRequest & request,
IN FP_IDS_NEW fpNew, IN void * pNewData,
OUT ULONG32 * pcMap,
OUT ICorDebugInfo::OffsetMapping ** ppMap,
OUT ULONG32 * pcVars,
OUT ICorDebugInfo::NativeVarInfo ** ppVars);
#ifdef DACCESS_COMPILE
static void EnumMemoryRegionsForMethodDebugInfo(CLRDataEnumMemoryFlags flags, MethodDesc * pMD);
#endif
};
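// Illustrative sketch, not part of the runtime: fetching the IL-to-native offset
// map for a jitted method through DebugInfoManager, using the trivial allocator
// sketched above. The returned buffers follow the allocator's ownership contract
// and are intentionally not freed here.
inline void DumpBoundariesSketch(MethodDesc *pMD, PCODE codeStart)
{
    DebugInfoRequest request;
    request.InitFromStartingAddr(pMD, codeStart);
    ULONG32 cMap = 0, cVars = 0;
    ICorDebugInfo::OffsetMapping *pMap = NULL;
    ICorDebugInfo::NativeVarInfo *pVars = NULL;
    if (DebugInfoManager::GetBoundariesAndVars(
            request, SimpleDebugInfoNewSketch, NULL, &cMap, &pMap, &cVars, &pVars))
    {
        // pMap[0..cMap) now maps IL offsets to native offsets for this method.
    }
}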
#endif // __DebugInfoStore_H_
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// DebugInfoStore
#ifndef __DebugInfoStore_H_
#define __DebugInfoStore_H_
// Debugging information is described in CorInfo.h
#include "corinfo.h"
#include "nibblestream.h"
//-----------------------------------------------------------------------------
// Information to request Debug info.
//-----------------------------------------------------------------------------
class DebugInfoRequest
{
public:
#ifdef _DEBUG
// Must initialize via an Init*() function, not just a ctor.
// In debug, ctor sets fields to values that will cause asserts if not initialized.
DebugInfoRequest()
{
SUPPORTS_DAC;
m_pMD = NULL;
m_addrStart = NULL;
}
#endif
// Eventually we may have many ways to initialize a request.
// Init given a method desc and starting address for a native code blob.
void InitFromStartingAddr(MethodDesc * pDesc, PCODE addrCode);
MethodDesc * GetMD() const { LIMITED_METHOD_DAC_CONTRACT; return m_pMD; }
PCODE GetStartAddress() const { LIMITED_METHOD_DAC_CONTRACT; return m_addrStart; }
protected:
MethodDesc * m_pMD;
PCODE m_addrStart;
};
//-----------------------------------------------------------------------------
// A Debug-Info Store abstracts the storage of debugging information
//-----------------------------------------------------------------------------
// We pass the IDS an allocator which it uses to hand the data back.
// pData is data the allocator may use for 'new'.
// Eg, perhaps we have multiple heaps (eg, loader-heaps per appdomain).
typedef BYTE* (*FP_IDS_NEW)(void * pData, size_t cBytes);
//-----------------------------------------------------------------------------
// Utility routines used for compression
// Note that the compression is just an implementation detail of the stores,
// and so these are just utility routines exposed to the stores.
//-----------------------------------------------------------------------------
class CompressDebugInfo
{
public:
// Compress incoming data and write it to the provided NibbleWriter.
static void CompressBoundaries(
IN ULONG32 cMap,
IN ICorDebugInfo::OffsetMapping *pMap,
IN OUT NibbleWriter * pWriter
);
static void CompressVars(
IN ULONG32 cVars,
IN ICorDebugInfo::NativeVarInfo *vars,
IN OUT NibbleWriter * pBuffer
);
    // Stores the result into an SBuffer (used by NGen) or a LoaderHeap (used by the JIT)
static PTR_BYTE CompressBoundariesAndVars(
IN ICorDebugInfo::OffsetMapping * pOffsetMapping,
IN ULONG iOffsetMapping,
IN ICorDebugInfo::NativeVarInfo * pNativeVarInfo,
IN ULONG iNativeVarInfo,
IN PatchpointInfo * patchpointInfo,
IN OUT SBuffer * pDebugInfoBuffer,
IN LoaderHeap * pLoaderHeap
);
public:
// Uncompress data supplied by Compress functions.
static void RestoreBoundariesAndVars(
IN FP_IDS_NEW fpNew, IN void * pNewData,
IN PTR_BYTE pDebugInfo,
OUT ULONG32 * pcMap, // number of entries in ppMap
OUT ICorDebugInfo::OffsetMapping **ppMap, // pointer to newly allocated array
OUT ULONG32 *pcVars,
OUT ICorDebugInfo::NativeVarInfo **ppVars,
BOOL hasFlagByte
);
#ifdef FEATURE_ON_STACK_REPLACEMENT
static PatchpointInfo * RestorePatchpointInfo(
IN PTR_BYTE pDebugInfo
);
#endif
#ifdef DACCESS_COMPILE
static void EnumMemoryRegions(CLRDataEnumMemoryFlags flags, PTR_BYTE pDebugInfo, BOOL hasFlagByte);
#endif
};
//-----------------------------------------------------------------------------
// Debug-Info manager. This is a process-wide store; there should be only one
// instance of it, and it delegates to sub-stores as needed.
//-----------------------------------------------------------------------------
class DebugInfoManager
{
public:
static BOOL GetBoundariesAndVars(
const DebugInfoRequest & request,
IN FP_IDS_NEW fpNew, IN void * pNewData,
OUT ULONG32 * pcMap,
OUT ICorDebugInfo::OffsetMapping ** ppMap,
OUT ULONG32 * pcVars,
OUT ICorDebugInfo::NativeVarInfo ** ppVars);
#ifdef DACCESS_COMPILE
static void EnumMemoryRegionsForMethodDebugInfo(CLRDataEnumMemoryFlags flags, MethodDesc * pMD);
#endif
};
#endif // __DebugInfoStore_H_
| -1 |
dotnet/runtime | 66,007 | [monoapi] Add mono_method_get_unmanaged_callers_only_ftnptr | Like `RuntimeMethodHandle.GetFunctionPointer`, but callable from native code | lambdageek | 2022-03-01T15:38:09Z | 2022-03-03T15:38:37Z | faa272f860f83802a9cee65619e6e2e841b05433 | 1cfa6d6964d890f9673b25d2165532f9a43710a7 | [monoapi] Add mono_method_get_unmanaged_callers_only_ftnptr. Like `RuntimeMethodHandle.GetFunctionPointer`, but callable from native code | ./src/mono/mono/utils/mono-path.c | /**
* \file
* Routines for handling path names.
*
* Authors:
* Gonzalo Paniagua Javier ([email protected])
* Miguel de Icaza ([email protected])
*
* (C) 2006 Novell, Inc. http://www.novell.com
*
*/
#include <config.h>
#include <glib.h>
#include <errno.h>
#include <string.h>
#include <stdlib.h>
#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif
/* This is only needed for the mono_path_canonicalize code, MAXSYMLINKS, could be moved */
#ifdef HAVE_SYS_PARAM_H
#include <sys/param.h>
#endif
#include "mono-path.h"
/* Embedded systems lack MAXSYMLINKS */
#ifndef MAXSYMLINKS
#define MAXSYMLINKS 3
#endif
/* Resolves '..' and '.' references in a path. If the path provided is relative,
* it will be relative to the current directory */
/* For Native Client, the above is not true: since there is no getcwd, we keep */
/* the file being passed in relative to '.' and do not resolve it */
/* There are a couple of tests for this method in mono/test/mono-path.cs */
gchar *
mono_path_canonicalize (const char *path)
{
gchar *abspath, *pos, *lastpos, *dest;
int backc;
if (g_path_is_absolute (path)) {
abspath = g_strdup (path);
} else {
gchar *tmpdir = g_get_current_dir ();
abspath = g_build_filename (tmpdir, path, (const char*)NULL);
g_free (tmpdir);
}
#ifdef HOST_WIN32
g_strdelimit (abspath, '/', '\\');
#endif
abspath = g_strreverse (abspath);
backc = 0;
dest = lastpos = abspath;
pos = strchr (lastpos, G_DIR_SEPARATOR);
while (pos != NULL) {
int len = pos - lastpos;
if (len == 1 && lastpos [0] == '.') {
// nop
} else if (len == 2 && lastpos [0] == '.' && lastpos [1] == '.') {
backc++;
} else if (len > 0) {
if (backc > 0) {
backc--;
} else {
if (dest != lastpos)
/* The two strings can overlap */
memmove (dest, lastpos, len + 1);
dest += len + 1;
}
}
lastpos = pos + 1;
pos = strchr (lastpos, G_DIR_SEPARATOR);
}
#ifdef HOST_WIN32
/* Avoid removing the first '\' for UNC paths. We must make sure that it's indeed an UNC path
by checking if the \\ pair happens exactly at the end of the string.
*/
if (*(lastpos-1) == G_DIR_SEPARATOR && *(lastpos-2) == G_DIR_SEPARATOR && *lastpos == 0)
lastpos = lastpos-1;
#endif
if (dest != lastpos) strcpy (dest, lastpos);
g_strreverse (abspath);
/* We strip away all trailing dir separators. This is not correct for the root directory,
* since we'll return an empty string, so re-append a dir separator if there is none in the
* result */
if (strchr (abspath, G_DIR_SEPARATOR) == NULL) {
int len = strlen (abspath);
abspath = (gchar *) g_realloc (abspath, len + 2);
abspath [len] = G_DIR_SEPARATOR;
abspath [len+1] = 0;
}
return abspath;
}
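/* Illustrative sketch, not part of this file: typical canonicalize usage. The
 * input literal is a placeholder; the caller owns and must g_free the result. */
static gchar *
mono_path_canonicalize_example (void)
{
	/* '.' components are dropped and '..' consumes the preceding component,
	 * so on POSIX this returns "/usr/include/glib.h". */
	return mono_path_canonicalize ("/usr/lib/../include/./glib.h");
}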
/*
* This ensures that the path that we store points to the final file
* not a path to a symlink.
*/
#if !defined(HOST_NO_SYMLINKS)
static gchar *
resolve_symlink (const char *path)
{
char *p, *concat, *dir;
char buffer [PATH_MAX+1];
int n, iterations = 0;
p = g_strdup (path);
do {
iterations++;
n = readlink (p, buffer, sizeof (buffer)-1);
if (n < 0){
char *copy = p;
p = mono_path_canonicalize (copy);
g_free (copy);
return p;
}
buffer [n] = 0;
if (!g_path_is_absolute (buffer)) {
dir = g_path_get_dirname (p);
concat = g_build_filename (dir, buffer, (const char*)NULL);
g_free (dir);
} else {
concat = g_strdup (buffer);
}
g_free (p);
p = mono_path_canonicalize (concat);
g_free (concat);
} while (iterations < MAXSYMLINKS);
return p;
}
#endif
gchar *
mono_path_resolve_symlinks (const char *path)
{
#if defined(HOST_NO_SYMLINKS)
return mono_path_canonicalize (path);
#else
gchar **split = g_strsplit (path, G_DIR_SEPARATOR_S, -1);
gchar *p = g_strdup ("");
int i;
for (i = 0; split [i] != NULL; i++) {
gchar *tmp = NULL;
// resolve_symlink of "" goes into canonicalize which resolves to cwd
if (strcmp (split [i], "") != 0) {
tmp = g_strdup_printf ("%s%s", p, split [i]);
g_free (p);
p = resolve_symlink (tmp);
g_free (tmp);
}
if (split [i+1] != NULL) {
tmp = g_strdup_printf ("%s%s", p, G_DIR_SEPARATOR_S);
g_free (p);
p = tmp;
}
}
g_strfreev (split);
return p;
#endif
}
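/* Illustrative sketch, not part of this file: resolving a path whose components
 * may be symlinks. The literal is a placeholder; the caller owns the result. */
static gchar *
mono_path_resolve_symlinks_example (void)
{
	/* Each component is resolved in turn, so directory symlinks in the middle
	 * of the path are followed as well as the final file. */
	return mono_path_resolve_symlinks ("/usr/bin/cc");
}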
static gboolean
mono_path_char_is_separator (char ch)
{
#ifdef HOST_WIN32
return ch == '/' || ch == '\\';
#else
return ch == '/';
#endif
}
static gboolean
mono_path_contains_separator (const char *path, size_t length)
{
for (size_t i = 0; i < length; ++i) {
if (mono_path_char_is_separator (path [i]))
return TRUE;
}
return FALSE;
}
static void
mono_path_remove_trailing_path_separators (const char *path, size_t *length)
{
size_t i = *length;
while (i > 0 && mono_path_char_is_separator (path [i - 1]))
i -= 1;
*length = i;
}
#ifdef HOST_WIN32
static gboolean
mono_path_char_is_lowercase (char ch)
{
return ch >= 'a' && ch <= 'z';
}
// Version-specific unichar2 upcase tables are stored per-volume at NTFS format-time.
// This is just a subset.
static char
mono_path_char_upcase (char a)
{
return mono_path_char_is_lowercase (a) ? (char)(a - 'a' + 'A') : a;
}
static gboolean
mono_path_char_equal (char a, char b)
{
return a == b
|| mono_path_char_upcase (a) == mono_path_char_upcase (b)
|| (mono_path_char_is_separator (a) && mono_path_char_is_separator (b));
}
#endif
static gboolean
mono_path_equal (const char *a, const char *b, size_t length)
{
#ifdef HOST_WIN32
size_t i = 0;
for (i = 0; i < length && mono_path_char_equal (a [i], b [i]); ++i) {
// nothing
}
return i == length;
#else
return memcmp (a, b, length) == 0;
#endif
}
static size_t
mono_path_path_separator_length (const char *a, size_t length)
{
size_t i = 0;
while (i < length && mono_path_char_is_separator (a [i]))
++i;
return i;
}
/**
* mono_path_filename_in_basedir:
*
* Return \c TRUE if \p filename is "immediately" in \p basedir
*
* Both paths should be absolute and be mostly normalized.
* If the file is in a subdirectory of \p basedir, returns \c FALSE.
* This function doesn't touch a filesystem, it looks solely at path names.
*
* In fact, filename might not be absolute, in which case, FALSE.
* Ditto basedir.
*
* To belabor the intent:
* /1/2/3 is considered to be in /1/2
 * /1/2/3/4 is not considered to be in /1/2
*
* Besides a "slash sensitive" prefix match, also check for
* additional slashes.
*
* "Slash sensitive" prefix match means:
* /a/b is a prefix of /a/b/
* /a/b is not a prefix of /a/bc
* /a/b is maybe a prefix of /a/b
* The string being checked against must either end, or continue with a path separator.
* "Normal" prefix matching would be true for both.
*
* This function also considers runs of slashes to be equivalent to single slashes,
* which is generally Windows behavior, except at the start of a path.
*/
gboolean
mono_path_filename_in_basedir (const char *filename, const char *basedir)
{
g_assert (filename);
g_assert (basedir);
size_t filename_length = strlen (filename);
size_t basedir_length = strlen (basedir);
if (!mono_path_contains_separator (filename, filename_length))
return FALSE;
if (!mono_path_contains_separator (basedir, basedir_length))
return FALSE;
//g_assertf (mono_path_contains_separator (filename, filename_length), "filename:%s basedir:%s", filename, basedir);
//g_assertf (mono_path_contains_separator (basedir, basedir_length), "filename:%s basedir:%s", filename, basedir);
mono_path_remove_trailing_path_separators (filename, &filename_length);
mono_path_remove_trailing_path_separators (basedir, &basedir_length);
// basedir_length can be 0 at this point and that is ok.
if (!filename_length
|| filename_length <= basedir_length
|| (basedir_length && !mono_path_equal (filename, basedir, basedir_length)))
return FALSE;
// /foo/1 is in /foo.
// /foo//1 is in /foo.
// /foo/1/ is in /foo.
// /foo//1/ is in /foo.
// /foo//1// is in /foo.
// /foo is not in /foo.
// /foo/ is not in /foo.
// /foob is not in /foo.
// /foo/1/2 is not in /foo.
// Skip basedir's length within filename.
const char *after_base = &filename [basedir_length];
size_t after_base_length = filename_length - basedir_length;
// Skip any number of slashes.
size_t skip_separators = mono_path_path_separator_length (after_base, after_base_length);
after_base += skip_separators;
after_base_length -= skip_separators;
	// There must have been at least one slash, and then after any non-slashes,
	// there must not be any more slashes.
return skip_separators && !mono_path_contains_separator (after_base, after_base_length);
}
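/* Illustrative sketch, not part of this file: the doc comment above expressed in
 * code. Returns TRUE only when every expectation holds. */
static gboolean
mono_path_filename_in_basedir_examples (void)
{
	return mono_path_filename_in_basedir ("/foo/1", "/foo")     /* immediate child */
	    && mono_path_filename_in_basedir ("/foo//1/", "/foo")   /* run of slashes collapses */
	    && !mono_path_filename_in_basedir ("/foo/1/2", "/foo")  /* nested too deep */
	    && !mono_path_filename_in_basedir ("/foob", "/foo");    /* prefix, but not a child */
}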
| /**
* \file
* Routines for handling path names.
*
* Authors:
* Gonzalo Paniagua Javier ([email protected])
* Miguel de Icaza ([email protected])
*
* (C) 2006 Novell, Inc. http://www.novell.com
*
*/
#include <config.h>
#include <glib.h>
#include <errno.h>
#include <string.h>
#include <stdlib.h>
#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif
/* This is only needed for the mono_path_canonicalize code, MAXSYMLINKS, could be moved */
#ifdef HAVE_SYS_PARAM_H
#include <sys/param.h>
#endif
#include "mono-path.h"
/* Embedded systems lack MAXSYMLINKS */
#ifndef MAXSYMLINKS
#define MAXSYMLINKS 3
#endif
/* Resolves '..' and '.' references in a path. If the path provided is relative,
* it will be relative to the current directory */
/* For Native Client, the above is not true: since there is no getcwd, we keep */
/* the file being passed in relative to '.' and do not resolve it */
/* There are a couple of tests for this method in mono/test/mono-path.cs */
gchar *
mono_path_canonicalize (const char *path)
{
gchar *abspath, *pos, *lastpos, *dest;
int backc;
if (g_path_is_absolute (path)) {
abspath = g_strdup (path);
} else {
gchar *tmpdir = g_get_current_dir ();
abspath = g_build_filename (tmpdir, path, (const char*)NULL);
g_free (tmpdir);
}
#ifdef HOST_WIN32
g_strdelimit (abspath, '/', '\\');
#endif
abspath = g_strreverse (abspath);
backc = 0;
dest = lastpos = abspath;
pos = strchr (lastpos, G_DIR_SEPARATOR);
while (pos != NULL) {
int len = pos - lastpos;
if (len == 1 && lastpos [0] == '.') {
// nop
} else if (len == 2 && lastpos [0] == '.' && lastpos [1] == '.') {
backc++;
} else if (len > 0) {
if (backc > 0) {
backc--;
} else {
if (dest != lastpos)
/* The two strings can overlap */
memmove (dest, lastpos, len + 1);
dest += len + 1;
}
}
lastpos = pos + 1;
pos = strchr (lastpos, G_DIR_SEPARATOR);
}
#ifdef HOST_WIN32
/* Avoid removing the first '\' for UNC paths. We must make sure that it's indeed an UNC path
by checking if the \\ pair happens exactly at the end of the string.
*/
if (*(lastpos-1) == G_DIR_SEPARATOR && *(lastpos-2) == G_DIR_SEPARATOR && *lastpos == 0)
lastpos = lastpos-1;
#endif
if (dest != lastpos) strcpy (dest, lastpos);
g_strreverse (abspath);
/* We strip away all trailing dir separators. This is not correct for the root directory,
* since we'll return an empty string, so re-append a dir separator if there is none in the
* result */
if (strchr (abspath, G_DIR_SEPARATOR) == NULL) {
int len = strlen (abspath);
abspath = (gchar *) g_realloc (abspath, len + 2);
abspath [len] = G_DIR_SEPARATOR;
abspath [len+1] = 0;
}
return abspath;
}
/*
* This ensures that the path that we store points to the final file
* not a path to a symlink.
*/
#if !defined(HOST_NO_SYMLINKS)
static gchar *
resolve_symlink (const char *path)
{
char *p, *concat, *dir;
char buffer [PATH_MAX+1];
int n, iterations = 0;
p = g_strdup (path);
do {
iterations++;
n = readlink (p, buffer, sizeof (buffer)-1);
if (n < 0){
char *copy = p;
p = mono_path_canonicalize (copy);
g_free (copy);
return p;
}
buffer [n] = 0;
if (!g_path_is_absolute (buffer)) {
dir = g_path_get_dirname (p);
concat = g_build_filename (dir, buffer, (const char*)NULL);
g_free (dir);
} else {
concat = g_strdup (buffer);
}
g_free (p);
p = mono_path_canonicalize (concat);
g_free (concat);
} while (iterations < MAXSYMLINKS);
return p;
}
#endif
gchar *
mono_path_resolve_symlinks (const char *path)
{
#if defined(HOST_NO_SYMLINKS)
return mono_path_canonicalize (path);
#else
gchar **split = g_strsplit (path, G_DIR_SEPARATOR_S, -1);
gchar *p = g_strdup ("");
int i;
for (i = 0; split [i] != NULL; i++) {
gchar *tmp = NULL;
// resolve_symlink of "" goes into canonicalize which resolves to cwd
if (strcmp (split [i], "") != 0) {
tmp = g_strdup_printf ("%s%s", p, split [i]);
g_free (p);
p = resolve_symlink (tmp);
g_free (tmp);
}
if (split [i+1] != NULL) {
tmp = g_strdup_printf ("%s%s", p, G_DIR_SEPARATOR_S);
g_free (p);
p = tmp;
}
}
g_strfreev (split);
return p;
#endif
}
static gboolean
mono_path_char_is_separator (char ch)
{
#ifdef HOST_WIN32
return ch == '/' || ch == '\\';
#else
return ch == '/';
#endif
}
static gboolean
mono_path_contains_separator (const char *path, size_t length)
{
for (size_t i = 0; i < length; ++i) {
if (mono_path_char_is_separator (path [i]))
return TRUE;
}
return FALSE;
}
static void
mono_path_remove_trailing_path_separators (const char *path, size_t *length)
{
size_t i = *length;
while (i > 0 && mono_path_char_is_separator (path [i - 1]))
i -= 1;
*length = i;
}
#ifdef HOST_WIN32
static gboolean
mono_path_char_is_lowercase (char ch)
{
return ch >= 'a' && ch <= 'z';
}
// Version-specific unichar2 upcase tables are stored per-volume at NTFS format-time.
// This is just a subset.
static char
mono_path_char_upcase (char a)
{
return mono_path_char_is_lowercase (a) ? (char)(a - 'a' + 'A') : a;
}
static gboolean
mono_path_char_equal (char a, char b)
{
return a == b
|| mono_path_char_upcase (a) == mono_path_char_upcase (b)
|| (mono_path_char_is_separator (a) && mono_path_char_is_separator (b));
}
#endif
static gboolean
mono_path_equal (const char *a, const char *b, size_t length)
{
#ifdef HOST_WIN32
size_t i = 0;
for (i = 0; i < length && mono_path_char_equal (a [i], b [i]); ++i) {
// nothing
}
return i == length;
#else
return memcmp (a, b, length) == 0;
#endif
}
static size_t
mono_path_path_separator_length (const char *a, size_t length)
{
size_t i = 0;
while (i < length && mono_path_char_is_separator (a [i]))
++i;
return i;
}
/**
* mono_path_filename_in_basedir:
*
* Return \c TRUE if \p filename is "immediately" in \p basedir
*
* Both paths should be absolute and be mostly normalized.
* If the file is in a subdirectory of \p basedir, returns \c FALSE.
* This function doesn't touch a filesystem, it looks solely at path names.
*
* In fact, filename might not be absolute, in which case, FALSE.
* Ditto basedir.
*
* To belabor the intent:
* /1/2/3 is considered to be in /1/2
 * /1/2/3/4 is not considered to be in /1/2
*
* Besides a "slash sensitive" prefix match, also check for
* additional slashes.
*
* "Slash sensitive" prefix match means:
* /a/b is a prefix of /a/b/
* /a/b is not a prefix of /a/bc
* /a/b is maybe a prefix of /a/b
* The string being checked against must either end, or continue with a path separator.
* "Normal" prefix matching would be true for both.
*
* This function also considers runs of slashes to be equivalent to single slashes,
* which is generally Windows behavior, except at the start of a path.
*/
gboolean
mono_path_filename_in_basedir (const char *filename, const char *basedir)
{
g_assert (filename);
g_assert (basedir);
size_t filename_length = strlen (filename);
size_t basedir_length = strlen (basedir);
if (!mono_path_contains_separator (filename, filename_length))
return FALSE;
if (!mono_path_contains_separator (basedir, basedir_length))
return FALSE;
//g_assertf (mono_path_contains_separator (filename, filename_length), "filename:%s basedir:%s", filename, basedir);
//g_assertf (mono_path_contains_separator (basedir, basedir_length), "filename:%s basedir:%s", filename, basedir);
mono_path_remove_trailing_path_separators (filename, &filename_length);
mono_path_remove_trailing_path_separators (basedir, &basedir_length);
// basedir_length can be 0 at this point and that is ok.
if (!filename_length
|| filename_length <= basedir_length
|| (basedir_length && !mono_path_equal (filename, basedir, basedir_length)))
return FALSE;
// /foo/1 is in /foo.
// /foo//1 is in /foo.
// /foo/1/ is in /foo.
// /foo//1/ is in /foo.
// /foo//1// is in /foo.
// /foo is not in /foo.
// /foo/ is not in /foo.
// /foob is not in /foo.
// /foo/1/2 is not in /foo.
// Skip basedir's length within filename.
const char *after_base = &filename [basedir_length];
size_t after_base_length = filename_length - basedir_length;
// Skip any number of slashes.
size_t skip_separators = mono_path_path_separator_length (after_base, after_base_length);
after_base += skip_separators;
after_base_length -= skip_separators;
	// There must have been at least one slash, and then after any non-slashes,
	// there must not be any more slashes.
return skip_separators && !mono_path_contains_separator (after_base, after_base_length);
}
| -1 |
dotnet/runtime | 66,007 | [monoapi] Add mono_method_get_unmanaged_callers_only_ftnptr | Like `RuntimeMethodHandle.GetFunctionPointer`, but callable from native code | lambdageek | 2022-03-01T15:38:09Z | 2022-03-03T15:38:37Z | faa272f860f83802a9cee65619e6e2e841b05433 | 1cfa6d6964d890f9673b25d2165532f9a43710a7 | [monoapi] Add mono_method_get_unmanaged_callers_only_ftnptr. Like `RuntimeMethodHandle.GetFunctionPointer`, but callable from native code | ./src/coreclr/vm/rcwrefcache.h | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//
//
/*============================================================
**
** Header: RCWRefCache.h
**
**
** Purpose: Defines RCWRefCache class
** This class maintains a per-AppDomain cache that can be used
** by RCWs to reference other CCWs
===========================================================*/
#ifndef _H_RCWREFCACHE_
#define _H_RCWREFCACHE_
#ifdef FEATURE_COMWRAPPERS
class RCWRefCache
{
public :
RCWRefCache(AppDomain *pAppDomain);
~RCWRefCache();
//
// Add a reference from obj1 to obj2
//
HRESULT AddReferenceFromObjectToObject(OBJECTREF obj1, OBJECTREF obj2);
//
// Reset dependent handle cache by assigning 0 to m_dwDepHndListFreeIndex.
//
void ResetDependentHandles();
//
// Shrink the dependent handle cache if necessary (will destroy handles) and clear unused handles.
//
void ShrinkDependentHandles();
private :
//
// Add obj1 -> obj2 reference using dependent handle
// May fail if OOM
//
HRESULT AddReferenceUsingDependentHandle(OBJECTREF obj1, OBJECTREF obj2);
private :
AppDomain *m_pAppDomain; // Domain
CQuickArrayList<OBJECTHANDLE> m_depHndList; // Internal DependentHandle cache
// non-NULL dependent handles followed by NULL slots
DWORD m_dwDepHndListFreeIndex; // The starting index where m_depHndList has available slots
DWORD m_dwShrinkHint; // Keep track of how many times we use less than half handles
};
#endif // FEATURE_COMWRAPPERS
#endif // _H_RCWREFCACHE_
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//
//
/*============================================================
**
** Header: RCWRefCache.h
**
**
** Purpose: Defines RCWRefCache class
** This class maintains a per-AppDomain cache that can be used
** by RCWs to reference other CCWs
===========================================================*/
#ifndef _H_RCWREFCACHE_
#define _H_RCWREFCACHE_
#ifdef FEATURE_COMWRAPPERS
class RCWRefCache
{
public :
RCWRefCache(AppDomain *pAppDomain);
~RCWRefCache();
//
// Add a reference from obj1 to obj2
//
HRESULT AddReferenceFromObjectToObject(OBJECTREF obj1, OBJECTREF obj2);
//
// Reset dependent handle cache by assigning 0 to m_dwDepHndListFreeIndex.
//
void ResetDependentHandles();
//
// Shrink the dependent handle cache if necessary (will destroy handles) and clear unused handles.
//
void ShrinkDependentHandles();
private :
//
// Add obj1 -> obj2 reference using dependent handle
// May fail if OOM
//
HRESULT AddReferenceUsingDependentHandle(OBJECTREF obj1, OBJECTREF obj2);
private :
AppDomain *m_pAppDomain; // Domain
CQuickArrayList<OBJECTHANDLE> m_depHndList; // Internal DependentHandle cache
// non-NULL dependent handles followed by NULL slots
DWORD m_dwDepHndListFreeIndex; // The starting index where m_depHndList has available slots
DWORD m_dwShrinkHint; // Keep track of how many times we use less than half handles
};
#endif // FEATURE_COMWRAPPERS
#endif // _H_RCWREFCACHE_
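// Illustrative usage sketch (hypothetical call site, not part of this header;
// the GetRCWRefCache accessor is assumed here and is not declared above):
//
//   RCWRefCache *pCache = pAppDomain->GetRCWRefCache(); // hypothetical accessor
//   HRESULT hr = pCache->AddReferenceFromObjectToObject(objSource, objTarget);
//   if (FAILED(hr))
//       COMPlusThrowHR(hr); // may fail on OOM, per AddReferenceUsingDependentHandle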
| -1 |
dotnet/runtime | 66,007 | [monoapi] Add mono_method_get_unmanaged_callers_only_ftnptr | Like `RuntimeMethodHandle.GetFunctionPointer`, but callable from native code | lambdageek | 2022-03-01T15:38:09Z | 2022-03-03T15:38:37Z | faa272f860f83802a9cee65619e6e2e841b05433 | 1cfa6d6964d890f9673b25d2165532f9a43710a7 | [monoapi] Add mono_method_get_unmanaged_callers_only_ftnptr. Like `RuntimeMethodHandle.GetFunctionPointer`, but callable from native code | ./src/mono/mono/eglib/gmisc-win32.c | /*
* gmisc.c: Misc functions with no place to go (right now)
*
* Author:
* Aaron Bockover ([email protected])
*
* (C) 2006 Novell, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
* LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
* OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#include <config.h>
#include <stdlib.h>
#include <glib.h>
#include <windows.h>
#include <direct.h>
#include <io.h>
#include <assert.h>
#include "../utils/w32subset.h"
gboolean
g_hasenv (const gchar *variable)
{
return g_getenv (variable) != NULL;
}
gchar *
g_getenv(const gchar *variable)
{
gunichar2 *var, *buffer;
gchar* val = NULL;
gint32 buffer_size = 1024;
gint32 retval;
var = u8to16(variable);
// FIXME This should loop in case another thread is growing the data.
buffer = g_new (gunichar2, buffer_size);
retval = GetEnvironmentVariableW (var, buffer, buffer_size);
if (retval != 0) {
if (retval > buffer_size) {
g_free (buffer);
buffer_size = retval;
buffer = g_malloc(buffer_size*sizeof(gunichar2));
retval = GetEnvironmentVariableW (var, buffer, buffer_size);
}
val = u16to8 (buffer);
} else {
if (GetLastError () != ERROR_ENVVAR_NOT_FOUND){
val = g_malloc (1);
*val = 0;
}
}
g_free(var);
g_free(buffer);
return val;
}
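/* Minimal usage sketch (illustrative, not part of the original file): unlike
 * desktop GLib, this g_getenv returns a freshly allocated string that the
 * caller owns and must release with g_free. */
static void
g_getenv_usage_example (void)
{
	gchar *path = g_getenv ("PATH"); /* "PATH" is just an example variable */
	if (path) {
		/* ... use path ... */
		g_free (path);
	}
}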
gboolean
g_setenv(const gchar *variable, const gchar *value, gboolean overwrite)
{
gunichar2 *var, *val;
gboolean result;
var = u8to16(variable);
val = u8to16(value);
result = (SetEnvironmentVariableW(var, val) != 0) ? TRUE : FALSE;
g_free(var);
g_free(val);
return result;
}
#if HAVE_API_SUPPORT_WIN32_LOCAL_INFO || HAVE_API_SUPPORT_WIN32_LOCAL_INFO_EX
gchar*
g_win32_getlocale(void)
{
gunichar2 buf[19];
gint ccBuf = 0;
#if HAVE_API_SUPPORT_WIN32_LOCAL_INFO_EX
ccBuf = GetLocaleInfoEx (LOCALE_NAME_USER_DEFAULT, LOCALE_SISO639LANGNAME, buf, 9);
#elif HAVE_API_SUPPORT_WIN32_LOCAL_INFO
LCID lcid = GetThreadLocale();
ccBuf = GetLocaleInfoW(lcid, LOCALE_SISO639LANGNAME, buf, 9);
#endif
if (ccBuf != 0) {
buf[ccBuf - 1] = L'-';
#if HAVE_API_SUPPORT_WIN32_LOCAL_INFO_EX
ccBuf = GetLocaleInfoEx (LOCALE_NAME_USER_DEFAULT, LOCALE_SISO3166CTRYNAME, buf + ccBuf, 9);
#elif HAVE_API_SUPPORT_WIN32_LOCAL_INFO
ccBuf = GetLocaleInfoW(lcid, LOCALE_SISO3166CTRYNAME, buf + ccBuf, 9);
#endif
assert (ccBuf <= 9);
}
// Check for failure.
if (ccBuf == 0)
buf[0] = L'\0';
return u16to8 (buf);
}
#elif !HAVE_EXTERN_DEFINED_WIN32_LOCAL_INFO && !HAVE_EXTERN_DEFINED_WIN32_LOCAL_INFO_EX
gchar*
g_win32_getlocale(void)
{
g_unsupported_api ("GetLocaleInfo, GetLocaleInfoEx");
SetLastError (ERROR_NOT_SUPPORTED);
return NULL;
}
#endif /* HAVE_API_SUPPORT_WIN32_LOCAL_INFO || HAVE_API_SUPPORT_WIN32_LOCAL_INFO_EX */
gboolean
g_path_is_absolute (const char *filename)
{
g_return_val_if_fail (filename != NULL, FALSE);
if (filename[0] != '\0' && filename[1] != '\0') {
if (filename[1] == ':' && filename[2] != '\0' &&
(filename[2] == '\\' || filename[2] == '/'))
return TRUE;
/* UNC paths */
else if (filename[0] == '\\' && filename[1] == '\\' &&
filename[2] != '\0')
return TRUE;
}
return FALSE;
}
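/* Illustrative sketch (not in the original source): expected results for
 * g_path_is_absolute, following the drive-letter and UNC rules implemented
 * above. */
static void
g_path_is_absolute_examples (void)
{
	g_assert (g_path_is_absolute ("C:\\Windows"));        /* drive letter + separator */
	g_assert (g_path_is_absolute ("C:/Windows"));         /* forward slash also accepted */
	g_assert (g_path_is_absolute ("\\\\server\\share"));  /* UNC path */
	g_assert (!g_path_is_absolute ("C:Windows"));         /* drive-relative, no separator */
	g_assert (!g_path_is_absolute ("Windows\\Temp"));     /* relative path */
}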
#if _MSC_VER && HAVE_API_SUPPORT_WIN32_SH_GET_FOLDER_PATH
#include <shlobj.h>
static gchar*
g_get_known_folder_path (void)
{
gchar *folder_path = NULL;
PWSTR profile_path = NULL;
#ifdef __cplusplus
REFGUID folderid = FOLDERID_Profile;
#else
REFGUID folderid = &FOLDERID_Profile;
#endif
HRESULT hr = SHGetKnownFolderPath (folderid, KF_FLAG_DEFAULT, NULL, &profile_path);
if (SUCCEEDED(hr)) {
folder_path = u16to8 (profile_path);
CoTaskMemFree (profile_path);
}
return folder_path;
}
#elif !HAVE_EXTERN_DEFINED_WIN32_SH_GET_FOLDER_PATH
static inline gchar *
g_get_known_folder_path (void)
{
return NULL;
}
#endif /* HAVE_API_SUPPORT_WIN32_SH_GET_FOLDER_PATH */
const gchar *
g_get_home_dir (void)
{
gchar *home_dir = g_get_known_folder_path ();
if (!home_dir) {
home_dir = (gchar *) g_getenv ("USERPROFILE");
}
if (!home_dir) {
const gchar *drive = g_getenv ("HOMEDRIVE");
const gchar *path = g_getenv ("HOMEPATH");
if (drive && path) {
home_dir = g_malloc (strlen (drive) + strlen (path) + 1);
if (home_dir) {
sprintf (home_dir, "%s%s", drive, path);
}
}
g_free ((void*)drive);
g_free ((void*)path);
}
return home_dir;
}
const gchar *
g_get_user_name (void)
{
const char * retName = g_getenv ("USER");
if (!retName)
retName = g_getenv ("USERNAME");
return retName;
}
static const char *tmp_dir;
const gchar *
g_get_tmp_dir (void)
{
	if (tmp_dir == NULL){
		tmp_dir = g_getenv ("TMPDIR");
		if (tmp_dir == NULL){
			tmp_dir = g_getenv ("TMP");
			if (tmp_dir == NULL){
				tmp_dir = g_getenv ("TEMP");
				if (tmp_dir == NULL)
					tmp_dir = "C:\\temp";
			}
		}
	}
return tmp_dir;
}
gchar *
g_get_current_dir (void)
{
gunichar2 *buffer = NULL;
gchar* val = NULL;
gint32 retval, buffer_size = MAX_PATH;
buffer = g_new (gunichar2, buffer_size);
retval = GetCurrentDirectoryW (buffer_size, buffer);
if (retval != 0) {
// the size might be larger than MAX_PATH
// https://docs.microsoft.com/en-us/windows/win32/fileio/maximum-file-path-limitation?tabs=cmd
if (retval > buffer_size) {
buffer_size = retval;
buffer = g_realloc (buffer, buffer_size*sizeof(gunichar2));
retval = GetCurrentDirectoryW (buffer_size, buffer);
}
val = u16to8 (buffer);
} else {
if (GetLastError () != ERROR_ENVVAR_NOT_FOUND) {
val = g_malloc (1);
*val = 0;
}
}
g_free (buffer);
return val;
}
| /*
* gmisc.c: Misc functions with no place to go (right now)
*
* Author:
* Aaron Bockover ([email protected])
*
* (C) 2006 Novell, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
* LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
* OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#include <config.h>
#include <stdlib.h>
#include <glib.h>
#include <windows.h>
#include <direct.h>
#include <io.h>
#include <assert.h>
#include "../utils/w32subset.h"
gboolean
g_hasenv (const gchar *variable)
{
return g_getenv (variable) != NULL;
}
gchar *
g_getenv(const gchar *variable)
{
gunichar2 *var, *buffer;
gchar* val = NULL;
gint32 buffer_size = 1024;
gint32 retval;
var = u8to16(variable);
// FIXME This should loop in case another thread is growing the data.
buffer = g_new (gunichar2, buffer_size);
retval = GetEnvironmentVariableW (var, buffer, buffer_size);
if (retval != 0) {
if (retval > buffer_size) {
g_free (buffer);
buffer_size = retval;
buffer = g_malloc(buffer_size*sizeof(gunichar2));
retval = GetEnvironmentVariableW (var, buffer, buffer_size);
}
val = u16to8 (buffer);
} else {
if (GetLastError () != ERROR_ENVVAR_NOT_FOUND){
val = g_malloc (1);
*val = 0;
}
}
g_free(var);
g_free(buffer);
return val;
}
gboolean
g_setenv(const gchar *variable, const gchar *value, gboolean overwrite)
{
gunichar2 *var, *val;
gboolean result;
var = u8to16(variable);
val = u8to16(value);
result = (SetEnvironmentVariableW(var, val) != 0) ? TRUE : FALSE;
g_free(var);
g_free(val);
return result;
}
#if HAVE_API_SUPPORT_WIN32_LOCAL_INFO || HAVE_API_SUPPORT_WIN32_LOCAL_INFO_EX
gchar*
g_win32_getlocale(void)
{
gunichar2 buf[19];
gint ccBuf = 0;
#if HAVE_API_SUPPORT_WIN32_LOCAL_INFO_EX
ccBuf = GetLocaleInfoEx (LOCALE_NAME_USER_DEFAULT, LOCALE_SISO639LANGNAME, buf, 9);
#elif HAVE_API_SUPPORT_WIN32_LOCAL_INFO
LCID lcid = GetThreadLocale();
ccBuf = GetLocaleInfoW(lcid, LOCALE_SISO639LANGNAME, buf, 9);
#endif
if (ccBuf != 0) {
buf[ccBuf - 1] = L'-';
#if HAVE_API_SUPPORT_WIN32_LOCAL_INFO_EX
ccBuf = GetLocaleInfoEx (LOCALE_NAME_USER_DEFAULT, LOCALE_SISO3166CTRYNAME, buf + ccBuf, 9);
#elif HAVE_API_SUPPORT_WIN32_LOCAL_INFO
ccBuf = GetLocaleInfoW(lcid, LOCALE_SISO3166CTRYNAME, buf + ccBuf, 9);
#endif
assert (ccBuf <= 9);
}
// Check for failure.
if (ccBuf == 0)
buf[0] = L'\0';
return u16to8 (buf);
}
#elif !HAVE_EXTERN_DEFINED_WIN32_LOCAL_INFO && !HAVE_EXTERN_DEFINED_WIN32_LOCAL_INFO_EX
gchar*
g_win32_getlocale(void)
{
g_unsupported_api ("GetLocaleInfo, GetLocaleInfoEx");
SetLastError (ERROR_NOT_SUPPORTED);
return NULL;
}
#endif /* HAVE_API_SUPPORT_WIN32_LOCAL_INFO || HAVE_API_SUPPORT_WIN32_LOCAL_INFO_EX */
gboolean
g_path_is_absolute (const char *filename)
{
g_return_val_if_fail (filename != NULL, FALSE);
if (filename[0] != '\0' && filename[1] != '\0') {
if (filename[1] == ':' && filename[2] != '\0' &&
(filename[2] == '\\' || filename[2] == '/'))
return TRUE;
/* UNC paths */
else if (filename[0] == '\\' && filename[1] == '\\' &&
filename[2] != '\0')
return TRUE;
}
return FALSE;
}
#if _MSC_VER && HAVE_API_SUPPORT_WIN32_SH_GET_FOLDER_PATH
#include <shlobj.h>
static gchar*
g_get_known_folder_path (void)
{
gchar *folder_path = NULL;
PWSTR profile_path = NULL;
#ifdef __cplusplus
REFGUID folderid = FOLDERID_Profile;
#else
REFGUID folderid = &FOLDERID_Profile;
#endif
HRESULT hr = SHGetKnownFolderPath (folderid, KF_FLAG_DEFAULT, NULL, &profile_path);
if (SUCCEEDED(hr)) {
folder_path = u16to8 (profile_path);
CoTaskMemFree (profile_path);
}
return folder_path;
}
#elif !HAVE_EXTERN_DEFINED_WIN32_SH_GET_FOLDER_PATH
static inline gchar *
g_get_known_folder_path (void)
{
return NULL;
}
#endif /* HAVE_API_SUPPORT_WIN32_SH_GET_FOLDER_PATH */
const gchar *
g_get_home_dir (void)
{
gchar *home_dir = g_get_known_folder_path ();
if (!home_dir) {
home_dir = (gchar *) g_getenv ("USERPROFILE");
}
if (!home_dir) {
const gchar *drive = g_getenv ("HOMEDRIVE");
const gchar *path = g_getenv ("HOMEPATH");
if (drive && path) {
home_dir = g_malloc (strlen (drive) + strlen (path) + 1);
if (home_dir) {
sprintf (home_dir, "%s%s", drive, path);
}
}
g_free ((void*)drive);
g_free ((void*)path);
}
return home_dir;
}
const gchar *
g_get_user_name (void)
{
const char * retName = g_getenv ("USER");
if (!retName)
retName = g_getenv ("USERNAME");
return retName;
}
static const char *tmp_dir;
const gchar *
g_get_tmp_dir (void)
{
	if (tmp_dir == NULL){
		tmp_dir = g_getenv ("TMPDIR");
		if (tmp_dir == NULL){
			tmp_dir = g_getenv ("TMP");
			if (tmp_dir == NULL){
				tmp_dir = g_getenv ("TEMP");
				if (tmp_dir == NULL)
					tmp_dir = "C:\\temp";
			}
		}
	}
return tmp_dir;
}
gchar *
g_get_current_dir (void)
{
gunichar2 *buffer = NULL;
gchar* val = NULL;
gint32 retval, buffer_size = MAX_PATH;
buffer = g_new (gunichar2, buffer_size);
retval = GetCurrentDirectoryW (buffer_size, buffer);
if (retval != 0) {
// the size might be larger than MAX_PATH
// https://docs.microsoft.com/en-us/windows/win32/fileio/maximum-file-path-limitation?tabs=cmd
if (retval > buffer_size) {
buffer_size = retval;
buffer = g_realloc (buffer, buffer_size*sizeof(gunichar2));
retval = GetCurrentDirectoryW (buffer_size, buffer);
}
val = u16to8 (buffer);
} else {
if (GetLastError () != ERROR_ENVVAR_NOT_FOUND) {
val = g_malloc (1);
*val = 0;
}
}
g_free (buffer);
return val;
}
| -1 |
dotnet/runtime | 66,007 | [monoapi] Add mono_method_get_unmanaged_callers_only_ftnptr | Like `RuntimeMethodHandle.GetFunctionPointer`, but callable from native code | lambdageek | 2022-03-01T15:38:09Z | 2022-03-03T15:38:37Z | faa272f860f83802a9cee65619e6e2e841b05433 | 1cfa6d6964d890f9673b25d2165532f9a43710a7 | [monoapi] Add mono_method_get_unmanaged_callers_only_ftnptr. Like `RuntimeMethodHandle.GetFunctionPointer`, but callable from native code | ./src/mono/mono/utils/mono-threads-debug.h | /**
* \file
*/
#ifndef __MONO_UTILS_MONO_THREADS_DEBUG_H__
#define __MONO_UTILS_MONO_THREADS_DEBUG_H__
#include <config.h>
#include <glib.h>
/* Logging - enable them below if you need specific logging for the category you need */
#define MOSTLY_ASYNC_SAFE_FPRINTF(handle, ...) do { \
g_async_safe_fprintf (handle, __VA_ARGS__); \
} while (0)
#define MOSTLY_ASYNC_SAFE_PRINTF(...) MOSTLY_ASYNC_SAFE_FPRINTF(1, __VA_ARGS__);
#if 1
#define THREADS_DEBUG(...)
#else
#define THREADS_DEBUG MOSTLY_ASYNC_SAFE_PRINTF
#endif
#if 1
#define THREADS_STW_DEBUG(...)
#else
#define THREADS_STW_DEBUG MOSTLY_ASYNC_SAFE_PRINTF
#endif
#if 1
#define THREADS_SUSPEND_DEBUG(...)
#else
#define THREADS_SUSPEND_DEBUG MOSTLY_ASYNC_SAFE_PRINTF
#endif
#if 1
#define THREADS_STATE_MACHINE_DEBUG(...)
#else
#define THREADS_STATE_MACHINE_DEBUG_ENABLED
#define THREADS_STATE_MACHINE_DEBUG MOSTLY_ASYNC_SAFE_PRINTF
#endif
#if 1
#define THREADS_INTERRUPT_DEBUG(...)
#else
#define THREADS_INTERRUPT_DEBUG MOSTLY_ASYNC_SAFE_PRINTF
#endif
#endif /* __MONO_UTILS_MONO_THREADS_DEBUG_H__ */
| /**
* \file
*/
#ifndef __MONO_UTILS_MONO_THREADS_DEBUG_H__
#define __MONO_UTILS_MONO_THREADS_DEBUG_H__
#include <config.h>
#include <glib.h>
/* Logging - enable them below if you need specific logging for the category you need */
#define MOSTLY_ASYNC_SAFE_FPRINTF(handle, ...) do { \
g_async_safe_fprintf (handle, __VA_ARGS__); \
} while (0)
#define MOSTLY_ASYNC_SAFE_PRINTF(...) MOSTLY_ASYNC_SAFE_FPRINTF(1, __VA_ARGS__);
#if 1
#define THREADS_DEBUG(...)
#else
#define THREADS_DEBUG MOSTLY_ASYNC_SAFE_PRINTF
#endif
#if 1
#define THREADS_STW_DEBUG(...)
#else
#define THREADS_STW_DEBUG MOSTLY_ASYNC_SAFE_PRINTF
#endif
#if 1
#define THREADS_SUSPEND_DEBUG(...)
#else
#define THREADS_SUSPEND_DEBUG MOSTLY_ASYNC_SAFE_PRINTF
#endif
#if 1
#define THREADS_STATE_MACHINE_DEBUG(...)
#else
#define THREADS_STATE_MACHINE_DEBUG_ENABLED
#define THREADS_STATE_MACHINE_DEBUG MOSTLY_ASYNC_SAFE_PRINTF
#endif
#if 1
#define THREADS_INTERRUPT_DEBUG(...)
#else
#define THREADS_INTERRUPT_DEBUG MOSTLY_ASYNC_SAFE_PRINTF
#endif
#endif /* __MONO_UTILS_MONO_THREADS_DEBUG_H__ */
| -1 |
dotnet/runtime | 66,007 | [monoapi] Add mono_method_get_unmanaged_callers_only_ftnptr | Like `RuntimeMethodHandle.GetFunctionPointer`, but callable from native code | lambdageek | 2022-03-01T15:38:09Z | 2022-03-03T15:38:37Z | faa272f860f83802a9cee65619e6e2e841b05433 | 1cfa6d6964d890f9673b25d2165532f9a43710a7 | [monoapi] Add mono_method_get_unmanaged_callers_only_ftnptr. Like `RuntimeMethodHandle.GetFunctionPointer`, but callable from native code | ./src/coreclr/pal/inc/rt/olectl.h | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//
//
// ===========================================================================
// File: olectl.h
//
// ===========================================================================
// dummy olectl.h for PAL
#include "palrt.h"
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//
//
// ===========================================================================
// File: olectl.h
//
// ===========================================================================
// dummy olectl.h for PAL
#include "palrt.h"
| -1 |
dotnet/runtime | 66,007 | [monoapi] Add mono_method_get_unmanaged_callers_only_ftnptr | Like `RuntimeMethodHandle.GetFunctionPointer`, but callable from native code | lambdageek | 2022-03-01T15:38:09Z | 2022-03-03T15:38:37Z | faa272f860f83802a9cee65619e6e2e841b05433 | 1cfa6d6964d890f9673b25d2165532f9a43710a7 | [monoapi] Add mono_method_get_unmanaged_callers_only_ftnptr. Like `RuntimeMethodHandle.GetFunctionPointer`, but callable from native code | ./src/mono/mono/metadata/icall-table.h | /**
* \file
* Copyright 2016 Microsoft
* Licensed under the MIT license. See LICENSE file in the project root for full license information.
*/
#ifndef __MONO_METADATA_ICALL_TABLE_H__
#define __MONO_METADATA_ICALL_TABLE_H__
#include <config.h>
#include <glib.h>
#include <mono/utils/mono-publib.h>
#include "marshal.h"
#include "icalls.h"
#define MONO_ICALL_TABLE_CALLBACKS_VERSION 2
typedef struct {
int version;
gpointer (*lookup) (MonoMethod *method, char *classname, char *methodname, char *sigstart, gboolean *uses_handles);
const char* (*lookup_icall_symbol) (gpointer func);
} MonoIcallTableCallbacks;
ICALL_EXTERN_C
void
mono_install_icall_table_callbacks (const MonoIcallTableCallbacks *cb);
MONO_API void
mono_icall_table_init (void);
// It helps for types to be single tokens, though this can be relaxed in some places.
// Marshaling a "ptr" does nothing -- just pass it on unchanged.
// Marshaling a "ref" also does nothing at this layer, but
// creates a handle in marshal-ilgen.c.
// "ref" means "can be an interior pointer".
// "ptr" means "to a local".
// It is generally difficult to know which applies, and "ref" is the safer choice.
// Presently the distinction does not matter.
typedef gint32 *gint32_ptr;
typedef gsize *gsize_ptr;
typedef guchar *guchar_ptr;
typedef const guchar *const_guchar_ptr;
typedef gpointer *gpointer_ptr;
typedef const char *const_char_ptr;
typedef char *char_ptr;
typedef char **char_ptr_ptr;
typedef gunichar2 *gunichar2_ptr;
typedef const gunichar2 *const_gunichar2_ptr;
typedef int *int_ptr;
typedef int **int_ptr_ref;
typedef guint8 **guint8_ptr_ref;
typedef GPtrArray *GPtrArray_ptr;
// HANDLE is not used just to avoid duplicate typedef warnings with some compilers.
// gpointer == void* == HANDLE == FILE_HANDLE == PROCESS_HANDLE.
typedef gpointer PROCESS_HANDLE;
typedef gpointer FILE_HANDLE;
typedef MonoAssemblyName *MonoAssemblyName_ptr;
typedef MonoBoolean *MonoBoolean_ptr;
typedef MonoClass *MonoClass_ptr;
typedef MonoClassField *MonoClassField_ptr;
typedef MonoEvent *MonoEvent_ptr;
typedef MonoImage *MonoImage_ptr;
typedef MonoMethod *MonoMethod_ptr;
typedef MonoProperty *MonoProperty_ptr;
typedef MonoPropertyInfo *MonoPropertyInfo_ref;
typedef MonoType *MonoType_ptr;
typedef MonoTypedRef *MonoTypedRef_ptr;
typedef MonoStackCrawlMark *MonoStackCrawlMark_ptr;
typedef MonoVTable *MonoVTable_ptr;
typedef unsigned *unsigned_ptr;
typedef mono_unichar2 *mono_unichar2_ptr;
typedef mono_unichar4 *mono_unichar4_ptr;
typedef MonoSpanOfObjects *MonoSpanOfObjects_ref;
typedef char **char_ptr_ref;
typedef gint32 *gint32_ref;
typedef gint64 *gint64_ref;
typedef gpointer *gpointer_ref;
typedef gsize *gsize_ref;
typedef guint32 *guint32_ref;
typedef guint64 *guint64_ref;
typedef int *int_ref;
typedef MonoAssemblyName *MonoAssemblyName_ref;
typedef MonoBoolean *MonoBoolean_ref;
typedef MonoClassField *MonoClassField_ref;
typedef MonoEvent *MonoEvent_ref;
typedef MonoEventInfo *MonoEventInfo_ref;
typedef MonoGenericParamInfo *MonoGenericParamInfo_ptr;
typedef MonoMethod *MonoMethod_ref;
typedef MonoMethodInfo *MonoMethodInfo_ref;
typedef MonoResolveTokenError *MonoResolveTokenError_ref;
typedef MonoType *MonoType_ref;
typedef MonoTypedRef *MonoTypedRef_ref;
// Maybe do this in TYPED_HANDLE_DECL.
typedef MonoArray MonoArrayOut;
typedef MonoArray MonoArrayInOut;
typedef MonoArrayHandle MonoArrayOutHandle;
typedef MonoArrayHandle MonoArrayInOutHandle;
typedef MonoException MonoExceptionOut;
typedef MonoExceptionHandle MonoExceptionOutHandle;
typedef MonoObject MonoObjectOut;
typedef MonoObject MonoObjectInOut;
typedef MonoObjectHandle MonoObjectOutHandle;
typedef MonoObjectHandle MonoObjectInOutHandle;
typedef MonoReflectionModule MonoReflectionModuleOut;
typedef MonoReflectionModuleHandle MonoReflectionModuleOutHandle;
typedef MonoString MonoStringOut;
typedef MonoStringHandle MonoStringOutHandle;
// How the arguments and return value of an icall should be wrapped.
// The names and meanings are from marshal-ilgen.c.
// ICALL_HANDLES_WRAP_NONE
// ICALL_HANDLES_WRAP_OBJ
// ICALL_HANDLES_WRAP_OBJ_INOUT
// ICALL_HANDLES_WRAP_OBJ_OUT
// ICALL_HANDLES_WRAP_VALUETYPE_REF
//
// In the present implementation, all that matters is handle-or-not:
// in, out, and inout are treated the same, and none and valuetype_ref are treated the same.
// Handle creation is in marshal-ilgen.c.
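// Illustrative mapping sketch (hypothetical icall signature, not from this
// header): for
//   void ves_icall_example (MonoString *s, gint32 n, MonoError *error);
// the MonoString argument is wrapped as ICALL_HANDLES_WRAP_OBJ (a handle is
// created for it), while the gint32 is ICALL_HANDLES_WRAP_NONE (passed
// through unchanged).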
// Map a type to a type class: Void and above.
#define MONO_HANDLE_TYPE_WRAP_void Void
#define MONO_HANDLE_TYPE_WRAP_GPtrArray_ptr ICALL_HANDLES_WRAP_NONE
#define MONO_HANDLE_TYPE_WRAP_MonoBoolean ICALL_HANDLES_WRAP_NONE
#define MONO_HANDLE_TYPE_WRAP_const_gunichar2_ptr ICALL_HANDLES_WRAP_NONE
#define MONO_HANDLE_TYPE_WRAP_gunichar2_ptr ICALL_HANDLES_WRAP_NONE
#define MONO_HANDLE_TYPE_WRAP_gint ICALL_HANDLES_WRAP_NONE
#define MONO_HANDLE_TYPE_WRAP_gint32 ICALL_HANDLES_WRAP_NONE
#define MONO_HANDLE_TYPE_WRAP_gint64 ICALL_HANDLES_WRAP_NONE
#define MONO_HANDLE_TYPE_WRAP_gpointer ICALL_HANDLES_WRAP_NONE
#define MONO_HANDLE_TYPE_WRAP_gconstpointer ICALL_HANDLES_WRAP_NONE
#define MONO_HANDLE_TYPE_WRAP_gsize ICALL_HANDLES_WRAP_NONE
#define MONO_HANDLE_TYPE_WRAP_gssize ICALL_HANDLES_WRAP_NONE
#define MONO_HANDLE_TYPE_WRAP_guchar_ptr ICALL_HANDLES_WRAP_NONE
#define MONO_HANDLE_TYPE_WRAP_guint ICALL_HANDLES_WRAP_NONE
#define MONO_HANDLE_TYPE_WRAP_const_guchar_ptr ICALL_HANDLES_WRAP_NONE
#define MONO_HANDLE_TYPE_WRAP_guint32 ICALL_HANDLES_WRAP_NONE
#define MONO_HANDLE_TYPE_WRAP_guint64 ICALL_HANDLES_WRAP_NONE
#define MONO_HANDLE_TYPE_WRAP_int ICALL_HANDLES_WRAP_NONE
#define MONO_HANDLE_TYPE_WRAP_uint ICALL_HANDLES_WRAP_NONE
#define MONO_HANDLE_TYPE_WRAP_PInfo ICALL_HANDLES_WRAP_NONE
#define MONO_HANDLE_TYPE_WRAP_mono_bstr ICALL_HANDLES_WRAP_NONE
#define MONO_HANDLE_TYPE_WRAP_mono_bstr_const ICALL_HANDLES_WRAP_NONE
#define MONO_HANDLE_TYPE_WRAP_unsigned_ptr ICALL_HANDLES_WRAP_NONE
#define MONO_HANDLE_TYPE_WRAP_mono_unichar2_ptr ICALL_HANDLES_WRAP_NONE
#define MONO_HANDLE_TYPE_WRAP_mono_unichar4_ptr ICALL_HANDLES_WRAP_NONE
#define MONO_HANDLE_TYPE_WRAP_MonoImage_ptr ICALL_HANDLES_WRAP_NONE
#define MONO_HANDLE_TYPE_WRAP_MonoClassField_ptr ICALL_HANDLES_WRAP_NONE
#define MONO_HANDLE_TYPE_WRAP_MonoMarshalNative ICALL_HANDLES_WRAP_NONE
#define MONO_HANDLE_TYPE_WRAP_MonoProperty_ptr ICALL_HANDLES_WRAP_NONE
#define MONO_HANDLE_TYPE_WRAP_size_t ICALL_HANDLES_WRAP_NONE
#define MONO_HANDLE_TYPE_WRAP_MonoVTable_ptr ICALL_HANDLES_WRAP_NONE
#define MONO_HANDLE_TYPE_WRAP_MonoQCallTypeHandle ICALL_HANDLES_WRAP_NONE
#define MONO_HANDLE_TYPE_WRAP_MonoQCallAssemblyHandle ICALL_HANDLES_WRAP_NONE
#define MONO_HANDLE_TYPE_WRAP_MonoAssemblyName_ref ICALL_HANDLES_WRAP_VALUETYPE_REF
#define MONO_HANDLE_TYPE_WRAP_MonoBoolean_ref ICALL_HANDLES_WRAP_VALUETYPE_REF
#define MONO_HANDLE_TYPE_WRAP_MonoClassField_ref ICALL_HANDLES_WRAP_VALUETYPE_REF
#define MONO_HANDLE_TYPE_WRAP_MonoEvent_ref ICALL_HANDLES_WRAP_VALUETYPE_REF
#define MONO_HANDLE_TYPE_WRAP_MonoEventInfo_ref ICALL_HANDLES_WRAP_VALUETYPE_REF
#define MONO_HANDLE_TYPE_WRAP_MonoMethod_ref ICALL_HANDLES_WRAP_VALUETYPE_REF
#define MONO_HANDLE_TYPE_WRAP_MonoMethodInfo_ref ICALL_HANDLES_WRAP_VALUETYPE_REF
#define MONO_HANDLE_TYPE_WRAP_MonoPropertyInfo_ref ICALL_HANDLES_WRAP_VALUETYPE_REF
#define MONO_HANDLE_TYPE_WRAP_MonoType_ref ICALL_HANDLES_WRAP_VALUETYPE_REF
#define MONO_HANDLE_TYPE_WRAP_MonoTypedRef_ref ICALL_HANDLES_WRAP_VALUETYPE_REF
#define MONO_HANDLE_TYPE_WRAP_gint32_ref ICALL_HANDLES_WRAP_VALUETYPE_REF
#define MONO_HANDLE_TYPE_WRAP_gint64_ref ICALL_HANDLES_WRAP_VALUETYPE_REF
#define MONO_HANDLE_TYPE_WRAP_gpointer_ref ICALL_HANDLES_WRAP_VALUETYPE_REF
#define MONO_HANDLE_TYPE_WRAP_gsize_ref ICALL_HANDLES_WRAP_VALUETYPE_REF
#define MONO_HANDLE_TYPE_WRAP_guint32_ref ICALL_HANDLES_WRAP_VALUETYPE_REF
#define MONO_HANDLE_TYPE_WRAP_guint64_ref ICALL_HANDLES_WRAP_VALUETYPE_REF
#define MONO_HANDLE_TYPE_WRAP_int_ref ICALL_HANDLES_WRAP_VALUETYPE_REF
#define MONO_HANDLE_TYPE_WRAP_int_ptr_ref ICALL_HANDLES_WRAP_VALUETYPE_REF
#define MONO_HANDLE_TYPE_WRAP_char_ptr_ref ICALL_HANDLES_WRAP_VALUETYPE_REF
#define MONO_HANDLE_TYPE_WRAP_guint8_ptr_ref ICALL_HANDLES_WRAP_VALUETYPE_REF
#define MONO_HANDLE_TYPE_WRAP_MonoResolveTokenError_ref ICALL_HANDLES_WRAP_VALUETYPE_REF
#define MONO_HANDLE_TYPE_WRAP_MonoSpanOfObjects_ref ICALL_HANDLES_WRAP_VALUETYPE_REF
// HANDLE is not used just to avoid duplicate typedef warnings with some compilers.
// gpointer == void* == HANDLE == FILE_HANDLE == PROCESS_HANDLE.
#define MONO_HANDLE_TYPE_WRAP_char_ptr ICALL_HANDLES_WRAP_NONE
#define MONO_HANDLE_TYPE_WRAP_const_char_ptr ICALL_HANDLES_WRAP_NONE
#define MONO_HANDLE_TYPE_WRAP_FILE_HANDLE ICALL_HANDLES_WRAP_NONE
#define MONO_HANDLE_TYPE_WRAP_MonoClass_ptr ICALL_HANDLES_WRAP_NONE
#define MONO_HANDLE_TYPE_WRAP_MonoEvent_ptr ICALL_HANDLES_WRAP_NONE
#define MONO_HANDLE_TYPE_WRAP_MonoGenericParamInfo_ptr ICALL_HANDLES_WRAP_NONE
#define MONO_HANDLE_TYPE_WRAP_MonoMethod_ptr ICALL_HANDLES_WRAP_NONE
#define MONO_HANDLE_TYPE_WRAP_MonoType_ptr ICALL_HANDLES_WRAP_NONE
#define MONO_HANDLE_TYPE_WRAP_MonoTypedRef_ptr ICALL_HANDLES_WRAP_NONE
#define MONO_HANDLE_TYPE_WRAP_MonoStackCrawlMark_ptr ICALL_HANDLES_WRAP_NONE
#define MONO_HANDLE_TYPE_WRAP_gint32_ptr ICALL_HANDLES_WRAP_NONE
#define MONO_HANDLE_TYPE_WRAP_gpointer_ptr ICALL_HANDLES_WRAP_NONE
#define MONO_HANDLE_TYPE_WRAP_PROCESS_HANDLE ICALL_HANDLES_WRAP_NONE
#define MONO_HANDLE_TYPE_WRAP_MonoObjectHandleOnStack ICALL_HANDLES_WRAP_NONE
// Please keep this sorted (grep ICALL_HANDLES_WRAP_OBJ$ | sort)
#define MONO_HANDLE_TYPE_WRAP_MonoAppContext ICALL_HANDLES_WRAP_OBJ
#define MONO_HANDLE_TYPE_WRAP_MonoAppDomain ICALL_HANDLES_WRAP_OBJ
#define MONO_HANDLE_TYPE_WRAP_MonoAppDomainSetup ICALL_HANDLES_WRAP_OBJ
#define MONO_HANDLE_TYPE_WRAP_MonoArray ICALL_HANDLES_WRAP_OBJ
#define MONO_HANDLE_TYPE_WRAP_MonoAsyncResult ICALL_HANDLES_WRAP_OBJ
#define MONO_HANDLE_TYPE_WRAP_MonoCalendarData ICALL_HANDLES_WRAP_OBJ
#define MONO_HANDLE_TYPE_WRAP_MonoComInteropProxy ICALL_HANDLES_WRAP_OBJ
#define MONO_HANDLE_TYPE_WRAP_MonoComObject ICALL_HANDLES_WRAP_OBJ
#define MONO_HANDLE_TYPE_WRAP_MonoCultureData ICALL_HANDLES_WRAP_OBJ
#define MONO_HANDLE_TYPE_WRAP_MonoCultureInfo ICALL_HANDLES_WRAP_OBJ
#define MONO_HANDLE_TYPE_WRAP_MonoDelegate ICALL_HANDLES_WRAP_OBJ
#define MONO_HANDLE_TYPE_WRAP_MonoException ICALL_HANDLES_WRAP_OBJ
#define MONO_HANDLE_TYPE_WRAP_MonoInternalThread ICALL_HANDLES_WRAP_OBJ
#define MONO_HANDLE_TYPE_WRAP_MonoIOSelectorJob ICALL_HANDLES_WRAP_OBJ
#define MONO_HANDLE_TYPE_WRAP_MonoObject ICALL_HANDLES_WRAP_OBJ
#define MONO_HANDLE_TYPE_WRAP_MonoManifestResourceInfo ICALL_HANDLES_WRAP_OBJ
#define MONO_HANDLE_TYPE_WRAP_MonoMulticastDelegate ICALL_HANDLES_WRAP_OBJ
#define MONO_HANDLE_TYPE_WRAP_MonoReflectionAssembly ICALL_HANDLES_WRAP_OBJ
#define MONO_HANDLE_TYPE_WRAP_MonoReflectionAssemblyBuilder ICALL_HANDLES_WRAP_OBJ
#define MONO_HANDLE_TYPE_WRAP_MonoReflectionDynamicMethod ICALL_HANDLES_WRAP_OBJ
#define MONO_HANDLE_TYPE_WRAP_MonoReflectionEvent ICALL_HANDLES_WRAP_OBJ
#define MONO_HANDLE_TYPE_WRAP_MonoReflectionMonoEvent ICALL_HANDLES_WRAP_OBJ
#define MONO_HANDLE_TYPE_WRAP_MonoReflectionField ICALL_HANDLES_WRAP_OBJ
#define MONO_HANDLE_TYPE_WRAP_MonoReflectionMarshalAsAttribute ICALL_HANDLES_WRAP_OBJ
#define MONO_HANDLE_TYPE_WRAP_MonoReflectionMethod ICALL_HANDLES_WRAP_OBJ
#define MONO_HANDLE_TYPE_WRAP_MonoReflectionMethodBody ICALL_HANDLES_WRAP_OBJ
#define MONO_HANDLE_TYPE_WRAP_MonoReflectionModule ICALL_HANDLES_WRAP_OBJ
#define MONO_HANDLE_TYPE_WRAP_MonoReflectionModuleBuilder ICALL_HANDLES_WRAP_OBJ
#define MONO_HANDLE_TYPE_WRAP_MonoReflectionParameter ICALL_HANDLES_WRAP_OBJ
#define MONO_HANDLE_TYPE_WRAP_MonoReflectionProperty ICALL_HANDLES_WRAP_OBJ
#define MONO_HANDLE_TYPE_WRAP_MonoReflectionSigHelper ICALL_HANDLES_WRAP_OBJ
#define MONO_HANDLE_TYPE_WRAP_MonoReflectionType ICALL_HANDLES_WRAP_OBJ
#define MONO_HANDLE_TYPE_WRAP_MonoReflectionTypeBuilder ICALL_HANDLES_WRAP_OBJ
#define MONO_HANDLE_TYPE_WRAP_MonoRegionInfo ICALL_HANDLES_WRAP_OBJ
#define MONO_HANDLE_TYPE_WRAP_MonoString ICALL_HANDLES_WRAP_OBJ
#define MONO_HANDLE_TYPE_WRAP_MonoStringBuilder ICALL_HANDLES_WRAP_OBJ
#define MONO_HANDLE_TYPE_WRAP_MonoThreadObject ICALL_HANDLES_WRAP_OBJ
#define MONO_HANDLE_TYPE_WRAP_MonoTransparentProxy ICALL_HANDLES_WRAP_OBJ
#define MONO_HANDLE_TYPE_WRAP_MonoW32ProcessStartInfo ICALL_HANDLES_WRAP_OBJ
#define MONO_HANDLE_TYPE_WRAP_MonoExceptionOut ICALL_HANDLES_WRAP_OBJ_OUT
#define MONO_HANDLE_TYPE_WRAP_MonoObjectOut ICALL_HANDLES_WRAP_OBJ_OUT
#define MONO_HANDLE_TYPE_WRAP_MonoStringOut ICALL_HANDLES_WRAP_OBJ_OUT
#define MONO_HANDLE_TYPE_WRAP_MonoArrayOut ICALL_HANDLES_WRAP_OBJ_OUT
#define MONO_HANDLE_TYPE_WRAP_MonoReflectionModuleOut ICALL_HANDLES_WRAP_OBJ_OUT
#define MONO_HANDLE_TYPE_WRAP_MonoW32ProcessInfo_ref ICALL_HANDLES_WRAP_VALUETYPE_REF
// These are rare, and could be eliminated.
// They could instead be return values, or separate out-parameters.
#define MONO_HANDLE_TYPE_WRAP_MonoObjectInOut ICALL_HANDLES_WRAP_OBJ_INOUT
#define MONO_HANDLE_TYPE_WRAP_MonoArrayInOut ICALL_HANDLES_WRAP_OBJ_INOUT
// Do macro_prefix for type type, mapping type to a type class.
// Note that the macro can further be followed by parameters.
#define MONO_HANDLE_DO3(macro_prefix, type) macro_prefix ## type
#define MONO_HANDLE_DO2(macro_prefix, type) MONO_HANDLE_DO3 (macro_prefix, type)
#define MONO_HANDLE_DO(macro_prefix, type) MONO_HANDLE_DO2 (macro_prefix, MONO_HANDLE_TYPE_WRAP_ ## type)
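// Example (using definitions above): MONO_HANDLE_DO (MONO_HANDLE_TYPE_TYPED_, MonoString)
// first maps MonoString to its type class via MONO_HANDLE_TYPE_WRAP_MonoString,
// i.e. ICALL_HANDLES_WRAP_OBJ, and pastes that onto the prefix, producing
// MONO_HANDLE_TYPE_TYPED_ICALL_HANDLES_WRAP_OBJ; MONO_HANDLE_TYPE_TYPED (defined
// below) then applies it to (MonoString), yielding MonoStringHandle.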
#define MONO_HANDLE_RETURN_BEGIN(type) MONO_HANDLE_DO (MONO_HANDLE_RETURN_BEGIN_, type) (type)
#define MONO_HANDLE_RETURN_BEGIN_Void(type) /* nothing */
#define MONO_HANDLE_RETURN_BEGIN_ICALL_HANDLES_WRAP_NONE(type) type icall_result =
#define MONO_HANDLE_RETURN_BEGIN_ICALL_HANDLES_WRAP_OBJ(type) type ## Handle icall_result =
#define MONO_HANDLE_RETURN_END(type) MONO_HANDLE_DO (MONO_HANDLE_RETURN_END_, type);
#define MONO_HANDLE_RETURN_END_Void HANDLE_FUNCTION_RETURN ()
#define MONO_HANDLE_RETURN_END_ICALL_HANDLES_WRAP_NONE HANDLE_FUNCTION_RETURN_VAL (icall_result)
#define MONO_HANDLE_RETURN_END_ICALL_HANDLES_WRAP_OBJ HANDLE_FUNCTION_RETURN_OBJ (icall_result)
// Convert raw handles to typed handles, just by casting and copying a pointer.
#define MONO_HANDLE_MARSHAL(type, n) MONO_HANDLE_DO (MONO_HANDLE_MARSHAL_, type) (type, n)
#define MONO_HANDLE_MARSHAL_ICALL_HANDLES_WRAP_NONE(type, n) a ## n
#define MONO_HANDLE_MARSHAL_ICALL_HANDLES_WRAP_OBJ(type, n) *(type ## Handle*)&a ## n
#define MONO_HANDLE_MARSHAL_ICALL_HANDLES_WRAP_OBJ_OUT(type, n) *(type ## Handle*)&a ## n
#define MONO_HANDLE_MARSHAL_ICALL_HANDLES_WRAP_OBJ_INOUT(type, n) *(type ## Handle*)&a ## n
#define MONO_HANDLE_MARSHAL_ICALL_HANDLES_WRAP_VALUETYPE_REF(type, n) a ## n
// Declare and initialize a local for an object in, out, inout parameters, upon input.
#define MONO_HANDLE_REGISTER_ICALL_LOCALS(type, n) MONO_HANDLE_DO (MONO_HANDLE_REGISTER_ICALL_LOCALS_, type) (type, n)
#define MONO_HANDLE_REGISTER_ICALL_LOCALS_ICALL_HANDLES_WRAP_NONE(type, n) /* nothing */
#define MONO_HANDLE_REGISTER_ICALL_LOCALS_ICALL_HANDLES_WRAP_OBJ(type, n) type ## Handle a ## n = MONO_HANDLE_NEW (type, a ## n ## _raw);
#define MONO_HANDLE_REGISTER_ICALL_LOCALS_ICALL_HANDLES_WRAP_OBJ_OUT(type, n) unused_untested_looks_correct1 type ## Handle a ## n = MONO_HANDLE_NEW (type, NULL);
#define MONO_HANDLE_REGISTER_ICALL_LOCALS_ICALL_HANDLES_WRAP_OBJ_INOUT(type, n) unused_untested_looks_correct2 type ## Handle a ## n = MONO_HANDLE_NEW (type, *a ## n ## _raw);
#define MONO_HANDLE_REGISTER_ICALL_LOCALS_ICALL_HANDLES_WRAP_VALUETYPE_REF(type, n) FIXME restore mono_icall_handle_new_interior from e8b037642104527bd9b9ba70d502210b9c12d2b8 \
type ## Handle a ## n = mono_icall_handle_new_interior (a ## n ## _raw);
// Produce all the locals, i.e. up to one per parameter.
#define MONO_HANDLE_REGISTER_ICALL_LOCALS_0() /* nothing */
#define MONO_HANDLE_REGISTER_ICALL_LOCALS_1(t0) MONO_HANDLE_REGISTER_ICALL_LOCALS (t0, 0)
#define MONO_HANDLE_REGISTER_ICALL_LOCALS_2(t0, t1) MONO_HANDLE_REGISTER_ICALL_LOCALS_1 (t0) MONO_HANDLE_REGISTER_ICALL_LOCALS (t1, 1)
#define MONO_HANDLE_REGISTER_ICALL_LOCALS_3(t0, t1, t2) MONO_HANDLE_REGISTER_ICALL_LOCALS_2 (t0, t1) MONO_HANDLE_REGISTER_ICALL_LOCALS (t2, 2)
#define MONO_HANDLE_REGISTER_ICALL_LOCALS_4(t0, t1, t2, t3) MONO_HANDLE_REGISTER_ICALL_LOCALS_3 (t0, t1, t2) MONO_HANDLE_REGISTER_ICALL_LOCALS (t3, 3)
#define MONO_HANDLE_REGISTER_ICALL_LOCALS_5(t0, t1, t2, t3, t4) MONO_HANDLE_REGISTER_ICALL_LOCALS_4 (t0, t1, t2, t3) MONO_HANDLE_REGISTER_ICALL_LOCALS (t4, 4)
#define MONO_HANDLE_REGISTER_ICALL_LOCALS_6(t0, t1, t2, t3, t4, t5) MONO_HANDLE_REGISTER_ICALL_LOCALS_5 (t0, t1, t2, t3, t4) MONO_HANDLE_REGISTER_ICALL_LOCALS (t5, 5)
#define MONO_HANDLE_REGISTER_ICALL_LOCALS_7(t0, t1, t2, t3, t4, t5, t6) MONO_HANDLE_REGISTER_ICALL_LOCALS_6 (t0, t1, t2, t3, t4, t5) MONO_HANDLE_REGISTER_ICALL_LOCALS (t6, 6)
#define MONO_HANDLE_REGISTER_ICALL_LOCALS_8(t0, t1, t2, t3, t4, t5, t6, t7) MONO_HANDLE_REGISTER_ICALL_LOCALS_7 (t0, t1, t2, t3, t4, t5, t6) MONO_HANDLE_REGISTER_ICALL_LOCALS (t7, 7)
#define MONO_HANDLE_REGISTER_ICALL_LOCALS_9(t0, t1, t2, t3, t4, t5, t6, t7, t8) MONO_HANDLE_REGISTER_ICALL_LOCALS_8 (t0, t1, t2, t3, t4, t5, t6, t7) MONO_HANDLE_REGISTER_ICALL_LOCALS (t8, 8)
// Convert a typed handle to raw pointer upon output.
#define MONO_HANDLE_REGISTER_ICALL_OUT(type, n) MONO_HANDLE_DO (MONO_HANDLE_REGISTER_ICALL_OUT_, type) (type, n)
#define MONO_HANDLE_REGISTER_ICALL_OUT_ICALL_HANDLES_WRAP_NONE(type, n) /* nothing */
#define MONO_HANDLE_REGISTER_ICALL_OUT_ICALL_HANDLES_WRAP_OBJ(type, n) /* nothing */
#define MONO_HANDLE_REGISTER_ICALL_OUT_ICALL_HANDLES_WRAP_OBJ_OUT(type, n) unused_untested_looks_correct3 *a ## n ## _raw = MONO_HANDLE_RAW (a ## n);
#define MONO_HANDLE_REGISTER_ICALL_OUT_ICALL_HANDLES_WRAP_OBJ_INOUT(type, n) unused_untested_looks_correct4 *a ## n ## _raw = MONO_HANDLE_RAW (a ## n);
#define MONO_HANDLE_REGISTER_ICALL_OUT_ICALL_HANDLES_WRAP_VALUETYPE_REF(type, n) /* nothing */
// Convert all the typed handles to raw pointers upon output, i.e. up to one per parameter.
#define MONO_HANDLE_REGISTER_ICALL_OUT_0() /* nothing */
#define MONO_HANDLE_REGISTER_ICALL_OUT_1(t0) MONO_HANDLE_REGISTER_ICALL_OUT (t0, 0)
#define MONO_HANDLE_REGISTER_ICALL_OUT_2(t0, t1) MONO_HANDLE_REGISTER_ICALL_OUT_1 (t0) MONO_HANDLE_REGISTER_ICALL_OUT (t1, 1)
#define MONO_HANDLE_REGISTER_ICALL_OUT_3(t0, t1, t2) MONO_HANDLE_REGISTER_ICALL_OUT_2 (t0, t1) MONO_HANDLE_REGISTER_ICALL_OUT (t2, 2)
#define MONO_HANDLE_REGISTER_ICALL_OUT_4(t0, t1, t2, t3) MONO_HANDLE_REGISTER_ICALL_OUT_3 (t0, t1, t2) MONO_HANDLE_REGISTER_ICALL_OUT (t3, 3)
#define MONO_HANDLE_REGISTER_ICALL_OUT_5(t0, t1, t2, t3, t4) MONO_HANDLE_REGISTER_ICALL_OUT_4 (t0, t1, t2, t3) MONO_HANDLE_REGISTER_ICALL_OUT (t4, 4)
#define MONO_HANDLE_REGISTER_ICALL_OUT_6(t0, t1, t2, t3, t4, t5) MONO_HANDLE_REGISTER_ICALL_OUT_5 (t0, t1, t2, t3, t4) MONO_HANDLE_REGISTER_ICALL_OUT (t5, 5)
#define MONO_HANDLE_REGISTER_ICALL_OUT_7(t0, t1, t2, t3, t4, t5, t6) MONO_HANDLE_REGISTER_ICALL_OUT_6 (t0, t1, t2, t3, t4, t5) MONO_HANDLE_REGISTER_ICALL_OUT (t6, 6)
#define MONO_HANDLE_REGISTER_ICALL_OUT_8(t0, t1, t2, t3, t4, t5, t6, t7) MONO_HANDLE_REGISTER_ICALL_OUT_7 (t0, t1, t2, t3, t4, t5, t6) MONO_HANDLE_REGISTER_ICALL_OUT (t7, 7)
#define MONO_HANDLE_REGISTER_ICALL_OUT_9(t0, t1, t2, t3, t4, t5, t6, t7, t8) MONO_HANDLE_REGISTER_ICALL_OUT_8 (t0, t1, t2, t3, t4, t5, t6, t7) MONO_HANDLE_REGISTER_ICALL_OUT (t8, 8)
#define MONO_HANDLE_TYPE_TYPED(type) MONO_HANDLE_DO (MONO_HANDLE_TYPE_TYPED_, type) (type)
#define MONO_HANDLE_TYPE_TYPED_Void(type) type
#define MONO_HANDLE_TYPE_TYPED_ICALL_HANDLES_WRAP_NONE(type) type
#define MONO_HANDLE_TYPE_TYPED_ICALL_HANDLES_WRAP_OBJ(type) type ## Handle
#define MONO_HANDLE_TYPE_TYPED_ICALL_HANDLES_WRAP_OBJ_OUT(type) type ## Handle
#define MONO_HANDLE_TYPE_TYPED_ICALL_HANDLES_WRAP_OBJ_INOUT(type) type ## Handle
#define MONO_HANDLE_TYPE_TYPED_ICALL_HANDLES_WRAP_VALUETYPE_REF(type) type
// Map a type to a raw handle, or itself.
#define MONO_HANDLE_TYPE_RAWHANDLE(type) MONO_HANDLE_DO (MONO_HANDLE_TYPE_RAWHANDLE_, type) (type)
#define MONO_HANDLE_TYPE_RAWHANDLE_Void(type) type
#define MONO_HANDLE_TYPE_RAWHANDLE_ICALL_HANDLES_WRAP_NONE(type) type
#define MONO_HANDLE_TYPE_RAWHANDLE_ICALL_HANDLES_WRAP_OBJ(type) MonoRawHandle
#define MONO_HANDLE_TYPE_RAWHANDLE_ICALL_HANDLES_WRAP_OBJ_OUT(type) MonoRawHandle
#define MONO_HANDLE_TYPE_RAWHANDLE_ICALL_HANDLES_WRAP_OBJ_INOUT(type) MonoRawHandle
#define MONO_HANDLE_TYPE_RAWHANDLE_ICALL_HANDLES_WRAP_VALUETYPE_REF(type) type
// Map a type to a raw pointer, or itself.
#define MONO_HANDLE_TYPE_RAWPOINTER(type) MONO_HANDLE_DO (MONO_HANDLE_TYPE_RAWPOINTER_, type) (type)
#define MONO_HANDLE_TYPE_RAWPOINTER_Void(type) type
#define MONO_HANDLE_TYPE_RAWPOINTER_ICALL_HANDLES_WRAP_NONE(type) type
#define MONO_HANDLE_TYPE_RAWPOINTER_ICALL_HANDLES_WRAP_OBJ(type) type*
// Only used for return types.
//#define MONO_HANDLE_TYPE_RAWPOINTER_ICALL_HANDLES_WRAP_OBJ_OUT(type) type*
//#define MONO_HANDLE_TYPE_RAWPOINTER_ICALL_HANDLES_WRAP_OBJ_INOUT(type) type*
#define MONO_HANDLE_TYPE_RAWPOINTER_ICALL_HANDLES_WRAP_VALUETYPE_REF(type) type
// Type/name in raw handle prototype and implementation.
#define MONO_HANDLE_ARG_RAWHANDLE(type, n) MONO_HANDLE_DO (MONO_HANDLE_ARG_RAWHANDLE_, type) (type, n)
#define MONO_HANDLE_ARG_RAWHANDLE_ICALL_HANDLES_WRAP_NONE(type, n) MONO_HANDLE_TYPE_RAWHANDLE (type) a ## n
#define MONO_HANDLE_ARG_RAWHANDLE_ICALL_HANDLES_WRAP_OBJ(type, n) MONO_HANDLE_TYPE_RAWHANDLE (type) a ## n
#define MONO_HANDLE_ARG_RAWHANDLE_ICALL_HANDLES_WRAP_OBJ_OUT(type, n) MONO_HANDLE_TYPE_RAWHANDLE (type) a ## n
#define MONO_HANDLE_ARG_RAWHANDLE_ICALL_HANDLES_WRAP_OBJ_INOUT(type, n) MONO_HANDLE_TYPE_RAWHANDLE (type) a ## n
#define MONO_HANDLE_ARG_RAWHANDLE_ICALL_HANDLES_WRAP_VALUETYPE_REF(type, n) MONO_HANDLE_TYPE_RAWHANDLE (type) a ## n
// Type/name in raw pointer prototype and implementation.
#define MONO_HANDLE_ARG_RAWPOINTER(type, n) MONO_HANDLE_DO (MONO_HANDLE_ARG_RAWPOINTER_, type) (type, n)
#define MONO_HANDLE_ARG_RAWPOINTER_ICALL_HANDLES_WRAP_NONE(type, n) MONO_HANDLE_TYPE_RAWPOINTER (type) a ## n
#define MONO_HANDLE_ARG_RAWPOINTER_ICALL_HANDLES_WRAP_OBJ(type, n) MONO_HANDLE_TYPE_RAWPOINTER (type) a ## n ## _raw
#define MONO_HANDLE_ARG_RAWPOINTER_ICALL_HANDLES_WRAP_OBJ_OUT(type, n) unused_untested_looks_correct5 MONO_HANDLE_TYPE_RAWPOINTER (type) a ## n ## _raw
#define MONO_HANDLE_ARG_RAWPOINTER_ICALL_HANDLES_WRAP_OBJ_INOUT(type, n) unused_untested_looks_correct6 MONO_HANDLE_TYPE_RAWPOINTER (type) a ## n ## _raw
#define MONO_HANDLE_ARG_RAWPOINTER_ICALL_HANDLES_WRAP_VALUETYPE_REF(type, n) FIXME //MONO_HANDLE_TYPE_RAWPOINTER (type) a ## n
// Generate a parameter list, types only, for a function accepting/returning typed handles.
#define MONO_HANDLE_FOREACH_TYPE_TYPED_0() /* nothing */
#define MONO_HANDLE_FOREACH_TYPE_TYPED_1(t0) MONO_HANDLE_TYPE_TYPED (t0)
#define MONO_HANDLE_FOREACH_TYPE_TYPED_2(t0, t1) MONO_HANDLE_FOREACH_TYPE_TYPED_1 (t0) ,MONO_HANDLE_TYPE_TYPED (t1)
#define MONO_HANDLE_FOREACH_TYPE_TYPED_3(t0, t1, t2) MONO_HANDLE_FOREACH_TYPE_TYPED_2 (t0, t1) ,MONO_HANDLE_TYPE_TYPED (t2)
#define MONO_HANDLE_FOREACH_TYPE_TYPED_4(t0, t1, t2, t3) MONO_HANDLE_FOREACH_TYPE_TYPED_3 (t0, t1, t2) ,MONO_HANDLE_TYPE_TYPED (t3)
#define MONO_HANDLE_FOREACH_TYPE_TYPED_5(t0, t1, t2, t3, t4) MONO_HANDLE_FOREACH_TYPE_TYPED_4 (t0, t1, t2, t3) ,MONO_HANDLE_TYPE_TYPED (t4)
#define MONO_HANDLE_FOREACH_TYPE_TYPED_6(t0, t1, t2, t3, t4, t5) MONO_HANDLE_FOREACH_TYPE_TYPED_5 (t0, t1, t2, t3, t4) ,MONO_HANDLE_TYPE_TYPED (t5)
#define MONO_HANDLE_FOREACH_TYPE_TYPED_7(t0, t1, t2, t3, t4, t5, t6) MONO_HANDLE_FOREACH_TYPE_TYPED_6 (t0, t1, t2, t3, t4, t5) ,MONO_HANDLE_TYPE_TYPED (t6)
#define MONO_HANDLE_FOREACH_TYPE_TYPED_8(t0, t1, t2, t3, t4, t5, t6, t7) MONO_HANDLE_FOREACH_TYPE_TYPED_7 (t0, t1, t2, t3, t4, t5, t6) ,MONO_HANDLE_TYPE_TYPED (t7)
#define MONO_HANDLE_FOREACH_TYPE_TYPED_9(t0, t1, t2, t3, t4, t5, t6, t7, t8) MONO_HANDLE_FOREACH_TYPE_TYPED_8 (t0, t1, t2, t3, t4, t5, t6, t7) ,MONO_HANDLE_TYPE_TYPED (t8)
// Generate a parameter list, types and names, for a function accepting raw handles and no MonoError,
// and returning a raw pointer.
#define MONO_HANDLE_FOREACH_ARG_RAW_0() void
#define MONO_HANDLE_FOREACH_ARG_RAW_1(t0) MONO_HANDLE_ARG_RAWHANDLE (t0, 0)
#define MONO_HANDLE_FOREACH_ARG_RAW_2(t0, t1) MONO_HANDLE_FOREACH_ARG_RAW_1 (t0), MONO_HANDLE_ARG_RAWHANDLE (t1, 1)
#define MONO_HANDLE_FOREACH_ARG_RAW_3(t0, t1, t2) MONO_HANDLE_FOREACH_ARG_RAW_2 (t0, t1), MONO_HANDLE_ARG_RAWHANDLE (t2, 2)
#define MONO_HANDLE_FOREACH_ARG_RAW_4(t0, t1, t2, t3) MONO_HANDLE_FOREACH_ARG_RAW_3 (t0, t1, t2), MONO_HANDLE_ARG_RAWHANDLE (t3, 3)
#define MONO_HANDLE_FOREACH_ARG_RAW_5(t0, t1, t2, t3, t4) MONO_HANDLE_FOREACH_ARG_RAW_4 (t0, t1, t2, t3), MONO_HANDLE_ARG_RAWHANDLE (t4, 4)
#define MONO_HANDLE_FOREACH_ARG_RAW_6(t0, t1, t2, t3, t4, t5) MONO_HANDLE_FOREACH_ARG_RAW_5 (t0, t1, t2, t3, t4), MONO_HANDLE_ARG_RAWHANDLE (t5, 5)
#define MONO_HANDLE_FOREACH_ARG_RAW_7(t0, t1, t2, t3, t4, t5, t6) MONO_HANDLE_FOREACH_ARG_RAW_6 (t0, t1, t2, t3, t4, t5), MONO_HANDLE_ARG_RAWHANDLE (t6, 6)
#define MONO_HANDLE_FOREACH_ARG_RAW_8(t0, t1, t2, t3, t4, t5, t6, t7) MONO_HANDLE_FOREACH_ARG_RAW_7 (t0, t1, t2, t3, t4, t5, t6), MONO_HANDLE_ARG_RAWHANDLE (t7, 7)
#define MONO_HANDLE_FOREACH_ARG_RAW_9(t0, t1, t2, t3, t4, t5, t6, t7, t8) MONO_HANDLE_FOREACH_ARG_RAW_8 (t0, t1, t2, t3, t4, t5, t6, t7), MONO_HANDLE_ARG_RAWHANDLE (t8, 8)
// Generate a parameter list, types and names, for a function accepting raw pointers and no MonoError,
// and returning a raw pointer.
#define MONO_HANDLE_FOREACH_ARG_RAWPOINTER_0() void
#define MONO_HANDLE_FOREACH_ARG_RAWPOINTER_1(t0) MONO_HANDLE_ARG_RAWPOINTER (t0, 0)
#define MONO_HANDLE_FOREACH_ARG_RAWPOINTER_2(t0, t1) MONO_HANDLE_FOREACH_ARG_RAWPOINTER_1 (t0), MONO_HANDLE_ARG_RAWPOINTER (t1, 1)
#define MONO_HANDLE_FOREACH_ARG_RAWPOINTER_3(t0, t1, t2) MONO_HANDLE_FOREACH_ARG_RAWPOINTER_2 (t0, t1), MONO_HANDLE_ARG_RAWPOINTER (t2, 2)
#define MONO_HANDLE_FOREACH_ARG_RAWPOINTER_4(t0, t1, t2, t3) MONO_HANDLE_FOREACH_ARG_RAWPOINTER_3 (t0, t1, t2), MONO_HANDLE_ARG_RAWPOINTER (t3, 3)
#define MONO_HANDLE_FOREACH_ARG_RAWPOINTER_5(t0, t1, t2, t3, t4) MONO_HANDLE_FOREACH_ARG_RAWPOINTER_4 (t0, t1, t2, t3), MONO_HANDLE_ARG_RAWPOINTER (t4, 4)
#define MONO_HANDLE_FOREACH_ARG_RAWPOINTER_6(t0, t1, t2, t3, t4, t5) MONO_HANDLE_FOREACH_ARG_RAWPOINTER_5 (t0, t1, t2, t3, t4), MONO_HANDLE_ARG_RAWPOINTER (t5, 5)
#define MONO_HANDLE_FOREACH_ARG_RAWPOINTER_7(t0, t1, t2, t3, t4, t5, t6) MONO_HANDLE_FOREACH_ARG_RAWPOINTER_6 (t0, t1, t2, t3, t4, t5), MONO_HANDLE_ARG_RAWPOINTER (t6, 6)
#define MONO_HANDLE_FOREACH_ARG_RAWPOINTER_8(t0, t1, t2, t3, t4, t5, t6, t7) MONO_HANDLE_FOREACH_ARG_RAWPOINTER_7 (t0, t1, t2, t3, t4, t5, t6), MONO_HANDLE_ARG_RAWPOINTER (t7, 7)
#define MONO_HANDLE_FOREACH_ARG_RAWPOINTER_9(t0, t1, t2, t3, t4, t5, t6, t7, t8) MONO_HANDLE_FOREACH_ARG_RAWPOINTER_8 (t0, t1, t2, t3, t4, t5, t6, t7), MONO_HANDLE_ARG_RAWPOINTER (t8, 8)
#define MONO_HANDLE_REGISTER_ICALL_CALL_0 /* nothing */
#define MONO_HANDLE_REGISTER_ICALL_CALL_1 a0,
#define MONO_HANDLE_REGISTER_ICALL_CALL_2 MONO_HANDLE_REGISTER_ICALL_CALL_1 a1,
#define MONO_HANDLE_REGISTER_ICALL_CALL_3 MONO_HANDLE_REGISTER_ICALL_CALL_2 a2,
#define MONO_HANDLE_REGISTER_ICALL_CALL_4 MONO_HANDLE_REGISTER_ICALL_CALL_3 a3,
#define MONO_HANDLE_REGISTER_ICALL_CALL_5 MONO_HANDLE_REGISTER_ICALL_CALL_4 a4,
#define MONO_HANDLE_REGISTER_ICALL_CALL_6 MONO_HANDLE_REGISTER_ICALL_CALL_5 a5,
#define MONO_HANDLE_REGISTER_ICALL_CALL_7 MONO_HANDLE_REGISTER_ICALL_CALL_6 a6,
#define MONO_HANDLE_REGISTER_ICALL_CALL_8 MONO_HANDLE_REGISTER_ICALL_CALL_7 a7,
#define MONO_HANDLE_REGISTER_ICALL_CALL_9 MONO_HANDLE_REGISTER_ICALL_CALL_8 a8,
// Call from the wrapper to the actual icall, passing on the
// WRAP_NONE parameters directly, casting handles from raw to typed.
#define MONO_HANDLE_CALL_0() /* nothing */
#define MONO_HANDLE_CALL_1(t0) MONO_HANDLE_MARSHAL (t0, 0)
#define MONO_HANDLE_CALL_2(t0, t1) MONO_HANDLE_CALL_1 (t0), MONO_HANDLE_MARSHAL (t1, 1)
#define MONO_HANDLE_CALL_3(t0, t1, t2) MONO_HANDLE_CALL_2 (t0, t1), MONO_HANDLE_MARSHAL (t2, 2)
#define MONO_HANDLE_CALL_4(t0, t1, t2, t3) MONO_HANDLE_CALL_3 (t0, t1, t2), MONO_HANDLE_MARSHAL (t3, 3)
#define MONO_HANDLE_CALL_5(t0, t1, t2, t3, t4) MONO_HANDLE_CALL_4 (t0, t1, t2, t3), MONO_HANDLE_MARSHAL (t4, 4)
#define MONO_HANDLE_CALL_6(t0, t1, t2, t3, t4, t5) MONO_HANDLE_CALL_5 (t0, t1, t2, t3, t4), MONO_HANDLE_MARSHAL (t5, 5)
#define MONO_HANDLE_CALL_7(t0, t1, t2, t3, t4, t5, t6) MONO_HANDLE_CALL_6 (t0, t1, t2, t3, t4, t5), MONO_HANDLE_MARSHAL (t6, 6)
#define MONO_HANDLE_CALL_8(t0, t1, t2, t3, t4, t5, t6, t7) MONO_HANDLE_CALL_7 (t0, t1, t2, t3, t4, t5, t6), MONO_HANDLE_MARSHAL (t7, 7)
#define MONO_HANDLE_CALL_9(t0, t1, t2, t3, t4, t5, t6, t7, t8) MONO_HANDLE_CALL_8 (t0, t1, t2, t3, t4, t5, t6, t7), MONO_HANDLE_MARSHAL (t8, 8)
// Place a comma after a parameter list of length n, i.e. nothing for 0, else comma.
#define MONO_HANDLE_COMMA_0 /* nothing */
#define MONO_HANDLE_COMMA_1 ,
#define MONO_HANDLE_COMMA_2 ,
#define MONO_HANDLE_COMMA_3 ,
#define MONO_HANDLE_COMMA_4 ,
#define MONO_HANDLE_COMMA_5 ,
#define MONO_HANDLE_COMMA_6 ,
#define MONO_HANDLE_COMMA_7 ,
#define MONO_HANDLE_COMMA_8 ,
#define MONO_HANDLE_COMMA_9 ,
// Declare the function that takes/returns typed handles and a MonoError.
#define MONO_HANDLE_DECLARE(id, name, func, rettype, n, argtypes) \
MONO_HANDLE_TYPE_TYPED (rettype) \
func (MONO_HANDLE_FOREACH_TYPE_TYPED_ ## n argtypes MONO_HANDLE_COMMA_ ## n MonoError *error)
// Declare the function wrapper that takes raw handles and returns a raw pointer.
#define MONO_HANDLE_DECLARE_RAW(id, name, func, rettype, n, argtypes) \
ICALL_EXPORT MONO_HANDLE_TYPE_RAWPOINTER (rettype) \
func ## _raw ( MONO_HANDLE_FOREACH_ARG_RAW_ ## n argtypes)
// Implement ves_icall_foo_raw over ves_icall_foo.
// Raw handles are converted to/from typed handles and the rest is passed through.
// This is for functions in icall-def.h.
#define MONO_HANDLE_IMPLEMENT(id, name, func, rettype, n, argtypes) \
\
MONO_HANDLE_DECLARE_RAW (id, name, func, rettype, n, argtypes) \
{ \
HANDLE_FUNCTION_ENTER (); \
\
ERROR_DECL (error); \
\
MONO_HANDLE_RETURN_BEGIN (rettype) \
\
func (MONO_HANDLE_CALL_ ## n argtypes MONO_HANDLE_COMMA_ ## n error); \
\
mono_error_set_pending_exception (error); \
\
MONO_HANDLE_RETURN_END (rettype) \
}
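// Illustrative expansion sketch (hypothetical icall, not an entry in this
// header): for an icall-def.h line such as
//   HANDLES(FOO_1, "Bar", ves_icall_foo, MonoObject, 1, (MonoString))
// MONO_HANDLE_IMPLEMENT generates, roughly:
//
//   MonoObject* ves_icall_foo_raw (MonoRawHandle a0)
//   {
//       HANDLE_FUNCTION_ENTER ();
//       ERROR_DECL (error);
//       MonoObjectHandle icall_result =
//           ves_icall_foo (*(MonoStringHandle*)&a0, error);
//       mono_error_set_pending_exception (error);
//       HANDLE_FUNCTION_RETURN_OBJ (icall_result);
//   }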
// Declare the function that takes/returns raw pointers and no MonoError.
#define MONO_HANDLE_REGISTER_ICALL_DECLARE_RAW(func, rettype, n, argtypes) \
ICALL_EXPORT MONO_HANDLE_TYPE_RAWPOINTER (rettype) \
func ( MONO_HANDLE_FOREACH_ARG_RAWPOINTER_ ## n argtypes)
// Implement ves_icall_foo over ves_icall_foo_impl.
//
// Raw pointers are converted to/from handles and the rest is passed through.
// The in/out/inout-ness of parameters must be correct. (unlike MONO_HANDLE_IMPLEMENT)
// Valuetype-refs are not handled. (unlike MONO_HANDLE_IMPLEMENT)
// Handle creation is less efficient than MONO_HANDLE_IMPLEMENT (marshal-ilgen.c) -- using TLS
// and per-handle work.
//
// In future this should produce an array of IcallHandlesWrap and send that through
// to emit_native_icall_wrapper_ilgen to gain its efficient handles.
//
// Or put the handles directly in the coop frame, or pointers to them.
// i.e. one TLS access at function start and end.
//
// This is for functions passed to mono_register_jit_icall_info, etc.
#define MONO_HANDLE_REGISTER_ICALL_IMPLEMENT(func, rettype, n, argtypes) \
\
MONO_HANDLE_REGISTER_ICALL_DECLARE_RAW (func, rettype, n, argtypes) \
{ \
HANDLE_FUNCTION_ENTER (); \
\
ERROR_DECL (error); \
\
MONO_HANDLE_REGISTER_ICALL_LOCALS_ ## n argtypes \
\
MONO_HANDLE_RETURN_BEGIN (rettype) \
\
func ## _impl (MONO_HANDLE_REGISTER_ICALL_CALL_ ## n error); \
\
MONO_HANDLE_REGISTER_ICALL_OUT_ ## n argtypes \
\
mono_error_set_pending_exception (error); \
\
MONO_HANDLE_RETURN_END (rettype) \
}
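// Illustrative expansion sketch (hypothetical jit icall, not an entry in this
// header): for
//   MONO_HANDLE_REGISTER_ICALL_IMPLEMENT (ves_foo, MonoObject, 1, (MonoString))
// the generated wrapper is, roughly:
//
//   MonoObject* ves_foo (MonoString* a0_raw)
//   {
//       HANDLE_FUNCTION_ENTER ();
//       ERROR_DECL (error);
//       MonoStringHandle a0 = MONO_HANDLE_NEW (MonoString, a0_raw);
//       MonoObjectHandle icall_result = ves_foo_impl (a0, error);
//       mono_error_set_pending_exception (error);
//       HANDLE_FUNCTION_RETURN_OBJ (icall_result);
//   }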
#endif
| /**
* \file
* Copyright 2016 Microsoft
* Licensed under the MIT license. See LICENSE file in the project root for full license information.
*/
#ifndef __MONO_METADATA_ICALL_TABLE_H__
#define __MONO_METADATA_ICALL_TABLE_H__
#include <config.h>
#include <glib.h>
#include <mono/utils/mono-publib.h>
#include "marshal.h"
#include "icalls.h"
#define MONO_ICALL_TABLE_CALLBACKS_VERSION 2
typedef struct {
int version;
gpointer (*lookup) (MonoMethod *method, char *classname, char *methodname, char *sigstart, gboolean *uses_handles);
const char* (*lookup_icall_symbol) (gpointer func);
} MonoIcallTableCallbacks;
ICALL_EXTERN_C
void
mono_install_icall_table_callbacks (const MonoIcallTableCallbacks *cb);
MONO_API void
mono_icall_table_init (void);
// It helps for types to be single tokens, though this can be relaxed in some places.
// Marshaling a "ptr" does nothing -- just pass it on unchanged.
// Marshaling a "ref" also does nothing at this layer, but
// creates a handle in marshal-ilgen.c.
// "ref" means "can be an interior pointer".
// "ptr" means "to a local".
// It is generally difficult to know which applies, and "ref" is the safer choice.
// Presently the distinction does not matter.
typedef gint32 *gint32_ptr;
typedef gsize *gsize_ptr;
typedef guchar *guchar_ptr;
typedef const guchar *const_guchar_ptr;
typedef gpointer *gpointer_ptr;
typedef const char *const_char_ptr;
typedef char *char_ptr;
typedef char **char_ptr_ptr;
typedef gunichar2 *gunichar2_ptr;
typedef const gunichar2 *const_gunichar2_ptr;
typedef int *int_ptr;
typedef int **int_ptr_ref;
typedef guint8 **guint8_ptr_ref;
typedef GPtrArray *GPtrArray_ptr;
// HANDLE is not used just to avoid duplicate typedef warnings with some compilers.
// gpointer == void* == HANDLE == FILE_HANDLE == PROCESS_HANDLE.
typedef gpointer PROCESS_HANDLE;
typedef gpointer FILE_HANDLE;
typedef MonoAssemblyName *MonoAssemblyName_ptr;
typedef MonoBoolean *MonoBoolean_ptr;
typedef MonoClass *MonoClass_ptr;
typedef MonoClassField *MonoClassField_ptr;
typedef MonoEvent *MonoEvent_ptr;
typedef MonoImage *MonoImage_ptr;
typedef MonoMethod *MonoMethod_ptr;
typedef MonoProperty *MonoProperty_ptr;
typedef MonoPropertyInfo *MonoPropertyInfo_ref;
typedef MonoType *MonoType_ptr;
typedef MonoTypedRef *MonoTypedRef_ptr;
typedef MonoStackCrawlMark *MonoStackCrawlMark_ptr;
typedef MonoVTable *MonoVTable_ptr;
typedef unsigned *unsigned_ptr;
typedef mono_unichar2 *mono_unichar2_ptr;
typedef mono_unichar4 *mono_unichar4_ptr;
typedef MonoSpanOfObjects *MonoSpanOfObjects_ref;
typedef char **char_ptr_ref;
typedef gint32 *gint32_ref;
typedef gint64 *gint64_ref;
typedef gpointer *gpointer_ref;
typedef gsize *gsize_ref;
typedef guint32 *guint32_ref;
typedef guint64 *guint64_ref;
typedef int *int_ref;
typedef MonoAssemblyName *MonoAssemblyName_ref;
typedef MonoBoolean *MonoBoolean_ref;
typedef MonoClassField *MonoClassField_ref;
typedef MonoEvent *MonoEvent_ref;
typedef MonoEventInfo *MonoEventInfo_ref;
typedef MonoGenericParamInfo *MonoGenericParamInfo_ptr;
typedef MonoMethod *MonoMethod_ref;
typedef MonoMethodInfo *MonoMethodInfo_ref;
typedef MonoResolveTokenError *MonoResolveTokenError_ref;
typedef MonoType *MonoType_ref;
typedef MonoTypedRef *MonoTypedRef_ref;
// Maybe do this in TYPED_HANDLE_DECL.
typedef MonoArray MonoArrayOut;
typedef MonoArray MonoArrayInOut;
typedef MonoArrayHandle MonoArrayOutHandle;
typedef MonoArrayHandle MonoArrayInOutHandle;
typedef MonoException MonoExceptionOut;
typedef MonoExceptionHandle MonoExceptionOutHandle;
typedef MonoObject MonoObjectOut;
typedef MonoObject MonoObjectInOut;
typedef MonoObjectHandle MonoObjectOutHandle;
typedef MonoObjectHandle MonoObjectInOutHandle;
typedef MonoReflectionModule MonoReflectionModuleOut;
typedef MonoReflectionModuleHandle MonoReflectionModuleOutHandle;
typedef MonoString MonoStringOut;
typedef MonoStringHandle MonoStringOutHandle;
// How the arguments and return value of an icall should be wrapped.
// The names and meanings are from marshal-ilgen.c.
// ICALL_HANDLES_WRAP_NONE
// ICALL_HANDLES_WRAP_OBJ
// ICALL_HANDLES_WRAP_OBJ_INOUT
// ICALL_HANDLES_WRAP_OBJ_OUT
// ICALL_HANDLES_WRAP_VALUETYPE_REF
//
// In the present implementation, all that matters is handle-or-not:
// in, out, and inout are treated the same, and none and valuetype_ref are treated the same.
// Handle creation is in marshal-ilgen.c.
// Map a type to a type class: Void and above.
#define MONO_HANDLE_TYPE_WRAP_void Void
#define MONO_HANDLE_TYPE_WRAP_GPtrArray_ptr ICALL_HANDLES_WRAP_NONE
#define MONO_HANDLE_TYPE_WRAP_MonoBoolean ICALL_HANDLES_WRAP_NONE
#define MONO_HANDLE_TYPE_WRAP_const_gunichar2_ptr ICALL_HANDLES_WRAP_NONE
#define MONO_HANDLE_TYPE_WRAP_gunichar2_ptr ICALL_HANDLES_WRAP_NONE
#define MONO_HANDLE_TYPE_WRAP_gint ICALL_HANDLES_WRAP_NONE
#define MONO_HANDLE_TYPE_WRAP_gint32 ICALL_HANDLES_WRAP_NONE
#define MONO_HANDLE_TYPE_WRAP_gint64 ICALL_HANDLES_WRAP_NONE
#define MONO_HANDLE_TYPE_WRAP_gpointer ICALL_HANDLES_WRAP_NONE
#define MONO_HANDLE_TYPE_WRAP_gconstpointer ICALL_HANDLES_WRAP_NONE
#define MONO_HANDLE_TYPE_WRAP_gsize ICALL_HANDLES_WRAP_NONE
#define MONO_HANDLE_TYPE_WRAP_gssize ICALL_HANDLES_WRAP_NONE
#define MONO_HANDLE_TYPE_WRAP_guchar_ptr ICALL_HANDLES_WRAP_NONE
#define MONO_HANDLE_TYPE_WRAP_guint ICALL_HANDLES_WRAP_NONE
#define MONO_HANDLE_TYPE_WRAP_const_guchar_ptr ICALL_HANDLES_WRAP_NONE
#define MONO_HANDLE_TYPE_WRAP_guint32 ICALL_HANDLES_WRAP_NONE
#define MONO_HANDLE_TYPE_WRAP_guint64 ICALL_HANDLES_WRAP_NONE
#define MONO_HANDLE_TYPE_WRAP_int ICALL_HANDLES_WRAP_NONE
#define MONO_HANDLE_TYPE_WRAP_uint ICALL_HANDLES_WRAP_NONE
#define MONO_HANDLE_TYPE_WRAP_PInfo ICALL_HANDLES_WRAP_NONE
#define MONO_HANDLE_TYPE_WRAP_mono_bstr ICALL_HANDLES_WRAP_NONE
#define MONO_HANDLE_TYPE_WRAP_mono_bstr_const ICALL_HANDLES_WRAP_NONE
#define MONO_HANDLE_TYPE_WRAP_unsigned_ptr ICALL_HANDLES_WRAP_NONE
#define MONO_HANDLE_TYPE_WRAP_mono_unichar2_ptr ICALL_HANDLES_WRAP_NONE
#define MONO_HANDLE_TYPE_WRAP_mono_unichar4_ptr ICALL_HANDLES_WRAP_NONE
#define MONO_HANDLE_TYPE_WRAP_MonoImage_ptr ICALL_HANDLES_WRAP_NONE
#define MONO_HANDLE_TYPE_WRAP_MonoClassField_ptr ICALL_HANDLES_WRAP_NONE
#define MONO_HANDLE_TYPE_WRAP_MonoMarshalNative ICALL_HANDLES_WRAP_NONE
#define MONO_HANDLE_TYPE_WRAP_MonoProperty_ptr ICALL_HANDLES_WRAP_NONE
#define MONO_HANDLE_TYPE_WRAP_size_t ICALL_HANDLES_WRAP_NONE
#define MONO_HANDLE_TYPE_WRAP_MonoVTable_ptr ICALL_HANDLES_WRAP_NONE
#define MONO_HANDLE_TYPE_WRAP_MonoQCallTypeHandle ICALL_HANDLES_WRAP_NONE
#define MONO_HANDLE_TYPE_WRAP_MonoQCallAssemblyHandle ICALL_HANDLES_WRAP_NONE
#define MONO_HANDLE_TYPE_WRAP_MonoAssemblyName_ref ICALL_HANDLES_WRAP_VALUETYPE_REF
#define MONO_HANDLE_TYPE_WRAP_MonoBoolean_ref ICALL_HANDLES_WRAP_VALUETYPE_REF
#define MONO_HANDLE_TYPE_WRAP_MonoClassField_ref ICALL_HANDLES_WRAP_VALUETYPE_REF
#define MONO_HANDLE_TYPE_WRAP_MonoEvent_ref ICALL_HANDLES_WRAP_VALUETYPE_REF
#define MONO_HANDLE_TYPE_WRAP_MonoEventInfo_ref ICALL_HANDLES_WRAP_VALUETYPE_REF
#define MONO_HANDLE_TYPE_WRAP_MonoMethod_ref ICALL_HANDLES_WRAP_VALUETYPE_REF
#define MONO_HANDLE_TYPE_WRAP_MonoMethodInfo_ref ICALL_HANDLES_WRAP_VALUETYPE_REF
#define MONO_HANDLE_TYPE_WRAP_MonoPropertyInfo_ref ICALL_HANDLES_WRAP_VALUETYPE_REF
#define MONO_HANDLE_TYPE_WRAP_MonoType_ref ICALL_HANDLES_WRAP_VALUETYPE_REF
#define MONO_HANDLE_TYPE_WRAP_MonoTypedRef_ref ICALL_HANDLES_WRAP_VALUETYPE_REF
#define MONO_HANDLE_TYPE_WRAP_gint32_ref ICALL_HANDLES_WRAP_VALUETYPE_REF
#define MONO_HANDLE_TYPE_WRAP_gint64_ref ICALL_HANDLES_WRAP_VALUETYPE_REF
#define MONO_HANDLE_TYPE_WRAP_gpointer_ref ICALL_HANDLES_WRAP_VALUETYPE_REF
#define MONO_HANDLE_TYPE_WRAP_gsize_ref ICALL_HANDLES_WRAP_VALUETYPE_REF
#define MONO_HANDLE_TYPE_WRAP_guint32_ref ICALL_HANDLES_WRAP_VALUETYPE_REF
#define MONO_HANDLE_TYPE_WRAP_guint64_ref ICALL_HANDLES_WRAP_VALUETYPE_REF
#define MONO_HANDLE_TYPE_WRAP_int_ref ICALL_HANDLES_WRAP_VALUETYPE_REF
#define MONO_HANDLE_TYPE_WRAP_int_ptr_ref ICALL_HANDLES_WRAP_VALUETYPE_REF
#define MONO_HANDLE_TYPE_WRAP_char_ptr_ref ICALL_HANDLES_WRAP_VALUETYPE_REF
#define MONO_HANDLE_TYPE_WRAP_guint8_ptr_ref ICALL_HANDLES_WRAP_VALUETYPE_REF
#define MONO_HANDLE_TYPE_WRAP_MonoResolveTokenError_ref ICALL_HANDLES_WRAP_VALUETYPE_REF
#define MONO_HANDLE_TYPE_WRAP_MonoSpanOfObjects_ref ICALL_HANDLES_WRAP_VALUETYPE_REF
// HANDLE is not used just to avoid duplicate typedef warnings with some compilers.
// gpointer == void* == HANDLE == FILE_HANDLE == PROCESS_HANDLE.
#define MONO_HANDLE_TYPE_WRAP_char_ptr ICALL_HANDLES_WRAP_NONE
#define MONO_HANDLE_TYPE_WRAP_const_char_ptr ICALL_HANDLES_WRAP_NONE
#define MONO_HANDLE_TYPE_WRAP_FILE_HANDLE ICALL_HANDLES_WRAP_NONE
#define MONO_HANDLE_TYPE_WRAP_MonoClass_ptr ICALL_HANDLES_WRAP_NONE
#define MONO_HANDLE_TYPE_WRAP_MonoEvent_ptr ICALL_HANDLES_WRAP_NONE
#define MONO_HANDLE_TYPE_WRAP_MonoGenericParamInfo_ptr ICALL_HANDLES_WRAP_NONE
#define MONO_HANDLE_TYPE_WRAP_MonoMethod_ptr ICALL_HANDLES_WRAP_NONE
#define MONO_HANDLE_TYPE_WRAP_MonoType_ptr ICALL_HANDLES_WRAP_NONE
#define MONO_HANDLE_TYPE_WRAP_MonoTypedRef_ptr ICALL_HANDLES_WRAP_NONE
#define MONO_HANDLE_TYPE_WRAP_MonoStackCrawlMark_ptr ICALL_HANDLES_WRAP_NONE
#define MONO_HANDLE_TYPE_WRAP_gint32_ptr ICALL_HANDLES_WRAP_NONE
#define MONO_HANDLE_TYPE_WRAP_gpointer_ptr ICALL_HANDLES_WRAP_NONE
#define MONO_HANDLE_TYPE_WRAP_PROCESS_HANDLE ICALL_HANDLES_WRAP_NONE
#define MONO_HANDLE_TYPE_WRAP_MonoObjectHandleOnStack ICALL_HANDLES_WRAP_NONE
// Please keep this sorted (grep ICALL_HANDLES_WRAP_OBJ$ | sort)
#define MONO_HANDLE_TYPE_WRAP_MonoAppContext ICALL_HANDLES_WRAP_OBJ
#define MONO_HANDLE_TYPE_WRAP_MonoAppDomain ICALL_HANDLES_WRAP_OBJ
#define MONO_HANDLE_TYPE_WRAP_MonoAppDomainSetup ICALL_HANDLES_WRAP_OBJ
#define MONO_HANDLE_TYPE_WRAP_MonoArray ICALL_HANDLES_WRAP_OBJ
#define MONO_HANDLE_TYPE_WRAP_MonoAsyncResult ICALL_HANDLES_WRAP_OBJ
#define MONO_HANDLE_TYPE_WRAP_MonoCalendarData ICALL_HANDLES_WRAP_OBJ
#define MONO_HANDLE_TYPE_WRAP_MonoComInteropProxy ICALL_HANDLES_WRAP_OBJ
#define MONO_HANDLE_TYPE_WRAP_MonoComObject ICALL_HANDLES_WRAP_OBJ
#define MONO_HANDLE_TYPE_WRAP_MonoCultureData ICALL_HANDLES_WRAP_OBJ
#define MONO_HANDLE_TYPE_WRAP_MonoCultureInfo ICALL_HANDLES_WRAP_OBJ
#define MONO_HANDLE_TYPE_WRAP_MonoDelegate ICALL_HANDLES_WRAP_OBJ
#define MONO_HANDLE_TYPE_WRAP_MonoException ICALL_HANDLES_WRAP_OBJ
#define MONO_HANDLE_TYPE_WRAP_MonoInternalThread ICALL_HANDLES_WRAP_OBJ
#define MONO_HANDLE_TYPE_WRAP_MonoIOSelectorJob ICALL_HANDLES_WRAP_OBJ
#define MONO_HANDLE_TYPE_WRAP_MonoObject ICALL_HANDLES_WRAP_OBJ
#define MONO_HANDLE_TYPE_WRAP_MonoManifestResourceInfo ICALL_HANDLES_WRAP_OBJ
#define MONO_HANDLE_TYPE_WRAP_MonoMulticastDelegate ICALL_HANDLES_WRAP_OBJ
#define MONO_HANDLE_TYPE_WRAP_MonoReflectionAssembly ICALL_HANDLES_WRAP_OBJ
#define MONO_HANDLE_TYPE_WRAP_MonoReflectionAssemblyBuilder ICALL_HANDLES_WRAP_OBJ
#define MONO_HANDLE_TYPE_WRAP_MonoReflectionDynamicMethod ICALL_HANDLES_WRAP_OBJ
#define MONO_HANDLE_TYPE_WRAP_MonoReflectionEvent ICALL_HANDLES_WRAP_OBJ
#define MONO_HANDLE_TYPE_WRAP_MonoReflectionMonoEvent ICALL_HANDLES_WRAP_OBJ
#define MONO_HANDLE_TYPE_WRAP_MonoReflectionField ICALL_HANDLES_WRAP_OBJ
#define MONO_HANDLE_TYPE_WRAP_MonoReflectionMarshalAsAttribute ICALL_HANDLES_WRAP_OBJ
#define MONO_HANDLE_TYPE_WRAP_MonoReflectionMethod ICALL_HANDLES_WRAP_OBJ
#define MONO_HANDLE_TYPE_WRAP_MonoReflectionMethodBody ICALL_HANDLES_WRAP_OBJ
#define MONO_HANDLE_TYPE_WRAP_MonoReflectionModule ICALL_HANDLES_WRAP_OBJ
#define MONO_HANDLE_TYPE_WRAP_MonoReflectionModuleBuilder ICALL_HANDLES_WRAP_OBJ
#define MONO_HANDLE_TYPE_WRAP_MonoReflectionParameter ICALL_HANDLES_WRAP_OBJ
#define MONO_HANDLE_TYPE_WRAP_MonoReflectionProperty ICALL_HANDLES_WRAP_OBJ
#define MONO_HANDLE_TYPE_WRAP_MonoReflectionSigHelper ICALL_HANDLES_WRAP_OBJ
#define MONO_HANDLE_TYPE_WRAP_MonoReflectionType ICALL_HANDLES_WRAP_OBJ
#define MONO_HANDLE_TYPE_WRAP_MonoReflectionTypeBuilder ICALL_HANDLES_WRAP_OBJ
#define MONO_HANDLE_TYPE_WRAP_MonoRegionInfo ICALL_HANDLES_WRAP_OBJ
#define MONO_HANDLE_TYPE_WRAP_MonoString ICALL_HANDLES_WRAP_OBJ
#define MONO_HANDLE_TYPE_WRAP_MonoStringBuilder ICALL_HANDLES_WRAP_OBJ
#define MONO_HANDLE_TYPE_WRAP_MonoThreadObject ICALL_HANDLES_WRAP_OBJ
#define MONO_HANDLE_TYPE_WRAP_MonoTransparentProxy ICALL_HANDLES_WRAP_OBJ
#define MONO_HANDLE_TYPE_WRAP_MonoW32ProcessStartInfo ICALL_HANDLES_WRAP_OBJ
#define MONO_HANDLE_TYPE_WRAP_MonoExceptionOut ICALL_HANDLES_WRAP_OBJ_OUT
#define MONO_HANDLE_TYPE_WRAP_MonoObjectOut ICALL_HANDLES_WRAP_OBJ_OUT
#define MONO_HANDLE_TYPE_WRAP_MonoStringOut ICALL_HANDLES_WRAP_OBJ_OUT
#define MONO_HANDLE_TYPE_WRAP_MonoArrayOut ICALL_HANDLES_WRAP_OBJ_OUT
#define MONO_HANDLE_TYPE_WRAP_MonoReflectionModuleOut ICALL_HANDLES_WRAP_OBJ_OUT
#define MONO_HANDLE_TYPE_WRAP_MonoW32ProcessInfo_ref ICALL_HANDLES_WRAP_VALUETYPE_REF
// These are rare, and could be eliminated.
// They could be return values, or just separate out parameters.
#define MONO_HANDLE_TYPE_WRAP_MonoObjectInOut ICALL_HANDLES_WRAP_OBJ_INOUT
#define MONO_HANDLE_TYPE_WRAP_MonoArrayInOut ICALL_HANDLES_WRAP_OBJ_INOUT
// Do macro_prefix for type type, mapping type to a type class.
// Note that the macro can further be followed by parameters.
#define MONO_HANDLE_DO3(macro_prefix, type) macro_prefix ## type
#define MONO_HANDLE_DO2(macro_prefix, type) MONO_HANDLE_DO3 (macro_prefix, type)
#define MONO_HANDLE_DO(macro_prefix, type) MONO_HANDLE_DO2 (macro_prefix, MONO_HANDLE_TYPE_WRAP_ ## type)
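// Illustrative expansion (added for clarity): MONO_HANDLE_DO (MONO_HANDLE_RETURN_BEGIN_, MonoString)
// first pastes MONO_HANDLE_TYPE_WRAP_MonoString; the extra MONO_HANDLE_DO2 level lets that
// argument expand to ICALL_HANDLES_WRAP_OBJ before MONO_HANDLE_DO3 pastes the final token,
// MONO_HANDLE_RETURN_BEGIN_ICALL_HANDLES_WRAP_OBJ.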
#define MONO_HANDLE_RETURN_BEGIN(type) MONO_HANDLE_DO (MONO_HANDLE_RETURN_BEGIN_, type) (type)
#define MONO_HANDLE_RETURN_BEGIN_Void(type) /* nothing */
#define MONO_HANDLE_RETURN_BEGIN_ICALL_HANDLES_WRAP_NONE(type) type icall_result =
#define MONO_HANDLE_RETURN_BEGIN_ICALL_HANDLES_WRAP_OBJ(type) type ## Handle icall_result =
#define MONO_HANDLE_RETURN_END(type) MONO_HANDLE_DO (MONO_HANDLE_RETURN_END_, type);
#define MONO_HANDLE_RETURN_END_Void HANDLE_FUNCTION_RETURN ()
#define MONO_HANDLE_RETURN_END_ICALL_HANDLES_WRAP_NONE HANDLE_FUNCTION_RETURN_VAL (icall_result)
#define MONO_HANDLE_RETURN_END_ICALL_HANDLES_WRAP_OBJ HANDLE_FUNCTION_RETURN_OBJ (icall_result)
// Convert raw handles to typed handles, just by casting and copying a pointer.
#define MONO_HANDLE_MARSHAL(type, n) MONO_HANDLE_DO (MONO_HANDLE_MARSHAL_, type) (type, n)
#define MONO_HANDLE_MARSHAL_ICALL_HANDLES_WRAP_NONE(type, n) a ## n
#define MONO_HANDLE_MARSHAL_ICALL_HANDLES_WRAP_OBJ(type, n) *(type ## Handle*)&a ## n
#define MONO_HANDLE_MARSHAL_ICALL_HANDLES_WRAP_OBJ_OUT(type, n) *(type ## Handle*)&a ## n
#define MONO_HANDLE_MARSHAL_ICALL_HANDLES_WRAP_OBJ_INOUT(type, n) *(type ## Handle*)&a ## n
#define MONO_HANDLE_MARSHAL_ICALL_HANDLES_WRAP_VALUETYPE_REF(type, n) a ## n
// Declare and initialize a local for an object in, out, inout parameters, upon input.
#define MONO_HANDLE_REGISTER_ICALL_LOCALS(type, n) MONO_HANDLE_DO (MONO_HANDLE_REGISTER_ICALL_LOCALS_, type) (type, n)
#define MONO_HANDLE_REGISTER_ICALL_LOCALS_ICALL_HANDLES_WRAP_NONE(type, n) /* nothing */
#define MONO_HANDLE_REGISTER_ICALL_LOCALS_ICALL_HANDLES_WRAP_OBJ(type, n) type ## Handle a ## n = MONO_HANDLE_NEW (type, a ## n ## _raw);
#define MONO_HANDLE_REGISTER_ICALL_LOCALS_ICALL_HANDLES_WRAP_OBJ_OUT(type, n) unused_untested_looks_correct1 type ## Handle a ## n = MONO_HANDLE_NEW (type, NULL);
#define MONO_HANDLE_REGISTER_ICALL_LOCALS_ICALL_HANDLES_WRAP_OBJ_INOUT(type, n) unused_untested_looks_correct2 type ## Handle a ## n = MONO_HANDLE_NEW (type, *a ## n ## _raw);
#define MONO_HANDLE_REGISTER_ICALL_LOCALS_ICALL_HANDLES_WRAP_VALUETYPE_REF(type, n) FIXME restore mono_icall_handle_new_interior from e8b037642104527bd9b9ba70d502210b9c12d2b8 \
type ## Handle a ## n = mono_icall_handle_new_interior (a ## n ## _raw);
// Produce all the locals, i.e. up to one per parameter.
#define MONO_HANDLE_REGISTER_ICALL_LOCALS_0() /* nothing */
#define MONO_HANDLE_REGISTER_ICALL_LOCALS_1(t0) MONO_HANDLE_REGISTER_ICALL_LOCALS (t0, 0)
#define MONO_HANDLE_REGISTER_ICALL_LOCALS_2(t0, t1) MONO_HANDLE_REGISTER_ICALL_LOCALS_1 (t0) MONO_HANDLE_REGISTER_ICALL_LOCALS (t1, 1)
#define MONO_HANDLE_REGISTER_ICALL_LOCALS_3(t0, t1, t2) MONO_HANDLE_REGISTER_ICALL_LOCALS_2 (t0, t1) MONO_HANDLE_REGISTER_ICALL_LOCALS (t2, 2)
#define MONO_HANDLE_REGISTER_ICALL_LOCALS_4(t0, t1, t2, t3) MONO_HANDLE_REGISTER_ICALL_LOCALS_3 (t0, t1, t2) MONO_HANDLE_REGISTER_ICALL_LOCALS (t3, 3)
#define MONO_HANDLE_REGISTER_ICALL_LOCALS_5(t0, t1, t2, t3, t4) MONO_HANDLE_REGISTER_ICALL_LOCALS_4 (t0, t1, t2, t3) MONO_HANDLE_REGISTER_ICALL_LOCALS (t4, 4)
#define MONO_HANDLE_REGISTER_ICALL_LOCALS_6(t0, t1, t2, t3, t4, t5) MONO_HANDLE_REGISTER_ICALL_LOCALS_5 (t0, t1, t2, t3, t4) MONO_HANDLE_REGISTER_ICALL_LOCALS (t5, 5)
#define MONO_HANDLE_REGISTER_ICALL_LOCALS_7(t0, t1, t2, t3, t4, t5, t6) MONO_HANDLE_REGISTER_ICALL_LOCALS_6 (t0, t1, t2, t3, t4, t5) MONO_HANDLE_REGISTER_ICALL_LOCALS (t6, 6)
#define MONO_HANDLE_REGISTER_ICALL_LOCALS_8(t0, t1, t2, t3, t4, t5, t6, t7) MONO_HANDLE_REGISTER_ICALL_LOCALS_7 (t0, t1, t2, t3, t4, t5, t6) MONO_HANDLE_REGISTER_ICALL_LOCALS (t7, 7)
#define MONO_HANDLE_REGISTER_ICALL_LOCALS_9(t0, t1, t2, t3, t4, t5, t6, t7, t8) MONO_HANDLE_REGISTER_ICALL_LOCALS_8 (t0, t1, t2, t3, t4, t5, t6, t7) MONO_HANDLE_REGISTER_ICALL_LOCALS (t8, 8)
// Convert a typed handle to raw pointer upon output.
#define MONO_HANDLE_REGISTER_ICALL_OUT(type, n) MONO_HANDLE_DO (MONO_HANDLE_REGISTER_ICALL_OUT_, type) (type, n)
#define MONO_HANDLE_REGISTER_ICALL_OUT_ICALL_HANDLES_WRAP_NONE(type, n) /* nothing */
#define MONO_HANDLE_REGISTER_ICALL_OUT_ICALL_HANDLES_WRAP_OBJ(type, n) /* nothing */
#define MONO_HANDLE_REGISTER_ICALL_OUT_ICALL_HANDLES_WRAP_OBJ_OUT(type, n) unused_untested_looks_correct3 *a ## n ## _raw = MONO_HANDLE_RAW (a ## n);
#define MONO_HANDLE_REGISTER_ICALL_OUT_ICALL_HANDLES_WRAP_OBJ_INOUT(type, n) unused_untested_looks_correct4 *a ## n ## _raw = MONO_HANDLE_RAW (a ## n);
#define MONO_HANDLE_REGISTER_ICALL_OUT_ICALL_HANDLES_WRAP_VALUETYPE_REF(type, n) /* nothing */
// Convert all the typed handles to raw pointers upon output, i.e. up to one per parameter.
#define MONO_HANDLE_REGISTER_ICALL_OUT_0() /* nothing */
#define MONO_HANDLE_REGISTER_ICALL_OUT_1(t0) MONO_HANDLE_REGISTER_ICALL_OUT (t0, 0)
#define MONO_HANDLE_REGISTER_ICALL_OUT_2(t0, t1) MONO_HANDLE_REGISTER_ICALL_OUT_1 (t0) MONO_HANDLE_REGISTER_ICALL_OUT (t1, 1)
#define MONO_HANDLE_REGISTER_ICALL_OUT_3(t0, t1, t2) MONO_HANDLE_REGISTER_ICALL_OUT_2 (t0, t1) MONO_HANDLE_REGISTER_ICALL_OUT (t2, 2)
#define MONO_HANDLE_REGISTER_ICALL_OUT_4(t0, t1, t2, t3) MONO_HANDLE_REGISTER_ICALL_OUT_3 (t0, t1, t2) MONO_HANDLE_REGISTER_ICALL_OUT (t3, 3)
#define MONO_HANDLE_REGISTER_ICALL_OUT_5(t0, t1, t2, t3, t4) MONO_HANDLE_REGISTER_ICALL_OUT_4 (t0, t1, t2, t3) MONO_HANDLE_REGISTER_ICALL_OUT (t4, 4)
#define MONO_HANDLE_REGISTER_ICALL_OUT_6(t0, t1, t2, t3, t4, t5) MONO_HANDLE_REGISTER_ICALL_OUT_5 (t0, t1, t2, t3, t4) MONO_HANDLE_REGISTER_ICALL_OUT (t5, 5)
#define MONO_HANDLE_REGISTER_ICALL_OUT_7(t0, t1, t2, t3, t4, t5, t6) MONO_HANDLE_REGISTER_ICALL_OUT_6 (t0, t1, t2, t3, t4, t5) MONO_HANDLE_REGISTER_ICALL_OUT (t6, 6)
#define MONO_HANDLE_REGISTER_ICALL_OUT_8(t0, t1, t2, t3, t4, t5, t6, t7) MONO_HANDLE_REGISTER_ICALL_OUT_7 (t0, t1, t2, t3, t4, t5, t6) MONO_HANDLE_REGISTER_ICALL_OUT (t7, 7)
#define MONO_HANDLE_REGISTER_ICALL_OUT_9(t0, t1, t2, t3, t4, t5, t6, t7, t8) MONO_HANDLE_REGISTER_ICALL_OUT_8 (t0, t1, t2, t3, t4, t5, t6, t7) MONO_HANDLE_REGISTER_ICALL_OUT (t8, 8)
#define MONO_HANDLE_TYPE_TYPED(type) MONO_HANDLE_DO (MONO_HANDLE_TYPE_TYPED_, type) (type)
#define MONO_HANDLE_TYPE_TYPED_Void(type) type
#define MONO_HANDLE_TYPE_TYPED_ICALL_HANDLES_WRAP_NONE(type) type
#define MONO_HANDLE_TYPE_TYPED_ICALL_HANDLES_WRAP_OBJ(type) type ## Handle
#define MONO_HANDLE_TYPE_TYPED_ICALL_HANDLES_WRAP_OBJ_OUT(type) type ## Handle
#define MONO_HANDLE_TYPE_TYPED_ICALL_HANDLES_WRAP_OBJ_INOUT(type) type ## Handle
#define MONO_HANDLE_TYPE_TYPED_ICALL_HANDLES_WRAP_VALUETYPE_REF(type) type
// Map a type to a raw handle, or itself.
#define MONO_HANDLE_TYPE_RAWHANDLE(type) MONO_HANDLE_DO (MONO_HANDLE_TYPE_RAWHANDLE_, type) (type)
#define MONO_HANDLE_TYPE_RAWHANDLE_Void(type) type
#define MONO_HANDLE_TYPE_RAWHANDLE_ICALL_HANDLES_WRAP_NONE(type) type
#define MONO_HANDLE_TYPE_RAWHANDLE_ICALL_HANDLES_WRAP_OBJ(type) MonoRawHandle
#define MONO_HANDLE_TYPE_RAWHANDLE_ICALL_HANDLES_WRAP_OBJ_OUT(type) MonoRawHandle
#define MONO_HANDLE_TYPE_RAWHANDLE_ICALL_HANDLES_WRAP_OBJ_INOUT(type) MonoRawHandle
#define MONO_HANDLE_TYPE_RAWHANDLE_ICALL_HANDLES_WRAP_VALUETYPE_REF(type) type
// Map a type to a raw pointer, or itself.
#define MONO_HANDLE_TYPE_RAWPOINTER(type) MONO_HANDLE_DO (MONO_HANDLE_TYPE_RAWPOINTER_, type) (type)
#define MONO_HANDLE_TYPE_RAWPOINTER_Void(type) type
#define MONO_HANDLE_TYPE_RAWPOINTER_ICALL_HANDLES_WRAP_NONE(type) type
#define MONO_HANDLE_TYPE_RAWPOINTER_ICALL_HANDLES_WRAP_OBJ(type) type*
// Only used for return types.
//#define MONO_HANDLE_TYPE_RAWPOINTER_ICALL_HANDLES_WRAP_OBJ_OUT(type) type*
//#define MONO_HANDLE_TYPE_RAWPOINTER_ICALL_HANDLES_WRAP_OBJ_INOUT(type) type*
#define MONO_HANDLE_TYPE_RAWPOINTER_ICALL_HANDLES_WRAP_VALUETYPE_REF(type) type
// Type/name in raw handle prototype and implementation.
#define MONO_HANDLE_ARG_RAWHANDLE(type, n) MONO_HANDLE_DO (MONO_HANDLE_ARG_RAWHANDLE_, type) (type, n)
#define MONO_HANDLE_ARG_RAWHANDLE_ICALL_HANDLES_WRAP_NONE(type, n) MONO_HANDLE_TYPE_RAWHANDLE (type) a ## n
#define MONO_HANDLE_ARG_RAWHANDLE_ICALL_HANDLES_WRAP_OBJ(type, n) MONO_HANDLE_TYPE_RAWHANDLE (type) a ## n
#define MONO_HANDLE_ARG_RAWHANDLE_ICALL_HANDLES_WRAP_OBJ_OUT(type, n) MONO_HANDLE_TYPE_RAWHANDLE (type) a ## n
#define MONO_HANDLE_ARG_RAWHANDLE_ICALL_HANDLES_WRAP_OBJ_INOUT(type, n) MONO_HANDLE_TYPE_RAWHANDLE (type) a ## n
#define MONO_HANDLE_ARG_RAWHANDLE_ICALL_HANDLES_WRAP_VALUETYPE_REF(type, n) MONO_HANDLE_TYPE_RAWHANDLE (type) a ## n
// Type/name in raw pointer prototype and implementation.
#define MONO_HANDLE_ARG_RAWPOINTER(type, n) MONO_HANDLE_DO (MONO_HANDLE_ARG_RAWPOINTER_, type) (type, n)
#define MONO_HANDLE_ARG_RAWPOINTER_ICALL_HANDLES_WRAP_NONE(type, n) MONO_HANDLE_TYPE_RAWPOINTER (type) a ## n
#define MONO_HANDLE_ARG_RAWPOINTER_ICALL_HANDLES_WRAP_OBJ(type, n) MONO_HANDLE_TYPE_RAWPOINTER (type) a ## n ## _raw
#define MONO_HANDLE_ARG_RAWPOINTER_ICALL_HANDLES_WRAP_OBJ_OUT(type, n) unused_untested_looks_correct5 MONO_HANDLE_TYPE_RAWPOINTER (type) a ## n ## _raw
#define MONO_HANDLE_ARG_RAWPOINTER_ICALL_HANDLES_WRAP_OBJ_INOUT(type, n) unused_untested_looks_correct6 MONO_HANDLE_TYPE_RAWPOINTER (type) a ## n ## _raw
#define MONO_HANDLE_ARG_RAWPOINTER_ICALL_HANDLES_WRAP_VALUETYPE_REF(type, n) FIXME //MONO_HANDLE_TYPE_RAWPOINTER (type) a ## n
// Generate a parameter list, types only, for a function accepting/returning typed handles.
#define MONO_HANDLE_FOREACH_TYPE_TYPED_0() /* nothing */
#define MONO_HANDLE_FOREACH_TYPE_TYPED_1(t0) MONO_HANDLE_TYPE_TYPED (t0)
#define MONO_HANDLE_FOREACH_TYPE_TYPED_2(t0, t1) MONO_HANDLE_FOREACH_TYPE_TYPED_1 (t0) ,MONO_HANDLE_TYPE_TYPED (t1)
#define MONO_HANDLE_FOREACH_TYPE_TYPED_3(t0, t1, t2) MONO_HANDLE_FOREACH_TYPE_TYPED_2 (t0, t1) ,MONO_HANDLE_TYPE_TYPED (t2)
#define MONO_HANDLE_FOREACH_TYPE_TYPED_4(t0, t1, t2, t3) MONO_HANDLE_FOREACH_TYPE_TYPED_3 (t0, t1, t2) ,MONO_HANDLE_TYPE_TYPED (t3)
#define MONO_HANDLE_FOREACH_TYPE_TYPED_5(t0, t1, t2, t3, t4) MONO_HANDLE_FOREACH_TYPE_TYPED_4 (t0, t1, t2, t3) ,MONO_HANDLE_TYPE_TYPED (t4)
#define MONO_HANDLE_FOREACH_TYPE_TYPED_6(t0, t1, t2, t3, t4, t5) MONO_HANDLE_FOREACH_TYPE_TYPED_5 (t0, t1, t2, t3, t4) ,MONO_HANDLE_TYPE_TYPED (t5)
#define MONO_HANDLE_FOREACH_TYPE_TYPED_7(t0, t1, t2, t3, t4, t5, t6) MONO_HANDLE_FOREACH_TYPE_TYPED_6 (t0, t1, t2, t3, t4, t5) ,MONO_HANDLE_TYPE_TYPED (t6)
#define MONO_HANDLE_FOREACH_TYPE_TYPED_8(t0, t1, t2, t3, t4, t5, t6, t7) MONO_HANDLE_FOREACH_TYPE_TYPED_7 (t0, t1, t2, t3, t4, t5, t6) ,MONO_HANDLE_TYPE_TYPED (t7)
#define MONO_HANDLE_FOREACH_TYPE_TYPED_9(t0, t1, t2, t3, t4, t5, t6, t7, t8) MONO_HANDLE_FOREACH_TYPE_TYPED_8 (t0, t1, t2, t3, t4, t5, t6, t7) ,MONO_HANDLE_TYPE_TYPED (t8)
// Generate a parameter list, types and names, for a function accepting raw handles and no MonoError,
// and returning a raw pointer.
#define MONO_HANDLE_FOREACH_ARG_RAW_0() void
#define MONO_HANDLE_FOREACH_ARG_RAW_1(t0) MONO_HANDLE_ARG_RAWHANDLE (t0, 0)
#define MONO_HANDLE_FOREACH_ARG_RAW_2(t0, t1) MONO_HANDLE_FOREACH_ARG_RAW_1 (t0), MONO_HANDLE_ARG_RAWHANDLE (t1, 1)
#define MONO_HANDLE_FOREACH_ARG_RAW_3(t0, t1, t2) MONO_HANDLE_FOREACH_ARG_RAW_2 (t0, t1), MONO_HANDLE_ARG_RAWHANDLE (t2, 2)
#define MONO_HANDLE_FOREACH_ARG_RAW_4(t0, t1, t2, t3) MONO_HANDLE_FOREACH_ARG_RAW_3 (t0, t1, t2), MONO_HANDLE_ARG_RAWHANDLE (t3, 3)
#define MONO_HANDLE_FOREACH_ARG_RAW_5(t0, t1, t2, t3, t4) MONO_HANDLE_FOREACH_ARG_RAW_4 (t0, t1, t2, t3), MONO_HANDLE_ARG_RAWHANDLE (t4, 4)
#define MONO_HANDLE_FOREACH_ARG_RAW_6(t0, t1, t2, t3, t4, t5) MONO_HANDLE_FOREACH_ARG_RAW_5 (t0, t1, t2, t3, t4), MONO_HANDLE_ARG_RAWHANDLE (t5, 5)
#define MONO_HANDLE_FOREACH_ARG_RAW_7(t0, t1, t2, t3, t4, t5, t6) MONO_HANDLE_FOREACH_ARG_RAW_6 (t0, t1, t2, t3, t4, t5), MONO_HANDLE_ARG_RAWHANDLE (t6, 6)
#define MONO_HANDLE_FOREACH_ARG_RAW_8(t0, t1, t2, t3, t4, t5, t6, t7) MONO_HANDLE_FOREACH_ARG_RAW_7 (t0, t1, t2, t3, t4, t5, t6), MONO_HANDLE_ARG_RAWHANDLE (t7, 7)
#define MONO_HANDLE_FOREACH_ARG_RAW_9(t0, t1, t2, t3, t4, t5, t6, t7, t8) MONO_HANDLE_FOREACH_ARG_RAW_8 (t0, t1, t2, t3, t4, t5, t6, t7), MONO_HANDLE_ARG_RAWHANDLE (t8, 8)
// Generate a parameter list, types and names, for a function accepting raw pointers and no MonoError,
// and returning a raw pointer.
#define MONO_HANDLE_FOREACH_ARG_RAWPOINTER_0() void
#define MONO_HANDLE_FOREACH_ARG_RAWPOINTER_1(t0) MONO_HANDLE_ARG_RAWPOINTER (t0, 0)
#define MONO_HANDLE_FOREACH_ARG_RAWPOINTER_2(t0, t1) MONO_HANDLE_FOREACH_ARG_RAWPOINTER_1 (t0), MONO_HANDLE_ARG_RAWPOINTER (t1, 1)
#define MONO_HANDLE_FOREACH_ARG_RAWPOINTER_3(t0, t1, t2) MONO_HANDLE_FOREACH_ARG_RAWPOINTER_2 (t0, t1), MONO_HANDLE_ARG_RAWPOINTER (t2, 2)
#define MONO_HANDLE_FOREACH_ARG_RAWPOINTER_4(t0, t1, t2, t3) MONO_HANDLE_FOREACH_ARG_RAWPOINTER_3 (t0, t1, t2), MONO_HANDLE_ARG_RAWPOINTER (t3, 3)
#define MONO_HANDLE_FOREACH_ARG_RAWPOINTER_5(t0, t1, t2, t3, t4) MONO_HANDLE_FOREACH_ARG_RAWPOINTER_4 (t0, t1, t2, t3), MONO_HANDLE_ARG_RAWPOINTER (t4, 4)
#define MONO_HANDLE_FOREACH_ARG_RAWPOINTER_6(t0, t1, t2, t3, t4, t5) MONO_HANDLE_FOREACH_ARG_RAWPOINTER_5 (t0, t1, t2, t3, t4), MONO_HANDLE_ARG_RAWPOINTER (t5, 5)
#define MONO_HANDLE_FOREACH_ARG_RAWPOINTER_7(t0, t1, t2, t3, t4, t5, t6) MONO_HANDLE_FOREACH_ARG_RAWPOINTER_6 (t0, t1, t2, t3, t4, t5), MONO_HANDLE_ARG_RAWPOINTER (t6, 6)
#define MONO_HANDLE_FOREACH_ARG_RAWPOINTER_8(t0, t1, t2, t3, t4, t5, t6, t7) MONO_HANDLE_FOREACH_ARG_RAWPOINTER_7 (t0, t1, t2, t3, t4, t5, t6), MONO_HANDLE_ARG_RAWPOINTER (t7, 7)
#define MONO_HANDLE_FOREACH_ARG_RAWPOINTER_9(t0, t1, t2, t3, t4, t5, t6, t7, t8) MONO_HANDLE_FOREACH_ARG_RAWPOINTER_8 (t0, t1, t2, t3, t4, t5, t6, t7), MONO_HANDLE_ARG_RAWPOINTER (t8, 8)
#define MONO_HANDLE_REGISTER_ICALL_CALL_0 /* nothing */
#define MONO_HANDLE_REGISTER_ICALL_CALL_1 a0,
#define MONO_HANDLE_REGISTER_ICALL_CALL_2 MONO_HANDLE_REGISTER_ICALL_CALL_1 a1,
#define MONO_HANDLE_REGISTER_ICALL_CALL_3 MONO_HANDLE_REGISTER_ICALL_CALL_2 a2,
#define MONO_HANDLE_REGISTER_ICALL_CALL_4 MONO_HANDLE_REGISTER_ICALL_CALL_3 a3,
#define MONO_HANDLE_REGISTER_ICALL_CALL_5 MONO_HANDLE_REGISTER_ICALL_CALL_4 a4,
#define MONO_HANDLE_REGISTER_ICALL_CALL_6 MONO_HANDLE_REGISTER_ICALL_CALL_5 a5,
#define MONO_HANDLE_REGISTER_ICALL_CALL_7 MONO_HANDLE_REGISTER_ICALL_CALL_6 a6,
#define MONO_HANDLE_REGISTER_ICALL_CALL_8 MONO_HANDLE_REGISTER_ICALL_CALL_7 a7,
#define MONO_HANDLE_REGISTER_ICALL_CALL_9 MONO_HANDLE_REGISTER_ICALL_CALL_8 a8,
// Call from the wrapper to the actual icall, passing on the
// WRAP_NONE parameters directly, casting handles from raw to typed.
#define MONO_HANDLE_CALL_0() /* nothing */
#define MONO_HANDLE_CALL_1(t0) MONO_HANDLE_MARSHAL (t0, 0)
#define MONO_HANDLE_CALL_2(t0, t1) MONO_HANDLE_CALL_1 (t0), MONO_HANDLE_MARSHAL (t1, 1)
#define MONO_HANDLE_CALL_3(t0, t1, t2) MONO_HANDLE_CALL_2 (t0, t1), MONO_HANDLE_MARSHAL (t2, 2)
#define MONO_HANDLE_CALL_4(t0, t1, t2, t3) MONO_HANDLE_CALL_3 (t0, t1, t2), MONO_HANDLE_MARSHAL (t3, 3)
#define MONO_HANDLE_CALL_5(t0, t1, t2, t3, t4) MONO_HANDLE_CALL_4 (t0, t1, t2, t3), MONO_HANDLE_MARSHAL (t4, 4)
#define MONO_HANDLE_CALL_6(t0, t1, t2, t3, t4, t5) MONO_HANDLE_CALL_5 (t0, t1, t2, t3, t4), MONO_HANDLE_MARSHAL (t5, 5)
#define MONO_HANDLE_CALL_7(t0, t1, t2, t3, t4, t5, t6) MONO_HANDLE_CALL_6 (t0, t1, t2, t3, t4, t5), MONO_HANDLE_MARSHAL (t6, 6)
#define MONO_HANDLE_CALL_8(t0, t1, t2, t3, t4, t5, t6, t7) MONO_HANDLE_CALL_7 (t0, t1, t2, t3, t4, t5, t6), MONO_HANDLE_MARSHAL (t7, 7)
#define MONO_HANDLE_CALL_9(t0, t1, t2, t3, t4, t5, t6, t7, t8) MONO_HANDLE_CALL_8 (t0, t1, t2, t3, t4, t5, t6, t7), MONO_HANDLE_MARSHAL (t8, 8)
// Place a comma after a parameter list of length n, i.e. nothing for 0, else comma.
#define MONO_HANDLE_COMMA_0 /* nothing */
#define MONO_HANDLE_COMMA_1 ,
#define MONO_HANDLE_COMMA_2 ,
#define MONO_HANDLE_COMMA_3 ,
#define MONO_HANDLE_COMMA_4 ,
#define MONO_HANDLE_COMMA_5 ,
#define MONO_HANDLE_COMMA_6 ,
#define MONO_HANDLE_COMMA_7 ,
#define MONO_HANDLE_COMMA_8 ,
#define MONO_HANDLE_COMMA_9 ,
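// Illustrative use (added for clarity): in MONO_HANDLE_DECLARE below, a 0-argument icall
// yields "func (MonoError *error)" while a 1-argument icall yields
// "func (T0Handle, MonoError *error)", the comma coming from MONO_HANDLE_COMMA_1.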
// Declare the function that takes/returns typed handles and a MonoError.
#define MONO_HANDLE_DECLARE(id, name, func, rettype, n, argtypes) \
MONO_HANDLE_TYPE_TYPED (rettype) \
func (MONO_HANDLE_FOREACH_TYPE_TYPED_ ## n argtypes MONO_HANDLE_COMMA_ ## n MonoError *error)
// Declare the function wrapper that takes raw handles and returns a raw pointer.
#define MONO_HANDLE_DECLARE_RAW(id, name, func, rettype, n, argtypes) \
ICALL_EXPORT MONO_HANDLE_TYPE_RAWPOINTER (rettype) \
func ## _raw ( MONO_HANDLE_FOREACH_ARG_RAW_ ## n argtypes)
// Implement ves_icall_foo_raw over ves_icall_foo.
// Raw handles are converted to/from typed handles and the rest is passed through.
// This is for functions in icall-def.h.
#define MONO_HANDLE_IMPLEMENT(id, name, func, rettype, n, argtypes) \
\
MONO_HANDLE_DECLARE_RAW (id, name, func, rettype, n, argtypes) \
{ \
HANDLE_FUNCTION_ENTER (); \
\
ERROR_DECL (error); \
\
MONO_HANDLE_RETURN_BEGIN (rettype) \
\
func (MONO_HANDLE_CALL_ ## n argtypes MONO_HANDLE_COMMA_ ## n error); \
\
mono_error_set_pending_exception (error); \
\
MONO_HANDLE_RETURN_END (rettype) \
}
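// Illustrative expansion (hypothetical icall, added for clarity; not from icall-def.h):
// MONO_HANDLE_IMPLEMENT (id, name, ves_icall_Sample, MonoString, 1, (MonoObject))
// produces approximately:
// ICALL_EXPORT MonoString* ves_icall_Sample_raw (MonoRawHandle a0)
// {
// HANDLE_FUNCTION_ENTER ();
// ERROR_DECL (error);
// MonoStringHandle icall_result = ves_icall_Sample (*(MonoObjectHandle*)&a0, error);
// mono_error_set_pending_exception (error);
// HANDLE_FUNCTION_RETURN_OBJ (icall_result);
// }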
// Declare the function that takes/returns raw pointers and no MonoError.
#define MONO_HANDLE_REGISTER_ICALL_DECLARE_RAW(func, rettype, n, argtypes) \
ICALL_EXPORT MONO_HANDLE_TYPE_RAWPOINTER (rettype) \
func ( MONO_HANDLE_FOREACH_ARG_RAWPOINTER_ ## n argtypes)
// Implement ves_icall_foo over ves_icall_foo_impl.
//
// Raw pointers are converted to/from handles and the rest is passed through.
// The in/out/inout-ness of parameters must be correct. (unlike MONO_HANDLE_IMPLEMENT)
// Valuetype-refs are not handled. (unlike MONO_HANDLE_IMPLEMENT)
// Handle creation is less efficient than MONO_HANDLE_IMPLEMENT (marshal-ilgen.c) -- using TLS
// and per-handle work.
//
// In future this should produce an array of IcallHandlesWrap and send that through
// to emit_native_icall_wrapper_ilgen to gain its efficient handles.
//
// Or put the handles directly in the coop frame, or pointers to them.
// i.e. one TLS access at function start and end.
//
// This is for functions passed to mono_register_jit_icall_info, etc.
#define MONO_HANDLE_REGISTER_ICALL_IMPLEMENT(func, rettype, n, argtypes) \
\
MONO_HANDLE_REGISTER_ICALL_DECLARE_RAW (func, rettype, n, argtypes) \
{ \
HANDLE_FUNCTION_ENTER (); \
\
ERROR_DECL (error); \
\
MONO_HANDLE_REGISTER_ICALL_LOCALS_ ## n argtypes \
\
MONO_HANDLE_RETURN_BEGIN (rettype) \
\
func ## _impl (MONO_HANDLE_REGISTER_ICALL_CALL_ ## n error); \
\
MONO_HANDLE_REGISTER_ICALL_OUT_ ## n argtypes \
\
mono_error_set_pending_exception (error); \
\
MONO_HANDLE_RETURN_END (rettype) \
}
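// Illustrative expansion (hypothetical function, added for clarity):
// MONO_HANDLE_REGISTER_ICALL_IMPLEMENT (mono_sample, MonoString, 1, (MonoObject))
// produces approximately:
// ICALL_EXPORT MonoString* mono_sample (MonoObject* a0_raw)
// {
// HANDLE_FUNCTION_ENTER ();
// ERROR_DECL (error);
// MonoObjectHandle a0 = MONO_HANDLE_NEW (MonoObject, a0_raw);
// MonoStringHandle icall_result = mono_sample_impl (a0, error);
// mono_error_set_pending_exception (error);
// HANDLE_FUNCTION_RETURN_OBJ (icall_result);
// }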
#endif
| -1 |
dotnet/runtime | 66,007 | [monoapi] Add mono_method_get_unmanaged_callers_only_ftnptr | Like `RuntimeMethodHandle.GetFunctionPointer`, but callable from native code | lambdageek | 2022-03-01T15:38:09Z | 2022-03-03T15:38:37Z | faa272f860f83802a9cee65619e6e2e841b05433 | 1cfa6d6964d890f9673b25d2165532f9a43710a7 | [monoapi] Add mono_method_get_unmanaged_callers_only_ftnptr. Like `RuntimeMethodHandle.GetFunctionPointer`, but callable from native code | ./src/coreclr/pal/src/libunwind/tests/test-flush-cache.c | /* libunwind - a platform-independent unwind library
Copyright (C) 2003 Hewlett-Packard Co
Contributed by David Mosberger-Tang <[email protected]>
This file is part of libunwind.
Copyright (c) 2003 Hewlett-Packard Co.
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
#include <stdio.h>
#include <string.h>
#define UNW_LOCAL_ONLY /* must define this for consistency with backtrace() */
#include <libunwind.h>
int verbose;
int
f257 (void)
{
void *buffer[300];
int i, n;
if (verbose)
printf ("First backtrace:\n");
n = unw_backtrace (buffer, 300);
if (verbose)
for (i = 0; i < n; ++i)
printf ("[%d] ip=%p\n", i, buffer[i]);
unw_set_cache_size (unw_local_addr_space, 1023, 0);
unw_flush_cache (unw_local_addr_space, 0, 0);
if (verbose)
printf ("\nSecond backtrace:\n");
n = unw_backtrace (buffer, 300);
if (verbose)
for (i = 0; i < n; ++i)
printf ("[%d] ip=%p\n", i, buffer[i]);
return 0;
}
#define F(n,m) \
int \
f##n (void) \
{ \
return f##m (); \
}
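/* Illustration (added; not in the original test): F(1,2) expands to
   int f1 (void) { return f2 (); }
   so the chain below makes f1 call f2, f2 call f3, and so on up to f257. */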
/* Here, we rely on the fact that the script-cache's hash-table is 256
entries big. With 257 functions, we're guaranteed to get at least
one hash-collision. */
F(256,257) F(255,256) F(254,255) F(253,254)
F(252,253) F(251,252) F(250,251) F(249,250)
F(248,249) F(247,248) F(246,247) F(245,246)
F(244,245) F(243,244) F(242,243) F(241,242)
F(240,241) F(239,240) F(238,239) F(237,238)
F(236,237) F(235,236) F(234,235) F(233,234)
F(232,233) F(231,232) F(230,231) F(229,230)
F(228,229) F(227,228) F(226,227) F(225,226)
F(224,225) F(223,224) F(222,223) F(221,222)
F(220,221) F(219,220) F(218,219) F(217,218)
F(216,217) F(215,216) F(214,215) F(213,214)
F(212,213) F(211,212) F(210,211) F(209,210)
F(208,209) F(207,208) F(206,207) F(205,206)
F(204,205) F(203,204) F(202,203) F(201,202)
F(200,201) F(199,200) F(198,199) F(197,198)
F(196,197) F(195,196) F(194,195) F(193,194)
F(192,193) F(191,192) F(190,191) F(189,190)
F(188,189) F(187,188) F(186,187) F(185,186)
F(184,185) F(183,184) F(182,183) F(181,182)
F(180,181) F(179,180) F(178,179) F(177,178)
F(176,177) F(175,176) F(174,175) F(173,174)
F(172,173) F(171,172) F(170,171) F(169,170)
F(168,169) F(167,168) F(166,167) F(165,166)
F(164,165) F(163,164) F(162,163) F(161,162)
F(160,161) F(159,160) F(158,159) F(157,158)
F(156,157) F(155,156) F(154,155) F(153,154)
F(152,153) F(151,152) F(150,151) F(149,150)
F(148,149) F(147,148) F(146,147) F(145,146)
F(144,145) F(143,144) F(142,143) F(141,142)
F(140,141) F(139,140) F(138,139) F(137,138)
F(136,137) F(135,136) F(134,135) F(133,134)
F(132,133) F(131,132) F(130,131) F(129,130)
F(128,129) F(127,128) F(126,127) F(125,126)
F(124,125) F(123,124) F(122,123) F(121,122)
F(120,121) F(119,120) F(118,119) F(117,118)
F(116,117) F(115,116) F(114,115) F(113,114)
F(112,113) F(111,112) F(110,111) F(109,110)
F(108,109) F(107,108) F(106,107) F(105,106)
F(104,105) F(103,104) F(102,103) F(101,102)
F(100,101) F(99,100) F(98,99) F(97,98)
F(96,97) F(95,96) F(94,95) F(93,94)
F(92,93) F(91,92) F(90,91) F(89,90)
F(88,89) F(87,88) F(86,87) F(85,86)
F(84,85) F(83,84) F(82,83) F(81,82)
F(80,81) F(79,80) F(78,79) F(77,78)
F(76,77) F(75,76) F(74,75) F(73,74)
F(72,73) F(71,72) F(70,71) F(69,70)
F(68,69) F(67,68) F(66,67) F(65,66)
F(64,65) F(63,64) F(62,63) F(61,62)
F(60,61) F(59,60) F(58,59) F(57,58)
F(56,57) F(55,56) F(54,55) F(53,54)
F(52,53) F(51,52) F(50,51) F(49,50)
F(48,49) F(47,48) F(46,47) F(45,46)
F(44,45) F(43,44) F(42,43) F(41,42)
F(40,41) F(39,40) F(38,39) F(37,38)
F(36,37) F(35,36) F(34,35) F(33,34)
F(32,33) F(31,32) F(30,31) F(29,30)
F(28,29) F(27,28) F(26,27) F(25,26)
F(24,25) F(23,24) F(22,23) F(21,22)
F(20,21) F(19,20) F(18,19) F(17,18)
F(16,17) F(15,16) F(14,15) F(13,14)
F(12,13) F(11,12) F(10,11) F(9,10)
F(8,9) F(7,8) F(6,7) F(5,6)
F(4,5) F(3,4) F(2,3) F(1,2)
int
main (int argc, char **argv)
{
if (argc > 1 && strcmp (argv[1], "-v") == 0)
verbose = 1;
return f1 ();
}
| /* libunwind - a platform-independent unwind library
Copyright (C) 2003 Hewlett-Packard Co
Contributed by David Mosberger-Tang <[email protected]>
This file is part of libunwind.
Copyright (c) 2003 Hewlett-Packard Co.
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
#include <stdio.h>
#include <string.h>
#define UNW_LOCAL_ONLY /* must define this for consistency with backtrace() */
#include <libunwind.h>
int verbose;
int
f257 (void)
{
void *buffer[300];
int i, n;
if (verbose)
printf ("First backtrace:\n");
n = unw_backtrace (buffer, 300);
if (verbose)
for (i = 0; i < n; ++i)
printf ("[%d] ip=%p\n", i, buffer[i]);
unw_set_cache_size (unw_local_addr_space, 1023, 0);
unw_flush_cache (unw_local_addr_space, 0, 0);
if (verbose)
printf ("\nSecond backtrace:\n");
n = unw_backtrace (buffer, 300);
if (verbose)
for (i = 0; i < n; ++i)
printf ("[%d] ip=%p\n", i, buffer[i]);
return 0;
}
#define F(n,m) \
int \
f##n (void) \
{ \
return f##m (); \
}
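/* Illustration (added; not in the original test): F(1,2) expands to
   int f1 (void) { return f2 (); }
   so the chain below makes f1 call f2, f2 call f3, and so on up to f257. */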
/* Here, we rely on the fact that the script-cache's hash-table is 256
entries big. With 257 functions, we're guaranteed to get at least
one hash-collision. */
F(256,257) F(255,256) F(254,255) F(253,254)
F(252,253) F(251,252) F(250,251) F(249,250)
F(248,249) F(247,248) F(246,247) F(245,246)
F(244,245) F(243,244) F(242,243) F(241,242)
F(240,241) F(239,240) F(238,239) F(237,238)
F(236,237) F(235,236) F(234,235) F(233,234)
F(232,233) F(231,232) F(230,231) F(229,230)
F(228,229) F(227,228) F(226,227) F(225,226)
F(224,225) F(223,224) F(222,223) F(221,222)
F(220,221) F(219,220) F(218,219) F(217,218)
F(216,217) F(215,216) F(214,215) F(213,214)
F(212,213) F(211,212) F(210,211) F(209,210)
F(208,209) F(207,208) F(206,207) F(205,206)
F(204,205) F(203,204) F(202,203) F(201,202)
F(200,201) F(199,200) F(198,199) F(197,198)
F(196,197) F(195,196) F(194,195) F(193,194)
F(192,193) F(191,192) F(190,191) F(189,190)
F(188,189) F(187,188) F(186,187) F(185,186)
F(184,185) F(183,184) F(182,183) F(181,182)
F(180,181) F(179,180) F(178,179) F(177,178)
F(176,177) F(175,176) F(174,175) F(173,174)
F(172,173) F(171,172) F(170,171) F(169,170)
F(168,169) F(167,168) F(166,167) F(165,166)
F(164,165) F(163,164) F(162,163) F(161,162)
F(160,161) F(159,160) F(158,159) F(157,158)
F(156,157) F(155,156) F(154,155) F(153,154)
F(152,153) F(151,152) F(150,151) F(149,150)
F(148,149) F(147,148) F(146,147) F(145,146)
F(144,145) F(143,144) F(142,143) F(141,142)
F(140,141) F(139,140) F(138,139) F(137,138)
F(136,137) F(135,136) F(134,135) F(133,134)
F(132,133) F(131,132) F(130,131) F(129,130)
F(128,129) F(127,128) F(126,127) F(125,126)
F(124,125) F(123,124) F(122,123) F(121,122)
F(120,121) F(119,120) F(118,119) F(117,118)
F(116,117) F(115,116) F(114,115) F(113,114)
F(112,113) F(111,112) F(110,111) F(109,110)
F(108,109) F(107,108) F(106,107) F(105,106)
F(104,105) F(103,104) F(102,103) F(101,102)
F(100,101) F(99,100) F(98,99) F(97,98)
F(96,97) F(95,96) F(94,95) F(93,94)
F(92,93) F(91,92) F(90,91) F(89,90)
F(88,89) F(87,88) F(86,87) F(85,86)
F(84,85) F(83,84) F(82,83) F(81,82)
F(80,81) F(79,80) F(78,79) F(77,78)
F(76,77) F(75,76) F(74,75) F(73,74)
F(72,73) F(71,72) F(70,71) F(69,70)
F(68,69) F(67,68) F(66,67) F(65,66)
F(64,65) F(63,64) F(62,63) F(61,62)
F(60,61) F(59,60) F(58,59) F(57,58)
F(56,57) F(55,56) F(54,55) F(53,54)
F(52,53) F(51,52) F(50,51) F(49,50)
F(48,49) F(47,48) F(46,47) F(45,46)
F(44,45) F(43,44) F(42,43) F(41,42)
F(40,41) F(39,40) F(38,39) F(37,38)
F(36,37) F(35,36) F(34,35) F(33,34)
F(32,33) F(31,32) F(30,31) F(29,30)
F(28,29) F(27,28) F(26,27) F(25,26)
F(24,25) F(23,24) F(22,23) F(21,22)
F(20,21) F(19,20) F(18,19) F(17,18)
F(16,17) F(15,16) F(14,15) F(13,14)
F(12,13) F(11,12) F(10,11) F(9,10)
F(8,9) F(7,8) F(6,7) F(5,6)
F(4,5) F(3,4) F(2,3) F(1,2)
int
main (int argc, char **argv)
{
if (argc > 1 && strcmp (argv[1], "-v") == 0)
verbose = 1;
return f1 ();
}
| -1 |
dotnet/runtime | 66,007 | [monoapi] Add mono_method_get_unmanaged_callers_only_ftnptr | Like `RuntimeMethodHandle.GetFunctionPointer`, but callable from native code | lambdageek | 2022-03-01T15:38:09Z | 2022-03-03T15:38:37Z | faa272f860f83802a9cee65619e6e2e841b05433 | 1cfa6d6964d890f9673b25d2165532f9a43710a7 | [monoapi] Add mono_method_get_unmanaged_callers_only_ftnptr. Like `RuntimeMethodHandle.GetFunctionPointer`, but callable from native code | ./src/coreclr/pal/inc/pal_mstypes.h | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*++
--*/
////////////////////////////////////////////////////////////////////////
// Extensions to the usual posix header files
////////////////////////////////////////////////////////////////////////
#ifndef __PAL_MSTYPES_H__
#define __PAL_MSTYPES_H__
#ifdef __cplusplus
extern "C" {
#endif
////////////////////////////////////////////////////////////////////////
// calling convention stuff
////////////////////////////////////////////////////////////////////////
#ifdef __cplusplus
#define EXTERN_C extern "C"
#else
#define EXTERN_C
#endif // __cplusplus
#ifndef _MSC_VER
// Note: Win32-hosted GCC predefines __stdcall and __cdecl, but Unix-
// hosted GCC does not.
#ifdef __i386__
#if !defined(__stdcall)
#define __stdcall __attribute__((stdcall))
#endif
#if !defined(_stdcall)
#define _stdcall __stdcall
#endif
#if !defined(__cdecl)
#define __cdecl __attribute__((cdecl))
#endif
#if !defined(_cdecl)
#define _cdecl __cdecl
#endif
#if !defined(CDECL)
#define CDECL __cdecl
#endif
#else // !defined(__i386__)
#define __stdcall
#define _stdcall
#define __cdecl
#define _cdecl
#define CDECL
// On ARM __fastcall is ignored and causes a compile error
#if !defined(PAL_STDCPP_COMPAT) || defined(__arm__)
# undef __fastcall
# undef _fastcall
# define __fastcall
# define _fastcall
#endif // !defined(PAL_STDCPP_COMPAT) || defined(__arm__)
#endif // !defined(__i386__)
#define CALLBACK __cdecl
#if !defined(_declspec)
#define _declspec(e) __declspec(e)
#endif
#if defined(_VAC_) && defined(__cplusplus)
#define __inline inline
#endif
#define __forceinline inline
#endif // !_MSC_VER
#ifdef _MSC_VER
#if defined(PAL_IMPLEMENTATION)
#define PALIMPORT
#else
#define PALIMPORT __declspec(dllimport)
#endif
#define DLLEXPORT __declspec(dllexport)
#define PAL_NORETURN __declspec(noreturn)
#else
#define PALIMPORT
#define DLLEXPORT __attribute__((visibility("default")))
#define PAL_NORETURN __attribute__((noreturn))
#endif
#define PALAPI DLLEXPORT __cdecl
#define PALAPI_NOEXPORT __cdecl
#define PALAPIV __cdecl
////////////////////////////////////////////////////////////////////////
// Type attribute stuff
////////////////////////////////////////////////////////////////////////
#define CONST const
#define IN
#define OUT
#define OPTIONAL
#define FAR
#ifdef UNICODE
#define __TEXT(x) L##x
#else
#define __TEXT(x) x
#endif
#define TEXT(x) __TEXT(x)
////////////////////////////////////////////////////////////////////////
// Some special values
////////////////////////////////////////////////////////////////////////
#ifndef TRUE
#define TRUE 1
#endif
#ifndef FALSE
#define FALSE 0
#endif
////////////////////////////////////////////////////////////////////////
// Misc. type helpers
////////////////////////////////////////////////////////////////////////
#ifdef _MSC_VER
// MSVC's way of declaring large integer constants
// If you define these in one step, without the _HELPER macros, you
// get extra whitespace when composing these with other concatenating macros.
#define I64_HELPER(x) x ## i64
#define I64(x) I64_HELPER(x)
#define UI64_HELPER(x) x ## ui64
#define UI64(x) UI64_HELPER(x)
#else // _MSC_VER
// GCC's way of declaring large integer constants
// If you define these in one step, without the _HELPER macros, you
// get extra whitespace when composing these with other concatenating macros.
#define I64_HELPER(x) x ## LL
#define I64(x) I64_HELPER(x)
#define UI64_HELPER(x) x ## ULL
#define UI64(x) UI64_HELPER(x)
#endif // _MSC_VER
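// Worked example (added for clarity): I64(42) expands via I64_HELPER to the single
// token 42i64 under MSVC and 42LL under GCC, with no stray whitespace inside it.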
////////////////////////////////////////////////////////////////////////
// Misc. types
////////////////////////////////////////////////////////////////////////
#ifndef _MSC_VER
// A bunch of source files (e.g. most of the ndp tree) include pal.h
// but are written to be LLP64, not LP64. (LP64 => long = 64 bits
// LLP64 => longs = 32 bits, long long = 64 bits)
//
// To handle this difference, we #define long to be int (and thus 32 bits) when
// compiling those files. (See the bottom of this file or search for
// #define long to see where we do this.)
//
// But this fix is more complicated than it seems, because we also use the
// preprocessor to #define __int64 to long for LP64 architectures (__int64
// isn't a builtin in gcc). We don't want __int64 to be an int (by cascading
// macro rules). So we play this little trick below where we add
// __cppmungestrip before "long", which is what we're really #defining __int64
// to. The preprocessor sees __cppmungestriplong as something different than
// long, so it doesn't replace it with int. The during the cppmunge phase, we
// remove the __cppmungestrip part, leaving long for the compiler to see.
//
// Note that we can't just use a typedef to define __int64 as long before
// #defining long because typedefed types can't be signedness-agnostic (i.e.
// they must be either signed or unsigned) and we want to be able to use
// __int64 as though it were intrinsic
#ifdef HOST_64BIT
#define __int64 long
#else // HOST_64BIT
#define __int64 long long
#endif // HOST_64BIT
#define __int32 int
#define __int16 short int
#define __int8 char // assumes char is signed
#endif // _MSC_VER
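// Worked example (added for clarity): with HOST_64BIT defined, "unsigned __int64"
// preprocesses to "unsigned long" (64 bits under LP64); otherwise it becomes
// "unsigned long long", so the 64-bit typedefs below are 64 bits either way.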
#ifndef PAL_STDCPP_COMPAT
// Defined in gnu's types.h. For non PAL_IMPLEMENTATION system
// includes are not included, so we need to define them.
#ifndef PAL_IMPLEMENTATION
typedef __int64 int64_t;
typedef unsigned __int64 uint64_t;
typedef __int32 int32_t;
typedef unsigned __int32 uint32_t;
typedef __int16 int16_t;
typedef unsigned __int16 uint16_t;
typedef __int8 int8_t;
typedef unsigned __int8 uint8_t;
#endif // PAL_IMPLEMENTATION
#ifndef _MSC_VER
#if HOST_64BIT
typedef long double LONG_DOUBLE;
#endif
#endif // _MSC_VER
#endif // !PAL_STDCPP_COMPAT
typedef void VOID;
typedef int LONG; // NOTE: diff from windows.h, for LP64 compat
typedef unsigned int ULONG; // NOTE: diff from windows.h, for LP64 compat
typedef __int64 LONGLONG;
typedef unsigned __int64 ULONGLONG;
typedef ULONGLONG DWORD64;
typedef DWORD64 *PDWORD64;
typedef LONGLONG *PLONG64;
typedef ULONGLONG *PULONG64;
typedef ULONGLONG *PULONGLONG;
typedef ULONG *PULONG;
typedef short SHORT;
typedef SHORT *PSHORT;
typedef unsigned short USHORT;
typedef USHORT *PUSHORT;
typedef unsigned char UCHAR;
typedef UCHAR *PUCHAR;
typedef char *PSZ;
typedef ULONGLONG DWORDLONG;
typedef unsigned int DWORD; // NOTE: diff from windows.h, for LP64 compat
typedef unsigned int DWORD32, *PDWORD32;
typedef int BOOL;
typedef unsigned char BYTE;
typedef unsigned short WORD;
typedef float FLOAT;
typedef double DOUBLE;
typedef BOOL *PBOOL;
typedef BOOL *LPBOOL;
typedef BYTE *PBYTE;
typedef BYTE *LPBYTE;
typedef const BYTE *LPCBYTE;
typedef int *PINT;
typedef int *LPINT;
typedef WORD *PWORD;
typedef WORD *LPWORD;
typedef LONG *LPLONG;
typedef LPLONG PLONG;
typedef DWORD *PDWORD;
typedef DWORD *LPDWORD;
typedef void *PVOID;
typedef void *LPVOID;
typedef CONST void *LPCVOID;
typedef int INT;
typedef unsigned int UINT;
typedef unsigned int *PUINT;
typedef BYTE BOOLEAN;
typedef BOOLEAN *PBOOLEAN;
typedef unsigned __int8 UINT8;
typedef signed __int8 INT8;
typedef unsigned __int16 UINT16;
typedef signed __int16 INT16;
typedef unsigned __int32 UINT32, *PUINT32;
typedef signed __int32 INT32, *PINT32;
typedef unsigned __int64 UINT64, *PUINT64;
typedef signed __int64 INT64, *PINT64;
typedef unsigned __int32 ULONG32, *PULONG32;
typedef signed __int32 LONG32, *PLONG32;
typedef unsigned __int64 ULONG64;
typedef signed __int64 LONG64;
#if defined(HOST_X86) && _MSC_VER >= 1300
#define _W64 __w64
#else
#define _W64
#endif
#ifdef HOST_64BIT
#define _atoi64 (__int64)atoll
typedef __int64 INT_PTR, *PINT_PTR;
typedef unsigned __int64 UINT_PTR, *PUINT_PTR;
typedef __int64 LONG_PTR, *PLONG_PTR;
typedef unsigned __int64 ULONG_PTR, *PULONG_PTR;
typedef unsigned __int64 DWORD_PTR, *PDWORD_PTR;
/* maximum signed 64 bit value */
#define LONG_PTR_MAX I64(9223372036854775807)
/* maximum unsigned 64 bit value */
#define ULONG_PTR_MAX UI64(0xffffffffffffffff)
#ifndef SIZE_MAX
#define SIZE_MAX _UI64_MAX
#endif
#define __int3264 __int64
#if !defined(HOST_64BIT)
__inline
unsigned long
HandleToULong(
const void *h
)
{
return((unsigned long) (ULONG_PTR) h );
}
__inline
long
HandleToLong(
const void *h
)
{
return((long) (LONG_PTR) h );
}
__inline
void *
ULongToHandle(
const unsigned long h
)
{
return((void *) (UINT_PTR) h );
}
__inline
void *
LongToHandle(
const long h
)
{
return((void *) (INT_PTR) h );
}
__inline
unsigned long
PtrToUlong(
const void *p
)
{
return((unsigned long) (ULONG_PTR) p );
}
__inline
unsigned int
PtrToUint(
const void *p
)
{
return((unsigned int) (UINT_PTR) p );
}
__inline
unsigned short
PtrToUshort(
const void *p
)
{
return((unsigned short) (unsigned long) (ULONG_PTR) p );
}
__inline
long
PtrToLong(
const void *p
)
{
return((long) (LONG_PTR) p );
}
__inline
int
PtrToInt(
const void *p
)
{
return((int) (INT_PTR) p );
}
__inline
short
PtrToShort(
const void *p
)
{
return((short) (long) (LONG_PTR) p );
}
__inline
void *
IntToPtr(
const int i
)
// Caution: IntToPtr() sign-extends the int value.
{
return( (void *)(INT_PTR)i );
}
__inline
void *
UIntToPtr(
const unsigned int ui
)
// Caution: UIntToPtr() zero-extends the unsigned int value.
{
return( (void *)(UINT_PTR)ui );
}
__inline
void *
LongToPtr(
const long l
)
// Caution: LongToPtr() sign-extends the long value.
{
return( (void *)(LONG_PTR)l );
}
__inline
void *
ULongToPtr(
const unsigned long ul
)
// Caution: ULongToPtr() zero-extends the unsigned long value.
{
return( (void *)(ULONG_PTR)ul );
}
__inline
void *
ShortToPtr(
const short s
)
// Caution: ShortToPtr() sign-extends the short value.
{
return( (void *)(INT_PTR)s );
}
__inline
void *
UShortToPtr(
const unsigned short us
)
// Caution: UShortToPtr() zero-extends the unsigned short value.
{
return( (void *)(UINT_PTR)us );
}
#else // !defined(HOST_64BIT)
#define HandleToULong( h ) ((ULONG)(ULONG_PTR)(h) )
#define HandleToLong( h ) ((LONG)(LONG_PTR) (h) )
#define ULongToHandle( ul ) ((HANDLE)(ULONG_PTR) (ul) )
#define LongToHandle( h ) ((HANDLE)(LONG_PTR) (h) )
#define PtrToUlong( p ) ((ULONG)(ULONG_PTR) (p) )
#define PtrToLong( p ) ((LONG)(LONG_PTR) (p) )
#define PtrToUint( p ) ((UINT)(UINT_PTR) (p) )
#define PtrToInt( p ) ((INT)(INT_PTR) (p) )
#define PtrToUshort( p ) ((unsigned short)(ULONG_PTR)(p) )
#define PtrToShort( p ) ((short)(LONG_PTR)(p) )
#define IntToPtr( i ) ((VOID *)(INT_PTR)((int)(i)))
#define UIntToPtr( ui ) ((VOID *)(UINT_PTR)((unsigned int)(ui)))
#define LongToPtr( l ) ((VOID *)(LONG_PTR)((long)(l)))
#define ULongToPtr( ul ) ((VOID *)(ULONG_PTR)((unsigned long)(ul)))
#define ShortToPtr( s ) ((VOID *)(INT_PTR)((short)(s)))
#define UShortToPtr( us ) ((VOID *)(UINT_PTR)((unsigned short)(us)))
#endif // !defined(HOST_64BIT)
#else
typedef _W64 __int32 INT_PTR;
typedef _W64 unsigned __int32 UINT_PTR;
typedef _W64 __int32 LONG_PTR;
typedef _W64 unsigned __int32 ULONG_PTR, *PULONG_PTR;
typedef _W64 unsigned __int32 DWORD_PTR, *PDWORD_PTR;
/* maximum signed 32 bit value */
#define LONG_PTR_MAX 2147483647L
/* maximum unsigned 32 bit value */
#define ULONG_PTR_MAX 0xffffffffUL
#ifndef SIZE_MAX
#define SIZE_MAX UINT_MAX
#endif
#define __int3264 __int32
#define HandleToULong( h ) ((ULONG)(ULONG_PTR)(h) )
#define HandleToLong( h ) ((LONG)(LONG_PTR) (h) )
#define ULongToHandle( ul ) ((HANDLE)(ULONG_PTR) (ul) )
#define LongToHandle( h ) ((HANDLE)(LONG_PTR) (h) )
#define PtrToUlong( p ) ((ULONG)(ULONG_PTR) (p) )
#define PtrToLong( p ) ((LONG)(LONG_PTR) (p) )
#define PtrToUint( p ) ((UINT)(UINT_PTR) (p) )
#define PtrToInt( p ) ((INT)(INT_PTR) (p) )
#define PtrToUshort( p ) ((unsigned short)(ULONG_PTR)(p) )
#define PtrToShort( p ) ((short)(LONG_PTR)(p) )
#define IntToPtr( i ) ((VOID *)(INT_PTR)((int)i))
#define UIntToPtr( ui ) ((VOID *)(UINT_PTR)((unsigned int)ui))
#define LongToPtr( l ) ((VOID *)(LONG_PTR)((long)l))
#define ULongToPtr( ul ) ((VOID *)(ULONG_PTR)((unsigned long)ul))
#define ShortToPtr( s ) ((VOID *)(INT_PTR)((short)s))
#define UShortToPtr( us ) ((VOID *)(UINT_PTR)((unsigned short)us))
#endif
#define HandleToUlong(h) HandleToULong(h)
#define UlongToHandle(ul) ULongToHandle(ul)
#define UlongToPtr(ul) ULongToPtr(ul)
#define UintToPtr(ui) UIntToPtr(ui)
typedef ULONG_PTR SIZE_T, *PSIZE_T;
typedef LONG_PTR SSIZE_T, *PSSIZE_T;
#ifndef SIZE_T_MAX
#define SIZE_T_MAX ULONG_PTR_MAX
#endif // SIZE_T_MAX
#ifndef SSIZE_T_MAX
#define SSIZE_T_MAX LONG_PTR_MAX
#endif
#ifndef SSIZE_T_MIN
#define SSIZE_T_MIN (ssize_t)I64(0x8000000000000000)
#endif
#ifndef PAL_STDCPP_COMPAT
#if defined(__APPLE_CC__) || defined(__linux__)
#ifdef HOST_64BIT
typedef unsigned long size_t;
typedef long ptrdiff_t;
#else // !HOST_64BIT
typedef unsigned int size_t;
typedef int ptrdiff_t;
#endif // !HOST_64BIT
#else
typedef ULONG_PTR size_t;
typedef LONG_PTR ptrdiff_t;
#endif
#endif // !PAL_STDCPP_COMPAT
#define _SIZE_T_DEFINED
typedef LONG_PTR LPARAM;
#define _PTRDIFF_T_DEFINED
#ifdef _MINGW_
// We need to define _PTRDIFF_T to make sure ptrdiff_t doesn't get defined
// again by system headers - but only for MinGW.
#define _PTRDIFF_T
#endif
typedef char16_t WCHAR;
#ifndef PAL_STDCPP_COMPAT
#if defined(__linux__)
#ifdef HOST_64BIT
typedef long int intptr_t;
typedef unsigned long int uintptr_t;
#else // !HOST_64BIT
typedef int intptr_t;
typedef unsigned int uintptr_t;
#endif // !HOST_64BIT
#else
typedef INT_PTR intptr_t;
typedef UINT_PTR uintptr_t;
#endif
#endif // PAL_STDCPP_COMPAT
#define _INTPTR_T_DEFINED
#define _UINTPTR_T_DEFINED
typedef DWORD LCID;
typedef PDWORD PLCID;
typedef WORD LANGID;
typedef DWORD LCTYPE;
typedef WCHAR *PWCHAR;
typedef WCHAR *LPWCH, *PWCH;
typedef CONST WCHAR *LPCWCH, *PCWCH;
typedef WCHAR *NWPSTR;
typedef WCHAR *LPWSTR, *PWSTR;
typedef CONST WCHAR *LPCWSTR, *PCWSTR;
typedef char CHAR;
typedef CHAR *PCHAR;
typedef CHAR *LPCH, *PCH;
typedef CONST CHAR *LPCCH, *PCCH;
typedef CHAR *NPSTR;
typedef CHAR *LPSTR, *PSTR;
typedef CONST CHAR *LPCSTR, *PCSTR;
#ifdef UNICODE
typedef WCHAR TCHAR;
typedef WCHAR _TCHAR;
#else
typedef CHAR TCHAR;
typedef CHAR _TCHAR;
#endif
typedef TCHAR *PTCHAR;
typedef TCHAR *LPTSTR, *PTSTR;
typedef CONST TCHAR *LPCTSTR;
#define MAKEWORD(a, b) ((WORD)(((BYTE)((DWORD_PTR)(a) & 0xff)) | ((WORD)((BYTE)((DWORD_PTR)(b) & 0xff))) << 8))
#define MAKELONG(a, b) ((LONG)(((WORD)((DWORD_PTR)(a) & 0xffff)) | ((DWORD)((WORD)((DWORD_PTR)(b) & 0xffff))) << 16))
#define LOWORD(l) ((WORD)((DWORD_PTR)(l) & 0xffff))
#define HIWORD(l) ((WORD)((DWORD_PTR)(l) >> 16))
#define LOBYTE(w) ((BYTE)((DWORD_PTR)(w) & 0xff))
#define HIBYTE(w) ((BYTE)((DWORD_PTR)(w) >> 8))
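// Worked example (added for clarity): MAKELONG(0x1111, 0x2222) evaluates to 0x22221111,
// and LOWORD()/HIWORD() of that result recover 0x1111 and 0x2222 respectively.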
typedef VOID *HANDLE;
typedef HANDLE HWND;
typedef struct __PAL_RemoteHandle__ { HANDLE h; } *RHANDLE;
typedef HANDLE *PHANDLE;
typedef HANDLE *LPHANDLE;
#define INVALID_HANDLE_VALUE ((VOID *)(-1))
#define INVALID_FILE_SIZE ((DWORD)0xFFFFFFFF)
#define INVALID_FILE_ATTRIBUTES ((DWORD) -1)
typedef HANDLE HMODULE;
typedef HANDLE HINSTANCE;
typedef HANDLE HGLOBAL;
typedef HANDLE HLOCAL;
typedef HANDLE HRSRC;
typedef LONG HRESULT;
typedef LONG NTSTATUS;
typedef union _LARGE_INTEGER {
struct {
#if BIGENDIAN
LONG HighPart;
DWORD LowPart;
#else
DWORD LowPart;
LONG HighPart;
#endif
} u;
LONGLONG QuadPart;
} LARGE_INTEGER, *PLARGE_INTEGER;
#ifndef GUID_DEFINED
typedef struct _GUID {
ULONG Data1; // NOTE: diff from Win32, for LP64
USHORT Data2;
USHORT Data3;
UCHAR Data4[ 8 ];
} GUID;
typedef const GUID *LPCGUID;
#define GUID_DEFINED
#endif // !GUID_DEFINED
typedef struct _FILETIME {
DWORD dwLowDateTime;
DWORD dwHighDateTime;
} FILETIME, *PFILETIME, *LPFILETIME;
/* Code Page Default Values */
#define CP_ACP 0 /* default to ANSI code page */
#define CP_UTF8 65001 /* UTF-8 translation */
typedef PVOID PSID;
#ifdef __cplusplus
}
#endif
#endif // __PAL_MSTYPES_H__
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*++
--*/
////////////////////////////////////////////////////////////////////////
// Extensions to the usual posix header files
////////////////////////////////////////////////////////////////////////
#ifndef __PAL_MSTYPES_H__
#define __PAL_MSTYPES_H__
#ifdef __cplusplus
extern "C" {
#endif
////////////////////////////////////////////////////////////////////////
// calling convention stuff
////////////////////////////////////////////////////////////////////////
#ifdef __cplusplus
#define EXTERN_C extern "C"
#else
#define EXTERN_C
#endif // __cplusplus
#ifndef _MSC_VER
// Note: Win32-hosted GCC predefines __stdcall and __cdecl, but Unix-
// hosted GCC does not.
#ifdef __i386__
#if !defined(__stdcall)
#define __stdcall __attribute__((stdcall))
#endif
#if !defined(_stdcall)
#define _stdcall __stdcall
#endif
#if !defined(__cdecl)
#define __cdecl __attribute__((cdecl))
#endif
#if !defined(_cdecl)
#define _cdecl __cdecl
#endif
#if !defined(CDECL)
#define CDECL __cdecl
#endif
#else // !defined(__i386__)
#define __stdcall
#define _stdcall
#define __cdecl
#define _cdecl
#define CDECL
// On ARM __fastcall is ignored and causes a compile error
#if !defined(PAL_STDCPP_COMPAT) || defined(__arm__)
# undef __fastcall
# undef _fastcall
# define __fastcall
# define _fastcall
#endif // !defined(PAL_STDCPP_COMPAT) || defined(__arm__)
#endif // !defined(__i386__)
#define CALLBACK __cdecl
#if !defined(_declspec)
#define _declspec(e) __declspec(e)
#endif
#if defined(_VAC_) && defined(__cplusplus)
#define __inline inline
#endif
#define __forceinline inline
#endif // !_MSC_VER
#ifdef _MSC_VER
#if defined(PAL_IMPLEMENTATION)
#define PALIMPORT
#else
#define PALIMPORT __declspec(dllimport)
#endif
#define DLLEXPORT __declspec(dllexport)
#define PAL_NORETURN __declspec(noreturn)
#else
#define PALIMPORT
#define DLLEXPORT __attribute__((visibility("default")))
#define PAL_NORETURN __attribute__((noreturn))
#endif
#define PALAPI DLLEXPORT __cdecl
#define PALAPI_NOEXPORT __cdecl
#define PALAPIV __cdecl
////////////////////////////////////////////////////////////////////////
// Type attribute stuff
////////////////////////////////////////////////////////////////////////
#define CONST const
#define IN
#define OUT
#define OPTIONAL
#define FAR
#ifdef UNICODE
#define __TEXT(x) L##x
#else
#define __TEXT(x) x
#endif
#define TEXT(x) __TEXT(x)
////////////////////////////////////////////////////////////////////////
// Some special values
////////////////////////////////////////////////////////////////////////
#ifndef TRUE
#define TRUE 1
#endif
#ifndef FALSE
#define FALSE 0
#endif
////////////////////////////////////////////////////////////////////////
// Misc. type helpers
////////////////////////////////////////////////////////////////////////
#ifdef _MSC_VER
// MSVC's way of declaring large integer constants
// If you define these in one step, without the _HELPER macros, you
// get extra whitespace when composing these with other concatenating macros.
#define I64_HELPER(x) x ## i64
#define I64(x) I64_HELPER(x)
#define UI64_HELPER(x) x ## ui64
#define UI64(x) UI64_HELPER(x)
#else // _MSC_VER
// GCC's way of declaring large integer constants
// If you define these in one step, without the _HELPER macros, you
// get extra whitespace when composing these with other concatenating macros.
#define I64_HELPER(x) x ## LL
#define I64(x) I64_HELPER(x)
#define UI64_HELPER(x) x ## ULL
#define UI64(x) UI64_HELPER(x)
#endif // _MSC_VER
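// Worked example (added for clarity): I64(42) expands via I64_HELPER to the single
// token 42i64 under MSVC and 42LL under GCC, with no stray whitespace inside it.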
////////////////////////////////////////////////////////////////////////
// Misc. types
////////////////////////////////////////////////////////////////////////
#ifndef _MSC_VER
// A bunch of source files (e.g. most of the ndp tree) include pal.h
// but are written to be LLP64, not LP64. (LP64 => long = 64 bits
// LLP64 => longs = 32 bits, long long = 64 bits)
//
// To handle this difference, we #define long to be int (and thus 32 bits) when
// compiling those files. (See the bottom of this file or search for
// #define long to see where we do this.)
//
// But this fix is more complicated than it seems, because we also use the
// preprocessor to #define __int64 to long for LP64 architectures (__int64
// isn't a builtin in gcc). We don't want __int64 to be an int (by cascading
// macro rules). So we play this little trick below where we add
// __cppmungestrip before "long", which is what we're really #defining __int64
// to. The preprocessor sees __cppmungestriplong as something different than
// long, so it doesn't replace it with int. Then during the cppmunge phase, we
// remove the __cppmungestrip part, leaving long for the compiler to see.
//
// Note that we can't just use a typedef to define __int64 as long before
// #defining long because typedefed types can't be signedness-agnostic (i.e.
// they must be either signed or unsigned) and we want to be able to use
// __int64 as though it were intrinsic
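// Illustrative sketch (editorial addition, not in the original header) of
// the cascade being avoided:
//   #define __int64 long   // naive definition
//   #define long int       // the LLP64 compatibility define below
//   __int64 v;             // would re-expand to "int v" -- wrong width
// Spelling the replacement as __cppmungestriplong stops that second
// expansion; the cppmunge pass later strips the prefix, leaving "long".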
#ifdef HOST_64BIT
#define __int64 long
#else // HOST_64BIT
#define __int64 long long
#endif // HOST_64BIT
#define __int32 int
#define __int16 short int
#define __int8 char // assumes char is signed
#endif // _MSC_VER
#ifndef PAL_STDCPP_COMPAT
// Defined in gnu's types.h. For non PAL_IMPLEMENTATION system
// includes are not included, so we need to define them.
#ifndef PAL_IMPLEMENTATION
typedef __int64 int64_t;
typedef unsigned __int64 uint64_t;
typedef __int32 int32_t;
typedef unsigned __int32 uint32_t;
typedef __int16 int16_t;
typedef unsigned __int16 uint16_t;
typedef __int8 int8_t;
typedef unsigned __int8 uint8_t;
#endif // PAL_IMPLEMENTATION
#ifndef _MSC_VER
#if HOST_64BIT
typedef long double LONG_DOUBLE;
#endif
#endif // _MSC_VER
#endif // !PAL_STDCPP_COMPAT
typedef void VOID;
typedef int LONG; // NOTE: diff from windows.h, for LP64 compat
typedef unsigned int ULONG; // NOTE: diff from windows.h, for LP64 compat
typedef __int64 LONGLONG;
typedef unsigned __int64 ULONGLONG;
typedef ULONGLONG DWORD64;
typedef DWORD64 *PDWORD64;
typedef LONGLONG *PLONG64;
typedef ULONGLONG *PULONG64;
typedef ULONGLONG *PULONGLONG;
typedef ULONG *PULONG;
typedef short SHORT;
typedef SHORT *PSHORT;
typedef unsigned short USHORT;
typedef USHORT *PUSHORT;
typedef unsigned char UCHAR;
typedef UCHAR *PUCHAR;
typedef char *PSZ;
typedef ULONGLONG DWORDLONG;
typedef unsigned int DWORD; // NOTE: diff from windows.h, for LP64 compat
typedef unsigned int DWORD32, *PDWORD32;
typedef int BOOL;
typedef unsigned char BYTE;
typedef unsigned short WORD;
typedef float FLOAT;
typedef double DOUBLE;
typedef BOOL *PBOOL;
typedef BOOL *LPBOOL;
typedef BYTE *PBYTE;
typedef BYTE *LPBYTE;
typedef const BYTE *LPCBYTE;
typedef int *PINT;
typedef int *LPINT;
typedef WORD *PWORD;
typedef WORD *LPWORD;
typedef LONG *LPLONG;
typedef LPLONG PLONG;
typedef DWORD *PDWORD;
typedef DWORD *LPDWORD;
typedef void *PVOID;
typedef void *LPVOID;
typedef CONST void *LPCVOID;
typedef int INT;
typedef unsigned int UINT;
typedef unsigned int *PUINT;
typedef BYTE BOOLEAN;
typedef BOOLEAN *PBOOLEAN;
typedef unsigned __int8 UINT8;
typedef signed __int8 INT8;
typedef unsigned __int16 UINT16;
typedef signed __int16 INT16;
typedef unsigned __int32 UINT32, *PUINT32;
typedef signed __int32 INT32, *PINT32;
typedef unsigned __int64 UINT64, *PUINT64;
typedef signed __int64 INT64, *PINT64;
typedef unsigned __int32 ULONG32, *PULONG32;
typedef signed __int32 LONG32, *PLONG32;
typedef unsigned __int64 ULONG64;
typedef signed __int64 LONG64;
#if defined(HOST_X86) && _MSC_VER >= 1300
#define _W64 __w64
#else
#define _W64
#endif
#ifdef HOST_64BIT
#define _atoi64 (__int64)atoll
typedef __int64 INT_PTR, *PINT_PTR;
typedef unsigned __int64 UINT_PTR, *PUINT_PTR;
typedef __int64 LONG_PTR, *PLONG_PTR;
typedef unsigned __int64 ULONG_PTR, *PULONG_PTR;
typedef unsigned __int64 DWORD_PTR, *PDWORD_PTR;
/* maximum signed 64 bit value */
#define LONG_PTR_MAX I64(9223372036854775807)
/* maximum unsigned 64 bit value */
#define ULONG_PTR_MAX UI64(0xffffffffffffffff)
#ifndef SIZE_MAX
#define SIZE_MAX _UI64_MAX
#endif
#define __int3264 __int64
#if !defined(HOST_64BIT)
__inline
unsigned long
HandleToULong(
const void *h
)
{
return((unsigned long) (ULONG_PTR) h );
}
__inline
long
HandleToLong(
const void *h
)
{
return((long) (LONG_PTR) h );
}
__inline
void *
ULongToHandle(
const unsigned long h
)
{
return((void *) (UINT_PTR) h );
}
__inline
void *
LongToHandle(
const long h
)
{
return((void *) (INT_PTR) h );
}
__inline
unsigned long
PtrToUlong(
const void *p
)
{
return((unsigned long) (ULONG_PTR) p );
}
__inline
unsigned int
PtrToUint(
const void *p
)
{
return((unsigned int) (UINT_PTR) p );
}
__inline
unsigned short
PtrToUshort(
const void *p
)
{
return((unsigned short) (unsigned long) (ULONG_PTR) p );
}
__inline
long
PtrToLong(
const void *p
)
{
return((long) (LONG_PTR) p );
}
__inline
int
PtrToInt(
const void *p
)
{
return((int) (INT_PTR) p );
}
__inline
short
PtrToShort(
const void *p
)
{
return((short) (long) (LONG_PTR) p );
}
__inline
void *
IntToPtr(
const int i
)
// Caution: IntToPtr() sign-extends the int value.
{
return( (void *)(INT_PTR)i );
}
__inline
void *
UIntToPtr(
const unsigned int ui
)
// Caution: UIntToPtr() zero-extends the unsigned int value.
{
return( (void *)(UINT_PTR)ui );
}
__inline
void *
LongToPtr(
const long l
)
// Caution: LongToPtr() sign-extends the long value.
{
return( (void *)(LONG_PTR)l );
}
__inline
void *
ULongToPtr(
const unsigned long ul
)
// Caution: ULongToPtr() zero-extends the unsigned long value.
{
return( (void *)(ULONG_PTR)ul );
}
__inline
void *
ShortToPtr(
const short s
)
// Caution: ShortToPtr() sign-extends the short value.
{
return( (void *)(INT_PTR)s );
}
__inline
void *
UShortToPtr(
const unsigned short us
)
// Caution: UShortToPtr() zero-extends the unsigned short value.
{
return( (void *)(UINT_PTR)us );
}
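/* Worked example (editorial illustration, not in the original header): on a
   64-bit target IntToPtr(-1) sign-extends to 0xFFFFFFFFFFFFFFFF, while
   UIntToPtr(0xFFFFFFFFu) zero-extends to 0x00000000FFFFFFFF -- the Caution
   comments above record which conversion each helper performs. */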
#else // !defined(HOST_64BIT)
#define HandleToULong( h ) ((ULONG)(ULONG_PTR)(h) )
#define HandleToLong( h ) ((LONG)(LONG_PTR) (h) )
#define ULongToHandle( ul ) ((HANDLE)(ULONG_PTR) (ul) )
#define LongToHandle( h ) ((HANDLE)(LONG_PTR) (h) )
#define PtrToUlong( p ) ((ULONG)(ULONG_PTR) (p) )
#define PtrToLong( p ) ((LONG)(LONG_PTR) (p) )
#define PtrToUint( p ) ((UINT)(UINT_PTR) (p) )
#define PtrToInt( p ) ((INT)(INT_PTR) (p) )
#define PtrToUshort( p ) ((unsigned short)(ULONG_PTR)(p) )
#define PtrToShort( p ) ((short)(LONG_PTR)(p) )
#define IntToPtr( i ) ((VOID *)(INT_PTR)((int)(i)))
#define UIntToPtr( ui ) ((VOID *)(UINT_PTR)((unsigned int)(ui)))
#define LongToPtr( l ) ((VOID *)(LONG_PTR)((long)(l)))
#define ULongToPtr( ul ) ((VOID *)(ULONG_PTR)((unsigned long)(ul)))
#define ShortToPtr( s ) ((VOID *)(INT_PTR)((short)(s)))
#define UShortToPtr( us ) ((VOID *)(UINT_PTR)((unsigned short)(us)))
#endif // !defined(HOST_64BIT)
#else
typedef _W64 __int32 INT_PTR;
typedef _W64 unsigned __int32 UINT_PTR;
typedef _W64 __int32 LONG_PTR;
typedef _W64 unsigned __int32 ULONG_PTR, *PULONG_PTR;
typedef _W64 unsigned __int32 DWORD_PTR, *PDWORD_PTR;
/* maximum signed 32 bit value */
#define LONG_PTR_MAX 2147483647L
/* maximum unsigned 32 bit value */
#define ULONG_PTR_MAX 0xffffffffUL
#ifndef SIZE_MAX
#define SIZE_MAX UINT_MAX
#endif
#define __int3264 __int32
#define HandleToULong( h ) ((ULONG)(ULONG_PTR)(h) )
#define HandleToLong( h ) ((LONG)(LONG_PTR) (h) )
#define ULongToHandle( ul ) ((HANDLE)(ULONG_PTR) (ul) )
#define LongToHandle( h ) ((HANDLE)(LONG_PTR) (h) )
#define PtrToUlong( p ) ((ULONG)(ULONG_PTR) (p) )
#define PtrToLong( p ) ((LONG)(LONG_PTR) (p) )
#define PtrToUint( p ) ((UINT)(UINT_PTR) (p) )
#define PtrToInt( p ) ((INT)(INT_PTR) (p) )
#define PtrToUshort( p ) ((unsigned short)(ULONG_PTR)(p) )
#define PtrToShort( p ) ((short)(LONG_PTR)(p) )
#define IntToPtr( i ) ((VOID *)(INT_PTR)((int)i))
#define UIntToPtr( ui ) ((VOID *)(UINT_PTR)((unsigned int)ui))
#define LongToPtr( l ) ((VOID *)(LONG_PTR)((long)l))
#define ULongToPtr( ul ) ((VOID *)(ULONG_PTR)((unsigned long)ul))
#define ShortToPtr( s ) ((VOID *)(INT_PTR)((short)s))
#define UShortToPtr( us ) ((VOID *)(UINT_PTR)((unsigned short)us))
#endif
#define HandleToUlong(h) HandleToULong(h)
#define UlongToHandle(ul) ULongToHandle(ul)
#define UlongToPtr(ul) ULongToPtr(ul)
#define UintToPtr(ui) UIntToPtr(ui)
typedef ULONG_PTR SIZE_T, *PSIZE_T;
typedef LONG_PTR SSIZE_T, *PSSIZE_T;
#ifndef SIZE_T_MAX
#define SIZE_T_MAX ULONG_PTR_MAX
#endif // SIZE_T_MAX
#ifndef SSIZE_T_MAX
#define SSIZE_T_MAX LONG_PTR_MAX
#endif
#ifndef SSIZE_T_MIN
#define SSIZE_T_MIN (ssize_t)I64(0x8000000000000000)
#endif
#ifndef PAL_STDCPP_COMPAT
#if defined(__APPLE_CC__) || defined(__linux__)
#ifdef HOST_64BIT
typedef unsigned long size_t;
typedef long ptrdiff_t;
#else // !HOST_64BIT
typedef unsigned int size_t;
typedef int ptrdiff_t;
#endif // !HOST_64BIT
#else
typedef ULONG_PTR size_t;
typedef LONG_PTR ptrdiff_t;
#endif
#endif // !PAL_STDCPP_COMPAT
#define _SIZE_T_DEFINED
typedef LONG_PTR LPARAM;
#define _PTRDIFF_T_DEFINED
#ifdef _MINGW_
// We need to define _PTRDIFF_T to make sure ptrdiff_t doesn't get defined
// again by system headers - but only for MinGW.
#define _PTRDIFF_T
#endif
typedef char16_t WCHAR;
#ifndef PAL_STDCPP_COMPAT
#if defined(__linux__)
#ifdef HOST_64BIT
typedef long int intptr_t;
typedef unsigned long int uintptr_t;
#else // !HOST_64BIT
typedef int intptr_t;
typedef unsigned int uintptr_t;
#endif // !HOST_64BIT
#else
typedef INT_PTR intptr_t;
typedef UINT_PTR uintptr_t;
#endif
#endif // PAL_STDCPP_COMPAT
#define _INTPTR_T_DEFINED
#define _UINTPTR_T_DEFINED
typedef DWORD LCID;
typedef PDWORD PLCID;
typedef WORD LANGID;
typedef DWORD LCTYPE;
typedef WCHAR *PWCHAR;
typedef WCHAR *LPWCH, *PWCH;
typedef CONST WCHAR *LPCWCH, *PCWCH;
typedef WCHAR *NWPSTR;
typedef WCHAR *LPWSTR, *PWSTR;
typedef CONST WCHAR *LPCWSTR, *PCWSTR;
typedef char CHAR;
typedef CHAR *PCHAR;
typedef CHAR *LPCH, *PCH;
typedef CONST CHAR *LPCCH, *PCCH;
typedef CHAR *NPSTR;
typedef CHAR *LPSTR, *PSTR;
typedef CONST CHAR *LPCSTR, *PCSTR;
#ifdef UNICODE
typedef WCHAR TCHAR;
typedef WCHAR _TCHAR;
#else
typedef CHAR TCHAR;
typedef CHAR _TCHAR;
#endif
typedef TCHAR *PTCHAR;
typedef TCHAR *LPTSTR, *PTSTR;
typedef CONST TCHAR *LPCTSTR;
#define MAKEWORD(a, b) ((WORD)(((BYTE)((DWORD_PTR)(a) & 0xff)) | ((WORD)((BYTE)((DWORD_PTR)(b) & 0xff))) << 8))
#define MAKELONG(a, b) ((LONG)(((WORD)((DWORD_PTR)(a) & 0xffff)) | ((DWORD)((WORD)((DWORD_PTR)(b) & 0xffff))) << 16))
#define LOWORD(l) ((WORD)((DWORD_PTR)(l) & 0xffff))
#define HIWORD(l) ((WORD)((DWORD_PTR)(l) >> 16))
#define LOBYTE(w) ((BYTE)((DWORD_PTR)(w) & 0xff))
#define HIBYTE(w) ((BYTE)((DWORD_PTR)(w) >> 8))
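// Worked example (editorial illustration): for l = 0x12345678,
// LOWORD(l) == 0x5678 and HIWORD(l) == 0x1234, and
// MAKELONG(0x5678, 0x1234) reassembles 0x12345678; likewise
// LOBYTE(0x5678) == 0x78 and HIBYTE(0x5678) == 0x56.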
typedef VOID *HANDLE;
typedef HANDLE HWND;
typedef struct __PAL_RemoteHandle__ { HANDLE h; } *RHANDLE;
typedef HANDLE *PHANDLE;
typedef HANDLE *LPHANDLE;
#define INVALID_HANDLE_VALUE ((VOID *)(-1))
#define INVALID_FILE_SIZE ((DWORD)0xFFFFFFFF)
#define INVALID_FILE_ATTRIBUTES ((DWORD) -1)
typedef HANDLE HMODULE;
typedef HANDLE HINSTANCE;
typedef HANDLE HGLOBAL;
typedef HANDLE HLOCAL;
typedef HANDLE HRSRC;
typedef LONG HRESULT;
typedef LONG NTSTATUS;
typedef union _LARGE_INTEGER {
struct {
#if BIGENDIAN
LONG HighPart;
DWORD LowPart;
#else
DWORD LowPart;
LONG HighPart;
#endif
} u;
LONGLONG QuadPart;
} LARGE_INTEGER, *PLARGE_INTEGER;
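// Usage sketch (editorial illustration): the union exposes one 64-bit value
// under two views, e.g.
//   LARGE_INTEGER li; li.QuadPart = I64(0x0000000100000002);
//   // on a little-endian build: li.u.HighPart == 1, li.u.LowPart == 2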
#ifndef GUID_DEFINED
typedef struct _GUID {
ULONG Data1; // NOTE: diff from Win32, for LP64
USHORT Data2;
USHORT Data3;
UCHAR Data4[ 8 ];
} GUID;
typedef const GUID *LPCGUID;
#define GUID_DEFINED
#endif // !GUID_DEFINED
typedef struct _FILETIME {
DWORD dwLowDateTime;
DWORD dwHighDateTime;
} FILETIME, *PFILETIME, *LPFILETIME;
/* Code Page Default Values */
#define CP_ACP 0 /* default to ANSI code page */
#define CP_UTF8 65001 /* UTF-8 translation */
typedef PVOID PSID;
#ifdef __cplusplus
}
#endif
#endif // __PAL_MSTYPES_H__
| -1 |
dotnet/runtime | 66,007 | [monoapi] Add mono_method_get_unmanaged_callers_only_ftnptr | Like `RuntimeMethodHandle.GetFunctionPointer`, but callable from native code | lambdageek | 2022-03-01T15:38:09Z | 2022-03-03T15:38:37Z | faa272f860f83802a9cee65619e6e2e841b05433 | 1cfa6d6964d890f9673b25d2165532f9a43710a7 | [monoapi] Add mono_method_get_unmanaged_callers_only_ftnptr. Like `RuntimeMethodHandle.GetFunctionPointer`, but callable from native code | ./src/native/corehost/bundle/manifest.h | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
#ifndef __MANIFEST_H__
#define __MANIFEST_H__
#include <list>
#include "file_entry.h"
#include "header.h"
namespace bundle
{
// Bundle Manifest contains:
// Series of file entries (for each embedded file)
class manifest_t
{
public:
manifest_t()
: m_files_need_extraction(false)
{
}
std::vector<file_entry_t> files;
static manifest_t read(reader_t &reader, const header_t &header);
bool files_need_extraction() const
{
return m_files_need_extraction;
}
private:
bool m_files_need_extraction;
};
}
#endif // __MANIFEST_H__
| -1 |
dotnet/runtime | 66,007 | [monoapi] Add mono_method_get_unmanaged_callers_only_ftnptr | Like `RuntimeMethodHandle.GetFunctionPointer`, but callable from native code | lambdageek | 2022-03-01T15:38:09Z | 2022-03-03T15:38:37Z | faa272f860f83802a9cee65619e6e2e841b05433 | 1cfa6d6964d890f9673b25d2165532f9a43710a7 | [monoapi] Add mono_method_get_unmanaged_callers_only_ftnptr. Like `RuntimeMethodHandle.GetFunctionPointer`, but callable from native code | ./src/mono/mono/eventpipe/test/ep-session-tests.c | #if defined(_MSC_VER) && defined(_DEBUG)
#include "ep-tests-debug.h"
#endif
#include <eventpipe/ep.h>
#include <eventpipe/ep-config.h>
#include <eventpipe/ep-event.h>
#include <eventpipe/ep-session.h>
#include <eglib/test/test.h>
#define TEST_PROVIDER_NAME "MyTestProvider"
#define TEST_FILE "./ep_test_create_file.txt"
#ifdef _CRTDBG_MAP_ALLOC
static _CrtMemState eventpipe_memory_start_snapshot;
static _CrtMemState eventpipe_memory_end_snapshot;
static _CrtMemState eventpipe_memory_diff_snapshot;
#endif
static RESULT
test_session_setup (void)
{
#ifdef _CRTDBG_MAP_ALLOC
_CrtMemCheckpoint (&eventpipe_memory_start_snapshot);
#endif
return NULL;
}
static RESULT
test_create_delete_session (void)
{
RESULT result = NULL;
uint32_t test_location = 0;
EventPipeSession *test_session = NULL;
EventPipeProviderConfiguration provider_config;
EventPipeProviderConfiguration *current_provider_config = ep_provider_config_init (&provider_config, TEST_PROVIDER_NAME, 1, EP_EVENT_LEVEL_LOGALWAYS, "");
ep_raise_error_if_nok (current_provider_config != NULL);
test_location = 1;
EP_LOCK_ENTER (section1)
test_session = ep_session_alloc (
1,
TEST_FILE,
NULL,
EP_SESSION_TYPE_FILE,
EP_SERIALIZATION_FORMAT_NETTRACE_V4,
false,
1,
current_provider_config,
1,
NULL,
NULL);
EP_LOCK_EXIT (section1)
ep_raise_error_if_nok (test_session != NULL);
ep_on_exit:
ep_session_free (test_session);
ep_provider_config_fini (current_provider_config);
return result;
ep_on_error:
if (!result)
result = FAILED ("Failed at test location=%i", test_location);
ep_exit_error_handler ();
}
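/* (Editorial note, illustrative: these tests share a goto-style cleanup
   pattern -- ep_raise_error_if_nok jumps to ep_on_error, which formats a
   failure from the last test_location checkpoint and then runs the shared
   ep_on_exit teardown via ep_exit_error_handler before returning.) */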
static RESULT
test_add_session_providers (void)
{
RESULT result = NULL;
uint32_t test_location = 0;
EventPipeSession *test_session = NULL;
EventPipeSessionProvider *test_session_provider = NULL;
EventPipeProviderConfiguration provider_config;
EventPipeProviderConfiguration *current_provider_config = ep_provider_config_init (&provider_config, TEST_PROVIDER_NAME, 1, EP_EVENT_LEVEL_LOGALWAYS, "");
ep_raise_error_if_nok (current_provider_config != NULL);
test_location = 1;
EP_LOCK_ENTER (section1)
test_session = ep_session_alloc (
1,
TEST_FILE,
NULL,
EP_SESSION_TYPE_FILE,
EP_SERIALIZATION_FORMAT_NETTRACE_V4,
false,
1,
current_provider_config,
1,
NULL,
NULL);
ep_raise_error_if_nok_holding_lock (test_session != NULL, section1);
ep_session_start_streaming (test_session);
EP_LOCK_EXIT (section1)
test_location = 2;
EP_LOCK_ENTER (section2)
if (!ep_session_is_valid (test_session)) {
result = FAILED ("ep_session_is_valid returned false with session providers");
ep_raise_error_holding_lock (section2);
}
EP_LOCK_EXIT (section2)
test_location = 3;
test_session_provider = ep_session_provider_alloc (TEST_PROVIDER_NAME, 1, EP_EVENT_LEVEL_LOGALWAYS, "");
ep_raise_error_if_nok (test_session_provider != NULL);
test_location = 4;
EP_LOCK_ENTER (section3)
ep_session_add_session_provider (test_session, test_session_provider);
EP_LOCK_EXIT (section3)
test_session_provider = NULL;
EP_LOCK_ENTER (section4)
if (!ep_session_is_valid (test_session)) {
result = FAILED ("ep_session_is_valid returned false with session providers");
ep_raise_error_holding_lock (section4);
}
EP_LOCK_EXIT (section4)
test_location = 5;
ep_session_disable (test_session);
EP_LOCK_ENTER (section5)
if (ep_session_is_valid (test_session)) {
result = FAILED ("ep_session_is_valid returned true without session providers");
ep_raise_error_holding_lock (section5);
}
EP_LOCK_EXIT (section5)
ep_on_exit:
ep_session_free (test_session);
ep_provider_config_fini (current_provider_config);
return result;
ep_on_error:
if (!result)
result = FAILED ("Failed at test location=%i", test_location);
ep_exit_error_handler ();
}
static RESULT
test_session_special_get_set (void)
{
RESULT result = NULL;
uint32_t test_location = 0;
EventPipeSession *test_session = NULL;
EventPipeProviderConfiguration provider_config;
EventPipeProviderConfiguration *current_provider_config = ep_provider_config_init (&provider_config, TEST_PROVIDER_NAME, 1, EP_EVENT_LEVEL_LOGALWAYS, "");
ep_raise_error_if_nok (current_provider_config != NULL);
test_location = 1;
EP_LOCK_ENTER (section1)
test_session = ep_session_alloc (
1,
TEST_FILE,
NULL,
EP_SESSION_TYPE_FILE,
EP_SERIALIZATION_FORMAT_NETTRACE_V4,
false,
1,
current_provider_config,
1,
NULL,
NULL);
EP_LOCK_EXIT (section1)
ep_raise_error_if_nok (test_session != NULL);
test_location = 2;
if (ep_session_get_rundown_enabled (test_session)) {
result = FAILED ("ep_session_get_rundown_enabled returned true, should be false");
ep_raise_error ();
}
test_location = 3;
ep_session_set_rundown_enabled (test_session, true);
if (!ep_session_get_rundown_enabled (test_session)) {
result = FAILED ("ep_session_get_rundown_enabled returned false, should be true");
ep_raise_error ();
}
test_location = 4;
if (ep_session_get_streaming_enabled (test_session)) {
result = FAILED ("ep_session_get_ipc_streaming_enabled returned true, should be false");
ep_raise_error ();
}
test_location = 5;
ep_session_set_streaming_enabled (test_session, true);
if (!ep_session_get_streaming_enabled (test_session)) {
result = FAILED ("ep_session_set_ipc_streaming_enabled returned false, should be true");
ep_raise_error ();
}
ep_session_set_streaming_enabled (test_session, false);
test_location = 6;
if (!ep_session_get_wait_event (test_session)) {
result = FAILED ("ep_session_get_wait_event failed");
ep_raise_error ();
}
test_location = 7;
if (!ep_session_get_mask (test_session)) {
result = FAILED ("Unexpected session mask");
ep_raise_error ();
}
ep_on_exit:
ep_session_free (test_session);
ep_provider_config_fini (current_provider_config);
return result;
ep_on_error:
if (!result)
result = FAILED ("Failed at test location=%i", test_location);
ep_exit_error_handler ();
}
static RESULT
test_session_teardown (void)
{
#ifdef _CRTDBG_MAP_ALLOC
_CrtMemCheckpoint (&eventpipe_memory_end_snapshot);
if ( _CrtMemDifference( &eventpipe_memory_diff_snapshot, &eventpipe_memory_start_snapshot, &eventpipe_memory_end_snapshot) ) {
_CrtMemDumpStatistics( &eventpipe_memory_diff_snapshot );
return FAILED ("Memory leak detected!");
}
#endif
return NULL;
}
static Test ep_session_tests [] = {
{"test_session_setup", test_session_setup},
{"test_create_delete_session", test_create_delete_session},
{"test_add_session_providers", test_add_session_providers},
{"test_session_special_get_set", test_session_special_get_set},
{"test_session_teardown", test_session_teardown},
{NULL, NULL}
};
DEFINE_TEST_GROUP_INIT(ep_session_tests_init, ep_session_tests)
| -1 |
dotnet/runtime | 66,007 | [monoapi] Add mono_method_get_unmanaged_callers_only_ftnptr | Like `RuntimeMethodHandle.GetFunctionPointer`, but callable from native code | lambdageek | 2022-03-01T15:38:09Z | 2022-03-03T15:38:37Z | faa272f860f83802a9cee65619e6e2e841b05433 | 1cfa6d6964d890f9673b25d2165532f9a43710a7 | [monoapi] Add mono_method_get_unmanaged_callers_only_ftnptr. Like `RuntimeMethodHandle.GetFunctionPointer`, but callable from native code | ./src/coreclr/inc/metadatatracker.h | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
#ifndef _METADATATRACKER_H_
#define _METADATATRACKER_H_
#if METADATATRACKER_ENABLED
#define METADATATRACKER_ONLY(s) (s)
#include "winbase.h"
#include "winwrap.h"
#include "holder.h"
#include "contract.h"
#include <limits.h>
#include <wchar.h>
#include <stdio.h>
#include "stdmacros.h"
#include "metamodelpub.h"
#define NUM_MD_SECTIONS (TBL_COUNT + MDPoolCount)
#define STRING_POOL (TBL_COUNT + MDPoolStrings)
#define GUID_POOL (TBL_COUNT + MDPoolGuids)
#define BLOB_POOL (TBL_COUNT + MDPoolBlobs)
#define USERSTRING_POOL (TBL_COUNT + MDPoolUSBlobs)
class MetaDataTracker
{
LPWSTR m_ModuleName;
BYTE *m_MetadataBase;
SIZE_T m_MetadataSize;
MetaDataTracker *m_next;
BYTE *m_mdSections[NUM_MD_SECTIONS];
SIZE_T m_mdSectionSize[NUM_MD_SECTIONS];
SIZE_T m_mdSectionRowSize[NUM_MD_SECTIONS];
BOOL m_bActivated;
static BOOL s_bEnabled;
static MetaDataTracker *m_MDTrackers;
public:
// callback into IBCLogger.cpp. Done this crummy way because we can't include IBCLogger.h here nor link
// to IBCLogger.cpp
static void (*s_IBCLogMetaDataAccess)(const void *addr);
static void (*s_IBCLogMetaDataSearch)(const void *result);
MetaDataTracker(BYTE *baseAddress, DWORD mdSize, LPCWSTR modName)
{
CONTRACTL
{
CONSTRUCTOR_CHECK;
THROWS;
GC_NOTRIGGER;
INJECT_FAULT(ThrowOutOfMemory());
}
CONTRACTL_END;
m_ModuleName = NULL;
size_t len = wcslen(modName);
m_ModuleName = new WCHAR[len + 1];
NewArrayHolder<WCHAR> moduleNameHolder(m_ModuleName);
wcscpy_s((WCHAR *)m_ModuleName, len + 1, (WCHAR *)modName);
m_MetadataBase = baseAddress;
m_MetadataSize = mdSize;
m_next = m_MDTrackers;
m_MDTrackers = this;
memset (m_mdSections, 0, NUM_MD_SECTIONS*sizeof(BYTE*));
memset (m_mdSectionSize, 0, NUM_MD_SECTIONS*sizeof(SIZE_T));
moduleNameHolder.SuppressRelease();
}
~MetaDataTracker()
{
CONTRACTL
{
DESTRUCTOR_CHECK;
NOTHROW;
GC_NOTRIGGER;
FORBID_FAULT;
}
CONTRACTL_END;
// Surely if we are dying, we are being deactivated as well
Deactivate();
if (m_ModuleName)
delete m_ModuleName;
// Remove this tracker from the global list of trackers
MetaDataTracker *mdMod = m_MDTrackers;
_ASSERTE (mdMod && "Trying to delete metadata tracker where none exist");
// If ours is the first tracker
if (mdMod == this)
{
m_MDTrackers = mdMod->m_next;
mdMod->m_next = NULL;
}
else
{
// Now traverse through the list and maintain the prev ptr.
MetaDataTracker *mdModPrev = mdMod;
mdMod = mdMod->m_next;
while(mdMod)
{
if (mdMod == this)
{
mdModPrev->m_next = mdMod->m_next;
mdMod->m_next = NULL;
break;
}
mdModPrev = mdMod;
mdMod = mdMod->m_next;
}
}
}
static void Enable()
{ LIMITED_METHOD_CONTRACT;
s_bEnabled = TRUE;
}
static void Disable()
{ LIMITED_METHOD_CONTRACT;
s_bEnabled = FALSE;
}
static BOOL Enabled()
{ LIMITED_METHOD_CONTRACT;
return s_bEnabled;
}
static void NoteSection(DWORD secNum, void *address, size_t size, size_t rowSize)
{
STATIC_CONTRACT_NOTHROW;
STATIC_CONTRACT_GC_NOTRIGGER;
if (!Enabled())
return;
MetaDataTracker *mdMod = m_MDTrackers;
while( mdMod)
{
if (mdMod->NoteSectionInModule(secNum, address, size, rowSize))
return;
mdMod = mdMod->m_next;
}
}
// With logging disabled this quickly returns the address that was passed in
// this allows us to inline a smaller amount of code at callsites.
__forceinline static void* NoteAccess(void *address)
{
WRAPPER_NO_CONTRACT;
if (!Enabled())
return address;
return NoteAccessWorker(address);
}
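// Usage sketch (editorial illustration, not in the original header):
//   BYTE *pRow = (BYTE *)MetaDataTracker::NoteAccess(pRawRow);
// With tracking disabled this costs only the inlined Enabled() branch and
// returns the address unchanged; the NOINLINE worker keeps the logging
// path out of every call site.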
NOINLINE static void* NoteAccessWorker(void *address)
{
STATIC_CONTRACT_NOTHROW;
STATIC_CONTRACT_GC_NOTRIGGER;
if (s_IBCLogMetaDataAccess != NULL)
s_IBCLogMetaDataAccess(address);
return address;
}
__forceinline static void NoteSearch(void *result)
{
WRAPPER_NO_CONTRACT;
if (!Enabled())
return;
NoteSearchWorker(result);
}
NOINLINE static void NoteSearchWorker(void *result)
{
STATIC_CONTRACT_NOTHROW;
STATIC_CONTRACT_GC_NOTRIGGER;
if (s_IBCLogMetaDataSearch != NULL && result != NULL)
s_IBCLogMetaDataSearch(result);
}
static MetaDataTracker * FindTracker(BYTE *_MDBaseAddress)
{
LIMITED_METHOD_CONTRACT;
if (!Enabled())
return NULL;
MetaDataTracker *mdMod = m_MDTrackers;
while( mdMod)
{
if (mdMod->m_MetadataBase == _MDBaseAddress)
return mdMod;
mdMod = mdMod->m_next;
}
return NULL;
}
void Activate()
{
LIMITED_METHOD_CONTRACT;
m_bActivated = TRUE;
}
void Deactivate()
{
LIMITED_METHOD_CONTRACT;
m_bActivated = FALSE;
}
BOOL IsActivated()
{
LIMITED_METHOD_CONTRACT;
return m_bActivated;
}
static MetaDataTracker *GetOrCreateMetaDataTracker (BYTE *baseAddress, DWORD mdSize, LPCWSTR modName)
{
CONTRACT(MetaDataTracker *)
{
THROWS;
GC_NOTRIGGER;
INJECT_FAULT(ThrowOutOfMemory());
POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
}
CONTRACT_END;
MetaDataTracker *pTracker = NULL;
if (MetaDataTracker::Enabled())
{
pTracker = MetaDataTracker::FindTracker(baseAddress);
if (!pTracker)
{
FAULT_NOT_FATAL(); // It's ok - an OOM here is nonfatal
pTracker = new MetaDataTracker(baseAddress, mdSize, modName);
}
pTracker->Activate();
}
RETURN pTracker;
}
// Map a metadata address to a token for the purposes of the IBCLogger
static mdToken MapAddrToToken(const void *addr)
{
WRAPPER_NO_CONTRACT;
mdToken token = 0;
for (MetaDataTracker *mdMod = m_MDTrackers; mdMod; mdMod = mdMod->m_next)
{
token = mdMod->MapAddrToTokenInModule(addr);
if (token != 0)
break;
}
return token;
}
private:
// ***************************************************************************
// Helper functions
// ***************************************************************************
BOOL NoteSectionInModule(DWORD secNum, void *address, size_t size, size_t rowSize)
{
WRAPPER_NO_CONTRACT;
PREFAST_ASSUME(secNum < NUM_MD_SECTIONS);
if (address < m_MetadataBase || address >= (m_MetadataBase + m_MetadataSize))
return FALSE;
// This address range belongs to us but the tracker is not activated.
if (!IsActivated())
{
// _ASSERTE (!"Metadata Tracker not active but trying to access metadata");
return TRUE;
}
m_mdSections[secNum] = (BYTE *)address;
m_mdSectionSize[secNum] = size;
m_mdSectionRowSize[secNum] = rowSize;
return TRUE;
}
// Map a metadata address to a fake token for the purposes of the IBCLogger
mdToken MapAddrToTokenInModule(const void *addr)
{
LIMITED_METHOD_CONTRACT;
if (!IsActivated())
return 0;
BYTE *address = (BYTE *)addr;
if (address < m_MetadataBase || address >= (m_MetadataBase + m_MetadataSize))
return 0;
for (DWORD secNum = 0; secNum < NUM_MD_SECTIONS; secNum++)
{
if ((address >= m_mdSections[secNum]) && (address < m_mdSections[secNum] + m_mdSectionSize[secNum]))
{
DWORD rid = (DWORD)((address - m_mdSections[secNum])/m_mdSectionRowSize[secNum]);
if (secNum < TBL_COUNT)
rid++;
return TokenFromRid(rid, (secNum<<24));
}
}
return 0;
}
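// Worked example (editorial illustration): an address 3 full rows past the
// start of section 5 with a 16-byte row size yields rid 3, bumped to 4
// because table sections are 1-based, so TokenFromRid(4, 5<<24) maps the
// access to token 0x05000004.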
};
#else // METADATATRACKER_ENABLED
#define METADATATRACKER_ONLY(s)
#endif // METADATATRACKER_ENABLED
#endif // _METADATATRACKER_H_
| -1 |
dotnet/runtime | 66,007 | [monoapi] Add mono_method_get_unmanaged_callers_only_ftnptr | Like `RuntimeMethodHandle.GetFunctionPointer`, but callable from native code | lambdageek | 2022-03-01T15:38:09Z | 2022-03-03T15:38:37Z | faa272f860f83802a9cee65619e6e2e841b05433 | 1cfa6d6964d890f9673b25d2165532f9a43710a7 | [monoapi] Add mono_method_get_unmanaged_callers_only_ftnptr. Like `RuntimeMethodHandle.GetFunctionPointer`, but callable from native code | ./src/mono/mono/utils/os-event-unix.c | /**
* \file
* MonoOSEvent on Unix
*
* Author:
* Ludovic Henry ([email protected])
*
* Licensed under the MIT license. See LICENSE file in the project root for full license information.
*/
#include "os-event.h"
#include "atomic.h"
#include "mono-lazy-init.h"
#include "mono-threads.h"
#include "mono-time.h"
static mono_lazy_init_t status = MONO_LAZY_INIT_STATUS_NOT_INITIALIZED;
static mono_mutex_t signal_mutex;
static void
initialize (void)
{
mono_os_mutex_init (&signal_mutex);
}
void
mono_os_event_init (MonoOSEvent *event, gboolean initial)
{
g_assert (event);
mono_lazy_initialize (&status, initialize);
event->conds = g_ptr_array_new ();
event->signalled = initial;
}
void
mono_os_event_destroy (MonoOSEvent *event)
{
g_assert (mono_lazy_is_initialized (&status));
g_assert (event);
if (event->conds->len > 0)
g_error ("%s: cannot destroy osevent, there are still %d threads waiting on it", __func__, event->conds->len);
g_ptr_array_free (event->conds, TRUE);
}
static gboolean
mono_os_event_is_signalled (MonoOSEvent *event)
{
return event->signalled;
}
void
mono_os_event_set (MonoOSEvent *event)
{
gsize i;
g_assert (mono_lazy_is_initialized (&status));
g_assert (event);
mono_os_mutex_lock (&signal_mutex);
event->signalled = TRUE;
for (i = 0; i < event->conds->len; ++i)
mono_os_cond_signal ((mono_cond_t*) event->conds->pdata [i]);
mono_os_mutex_unlock (&signal_mutex);
}
void
mono_os_event_reset (MonoOSEvent *event)
{
g_assert (mono_lazy_is_initialized (&status));
g_assert (event);
mono_os_mutex_lock (&signal_mutex);
event->signalled = FALSE;
mono_os_mutex_unlock (&signal_mutex);
}
MonoOSEventWaitRet
mono_os_event_wait_one (MonoOSEvent *event, guint32 timeout, gboolean alertable)
{
return mono_os_event_wait_multiple (&event, 1, TRUE, timeout, alertable);
}
typedef struct {
guint32 ref;
MonoOSEvent event;
} OSEventWaitData;
static void
signal_and_unref (gpointer user_data)
{
OSEventWaitData *data;
data = (OSEventWaitData*) user_data;
mono_os_event_set (&data->event);
if (mono_atomic_dec_i32 ((gint32*) &data->ref) == 0) {
mono_os_event_destroy (&data->event);
g_free (data);
}
}
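/* (Editorial note, illustrative: data->ref starts at 2 in
 * mono_os_event_wait_multiple -- one reference for the installed interrupt
 * token that may run signal_and_unref, one for the waiter itself -- so
 * whichever side decrements the count to zero frees the OSEventWaitData.) */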
MonoOSEventWaitRet
mono_os_event_wait_multiple (MonoOSEvent **events, gsize nevents, gboolean waitall, guint32 timeout, gboolean alertable)
{
MonoOSEventWaitRet ret;
mono_cond_t signal_cond;
OSEventWaitData *data = NULL;
gboolean alerted;
gint64 start = 0;
gint i;
g_assert (mono_lazy_is_initialized (&status));
g_assert (events);
g_assert (nevents > 0);
g_assert (nevents <= MONO_OS_EVENT_WAIT_MAXIMUM_OBJECTS);
for (i = 0; i < nevents; ++i)
g_assert (events [i]);
if (alertable) {
data = g_new0 (OSEventWaitData, 1);
data->ref = 2;
mono_os_event_init (&data->event, FALSE);
alerted = FALSE;
mono_thread_info_install_interrupt (signal_and_unref, data, &alerted);
if (alerted) {
mono_os_event_destroy (&data->event);
g_free (data);
return MONO_OS_EVENT_WAIT_RET_ALERTED;
}
}
if (timeout != MONO_INFINITE_WAIT)
start = mono_msec_ticks ();
mono_os_cond_init (&signal_cond);
mono_os_mutex_lock (&signal_mutex);
for (i = 0; i < nevents; ++i)
g_ptr_array_add (events [i]->conds, &signal_cond);
if (alertable)
g_ptr_array_add (data->event.conds, &signal_cond);
for (;;) {
gint count, lowest;
gboolean signalled;
count = 0;
lowest = -1;
for (i = 0; i < nevents; ++i) {
if (mono_os_event_is_signalled (events [i])) {
count += 1;
if (lowest == -1)
lowest = i;
}
}
if (alertable && mono_os_event_is_signalled (&data->event))
signalled = TRUE;
else if (waitall)
signalled = (count == nevents);
else /* waitany */
signalled = (count > 0);
if (signalled) {
ret = (MonoOSEventWaitRet)(MONO_OS_EVENT_WAIT_RET_SUCCESS_0 + lowest);
goto done;
}
if (timeout == MONO_INFINITE_WAIT) {
mono_os_cond_wait (&signal_cond, &signal_mutex);
} else {
gint64 elapsed;
gint res;
elapsed = mono_msec_ticks () - start;
if (elapsed >= timeout) {
ret = MONO_OS_EVENT_WAIT_RET_TIMEOUT;
goto done;
}
res = mono_os_cond_timedwait (&signal_cond, &signal_mutex, timeout - elapsed);
if (res != 0) {
ret = MONO_OS_EVENT_WAIT_RET_TIMEOUT;
goto done;
}
}
}
done:
for (i = 0; i < nevents; ++i)
g_ptr_array_remove (events [i]->conds, &signal_cond);
if (alertable)
g_ptr_array_remove (data->event.conds, &signal_cond);
mono_os_mutex_unlock (&signal_mutex);
mono_os_cond_destroy (&signal_cond);
if (alertable) {
mono_thread_info_uninstall_interrupt (&alerted);
if (alerted) {
if (mono_atomic_dec_i32 ((gint32*) &data->ref) == 0) {
mono_os_event_destroy (&data->event);
g_free (data);
}
return MONO_OS_EVENT_WAIT_RET_ALERTED;
}
mono_os_event_destroy (&data->event);
g_free (data);
}
return ret;
}
| -1 |
dotnet/runtime | 66,007 | [monoapi] Add mono_method_get_unmanaged_callers_only_ftnptr | Like `RuntimeMethodHandle.GetFunctionPointer`, but callable from native code | lambdageek | 2022-03-01T15:38:09Z | 2022-03-03T15:38:37Z | faa272f860f83802a9cee65619e6e2e841b05433 | 1cfa6d6964d890f9673b25d2165532f9a43710a7 | [monoapi] Add mono_method_get_unmanaged_callers_only_ftnptr. Like `RuntimeMethodHandle.GetFunctionPointer`, but callable from native code | ./src/coreclr/pal/inc/rt/cpp/wchar.h | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//
//
// ===========================================================================
// File: wchar.h
//
// ===========================================================================
// dummy wchar.h for PAL
#include "palrt.h"
| -1 |
dotnet/runtime | 66,007 | [monoapi] Add mono_method_get_unmanaged_callers_only_ftnptr | Like `RuntimeMethodHandle.GetFunctionPointer`, but callable from native code | lambdageek | 2022-03-01T15:38:09Z | 2022-03-03T15:38:37Z | faa272f860f83802a9cee65619e6e2e841b05433 | 1cfa6d6964d890f9673b25d2165532f9a43710a7 | [monoapi] Add mono_method_get_unmanaged_callers_only_ftnptr. Like `RuntimeMethodHandle.GetFunctionPointer`, but callable from native code | ./src/native/external/brotli/enc/hash_composite_inc.h | /* NOLINT(build/header_guard) */
/* Copyright 2018 Google Inc. All Rights Reserved.
Distributed under MIT license.
See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
*/
/* template parameters: FN, HASHER_A, HASHER_B */
/* Composite hasher: This hasher combines two other hashers, HASHER_A
   and HASHER_B. */
#define HashComposite HASHER()
#define FN_A(X) EXPAND_CAT(X, HASHER_A)
#define FN_B(X) EXPAND_CAT(X, HASHER_B)
static BROTLI_INLINE size_t FN(HashTypeLength)(void) {
size_t a = FN_A(HashTypeLength)();
size_t b = FN_B(HashTypeLength)();
return a > b ? a : b;
}
static BROTLI_INLINE size_t FN(StoreLookahead)(void) {
size_t a = FN_A(StoreLookahead)();
size_t b = FN_B(StoreLookahead)();
return a > b ? a : b;
}
typedef struct HashComposite {
HASHER_A ha;
HASHER_B hb;
HasherCommon hb_common;
/* Shortcuts. */
void* extra;
HasherCommon* common;
BROTLI_BOOL fresh;
const BrotliEncoderParams* params;
} HashComposite;
static void FN(Initialize)(HasherCommon* common,
HashComposite* BROTLI_RESTRICT self, const BrotliEncoderParams* params) {
self->common = common;
self->extra = common->extra;
self->hb_common = *self->common;
self->fresh = BROTLI_TRUE;
self->params = params;
/* TODO: Initialization of the hashers is deferred to Prepare (and the params
   remembered here) because we don't get the one_shot and input_size params
   here that are needed to know their memory size. Instead provide
   those params to all hashers FN(Initialize) */
}
static void FN(Prepare)(
HashComposite* BROTLI_RESTRICT self, BROTLI_BOOL one_shot,
size_t input_size, const uint8_t* BROTLI_RESTRICT data) {
if (self->fresh) {
self->fresh = BROTLI_FALSE;
self->hb_common.extra = (uint8_t*)self->extra +
FN_A(HashMemAllocInBytes)(self->params, one_shot, input_size);
FN_A(Initialize)(self->common, &self->ha, self->params);
FN_B(Initialize)(&self->hb_common, &self->hb, self->params);
}
FN_A(Prepare)(&self->ha, one_shot, input_size, data);
FN_B(Prepare)(&self->hb, one_shot, input_size, data);
}
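/* (Editorial note, illustrative: the single buffer reported by
   FN(HashMemAllocInBytes) is split here -- hasher A owns the first
   FN_A(HashMemAllocInBytes) bytes of common->extra and hb_common.extra is
   offset past them -- so the two sub-hashers share one allocation.) */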
static BROTLI_INLINE size_t FN(HashMemAllocInBytes)(
const BrotliEncoderParams* params, BROTLI_BOOL one_shot,
size_t input_size) {
return FN_A(HashMemAllocInBytes)(params, one_shot, input_size) +
FN_B(HashMemAllocInBytes)(params, one_shot, input_size);
}
static BROTLI_INLINE void FN(Store)(HashComposite* BROTLI_RESTRICT self,
const uint8_t* BROTLI_RESTRICT data, const size_t mask, const size_t ix) {
FN_A(Store)(&self->ha, data, mask, ix);
FN_B(Store)(&self->hb, data, mask, ix);
}
static BROTLI_INLINE void FN(StoreRange)(
HashComposite* BROTLI_RESTRICT self, const uint8_t* BROTLI_RESTRICT data,
const size_t mask, const size_t ix_start,
const size_t ix_end) {
FN_A(StoreRange)(&self->ha, data, mask, ix_start, ix_end);
FN_B(StoreRange)(&self->hb, data, mask, ix_start, ix_end);
}
static BROTLI_INLINE void FN(StitchToPreviousBlock)(
HashComposite* BROTLI_RESTRICT self,
size_t num_bytes, size_t position, const uint8_t* ringbuffer,
size_t ring_buffer_mask) {
FN_A(StitchToPreviousBlock)(&self->ha, num_bytes, position,
ringbuffer, ring_buffer_mask);
FN_B(StitchToPreviousBlock)(&self->hb, num_bytes, position,
ringbuffer, ring_buffer_mask);
}
static BROTLI_INLINE void FN(PrepareDistanceCache)(
HashComposite* BROTLI_RESTRICT self, int* BROTLI_RESTRICT distance_cache) {
FN_A(PrepareDistanceCache)(&self->ha, distance_cache);
FN_B(PrepareDistanceCache)(&self->hb, distance_cache);
}
static BROTLI_INLINE void FN(FindLongestMatch)(
HashComposite* BROTLI_RESTRICT self,
const BrotliEncoderDictionary* dictionary,
const uint8_t* BROTLI_RESTRICT data, const size_t ring_buffer_mask,
const int* BROTLI_RESTRICT distance_cache, const size_t cur_ix,
const size_t max_length, const size_t max_backward,
const size_t dictionary_distance, const size_t max_distance,
HasherSearchResult* BROTLI_RESTRICT out) {
FN_A(FindLongestMatch)(&self->ha, dictionary, data, ring_buffer_mask,
distance_cache, cur_ix, max_length, max_backward, dictionary_distance,
max_distance, out);
FN_B(FindLongestMatch)(&self->hb, dictionary, data, ring_buffer_mask,
distance_cache, cur_ix, max_length, max_backward, dictionary_distance,
max_distance, out);
}
#undef HashComposite
| /* NOLINT(build/header_guard) */
/* Copyright 2018 Google Inc. All Rights Reserved.
Distributed under MIT license.
See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
*/
/* template parameters: FN, HASHER_A, HASHER_B */
/* Composite hasher: this hasher allows combining two other hashers, HASHER_A
   and HASHER_B. */
#define HashComposite HASHER()
#define FN_A(X) EXPAND_CAT(X, HASHER_A)
#define FN_B(X) EXPAND_CAT(X, HASHER_B)
static BROTLI_INLINE size_t FN(HashTypeLength)(void) {
size_t a = FN_A(HashTypeLength)();
size_t b = FN_B(HashTypeLength)();
return a > b ? a : b;
}
static BROTLI_INLINE size_t FN(StoreLookahead)(void) {
size_t a = FN_A(StoreLookahead)();
size_t b = FN_B(StoreLookahead)();
return a > b ? a : b;
}
typedef struct HashComposite {
HASHER_A ha;
HASHER_B hb;
HasherCommon hb_common;
/* Shortcuts. */
void* extra;
HasherCommon* common;
BROTLI_BOOL fresh;
const BrotliEncoderParams* params;
} HashComposite;
static void FN(Initialize)(HasherCommon* common,
HashComposite* BROTLI_RESTRICT self, const BrotliEncoderParams* params) {
self->common = common;
self->extra = common->extra;
self->hb_common = *self->common;
self->fresh = BROTLI_TRUE;
self->params = params;
  /* TODO: initialization of the hashers is deferred to Prepare (and the
     params are remembered here) because we don't get the one_shot and
     input_size params here that are needed to know their memory size.
     Instead, provide those params to all hashers' FN(Initialize). */
}
static void FN(Prepare)(
HashComposite* BROTLI_RESTRICT self, BROTLI_BOOL one_shot,
size_t input_size, const uint8_t* BROTLI_RESTRICT data) {
if (self->fresh) {
self->fresh = BROTLI_FALSE;
self->hb_common.extra = (uint8_t*)self->extra +
FN_A(HashMemAllocInBytes)(self->params, one_shot, input_size);
FN_A(Initialize)(self->common, &self->ha, self->params);
FN_B(Initialize)(&self->hb_common, &self->hb, self->params);
}
FN_A(Prepare)(&self->ha, one_shot, input_size, data);
FN_B(Prepare)(&self->hb, one_shot, input_size, data);
}
static BROTLI_INLINE size_t FN(HashMemAllocInBytes)(
const BrotliEncoderParams* params, BROTLI_BOOL one_shot,
size_t input_size) {
return FN_A(HashMemAllocInBytes)(params, one_shot, input_size) +
FN_B(HashMemAllocInBytes)(params, one_shot, input_size);
}
static BROTLI_INLINE void FN(Store)(HashComposite* BROTLI_RESTRICT self,
const uint8_t* BROTLI_RESTRICT data, const size_t mask, const size_t ix) {
FN_A(Store)(&self->ha, data, mask, ix);
FN_B(Store)(&self->hb, data, mask, ix);
}
static BROTLI_INLINE void FN(StoreRange)(
HashComposite* BROTLI_RESTRICT self, const uint8_t* BROTLI_RESTRICT data,
const size_t mask, const size_t ix_start,
const size_t ix_end) {
FN_A(StoreRange)(&self->ha, data, mask, ix_start, ix_end);
FN_B(StoreRange)(&self->hb, data, mask, ix_start, ix_end);
}
static BROTLI_INLINE void FN(StitchToPreviousBlock)(
HashComposite* BROTLI_RESTRICT self,
size_t num_bytes, size_t position, const uint8_t* ringbuffer,
size_t ring_buffer_mask) {
FN_A(StitchToPreviousBlock)(&self->ha, num_bytes, position,
ringbuffer, ring_buffer_mask);
FN_B(StitchToPreviousBlock)(&self->hb, num_bytes, position,
ringbuffer, ring_buffer_mask);
}
static BROTLI_INLINE void FN(PrepareDistanceCache)(
HashComposite* BROTLI_RESTRICT self, int* BROTLI_RESTRICT distance_cache) {
FN_A(PrepareDistanceCache)(&self->ha, distance_cache);
FN_B(PrepareDistanceCache)(&self->hb, distance_cache);
}
static BROTLI_INLINE void FN(FindLongestMatch)(
HashComposite* BROTLI_RESTRICT self,
const BrotliEncoderDictionary* dictionary,
const uint8_t* BROTLI_RESTRICT data, const size_t ring_buffer_mask,
const int* BROTLI_RESTRICT distance_cache, const size_t cur_ix,
const size_t max_length, const size_t max_backward,
const size_t dictionary_distance, const size_t max_distance,
HasherSearchResult* BROTLI_RESTRICT out) {
FN_A(FindLongestMatch)(&self->ha, dictionary, data, ring_buffer_mask,
distance_cache, cur_ix, max_length, max_backward, dictionary_distance,
max_distance, out);
FN_B(FindLongestMatch)(&self->hb, dictionary, data, ring_buffer_mask,
distance_cache, cur_ix, max_length, max_backward, dictionary_distance,
max_distance, out);
}
#undef HashComposite
| -1 |
dotnet/runtime | 66,007 | [monoapi] Add mono_method_get_unmanaged_callers_only_ftnptr | Like `RuntimeMethodHandle.GetFunctionPointer`, but callable from native code | lambdageek | 2022-03-01T15:38:09Z | 2022-03-03T15:38:37Z | faa272f860f83802a9cee65619e6e2e841b05433 | 1cfa6d6964d890f9673b25d2165532f9a43710a7 | [monoapi] Add mono_method_get_unmanaged_callers_only_ftnptr. Like `RuntimeMethodHandle.GetFunctionPointer`, but callable from native code | ./src/native/external/brotli/enc/cluster_inc.h | /* NOLINT(build/header_guard) */
/* Copyright 2013 Google Inc. All Rights Reserved.
Distributed under MIT license.
See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
*/
/* template parameters: FN, CODE */
#define HistogramType FN(Histogram)
/* Computes the bit cost reduction from combining out[idx1] and out[idx2] and,
   if it is below a threshold, stores the pair (idx1, idx2) in the *pairs
   queue. */
BROTLI_INTERNAL void FN(BrotliCompareAndPushToQueue)(
const HistogramType* out, const uint32_t* cluster_size, uint32_t idx1,
uint32_t idx2, size_t max_num_pairs, HistogramPair* pairs,
size_t* num_pairs) CODE({
BROTLI_BOOL is_good_pair = BROTLI_FALSE;
HistogramPair p;
p.idx1 = p.idx2 = 0;
p.cost_diff = p.cost_combo = 0;
if (idx1 == idx2) {
return;
}
if (idx2 < idx1) {
uint32_t t = idx2;
idx2 = idx1;
idx1 = t;
}
p.idx1 = idx1;
p.idx2 = idx2;
p.cost_diff = 0.5 * ClusterCostDiff(cluster_size[idx1], cluster_size[idx2]);
p.cost_diff -= out[idx1].bit_cost_;
p.cost_diff -= out[idx2].bit_cost_;
if (out[idx1].total_count_ == 0) {
p.cost_combo = out[idx2].bit_cost_;
is_good_pair = BROTLI_TRUE;
} else if (out[idx2].total_count_ == 0) {
p.cost_combo = out[idx1].bit_cost_;
is_good_pair = BROTLI_TRUE;
} else {
double threshold = *num_pairs == 0 ? 1e99 :
BROTLI_MAX(double, 0.0, pairs[0].cost_diff);
HistogramType combo = out[idx1];
double cost_combo;
FN(HistogramAddHistogram)(&combo, &out[idx2]);
cost_combo = FN(BrotliPopulationCost)(&combo);
if (cost_combo < threshold - p.cost_diff) {
p.cost_combo = cost_combo;
is_good_pair = BROTLI_TRUE;
}
}
if (is_good_pair) {
p.cost_diff += p.cost_combo;
if (*num_pairs > 0 && HistogramPairIsLess(&pairs[0], &p)) {
/* Replace the top of the queue if needed. */
if (*num_pairs < max_num_pairs) {
pairs[*num_pairs] = pairs[0];
++(*num_pairs);
}
pairs[0] = p;
} else if (*num_pairs < max_num_pairs) {
pairs[*num_pairs] = p;
++(*num_pairs);
}
}
})
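/* Note on the pairs[] queue used above: it is a small bounded priority queue
   in which pairs[0] always holds the best candidate merge, i.e. the one with
   the smallest cost_diff (largest bit cost saving). Illustrative trace with
   max_num_pairs == 2, writing each pair as its cost_diff:
     push 5          -> pairs = [5]
     push 3          -> pairs = [3, 5]    (new best moves to the front)
     push 1 (full)   -> pairs = [1, 5]    (old front 3 is discarded) */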
BROTLI_INTERNAL size_t FN(BrotliHistogramCombine)(HistogramType* out,
uint32_t* cluster_size,
uint32_t* symbols,
uint32_t* clusters,
HistogramPair* pairs,
size_t num_clusters,
size_t symbols_size,
size_t max_clusters,
size_t max_num_pairs) CODE({
double cost_diff_threshold = 0.0;
size_t min_cluster_size = 1;
size_t num_pairs = 0;
{
/* We maintain a vector of histogram pairs, with the property that the pair
with the maximum bit cost reduction is the first. */
size_t idx1;
for (idx1 = 0; idx1 < num_clusters; ++idx1) {
size_t idx2;
for (idx2 = idx1 + 1; idx2 < num_clusters; ++idx2) {
FN(BrotliCompareAndPushToQueue)(out, cluster_size, clusters[idx1],
clusters[idx2], max_num_pairs, &pairs[0], &num_pairs);
}
}
}
while (num_clusters > min_cluster_size) {
uint32_t best_idx1;
uint32_t best_idx2;
size_t i;
if (pairs[0].cost_diff >= cost_diff_threshold) {
cost_diff_threshold = 1e99;
min_cluster_size = max_clusters;
continue;
}
/* Take the best pair from the top of heap. */
best_idx1 = pairs[0].idx1;
best_idx2 = pairs[0].idx2;
FN(HistogramAddHistogram)(&out[best_idx1], &out[best_idx2]);
out[best_idx1].bit_cost_ = pairs[0].cost_combo;
cluster_size[best_idx1] += cluster_size[best_idx2];
for (i = 0; i < symbols_size; ++i) {
if (symbols[i] == best_idx2) {
symbols[i] = best_idx1;
}
}
for (i = 0; i < num_clusters; ++i) {
if (clusters[i] == best_idx2) {
memmove(&clusters[i], &clusters[i + 1],
(num_clusters - i - 1) * sizeof(clusters[0]));
break;
}
}
--num_clusters;
{
/* Remove pairs intersecting the just combined best pair. */
size_t copy_to_idx = 0;
for (i = 0; i < num_pairs; ++i) {
HistogramPair* p = &pairs[i];
if (p->idx1 == best_idx1 || p->idx2 == best_idx1 ||
p->idx1 == best_idx2 || p->idx2 == best_idx2) {
/* Remove invalid pair from the queue. */
continue;
}
if (HistogramPairIsLess(&pairs[0], p)) {
/* Replace the top of the queue if needed. */
HistogramPair front = pairs[0];
pairs[0] = *p;
pairs[copy_to_idx] = front;
} else {
pairs[copy_to_idx] = *p;
}
++copy_to_idx;
}
num_pairs = copy_to_idx;
}
/* Push new pairs formed with the combined histogram to the heap. */
for (i = 0; i < num_clusters; ++i) {
FN(BrotliCompareAndPushToQueue)(out, cluster_size, best_idx1, clusters[i],
max_num_pairs, &pairs[0], &num_pairs);
}
}
return num_clusters;
})
/* Returns the bit cost of moving a histogram from cur_symbol to candidate. */
BROTLI_INTERNAL double FN(BrotliHistogramBitCostDistance)(
const HistogramType* histogram, const HistogramType* candidate) CODE({
if (histogram->total_count_ == 0) {
return 0.0;
} else {
HistogramType tmp = *histogram;
FN(HistogramAddHistogram)(&tmp, candidate);
return FN(BrotliPopulationCost)(&tmp) - candidate->bit_cost_;
}
})
/* Find the best 'out' histogram for each of the 'in' histograms.
When called, clusters[0..num_clusters) contains the unique values from
symbols[0..in_size), but this property is not preserved in this function.
Note: we assume that out[]->bit_cost_ is already up-to-date. */
BROTLI_INTERNAL void FN(BrotliHistogramRemap)(const HistogramType* in,
size_t in_size, const uint32_t* clusters, size_t num_clusters,
HistogramType* out, uint32_t* symbols) CODE({
size_t i;
for (i = 0; i < in_size; ++i) {
uint32_t best_out = i == 0 ? symbols[0] : symbols[i - 1];
double best_bits =
FN(BrotliHistogramBitCostDistance)(&in[i], &out[best_out]);
size_t j;
for (j = 0; j < num_clusters; ++j) {
const double cur_bits =
FN(BrotliHistogramBitCostDistance)(&in[i], &out[clusters[j]]);
if (cur_bits < best_bits) {
best_bits = cur_bits;
best_out = clusters[j];
}
}
symbols[i] = best_out;
}
/* Recompute each out based on raw and symbols. */
for (i = 0; i < num_clusters; ++i) {
FN(HistogramClear)(&out[clusters[i]]);
}
for (i = 0; i < in_size; ++i) {
FN(HistogramAddHistogram)(&out[symbols[i]], &in[i]);
}
})
/* Reorders elements of the out[0..length) array and changes values in
symbols[0..length) array in the following way:
* when called, symbols[] contains indexes into out[], and has N unique
values (possibly N < length)
* on return, symbols'[i] = f(symbols[i]) and
out'[symbols'[i]] = out[symbols[i]], for each 0 <= i < length,
where f is a bijection between the range of symbols[] and [0..N), and
the first occurrences of values in symbols'[i] come in consecutive
increasing order.
Returns N, the number of unique values in symbols[]. */
BROTLI_INTERNAL size_t FN(BrotliHistogramReindex)(MemoryManager* m,
HistogramType* out, uint32_t* symbols, size_t length) CODE({
static const uint32_t kInvalidIndex = BROTLI_UINT32_MAX;
uint32_t* new_index = BROTLI_ALLOC(m, uint32_t, length);
uint32_t next_index;
HistogramType* tmp;
size_t i;
if (BROTLI_IS_OOM(m) || BROTLI_IS_NULL(new_index)) return 0;
for (i = 0; i < length; ++i) {
new_index[i] = kInvalidIndex;
}
next_index = 0;
for (i = 0; i < length; ++i) {
if (new_index[symbols[i]] == kInvalidIndex) {
new_index[symbols[i]] = next_index;
++next_index;
}
}
  /* TODO: by using the idea of "cycle sort" we can avoid the allocation of
     tmp and reduce the amount of copying by a factor of 2. */
tmp = BROTLI_ALLOC(m, HistogramType, next_index);
if (BROTLI_IS_OOM(m) || BROTLI_IS_NULL(tmp)) return 0;
next_index = 0;
for (i = 0; i < length; ++i) {
if (new_index[symbols[i]] == next_index) {
tmp[next_index] = out[symbols[i]];
++next_index;
}
symbols[i] = new_index[symbols[i]];
}
BROTLI_FREE(m, new_index);
for (i = 0; i < next_index; ++i) {
out[i] = tmp[i];
}
BROTLI_FREE(m, tmp);
return next_index;
})
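/* Illustrative example for FN(BrotliHistogramReindex):
     length = 5, symbols = {3, 3, 5, 3, 5}
     new_index (order of first occurrence): 3 -> 0, 5 -> 1
     on return: symbols = {0, 0, 1, 0, 1},
                out[0] = old out[3], out[1] = old out[5],
                return value = 2 (number of unique symbols). */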
BROTLI_INTERNAL void FN(BrotliClusterHistograms)(
MemoryManager* m, const HistogramType* in, const size_t in_size,
size_t max_histograms, HistogramType* out, size_t* out_size,
uint32_t* histogram_symbols) CODE({
uint32_t* cluster_size = BROTLI_ALLOC(m, uint32_t, in_size);
uint32_t* clusters = BROTLI_ALLOC(m, uint32_t, in_size);
size_t num_clusters = 0;
const size_t max_input_histograms = 64;
size_t pairs_capacity = max_input_histograms * max_input_histograms / 2;
/* For the first pass of clustering, we allow all pairs. */
HistogramPair* pairs = BROTLI_ALLOC(m, HistogramPair, pairs_capacity + 1);
size_t i;
if (BROTLI_IS_OOM(m) || BROTLI_IS_NULL(cluster_size) ||
BROTLI_IS_NULL(clusters) || BROTLI_IS_NULL(pairs)) {
return;
}
for (i = 0; i < in_size; ++i) {
cluster_size[i] = 1;
}
for (i = 0; i < in_size; ++i) {
out[i] = in[i];
out[i].bit_cost_ = FN(BrotliPopulationCost)(&in[i]);
histogram_symbols[i] = (uint32_t)i;
}
for (i = 0; i < in_size; i += max_input_histograms) {
size_t num_to_combine =
BROTLI_MIN(size_t, in_size - i, max_input_histograms);
size_t num_new_clusters;
size_t j;
for (j = 0; j < num_to_combine; ++j) {
clusters[num_clusters + j] = (uint32_t)(i + j);
}
num_new_clusters =
FN(BrotliHistogramCombine)(out, cluster_size,
&histogram_symbols[i],
&clusters[num_clusters], pairs,
num_to_combine, num_to_combine,
max_histograms, pairs_capacity);
num_clusters += num_new_clusters;
}
{
/* For the second pass, we limit the total number of histogram pairs.
After this limit is reached, we only keep searching for the best pair. */
size_t max_num_pairs = BROTLI_MIN(size_t,
64 * num_clusters, (num_clusters / 2) * num_clusters);
BROTLI_ENSURE_CAPACITY(
m, HistogramPair, pairs, pairs_capacity, max_num_pairs + 1);
if (BROTLI_IS_OOM(m)) return;
/* Collapse similar histograms. */
num_clusters = FN(BrotliHistogramCombine)(out, cluster_size,
histogram_symbols, clusters,
pairs, num_clusters, in_size,
max_histograms, max_num_pairs);
}
BROTLI_FREE(m, pairs);
BROTLI_FREE(m, cluster_size);
/* Find the optimal map from original histograms to the final ones. */
FN(BrotliHistogramRemap)(in, in_size, clusters, num_clusters,
out, histogram_symbols);
BROTLI_FREE(m, clusters);
/* Convert the context map to a canonical form. */
*out_size = FN(BrotliHistogramReindex)(m, out, histogram_symbols, in_size);
if (BROTLI_IS_OOM(m)) return;
})
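/* Summary of FN(BrotliClusterHistograms): histograms are first combined
   greedily inside batches of max_input_histograms (64) entries, where every
   pair may enter the queue; the surviving clusters are then combined globally
   with a capped pair queue. FN(BrotliHistogramRemap) reassigns each input
   histogram to its cheapest surviving cluster, and FN(BrotliHistogramReindex)
   renumbers the result into a canonical, densely packed context map. */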
#undef HistogramType
| /* NOLINT(build/header_guard) */
/* Copyright 2013 Google Inc. All Rights Reserved.
Distributed under MIT license.
See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
*/
/* template parameters: FN, CODE */
#define HistogramType FN(Histogram)
/* Computes the bit cost reduction from combining out[idx1] and out[idx2] and,
   if it is below a threshold, stores the pair (idx1, idx2) in the *pairs
   queue. */
BROTLI_INTERNAL void FN(BrotliCompareAndPushToQueue)(
const HistogramType* out, const uint32_t* cluster_size, uint32_t idx1,
uint32_t idx2, size_t max_num_pairs, HistogramPair* pairs,
size_t* num_pairs) CODE({
BROTLI_BOOL is_good_pair = BROTLI_FALSE;
HistogramPair p;
p.idx1 = p.idx2 = 0;
p.cost_diff = p.cost_combo = 0;
if (idx1 == idx2) {
return;
}
if (idx2 < idx1) {
uint32_t t = idx2;
idx2 = idx1;
idx1 = t;
}
p.idx1 = idx1;
p.idx2 = idx2;
p.cost_diff = 0.5 * ClusterCostDiff(cluster_size[idx1], cluster_size[idx2]);
p.cost_diff -= out[idx1].bit_cost_;
p.cost_diff -= out[idx2].bit_cost_;
if (out[idx1].total_count_ == 0) {
p.cost_combo = out[idx2].bit_cost_;
is_good_pair = BROTLI_TRUE;
} else if (out[idx2].total_count_ == 0) {
p.cost_combo = out[idx1].bit_cost_;
is_good_pair = BROTLI_TRUE;
} else {
double threshold = *num_pairs == 0 ? 1e99 :
BROTLI_MAX(double, 0.0, pairs[0].cost_diff);
HistogramType combo = out[idx1];
double cost_combo;
FN(HistogramAddHistogram)(&combo, &out[idx2]);
cost_combo = FN(BrotliPopulationCost)(&combo);
if (cost_combo < threshold - p.cost_diff) {
p.cost_combo = cost_combo;
is_good_pair = BROTLI_TRUE;
}
}
if (is_good_pair) {
p.cost_diff += p.cost_combo;
if (*num_pairs > 0 && HistogramPairIsLess(&pairs[0], &p)) {
/* Replace the top of the queue if needed. */
if (*num_pairs < max_num_pairs) {
pairs[*num_pairs] = pairs[0];
++(*num_pairs);
}
pairs[0] = p;
} else if (*num_pairs < max_num_pairs) {
pairs[*num_pairs] = p;
++(*num_pairs);
}
}
})
BROTLI_INTERNAL size_t FN(BrotliHistogramCombine)(HistogramType* out,
uint32_t* cluster_size,
uint32_t* symbols,
uint32_t* clusters,
HistogramPair* pairs,
size_t num_clusters,
size_t symbols_size,
size_t max_clusters,
size_t max_num_pairs) CODE({
double cost_diff_threshold = 0.0;
size_t min_cluster_size = 1;
size_t num_pairs = 0;
{
/* We maintain a vector of histogram pairs, with the property that the pair
with the maximum bit cost reduction is the first. */
size_t idx1;
for (idx1 = 0; idx1 < num_clusters; ++idx1) {
size_t idx2;
for (idx2 = idx1 + 1; idx2 < num_clusters; ++idx2) {
FN(BrotliCompareAndPushToQueue)(out, cluster_size, clusters[idx1],
clusters[idx2], max_num_pairs, &pairs[0], &num_pairs);
}
}
}
while (num_clusters > min_cluster_size) {
uint32_t best_idx1;
uint32_t best_idx2;
size_t i;
if (pairs[0].cost_diff >= cost_diff_threshold) {
cost_diff_threshold = 1e99;
min_cluster_size = max_clusters;
continue;
}
/* Take the best pair from the top of heap. */
best_idx1 = pairs[0].idx1;
best_idx2 = pairs[0].idx2;
FN(HistogramAddHistogram)(&out[best_idx1], &out[best_idx2]);
out[best_idx1].bit_cost_ = pairs[0].cost_combo;
cluster_size[best_idx1] += cluster_size[best_idx2];
for (i = 0; i < symbols_size; ++i) {
if (symbols[i] == best_idx2) {
symbols[i] = best_idx1;
}
}
for (i = 0; i < num_clusters; ++i) {
if (clusters[i] == best_idx2) {
memmove(&clusters[i], &clusters[i + 1],
(num_clusters - i - 1) * sizeof(clusters[0]));
break;
}
}
--num_clusters;
{
/* Remove pairs intersecting the just combined best pair. */
size_t copy_to_idx = 0;
for (i = 0; i < num_pairs; ++i) {
HistogramPair* p = &pairs[i];
if (p->idx1 == best_idx1 || p->idx2 == best_idx1 ||
p->idx1 == best_idx2 || p->idx2 == best_idx2) {
/* Remove invalid pair from the queue. */
continue;
}
if (HistogramPairIsLess(&pairs[0], p)) {
/* Replace the top of the queue if needed. */
HistogramPair front = pairs[0];
pairs[0] = *p;
pairs[copy_to_idx] = front;
} else {
pairs[copy_to_idx] = *p;
}
++copy_to_idx;
}
num_pairs = copy_to_idx;
}
/* Push new pairs formed with the combined histogram to the heap. */
for (i = 0; i < num_clusters; ++i) {
FN(BrotliCompareAndPushToQueue)(out, cluster_size, best_idx1, clusters[i],
max_num_pairs, &pairs[0], &num_pairs);
}
}
return num_clusters;
})
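/* Note on the main loop above: each iteration merges the best remaining pair
   (taken from pairs[0]), redirects every symbol that pointed at the absorbed
   cluster, drops queue entries that referenced either merged cluster, and
   pushes fresh candidate pairs formed with the combined histogram. Once the
   best saving falls below the threshold, min_cluster_size is raised to
   max_clusters, so from then on the merge budget rather than the bit cost
   terminates clustering. */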
/* Returns the bit cost of moving a histogram from cur_symbol to candidate. */
BROTLI_INTERNAL double FN(BrotliHistogramBitCostDistance)(
const HistogramType* histogram, const HistogramType* candidate) CODE({
if (histogram->total_count_ == 0) {
return 0.0;
} else {
HistogramType tmp = *histogram;
FN(HistogramAddHistogram)(&tmp, candidate);
return FN(BrotliPopulationCost)(&tmp) - candidate->bit_cost_;
}
})
/* Find the best 'out' histogram for each of the 'in' histograms.
When called, clusters[0..num_clusters) contains the unique values from
symbols[0..in_size), but this property is not preserved in this function.
Note: we assume that out[]->bit_cost_ is already up-to-date. */
BROTLI_INTERNAL void FN(BrotliHistogramRemap)(const HistogramType* in,
size_t in_size, const uint32_t* clusters, size_t num_clusters,
HistogramType* out, uint32_t* symbols) CODE({
size_t i;
for (i = 0; i < in_size; ++i) {
uint32_t best_out = i == 0 ? symbols[0] : symbols[i - 1];
double best_bits =
FN(BrotliHistogramBitCostDistance)(&in[i], &out[best_out]);
size_t j;
for (j = 0; j < num_clusters; ++j) {
const double cur_bits =
FN(BrotliHistogramBitCostDistance)(&in[i], &out[clusters[j]]);
if (cur_bits < best_bits) {
best_bits = cur_bits;
best_out = clusters[j];
}
}
symbols[i] = best_out;
}
/* Recompute each out based on raw and symbols. */
for (i = 0; i < num_clusters; ++i) {
FN(HistogramClear)(&out[clusters[i]]);
}
for (i = 0; i < in_size; ++i) {
FN(HistogramAddHistogram)(&out[symbols[i]], &in[i]);
}
})
/* Reorders elements of the out[0..length) array and changes values in
symbols[0..length) array in the following way:
* when called, symbols[] contains indexes into out[], and has N unique
values (possibly N < length)
* on return, symbols'[i] = f(symbols[i]) and
out'[symbols'[i]] = out[symbols[i]], for each 0 <= i < length,
where f is a bijection between the range of symbols[] and [0..N), and
the first occurrences of values in symbols'[i] come in consecutive
increasing order.
Returns N, the number of unique values in symbols[]. */
BROTLI_INTERNAL size_t FN(BrotliHistogramReindex)(MemoryManager* m,
HistogramType* out, uint32_t* symbols, size_t length) CODE({
static const uint32_t kInvalidIndex = BROTLI_UINT32_MAX;
uint32_t* new_index = BROTLI_ALLOC(m, uint32_t, length);
uint32_t next_index;
HistogramType* tmp;
size_t i;
if (BROTLI_IS_OOM(m) || BROTLI_IS_NULL(new_index)) return 0;
for (i = 0; i < length; ++i) {
new_index[i] = kInvalidIndex;
}
next_index = 0;
for (i = 0; i < length; ++i) {
if (new_index[symbols[i]] == kInvalidIndex) {
new_index[symbols[i]] = next_index;
++next_index;
}
}
  /* TODO: by using the idea of "cycle sort" we can avoid the allocation of
     tmp and reduce the amount of copying by a factor of 2. */
tmp = BROTLI_ALLOC(m, HistogramType, next_index);
if (BROTLI_IS_OOM(m) || BROTLI_IS_NULL(tmp)) return 0;
next_index = 0;
for (i = 0; i < length; ++i) {
if (new_index[symbols[i]] == next_index) {
tmp[next_index] = out[symbols[i]];
++next_index;
}
symbols[i] = new_index[symbols[i]];
}
BROTLI_FREE(m, new_index);
for (i = 0; i < next_index; ++i) {
out[i] = tmp[i];
}
BROTLI_FREE(m, tmp);
return next_index;
})
BROTLI_INTERNAL void FN(BrotliClusterHistograms)(
MemoryManager* m, const HistogramType* in, const size_t in_size,
size_t max_histograms, HistogramType* out, size_t* out_size,
uint32_t* histogram_symbols) CODE({
uint32_t* cluster_size = BROTLI_ALLOC(m, uint32_t, in_size);
uint32_t* clusters = BROTLI_ALLOC(m, uint32_t, in_size);
size_t num_clusters = 0;
const size_t max_input_histograms = 64;
size_t pairs_capacity = max_input_histograms * max_input_histograms / 2;
/* For the first pass of clustering, we allow all pairs. */
HistogramPair* pairs = BROTLI_ALLOC(m, HistogramPair, pairs_capacity + 1);
size_t i;
if (BROTLI_IS_OOM(m) || BROTLI_IS_NULL(cluster_size) ||
BROTLI_IS_NULL(clusters) || BROTLI_IS_NULL(pairs)) {
return;
}
for (i = 0; i < in_size; ++i) {
cluster_size[i] = 1;
}
for (i = 0; i < in_size; ++i) {
out[i] = in[i];
out[i].bit_cost_ = FN(BrotliPopulationCost)(&in[i]);
histogram_symbols[i] = (uint32_t)i;
}
for (i = 0; i < in_size; i += max_input_histograms) {
size_t num_to_combine =
BROTLI_MIN(size_t, in_size - i, max_input_histograms);
size_t num_new_clusters;
size_t j;
for (j = 0; j < num_to_combine; ++j) {
clusters[num_clusters + j] = (uint32_t)(i + j);
}
num_new_clusters =
FN(BrotliHistogramCombine)(out, cluster_size,
&histogram_symbols[i],
&clusters[num_clusters], pairs,
num_to_combine, num_to_combine,
max_histograms, pairs_capacity);
num_clusters += num_new_clusters;
}
{
/* For the second pass, we limit the total number of histogram pairs.
After this limit is reached, we only keep searching for the best pair. */
size_t max_num_pairs = BROTLI_MIN(size_t,
64 * num_clusters, (num_clusters / 2) * num_clusters);
BROTLI_ENSURE_CAPACITY(
m, HistogramPair, pairs, pairs_capacity, max_num_pairs + 1);
if (BROTLI_IS_OOM(m)) return;
/* Collapse similar histograms. */
num_clusters = FN(BrotliHistogramCombine)(out, cluster_size,
histogram_symbols, clusters,
pairs, num_clusters, in_size,
max_histograms, max_num_pairs);
}
BROTLI_FREE(m, pairs);
BROTLI_FREE(m, cluster_size);
/* Find the optimal map from original histograms to the final ones. */
FN(BrotliHistogramRemap)(in, in_size, clusters, num_clusters,
out, histogram_symbols);
BROTLI_FREE(m, clusters);
/* Convert the context map to a canonical form. */
*out_size = FN(BrotliHistogramReindex)(m, out, histogram_symbols, in_size);
if (BROTLI_IS_OOM(m)) return;
})
#undef HistogramType
| -1 |
dotnet/runtime | 66,007 | [monoapi] Add mono_method_get_unmanaged_callers_only_ftnptr | Like `RuntimeMethodHandle.GetFunctionPointer`, but callable from native code | lambdageek | 2022-03-01T15:38:09Z | 2022-03-03T15:38:37Z | faa272f860f83802a9cee65619e6e2e841b05433 | 1cfa6d6964d890f9673b25d2165532f9a43710a7 | [monoapi] Add mono_method_get_unmanaged_callers_only_ftnptr. Like `RuntimeMethodHandle.GetFunctionPointer`, but callable from native code | ./src/native/libs/System.Native/ios/netinet/udp_var.h | /*
* Copyright (c) 2008-2016 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
* This file contains Original Code and/or Modifications of Original Code
* as defined in and that are subject to the Apple Public Source License
* Version 2.0 (the 'License'). You may not use this file except in
* compliance with the License. The rights granted to you under the License
* may not be used to create, or enable the creation or redistribution of,
* unlawful or unlicensed copies of an Apple operating system, or to
* circumvent, violate, or enable the circumvention or violation of, any
* terms of an Apple operating system software license agreement.
*
* Please obtain a copy of the License at
* http://www.opensource.apple.com/apsl/ and read it before using this file.
*
* The Original Code and all software distributed under the License are
* distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
* EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
* INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
* Please see the License for the specific language governing rights and
* limitations under the License.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_END@
*/
/*
* Copyright (c) 1982, 1986, 1989, 1993
* The Regents of the University of California. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by the University of
* California, Berkeley and its contributors.
* 4. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* @(#)udp_var.h 8.1 (Berkeley) 6/10/93
*/
#ifndef _NETINET_UDP_VAR_H_
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wreserved-id-macro"
#define _NETINET_UDP_VAR_H_
#pragma clang diagnostic pop
#include <sys/appleapiopts.h>
#include <sys/sysctl.h>
/*
* UDP kernel structures and variables.
*/
struct udpiphdr {
struct ipovly ui_i; /* overlaid ip structure */
struct udphdr ui_u; /* udp header */
};
#define ui_x1 ui_i.ih_x1
#define ui_pr ui_i.ih_pr
#define ui_len ui_i.ih_len
#define ui_src ui_i.ih_src
#define ui_dst ui_i.ih_dst
#define ui_sport ui_u.uh_sport
#define ui_dport ui_u.uh_dport
#define ui_ulen ui_u.uh_ulen
#define ui_sum ui_u.uh_sum
#define ui_next ui_i.ih_next
#define ui_prev ui_i.ih_prev
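/*
 * The ui_* macros above give flat access to the overlaid headers. For
 * example, given `struct udpiphdr ui;`, the expression `ui.ui_sport`
 * expands to `ui.ui_u.uh_sport` (the UDP source port) and `ui.ui_src`
 * expands to `ui.ui_i.ih_src` (the IP source address).
 */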
struct udpstat {
/* input statistics: */
u_int32_t udps_ipackets; /* total input packets */
u_int32_t udps_hdrops; /* packet shorter than header */
u_int32_t udps_badsum; /* checksum error */
u_int32_t udps_badlen; /* data length larger than packet */
u_int32_t udps_noport; /* no socket on port */
u_int32_t udps_noportbcast; /* of above, arrived as broadcast */
u_int32_t udps_fullsock; /* not delivered, input socket full */
u_int32_t udpps_pcbcachemiss; /* input packets missing pcb cache */
u_int32_t udpps_pcbhashmiss; /* input packets not for hashed pcb */
/* output statistics: */
u_int32_t udps_opackets; /* total output packets */
u_int32_t udps_fastout; /* output packets on fast path */
u_int32_t udps_nosum; /* no checksum */
u_int32_t udps_noportmcast; /* of no socket on port, multicast */
u_int32_t udps_filtermcast; /* blocked by multicast filter */
/* checksum statistics: */
u_int32_t udps_rcv_swcsum; /* udp swcksum (inbound), packets */
u_int32_t udps_rcv_swcsum_bytes; /* udp swcksum (inbound), bytes */
u_int32_t udps_rcv6_swcsum; /* udp6 swcksum (inbound), packets */
u_int32_t udps_rcv6_swcsum_bytes; /* udp6 swcksum (inbound), bytes */
u_int32_t udps_snd_swcsum; /* udp swcksum (outbound), packets */
u_int32_t udps_snd_swcsum_bytes; /* udp swcksum (outbound), bytes */
u_int32_t udps_snd6_swcsum; /* udp6 swcksum (outbound), packets */
u_int32_t udps_snd6_swcsum_bytes; /* udp6 swcksum (outbound), bytes */
};
/*
* Names for UDP sysctl objects
*/
#define UDPCTL_CHECKSUM 1 /* checksum UDP packets */
#define UDPCTL_STATS 2 /* statistics (read-only) */
#define UDPCTL_MAXDGRAM 3 /* max datagram size */
#define UDPCTL_RECVSPACE 4 /* default receive buffer space */
#define UDPCTL_PCBLIST 5 /* list of PCBs for UDP sockets */
#define UDPCTL_MAXID 6
#ifdef BSD_KERNEL_PRIVATE
#include <kern/locks.h>
#include <sys/bitstring.h>
#define UDPCTL_NAMES { \
{ 0, 0 }, \
{ "checksum", CTLTYPE_INT }, \
{ "stats", CTLTYPE_STRUCT }, \
{ "maxdgram", CTLTYPE_INT }, \
{ "recvspace", CTLTYPE_INT }, \
{ "pcblist", CTLTYPE_STRUCT }, \
}
#define udp6stat udpstat
#define udp6s_opackets udps_opackets
SYSCTL_DECL(_net_inet_udp);
struct udpstat_local {
u_int64_t port_unreach;
u_int64_t faithprefix; /* deprecated */
u_int64_t port0;
u_int64_t badlength;
u_int64_t badchksum;
u_int64_t badmcast;
u_int64_t cleanup;
u_int64_t badipsec;
};
extern struct pr_usrreqs udp_usrreqs;
extern struct inpcbhead udb;
extern struct inpcbinfo udbinfo;
extern u_int32_t udp_sendspace;
extern u_int32_t udp_recvspace;
extern struct udpstat udpstat;
extern int udp_log_in_vain;
__BEGIN_DECLS
extern void udp_ctlinput(int, struct sockaddr *, void *, struct ifnet *);
extern int udp_ctloutput(struct socket *, struct sockopt *);
extern void udp_init(struct protosw *, struct domain *);
extern void udp_input(struct mbuf *, int);
extern int udp_connectx_common(struct socket *, int, struct sockaddr *,
struct sockaddr *, struct proc *, uint32_t, sae_associd_t,
sae_connid_t *, uint32_t, void *, uint32_t, struct uio*, user_ssize_t *);
extern void udp_notify(struct inpcb *inp, int errno);
extern int udp_shutdown(struct socket *so);
extern int udp_lock(struct socket *, int, void *);
extern int udp_unlock(struct socket *, int, void *);
extern lck_mtx_t *udp_getlock(struct socket *, int);
extern void udp_get_ports_used(u_int32_t, int, u_int32_t, bitstr_t *);
extern uint32_t udp_count_opportunistic(unsigned int, u_int32_t);
extern uint32_t udp_find_anypcb_byaddr(struct ifaddr *);
extern void udp_fill_keepalive_offload_frames(struct ifnet *,
struct ifnet_keepalive_offload_frame *, u_int32_t, size_t, u_int32_t *);
__END_DECLS
#endif /* BSD_KERNEL_PRIVATE */
#endif /* _NETINET_UDP_VAR_H_ */ | /*
* Copyright (c) 2008-2016 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
* This file contains Original Code and/or Modifications of Original Code
* as defined in and that are subject to the Apple Public Source License
* Version 2.0 (the 'License'). You may not use this file except in
* compliance with the License. The rights granted to you under the License
* may not be used to create, or enable the creation or redistribution of,
* unlawful or unlicensed copies of an Apple operating system, or to
* circumvent, violate, or enable the circumvention or violation of, any
* terms of an Apple operating system software license agreement.
*
* Please obtain a copy of the License at
* http://www.opensource.apple.com/apsl/ and read it before using this file.
*
* The Original Code and all software distributed under the License are
* distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
* EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
* INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
* Please see the License for the specific language governing rights and
* limitations under the License.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_END@
*/
/*
* Copyright (c) 1982, 1986, 1989, 1993
* The Regents of the University of California. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by the University of
* California, Berkeley and its contributors.
* 4. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* @(#)udp_var.h 8.1 (Berkeley) 6/10/93
*/
#ifndef _NETINET_UDP_VAR_H_
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wreserved-id-macro"
#define _NETINET_UDP_VAR_H_
#pragma clang diagnostic pop
#include <sys/appleapiopts.h>
#include <sys/sysctl.h>
/*
* UDP kernel structures and variables.
*/
struct udpiphdr {
struct ipovly ui_i; /* overlaid ip structure */
struct udphdr ui_u; /* udp header */
};
#define ui_x1 ui_i.ih_x1
#define ui_pr ui_i.ih_pr
#define ui_len ui_i.ih_len
#define ui_src ui_i.ih_src
#define ui_dst ui_i.ih_dst
#define ui_sport ui_u.uh_sport
#define ui_dport ui_u.uh_dport
#define ui_ulen ui_u.uh_ulen
#define ui_sum ui_u.uh_sum
#define ui_next ui_i.ih_next
#define ui_prev ui_i.ih_prev
struct udpstat {
/* input statistics: */
u_int32_t udps_ipackets; /* total input packets */
u_int32_t udps_hdrops; /* packet shorter than header */
u_int32_t udps_badsum; /* checksum error */
u_int32_t udps_badlen; /* data length larger than packet */
u_int32_t udps_noport; /* no socket on port */
u_int32_t udps_noportbcast; /* of above, arrived as broadcast */
u_int32_t udps_fullsock; /* not delivered, input socket full */
u_int32_t udpps_pcbcachemiss; /* input packets missing pcb cache */
u_int32_t udpps_pcbhashmiss; /* input packets not for hashed pcb */
/* output statistics: */
u_int32_t udps_opackets; /* total output packets */
u_int32_t udps_fastout; /* output packets on fast path */
u_int32_t udps_nosum; /* no checksum */
u_int32_t udps_noportmcast; /* of no socket on port, multicast */
u_int32_t udps_filtermcast; /* blocked by multicast filter */
/* checksum statistics: */
u_int32_t udps_rcv_swcsum; /* udp swcksum (inbound), packets */
u_int32_t udps_rcv_swcsum_bytes; /* udp swcksum (inbound), bytes */
u_int32_t udps_rcv6_swcsum; /* udp6 swcksum (inbound), packets */
u_int32_t udps_rcv6_swcsum_bytes; /* udp6 swcksum (inbound), bytes */
u_int32_t udps_snd_swcsum; /* udp swcksum (outbound), packets */
u_int32_t udps_snd_swcsum_bytes; /* udp swcksum (outbound), bytes */
u_int32_t udps_snd6_swcsum; /* udp6 swcksum (outbound), packets */
u_int32_t udps_snd6_swcsum_bytes; /* udp6 swcksum (outbound), bytes */
};
/*
* Names for UDP sysctl objects
*/
#define UDPCTL_CHECKSUM 1 /* checksum UDP packets */
#define UDPCTL_STATS 2 /* statistics (read-only) */
#define UDPCTL_MAXDGRAM 3 /* max datagram size */
#define UDPCTL_RECVSPACE 4 /* default receive buffer space */
#define UDPCTL_PCBLIST 5 /* list of PCBs for UDP sockets */
#define UDPCTL_MAXID 6
#ifdef BSD_KERNEL_PRIVATE
#include <kern/locks.h>
#include <sys/bitstring.h>
#define UDPCTL_NAMES { \
{ 0, 0 }, \
{ "checksum", CTLTYPE_INT }, \
{ "stats", CTLTYPE_STRUCT }, \
{ "maxdgram", CTLTYPE_INT }, \
{ "recvspace", CTLTYPE_INT }, \
{ "pcblist", CTLTYPE_STRUCT }, \
}
#define udp6stat udpstat
#define udp6s_opackets udps_opackets
SYSCTL_DECL(_net_inet_udp);
struct udpstat_local {
u_int64_t port_unreach;
u_int64_t faithprefix; /* deprecated */
u_int64_t port0;
u_int64_t badlength;
u_int64_t badchksum;
u_int64_t badmcast;
u_int64_t cleanup;
u_int64_t badipsec;
};
extern struct pr_usrreqs udp_usrreqs;
extern struct inpcbhead udb;
extern struct inpcbinfo udbinfo;
extern u_int32_t udp_sendspace;
extern u_int32_t udp_recvspace;
extern struct udpstat udpstat;
extern int udp_log_in_vain;
__BEGIN_DECLS
extern void udp_ctlinput(int, struct sockaddr *, void *, struct ifnet *);
extern int udp_ctloutput(struct socket *, struct sockopt *);
extern void udp_init(struct protosw *, struct domain *);
extern void udp_input(struct mbuf *, int);
extern int udp_connectx_common(struct socket *, int, struct sockaddr *,
struct sockaddr *, struct proc *, uint32_t, sae_associd_t,
sae_connid_t *, uint32_t, void *, uint32_t, struct uio*, user_ssize_t *);
extern void udp_notify(struct inpcb *inp, int errno);
extern int udp_shutdown(struct socket *so);
extern int udp_lock(struct socket *, int, void *);
extern int udp_unlock(struct socket *, int, void *);
extern lck_mtx_t *udp_getlock(struct socket *, int);
extern void udp_get_ports_used(u_int32_t, int, u_int32_t, bitstr_t *);
extern uint32_t udp_count_opportunistic(unsigned int, u_int32_t);
extern uint32_t udp_find_anypcb_byaddr(struct ifaddr *);
extern void udp_fill_keepalive_offload_frames(struct ifnet *,
struct ifnet_keepalive_offload_frame *, u_int32_t, size_t, u_int32_t *);
__END_DECLS
#endif /* BSD_KERNEL_PRIVATE */
#endif /* _NETINET_UDP_VAR_H_ */ | -1 |
dotnet/runtime | 66,007 | [monoapi] Add mono_method_get_unmanaged_callers_only_ftnptr | Like `RuntimeMethodHandle.GetFunctionPointer`, but callable from native code | lambdageek | 2022-03-01T15:38:09Z | 2022-03-03T15:38:37Z | faa272f860f83802a9cee65619e6e2e841b05433 | 1cfa6d6964d890f9673b25d2165532f9a43710a7 | [monoapi] Add mono_method_get_unmanaged_callers_only_ftnptr. Like `RuntimeMethodHandle.GetFunctionPointer`, but callable from native code | ./src/mono/mono/utils/mono-dl-wasm.c | #include <config.h>
#include <mono/utils/mono-compiler.h>
#if defined (HOST_WASM)
#include "mono/utils/mono-dl.h"
#include "mono/utils/mono-embed.h"
#include "mono/utils/mono-path.h"
#include <stdlib.h>
#include <stdio.h>
#include <ctype.h>
#include <string.h>
#include <glib.h>
#ifndef HOST_WASI
#include <dlfcn.h>
#endif
const char *
mono_dl_get_so_prefix (void)
{
return "";
}
const char **
mono_dl_get_so_suffixes (void)
{
static const char *suffixes[] = {
".wasm", //we only recognize .wasm files for DSOs.
"",
};
return suffixes;
}
const char*
mono_dl_get_system_dir (void)
{
return NULL;
}
void*
mono_dl_lookup_symbol (MonoDl *module, const char *name)
{
return NULL;
}
char*
mono_dl_current_error_string (void)
{
return g_strdup ("");
}
// Copied from mono-dl-posix.c
int
mono_dl_convert_flags (int mono_flags, int native_flags)
{
int lflags = native_flags;
#ifndef HOST_WASI // On WASI, these flags are undefined and not required
// Specifying both will default to LOCAL
if (mono_flags & MONO_DL_GLOBAL && !(mono_flags & MONO_DL_LOCAL))
lflags |= RTLD_GLOBAL;
else
lflags |= RTLD_LOCAL;
if (mono_flags & MONO_DL_LAZY)
lflags |= RTLD_LAZY;
else
lflags |= RTLD_NOW;
#endif
return lflags;
}
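/* Illustrative mapping performed above (non-WASI):
 *   MONO_DL_LAZY                 -> RTLD_LAZY   (otherwise RTLD_NOW)
 *   MONO_DL_GLOBAL without LOCAL -> RTLD_GLOBAL (otherwise RTLD_LOCAL)
 * e.g. mono_dl_convert_flags (MONO_DL_LAZY | MONO_DL_GLOBAL, 0) yields
 * RTLD_LAZY | RTLD_GLOBAL. */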
void *
mono_dl_open_file (const char *file, int flags)
{
// Actual dlopen is done in driver.c:wasm_dl_load()
return NULL;
}
void
mono_dl_close_handle (MonoDl *module)
{
}
#else
MONO_EMPTY_SOURCE_FILE (mono_dl_wasm);
#endif
| #include <config.h>
#include <mono/utils/mono-compiler.h>
#if defined (HOST_WASM)
#include "mono/utils/mono-dl.h"
#include "mono/utils/mono-embed.h"
#include "mono/utils/mono-path.h"
#include <stdlib.h>
#include <stdio.h>
#include <ctype.h>
#include <string.h>
#include <glib.h>
#ifndef HOST_WASI
#include <dlfcn.h>
#endif
const char *
mono_dl_get_so_prefix (void)
{
return "";
}
const char **
mono_dl_get_so_suffixes (void)
{
static const char *suffixes[] = {
".wasm", //we only recognize .wasm files for DSOs.
"",
};
return suffixes;
}
const char*
mono_dl_get_system_dir (void)
{
return NULL;
}
void*
mono_dl_lookup_symbol (MonoDl *module, const char *name)
{
return NULL;
}
char*
mono_dl_current_error_string (void)
{
return g_strdup ("");
}
// Copied from mono-dl-posix.c
int
mono_dl_convert_flags (int mono_flags, int native_flags)
{
int lflags = native_flags;
#ifndef HOST_WASI // On WASI, these flags are undefined and not required
// Specifying both will default to LOCAL
if (mono_flags & MONO_DL_GLOBAL && !(mono_flags & MONO_DL_LOCAL))
lflags |= RTLD_GLOBAL;
else
lflags |= RTLD_LOCAL;
if (mono_flags & MONO_DL_LAZY)
lflags |= RTLD_LAZY;
else
lflags |= RTLD_NOW;
#endif
return lflags;
}
void *
mono_dl_open_file (const char *file, int flags)
{
// Actual dlopen is done in driver.c:wasm_dl_load()
return NULL;
}
void
mono_dl_close_handle (MonoDl *module)
{
}
#else
MONO_EMPTY_SOURCE_FILE (mono_dl_wasm);
#endif
| -1 |
dotnet/runtime | 66,007 | [monoapi] Add mono_method_get_unmanaged_callers_only_ftnptr | Like `RuntimeMethodHandle.GetFunctionPointer`, but callable from native code | lambdageek | 2022-03-01T15:38:09Z | 2022-03-03T15:38:37Z | faa272f860f83802a9cee65619e6e2e841b05433 | 1cfa6d6964d890f9673b25d2165532f9a43710a7 | [monoapi] Add mono_method_get_unmanaged_callers_only_ftnptr. Like `RuntimeMethodHandle.GetFunctionPointer`, but callable from native code | ./src/native/libs/System.Native/pal_networkstatistics.c | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
#include "pal_config.h"
#include "pal_errno.h"
#include "pal_networkstatistics.h"
#include <stdlib.h>
#include <errno.h>
// These functions are only used on platforms which support
// using sysctl to gather protocol statistics information.
// Currently, this is all keyed off of whether the tcp_var.h header exists,
// but we may want to make this more granular for different platforms.
#if HAVE_NETINET_TCP_VAR_H
#include "pal_utilities.h"
#include "pal_tcpstate.h"
#include "pal_safecrt.h"
#include <sys/socket.h>
#if HAVE_IOS_NET_ROUTE_H
#include "ios/net/route.h"
#else
#include <net/route.h>
#endif
#include <net/if.h>
#include <sys/types.h>
#include <stdatomic.h>
#if HAVE_SYS_SYSCTL_H
#include <sys/sysctl.h>
#endif
#if HAVE_NET_IFMEDIA_H
#include <net/if_media.h>
#include <sys/ioctl.h>
#elif HAVE_IOS_NET_IFMEDIA_H
#include "ios/net/if_media.h"
#include <sys/ioctl.h>
#endif
#include <sys/socketvar.h>
#include <netinet/in.h>
#include <netinet/in_pcb.h>
#include <netinet/ip.h>
#include <netinet/ip_icmp.h>
#if HAVE_NETINET_IP_VAR_H
#include <netinet/ip_var.h>
#elif HAVE_IOS_NETINET_IP_VAR_H
#include "ios/netinet/ip_var.h"
#endif
#include <netinet/tcp.h>
#if HAVE_TCP_FSM_H
#include <netinet/tcp_fsm.h>
#endif
#include <netinet/tcp_var.h>
#include <netinet/udp.h>
#if HAVE_NETINET_UDP_VAR_H
#include <netinet/udp_var.h>
#elif HAVE_IOS_NETINET_UDP_VAR_H
#include "ios/netinet/udp_var.h"
#endif
#include <netinet/icmp6.h>
#if HAVE_NETINET_ICMP_VAR_H
#include <netinet/icmp_var.h>
#elif HAVE_IOS_NETINET_ICMP_VAR_H
#include "ios/netinet/icmp_var.h"
#endif
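// Cached best-known size of struct icmp6stat, updated atomically. The running
// OS can report a larger struct than the headers this file was compiled
// against (see SystemNative_GetIcmpv6GlobalStatistics below), so the first
// oversized response grows this value and later calls allocate enough up front.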
static _Atomic(int) icmp6statSize = sizeof(struct icmp6stat);
static size_t GetEstimatedSize(const char* name)
{
void* oldp = NULL;
size_t oldlenp = 0;
sysctlbyname(name, oldp, &oldlenp, NULL, 0);
return oldlenp;
}
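// Passing a NULL output buffer to sysctlbyname is the standard size-probe
// idiom: the call fills oldlenp with the number of bytes the named object
// currently requires. Typical use:
//     size_t len = GetEstimatedSize("net.inet.tcp.pcblist");
//     void* buf = malloc(len);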
int32_t SystemNative_GetTcpGlobalStatistics(TcpGlobalStatistics* retStats)
{
size_t oldlenp;
assert(retStats != NULL);
struct tcpstat systemStats;
oldlenp = sizeof(systemStats);
if (sysctlbyname("net.inet.tcp.stats", &systemStats, &oldlenp, NULL, 0))
{
memset(retStats, 0, sizeof(TcpGlobalStatistics)); // out parameter must be initialized.
return -1;
}
retStats->ConnectionsAccepted = systemStats.tcps_accepts;
retStats->ConnectionsInitiated = systemStats.tcps_connattempt;
retStats->CumulativeConnections = systemStats.tcps_connects;
retStats->ErrorsReceived = systemStats.tcps_rcvbadsum + systemStats.tcps_rcvbadoff;
retStats->FailedConnectionAttempts = systemStats.tcps_connattempt - systemStats.tcps_accepts;
retStats->SegmentsReceived = systemStats.tcps_rcvtotal;
retStats->SegmentsResent = systemStats.tcps_sndrexmitpack;
retStats->SegmentsSent = systemStats.tcps_sndtotal;
oldlenp = sizeof(retStats->CurrentConnections);
if (sysctlbyname("net.inet.tcp.pcbcount", &retStats->CurrentConnections, &oldlenp, NULL, 0))
{
retStats->CurrentConnections = 0;
return -1;
}
return 0;
}
int32_t SystemNative_GetIPv4GlobalStatistics(IPv4GlobalStatistics* retStats)
{
#if HAVE_NETINET_IP_VAR_H || HAVE_IOS_NETINET_IP_VAR_H
size_t oldlenp;
assert(retStats != NULL);
struct ipstat systemStats;
oldlenp = sizeof(systemStats);
if (sysctlbyname("net.inet.ip.stats", &systemStats, &oldlenp, NULL, 0))
{
memset(retStats, 0, sizeof(IPv4GlobalStatistics)); // out parameter must be initialized.
return -1;
}
retStats->OutboundPackets = systemStats.ips_localout;
retStats->OutputPacketsNoRoute = systemStats.ips_noroute;
retStats->CantFrags = systemStats.ips_cantfrag;
retStats->DatagramsFragmented = systemStats.ips_fragmented;
retStats->PacketsReassembled = systemStats.ips_reassembled;
retStats->TotalPacketsReceived = systemStats.ips_total;
retStats->PacketsDelivered = systemStats.ips_delivered;
retStats->PacketsDiscarded = systemStats.ips_total - systemStats.ips_delivered;
retStats->PacketsForwarded = systemStats.ips_forward;
retStats->BadAddress = systemStats.ips_badaddr;
retStats->BadHeader = systemStats.ips_badhlen; // Also include badaddr?
retStats->UnknownProtos = systemStats.ips_noproto;
oldlenp = sizeof(retStats->DefaultTtl);
if (sysctlbyname("net.inet.ip.ttl", &retStats->DefaultTtl, &oldlenp, NULL, 0))
{
retStats->DefaultTtl = 0;
retStats->Forwarding = 0;
return -1;
}
oldlenp = sizeof(retStats->Forwarding);
if (sysctlbyname("net.inet.ip.forwarding", &retStats->Forwarding, &oldlenp, NULL, 0))
{
retStats->Forwarding = 0;
return -1;
}
return 0;
#else
memset(retStats, 0, sizeof(IPv4GlobalStatistics)); // out parameter must be initialized.
return -1;
#endif
}
int32_t SystemNative_GetUdpGlobalStatistics(UdpGlobalStatistics* retStats)
{
#if HAVE_NETINET_UDP_VAR_H || HAVE_IOS_NETINET_UDP_VAR_H
size_t oldlenp;
assert(retStats != NULL);
struct udpstat systemStats;
oldlenp = sizeof(systemStats);
if (sysctlbyname("net.inet.udp.stats", &systemStats, &oldlenp, NULL, 0))
{
memset(retStats, 0, sizeof(UdpGlobalStatistics)); // out parameter must be initialized.
return -1;
}
retStats->DatagramsReceived = systemStats.udps_ipackets;
retStats->DatagramsSent = systemStats.udps_opackets;
retStats->IncomingDiscarded = systemStats.udps_noport;
retStats->IncomingErrors = systemStats.udps_hdrops + systemStats.udps_badsum + systemStats.udps_badlen;
#if defined(__FreeBSD__)
// FreeBSD does not have net.inet.udp.pcbcount
retStats->UdpListeners = 0;
#else
// This may contain both UDP4 and UDP6 listeners.
oldlenp = sizeof(retStats->UdpListeners);
if (sysctlbyname("net.inet.udp.pcbcount", &retStats->UdpListeners, &oldlenp, NULL, 0))
{
retStats->UdpListeners = 0;
return -1;
}
#endif
return 0;
#else
memset(retStats, 0, sizeof(UdpGlobalStatistics)); // out parameter must be initialized.
return -1;
#endif
}
int32_t SystemNative_GetIcmpv4GlobalStatistics(Icmpv4GlobalStatistics* retStats)
{
#if HAVE_NETINET_ICMP_VAR_H || HAVE_IOS_NETINET_ICMP_VAR_H
size_t oldlenp;
assert(retStats != NULL);
struct icmpstat systemStats;
oldlenp = sizeof(systemStats);
if (sysctlbyname("net.inet.icmp.stats", &systemStats, &oldlenp, NULL, 0))
{
memset(retStats, 0, sizeof(Icmpv4GlobalStatistics));
return -1;
}
TYPEOF(systemStats.icps_inhist[0])* inHist = systemStats.icps_inhist;
TYPEOF(systemStats.icps_outhist[0])* outHist = systemStats.icps_outhist;
retStats->AddressMaskRepliesReceived = inHist[ICMP_MASKREPLY];
retStats->AddressMaskRepliesSent = outHist[ICMP_MASKREPLY];
retStats->AddressMaskRequestsReceived = inHist[ICMP_MASKREQ];
retStats->AddressMaskRequestsSent = outHist[ICMP_MASKREQ];
retStats->DestinationUnreachableMessagesReceived = inHist[ICMP_UNREACH];
retStats->DestinationUnreachableMessagesSent = outHist[ICMP_UNREACH];
retStats->EchoRepliesReceived = inHist[ICMP_ECHOREPLY];
retStats->EchoRepliesSent = outHist[ICMP_ECHOREPLY];
retStats->EchoRequestsReceived = inHist[ICMP_ECHO];
retStats->EchoRequestsSent = outHist[ICMP_ECHO];
retStats->ParameterProblemsReceived = inHist[ICMP_PARAMPROB];
retStats->ParameterProblemsSent = outHist[ICMP_PARAMPROB];
retStats->RedirectsReceived = inHist[ICMP_REDIRECT];
retStats->RedirectsSent = outHist[ICMP_REDIRECT];
retStats->SourceQuenchesReceived = inHist[ICMP_SOURCEQUENCH];
retStats->SourceQuenchesSent = outHist[ICMP_SOURCEQUENCH];
retStats->TimeExceededMessagesReceived = inHist[ICMP_TIMXCEED];
retStats->TimeExceededMessagesSent = outHist[ICMP_TIMXCEED];
retStats->TimestampRepliesReceived = inHist[ICMP_TSTAMPREPLY];
retStats->TimestampRepliesSent = outHist[ICMP_TSTAMPREPLY];
retStats->TimestampRequestsReceived = inHist[ICMP_TSTAMP];
retStats->TimestampRequestsSent = outHist[ICMP_TSTAMP];
return 0;
#else
memset(retStats, 0, sizeof(Icmpv4GlobalStatistics)); // out parameter must be initialized.
return -1;
#endif
}
int32_t SystemNative_GetIcmpv6GlobalStatistics(Icmpv6GlobalStatistics* retStats)
{
assert(retStats != NULL);
size_t oldlenp = (size_t)atomic_load(&icmp6statSize);
const char* sysctlName = "net.inet6.icmp6.stats";
void* buffer = malloc(oldlenp);
if (!buffer)
{
memset(retStats, 0, sizeof(Icmpv6GlobalStatistics));
errno = ENOMEM;
return -1;
}
int result = sysctlbyname(sysctlName, buffer, &oldlenp, NULL, 0);
if (result && errno == ENOMEM)
{
        // We did not provide enough memory.
        // macOS 11.0 added a new member to icmp6stat, and FreeBSD has likewise
        // changed the struct between releases, so re-query the required size.
oldlenp = GetEstimatedSize(sysctlName);
free(buffer);
buffer = malloc(oldlenp);
if (!buffer)
{
memset(retStats, 0, sizeof(Icmpv6GlobalStatistics));
errno = ENOMEM;
return -1;
}
result = sysctlbyname(sysctlName, buffer, &oldlenp, NULL, 0);
if (result == 0)
{
// if the call succeeded, update icmp6statSize
atomic_store(&icmp6statSize, oldlenp);
}
}
if (result)
{
if (buffer)
{
free(buffer);
}
memset(retStats, 0, sizeof(Icmpv6GlobalStatistics));
return -1;
}
uint64_t* inHist = ((struct icmp6stat*)(buffer))->icp6s_inhist;
uint64_t* outHist = ((struct icmp6stat*)(buffer))->icp6s_outhist;
retStats->DestinationUnreachableMessagesReceived = inHist[ICMP6_DST_UNREACH];
retStats->DestinationUnreachableMessagesSent = outHist[ICMP6_DST_UNREACH];
retStats->EchoRepliesReceived = inHist[ICMP6_ECHO_REPLY];
retStats->EchoRepliesSent = outHist[ICMP6_ECHO_REPLY];
retStats->EchoRequestsReceived = inHist[ICMP6_ECHO_REQUEST];
retStats->EchoRequestsSent = outHist[ICMP6_ECHO_REQUEST];
retStats->MembershipQueriesReceived = inHist[ICMP6_MEMBERSHIP_QUERY];
retStats->MembershipQueriesSent = outHist[ICMP6_MEMBERSHIP_QUERY];
retStats->MembershipReductionsReceived = inHist[ICMP6_MEMBERSHIP_REDUCTION];
retStats->MembershipReductionsSent = outHist[ICMP6_MEMBERSHIP_REDUCTION];
retStats->MembershipReportsReceived = inHist[ICMP6_MEMBERSHIP_REPORT];
retStats->MembershipReportsSent = outHist[ICMP6_MEMBERSHIP_REPORT];
retStats->NeighborAdvertisementsReceived = inHist[ND_NEIGHBOR_ADVERT];
retStats->NeighborAdvertisementsSent = outHist[ND_NEIGHBOR_ADVERT];
retStats->NeighborSolicitsReceived = inHist[ND_NEIGHBOR_SOLICIT];
retStats->NeighborSolicitsSent = outHist[ND_NEIGHBOR_SOLICIT];
retStats->PacketTooBigMessagesReceived = inHist[ICMP6_PACKET_TOO_BIG];
retStats->PacketTooBigMessagesSent = outHist[ICMP6_PACKET_TOO_BIG];
retStats->ParameterProblemsReceived = inHist[ICMP6_PARAM_PROB];
retStats->ParameterProblemsSent = outHist[ICMP6_PARAM_PROB];
retStats->RedirectsReceived = inHist[ND_REDIRECT];
retStats->RedirectsSent = outHist[ND_REDIRECT];
retStats->RouterAdvertisementsReceived = inHist[ND_ROUTER_ADVERT];
retStats->RouterAdvertisementsSent = outHist[ND_ROUTER_ADVERT];
retStats->RouterSolicitsReceived = inHist[ND_ROUTER_SOLICIT];
retStats->RouterSolicitsSent = outHist[ND_ROUTER_SOLICIT];
retStats->TimeExceededMessagesReceived = inHist[ICMP6_TIME_EXCEEDED];
retStats->TimeExceededMessagesSent = outHist[ICMP6_TIME_EXCEEDED];
free(buffer);
return 0;
}
int32_t SystemNative_GetEstimatedTcpConnectionCount()
{
int32_t count;
size_t oldlenp = sizeof(count);
sysctlbyname("net.inet.tcp.pcbcount", &count, &oldlenp, NULL, 0);
return count;
}
#ifdef __FreeBSD__
int32_t SystemNative_GetActiveTcpConnectionInfos(__attribute__((unused)) NativeTcpConnectionInformation* infos, int32_t* infoCount)
{
*infoCount = 0;
return 0;
}
#else
int32_t SystemNative_GetActiveTcpConnectionInfos(NativeTcpConnectionInformation* infos, int32_t* infoCount)
{
assert(infos != NULL);
assert(infoCount != NULL);
const char* sysctlName = "net.inet.tcp.pcblist";
size_t estimatedSize = GetEstimatedSize(sysctlName);
uint8_t* buffer = (uint8_t*)malloc(estimatedSize * sizeof(uint8_t));
if (buffer == NULL)
{
errno = ENOMEM;
return -1;
}
void* newp = NULL;
size_t newlen = 0;
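    // The PCB list can grow between the size probe and the fetch, in which
    // case sysctlbyname fails with ENOMEM; keep doubling the buffer until the
    // snapshot fits. multiply_s guards the doubling against size_t overflow.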
while (sysctlbyname(sysctlName, buffer, &estimatedSize, newp, newlen) != 0)
{
free(buffer);
size_t tmpEstimatedSize;
        // Allocate the doubled size: it is what the next sysctlbyname call
        // will be told the buffer can hold.
        if (!multiply_s(estimatedSize, (size_t)2, &tmpEstimatedSize) ||
            (buffer = (uint8_t*)malloc(tmpEstimatedSize * sizeof(uint8_t))) == NULL)
{
errno = ENOMEM;
return -1;
}
estimatedSize = tmpEstimatedSize;
}
int32_t count = (int32_t)(estimatedSize / sizeof(struct xtcpcb));
if (count > *infoCount)
{
// Not enough space in caller-supplied buffer.
free(buffer);
*infoCount = count;
return -1;
}
*infoCount = count;
// sizeof(struct xtcpcb) == 524
struct tcpcb tcp_pcb;
struct inpcb in_pcb;
struct xinpgen* xHeadPtr;
int32_t connectionIndex = -1;
xHeadPtr = (struct xinpgen*)buffer;
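    // The sysctl buffer is a sequence of variable-length records, each
    // prefixed by a struct xinpgen header carrying its own length (xig_len).
    // The first record is a generation header with no payload, so the loop
    // below starts by skipping it; the list is terminated by a trailing
    // xinpgen whose xig_len is smaller than a full struct xtcpcb entry.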
for (xHeadPtr = (struct xinpgen*)((uint8_t*)xHeadPtr + xHeadPtr->xig_len);
xHeadPtr->xig_len >= sizeof(struct xtcpcb);
xHeadPtr = (struct xinpgen*)((uint8_t*)xHeadPtr + xHeadPtr->xig_len))
{
connectionIndex++;
struct xtcpcb* head_xtcpb = (struct xtcpcb*)xHeadPtr;
tcp_pcb = head_xtcpb->xt_tp;
in_pcb = head_xtcpb->xt_inp;
NativeTcpConnectionInformation* ntci = &infos[connectionIndex];
ntci->State = SystemNative_MapTcpState(tcp_pcb.t_state);
uint8_t vflag = in_pcb.inp_vflag; // INP_IPV4 or INP_IPV6
if ((vflag & INP_IPV4) == INP_IPV4)
{
memcpy_s(&ntci->LocalEndPoint.AddressBytes, sizeof_member(IPEndPointInfo, AddressBytes), &in_pcb.inp_laddr.s_addr, 4);
memcpy_s(&ntci->RemoteEndPoint.AddressBytes, sizeof_member(IPEndPointInfo, AddressBytes), &in_pcb.inp_faddr.s_addr, 4);
ntci->LocalEndPoint.NumAddressBytes = 4;
ntci->RemoteEndPoint.NumAddressBytes = 4;
}
else
{
memcpy_s(&ntci->LocalEndPoint.AddressBytes, sizeof_member(IPEndPointInfo, AddressBytes), &in_pcb.in6p_laddr.s6_addr, 16);
memcpy_s(&ntci->RemoteEndPoint.AddressBytes, sizeof_member(IPEndPointInfo, AddressBytes), &in_pcb.in6p_faddr.s6_addr, 16);
ntci->LocalEndPoint.NumAddressBytes = 16;
ntci->RemoteEndPoint.NumAddressBytes = 16;
}
ntci->LocalEndPoint.Port = ntohs(in_pcb.inp_lport);
ntci->RemoteEndPoint.Port = ntohs(in_pcb.inp_fport);
}
free(buffer);
return 0;
}
#endif
int32_t SystemNative_GetEstimatedUdpListenerCount()
{
    int32_t count = 0;
size_t oldlenp = sizeof(count);
sysctlbyname("net.inet.udp.pcbcount", &count, &oldlenp, NULL, 0);
return count;
}
#ifdef __FreeBSD__
int32_t SystemNative_GetActiveUdpListeners(__attribute__((unused)) IPEndPointInfo* infos, int32_t* infoCount)
{
*infoCount = 0;
return 0;
}
#else
int32_t SystemNative_GetActiveUdpListeners(IPEndPointInfo* infos, int32_t* infoCount)
{
assert(infos != NULL);
assert(infoCount != NULL);
const char* sysctlName = "net.inet.udp.pcblist";
size_t estimatedSize = GetEstimatedSize(sysctlName);
uint8_t* buffer = (uint8_t*)malloc(estimatedSize * sizeof(uint8_t));
if (buffer == NULL)
{
errno = ENOMEM;
return -1;
}
void* newp = NULL;
size_t newlen = 0;
while (sysctlbyname(sysctlName, buffer, &estimatedSize, newp, newlen) != 0)
{
free(buffer);
size_t tmpEstimatedSize;
if (!multiply_s(estimatedSize, (size_t)2, &tmpEstimatedSize) ||
            (buffer = (uint8_t*)malloc(tmpEstimatedSize * sizeof(uint8_t))) == NULL)
{
errno = ENOMEM;
return -1;
}
estimatedSize = tmpEstimatedSize;
}
    int32_t count = (int32_t)(estimatedSize / sizeof(struct xinpcb));
if (count > *infoCount)
{
// Not enough space in caller-supplied buffer.
free(buffer);
*infoCount = count;
return -1;
}
*infoCount = count;
struct inpcb in_pcb;
struct xinpgen* xHeadPtr;
int32_t connectionIndex = -1;
xHeadPtr = (struct xinpgen*)buffer;
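    // The pcblist blob starts with a struct xinpgen header record, so iteration begins at
    // the second record; the kernel ends the list with a trailing xinpgen whose length is
    // smaller than a full xinpcb, which terminates the loop.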
for (xHeadPtr = (struct xinpgen*)((uint8_t*)xHeadPtr + xHeadPtr->xig_len);
xHeadPtr->xig_len >= sizeof(struct xinpcb);
xHeadPtr = (struct xinpgen*)((uint8_t*)xHeadPtr + xHeadPtr->xig_len))
{
connectionIndex++;
struct xinpcb* head_xinpcb = (struct xinpcb*)xHeadPtr;
in_pcb = head_xinpcb->xi_inp;
IPEndPointInfo* iepi = &infos[connectionIndex];
uint8_t vflag = in_pcb.inp_vflag; // INP_IPV4 or INP_IPV6
if ((vflag & INP_IPV4) == INP_IPV4)
{
memcpy_s(iepi->AddressBytes, sizeof_member(IPEndPointInfo, AddressBytes), &in_pcb.inp_laddr.s_addr, 4);
iepi->NumAddressBytes = 4;
}
else
{
memcpy_s(iepi->AddressBytes, sizeof_member(IPEndPointInfo, AddressBytes), &in_pcb.in6p_laddr.s6_addr, 16);
iepi->NumAddressBytes = 16;
}
iepi->Port = ntohs(in_pcb.inp_lport);
}
free(buffer);
return 0;
}
#endif
int32_t SystemNative_GetNativeIPInterfaceStatistics(char* interfaceName, NativeIPInterfaceStatistics* retStats)
{
assert(interfaceName != NULL && retStats != NULL);
unsigned int interfaceIndex = if_nametoindex(interfaceName);
if (interfaceIndex == 0)
{
// An invalid interface name was given (doesn't exist).
return -1;
}
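    // Ask the routing sysctl for the interface-information messages (RTM_IFINFO/RTM_IFINFO2)
    // of the single interface identified by interfaceIndex.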
#if HAVE_IF_MSGHDR2
int statisticsMib[] = {CTL_NET, PF_ROUTE, 0, 0, NET_RT_IFLIST2, (int)interfaceIndex};
#else
int statisticsMib[] = {CTL_NET, PF_ROUTE, 0, 0, NET_RT_IFLIST, (int)interfaceIndex};
#endif
size_t len;
// Get estimated data length
if (sysctl(statisticsMib, 6, NULL, &len, NULL, 0) == -1)
{
memset(retStats, 0, sizeof(NativeIPInterfaceStatistics));
return -1;
}
uint8_t* buffer = (uint8_t*)malloc(len * sizeof(uint8_t));
if (buffer == NULL)
{
errno = ENOMEM;
return -1;
}
if (sysctl(statisticsMib, 6, buffer, &len, NULL, 0) == -1)
{
// Not enough space.
free(buffer);
memset(retStats, 0, sizeof(NativeIPInterfaceStatistics));
return -1;
}
    for (uint8_t* headPtr = buffer; headPtr < buffer + len;
headPtr += ((struct if_msghdr*)headPtr)->ifm_msglen)
{
struct if_msghdr* ifHdr = (struct if_msghdr*)headPtr;
#if HAVE_IF_MSGHDR2
if (ifHdr->ifm_index == interfaceIndex && ifHdr->ifm_type == RTM_IFINFO2)
{
struct if_msghdr2* ifHdr2 = (struct if_msghdr2*)ifHdr;
retStats->SendQueueLength = (uint64_t)ifHdr2->ifm_snd_maxlen;
struct if_data64 systemStats = ifHdr2->ifm_data;
#else
if (ifHdr->ifm_index == interfaceIndex && ifHdr->ifm_type == RTM_IFINFO)
{
struct if_msghdr* ifHdr2 = (struct if_msghdr*)ifHdr;
retStats->SendQueueLength = 0;
struct if_data systemStats = ifHdr2->ifm_data;
#endif
retStats->Mtu = systemStats.ifi_mtu;
retStats->Speed = systemStats.ifi_baudrate; // bits per second.
retStats->InPackets = systemStats.ifi_ipackets;
retStats->InErrors = systemStats.ifi_ierrors;
retStats->OutPackets = systemStats.ifi_opackets;
retStats->OutErrors = systemStats.ifi_oerrors;
retStats->InBytes = systemStats.ifi_ibytes;
retStats->OutBytes = systemStats.ifi_obytes;
retStats->InMulticastPackets = systemStats.ifi_imcasts;
retStats->OutMulticastPackets = systemStats.ifi_omcasts;
retStats->InDrops = systemStats.ifi_iqdrops;
retStats->InNoProto = systemStats.ifi_noproto;
retStats->Flags = 0;
if (ifHdr->ifm_flags & IFF_UP)
{
retStats->Flags |= InterfaceUp;
#if HAVE_NET_IFMEDIA_H || HAVE_IOS_NET_IFMEDIA_H
int fd = socket(AF_INET, SOCK_DGRAM, 0);
if (fd < 0) {
retStats->Flags |= InterfaceError;
}
else
{
struct ifmediareq ifmr;
memset(&ifmr, 0, sizeof(ifmr));
strncpy(ifmr.ifm_name, interfaceName, sizeof(ifmr.ifm_name));
if (ioctl(fd, SIOCGIFMEDIA, (caddr_t)&ifmr) < 0)
{
if (errno == EOPNOTSUPP || errno == EINVAL)
{
// Virtual interfaces like loopback do not have media.
// Assume they are up when administrative state is up.
retStats->Flags |= InterfaceHasLink;
}
else
{
retStats->Flags |= InterfaceError;
}
}
else if ((ifmr.ifm_status & IFM_AVALID) == 0)
{
                        // Wi-Fi on macOS sometimes does not report link when the interface is disabled
                        // (it still has the _UP flag). For other interface types, report Unknown status.
if (IFM_TYPE(ifmr.ifm_current) != IFM_IEEE80211)
{
retStats->Flags |= InterfaceError;
}
}
else
{
if (ifmr.ifm_status & IFM_ACTIVE)
{
retStats->Flags |= InterfaceHasLink;
}
}
close(fd);
}
#else
retStats->Flags |= InterfaceError;
#endif
}
if (ifHdr->ifm_flags & (IFF_MULTICAST | IFF_ALLMULTI))
{
retStats->Flags |= InterfaceSupportsMulticast;
}
free(buffer);
return 0;
}
}
// No statistics were found with the given interface index; shouldn't happen.
free(buffer);
memset(retStats, 0, sizeof(NativeIPInterfaceStatistics));
return -1;
}
int32_t SystemNative_GetNumRoutes()
{
int32_t count = 0;
#if HAVE_RT_MSGHDR2
int routeDumpMib[] = {CTL_NET, PF_ROUTE, 0, 0, NET_RT_DUMP, 0};
size_t len;
if (sysctl(routeDumpMib, 6, NULL, &len, NULL, 0) == -1)
{
return -1;
}
uint8_t* buffer = (uint8_t*)malloc(len * sizeof(uint8_t));
if (buffer == NULL)
{
errno = ENOMEM;
return -1;
}
if (sysctl(routeDumpMib, 6, buffer, &len, NULL, 0) == -1)
{
free(buffer);
return -1;
}
    struct rt_msghdr2* rtmsg;
    for (size_t i = 0; i < len; i += rtmsg->rtm_msglen)
    {
        rtmsg = (struct rt_msghdr2*)&buffer[i];
        if (rtmsg->rtm_flags & RTF_UP)
        {
            count++;
        }
    }
free(buffer);
#endif // HAVE_RT_MSGHDR2
return count;
}
#else
int32_t SystemNative_GetTcpGlobalStatistics(TcpGlobalStatistics* retStats)
{
(void)retStats;
errno = ENOTSUP;
return -1;
}
int32_t SystemNative_GetIPv4GlobalStatistics(IPv4GlobalStatistics* retStats)
{
(void)retStats;
errno = ENOTSUP;
return -1;
}
int32_t SystemNative_GetUdpGlobalStatistics(UdpGlobalStatistics* retStats)
{
(void)retStats;
errno = ENOTSUP;
return -1;
}
int32_t SystemNative_GetIcmpv4GlobalStatistics(Icmpv4GlobalStatistics* retStats)
{
(void)retStats;
errno = ENOTSUP;
return -1;
}
int32_t SystemNative_GetIcmpv6GlobalStatistics(Icmpv6GlobalStatistics* retStats)
{
(void)retStats;
errno = ENOTSUP;
return -1;
}
int32_t SystemNative_GetEstimatedTcpConnectionCount(void)
{
errno = ENOTSUP;
return -1;
}
int32_t SystemNative_GetActiveTcpConnectionInfos(NativeTcpConnectionInformation* infos, int32_t* infoCount)
{
(void)infos;
(void)infoCount;
errno = ENOTSUP;
return -1;
}
int32_t SystemNative_GetEstimatedUdpListenerCount(void)
{
errno = ENOTSUP;
return -1;
}
int32_t SystemNative_GetActiveUdpListeners(IPEndPointInfo* infos, int32_t* infoCount)
{
(void)infos;
(void)infoCount;
errno = ENOTSUP;
return -1;
}
int32_t SystemNative_GetNativeIPInterfaceStatistics(char* interfaceName, NativeIPInterfaceStatistics* retStats)
{
(void)interfaceName;
(void)retStats;
errno = ENOTSUP;
return -1;
}
int32_t SystemNative_GetNumRoutes(void)
{
errno = ENOTSUP;
return -1;
}
#endif // HAVE_NETINET_TCP_VAR_H
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
#include "pal_config.h"
#include "pal_errno.h"
#include "pal_networkstatistics.h"
#include <stdlib.h>
#include <errno.h>
// These functions are only used for platforms which support
// using sysctl to gather protocol statistics information.
// Currently, this is all keyed off of whether the include tcp_var.h
// exists, but we may want to make this more granular for different platforms.
#if HAVE_NETINET_TCP_VAR_H
#include "pal_utilities.h"
#include "pal_tcpstate.h"
#include "pal_safecrt.h"
#include <sys/socket.h>
#if HAVE_IOS_NET_ROUTE_H
#include "ios/net/route.h"
#else
#include <net/route.h>
#endif
#include <net/if.h>
#include <sys/types.h>
#include <stdatomic.h>
#if HAVE_SYS_SYSCTL_H
#include <sys/sysctl.h>
#endif
#if HAVE_NET_IFMEDIA_H
#include <net/if_media.h>
#include <sys/ioctl.h>
#elif HAVE_IOS_NET_IFMEDIA_H
#include "ios/net/if_media.h"
#include <sys/ioctl.h>
#endif
#include <sys/socketvar.h>
#include <netinet/in.h>
#include <netinet/in_pcb.h>
#include <netinet/ip.h>
#include <netinet/ip_icmp.h>
#if HAVE_NETINET_IP_VAR_H
#include <netinet/ip_var.h>
#elif HAVE_IOS_NETINET_IP_VAR_H
#include "ios/netinet/ip_var.h"
#endif
#include <netinet/tcp.h>
#if HAVE_TCP_FSM_H
#include <netinet/tcp_fsm.h>
#endif
#include <netinet/tcp_var.h>
#include <netinet/udp.h>
#if HAVE_NETINET_UDP_VAR_H
#include <netinet/udp_var.h>
#elif HAVE_IOS_NETINET_UDP_VAR_H
#include "ios/netinet/udp_var.h"
#endif
#include <netinet/icmp6.h>
#if HAVE_NETINET_ICMP_VAR_H
#include <netinet/icmp_var.h>
#elif HAVE_IOS_NETINET_ICMP_VAR_H
#include "ios/netinet/icmp_var.h"
#endif
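// Size of struct icmp6stat as last accepted by the kernel. The struct has grown across OS
// releases (e.g. macOS 11.0 added members), so start from the compile-time size and update
// the cached value whenever the kernel reports a larger one.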
static _Atomic(int) icmp6statSize = sizeof(struct icmp6stat);
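// Passing a NULL output buffer makes sysctlbyname report, via oldlenp, how many bytes it
// would have written; use that as an allocation estimate.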
static size_t GetEstimatedSize(const char* name)
{
void* oldp = NULL;
size_t oldlenp = 0;
sysctlbyname(name, oldp, &oldlenp, NULL, 0);
return oldlenp;
}
int32_t SystemNative_GetTcpGlobalStatistics(TcpGlobalStatistics* retStats)
{
size_t oldlenp;
assert(retStats != NULL);
struct tcpstat systemStats;
oldlenp = sizeof(systemStats);
if (sysctlbyname("net.inet.tcp.stats", &systemStats, &oldlenp, NULL, 0))
{
memset(retStats, 0, sizeof(TcpGlobalStatistics)); // out parameter must be initialized.
return -1;
}
retStats->ConnectionsAccepted = systemStats.tcps_accepts;
retStats->ConnectionsInitiated = systemStats.tcps_connattempt;
retStats->CumulativeConnections = systemStats.tcps_connects;
retStats->ErrorsReceived = systemStats.tcps_rcvbadsum + systemStats.tcps_rcvbadoff;
retStats->FailedConnectionAttempts = systemStats.tcps_connattempt - systemStats.tcps_accepts;
retStats->SegmentsReceived = systemStats.tcps_rcvtotal;
retStats->SegmentsResent = systemStats.tcps_sndrexmitpack;
retStats->SegmentsSent = systemStats.tcps_sndtotal;
oldlenp = sizeof(retStats->CurrentConnections);
if (sysctlbyname("net.inet.tcp.pcbcount", &retStats->CurrentConnections, &oldlenp, NULL, 0))
{
retStats->CurrentConnections = 0;
return -1;
}
return 0;
}
int32_t SystemNative_GetIPv4GlobalStatistics(IPv4GlobalStatistics* retStats)
{
#if HAVE_NETINET_IP_VAR_H || HAVE_IOS_NETINET_IP_VAR_H
size_t oldlenp;
assert(retStats != NULL);
struct ipstat systemStats;
oldlenp = sizeof(systemStats);
if (sysctlbyname("net.inet.ip.stats", &systemStats, &oldlenp, NULL, 0))
{
memset(retStats, 0, sizeof(IPv4GlobalStatistics)); // out parameter must be initialized.
return -1;
}
retStats->OutboundPackets = systemStats.ips_localout;
retStats->OutputPacketsNoRoute = systemStats.ips_noroute;
retStats->CantFrags = systemStats.ips_cantfrag;
retStats->DatagramsFragmented = systemStats.ips_fragmented;
retStats->PacketsReassembled = systemStats.ips_reassembled;
retStats->TotalPacketsReceived = systemStats.ips_total;
retStats->PacketsDelivered = systemStats.ips_delivered;
retStats->PacketsDiscarded = systemStats.ips_total - systemStats.ips_delivered;
retStats->PacketsForwarded = systemStats.ips_forward;
retStats->BadAddress = systemStats.ips_badaddr;
retStats->BadHeader = systemStats.ips_badhlen; // Also include badaddr?
retStats->UnknownProtos = systemStats.ips_noproto;
oldlenp = sizeof(retStats->DefaultTtl);
if (sysctlbyname("net.inet.ip.ttl", &retStats->DefaultTtl, &oldlenp, NULL, 0))
{
retStats->DefaultTtl = 0;
retStats->Forwarding = 0;
return -1;
}
oldlenp = sizeof(retStats->Forwarding);
if (sysctlbyname("net.inet.ip.forwarding", &retStats->Forwarding, &oldlenp, NULL, 0))
{
retStats->Forwarding = 0;
return -1;
}
return 0;
#else
memset(retStats, 0, sizeof(IPv4GlobalStatistics)); // out parameter must be initialized.
return -1;
#endif
}
int32_t SystemNative_GetUdpGlobalStatistics(UdpGlobalStatistics* retStats)
{
#if HAVE_NETINET_UDP_VAR_H || HAVE_IOS_NETINET_UDP_VAR_H
size_t oldlenp;
assert(retStats != NULL);
struct udpstat systemStats;
oldlenp = sizeof(systemStats);
if (sysctlbyname("net.inet.udp.stats", &systemStats, &oldlenp, NULL, 0))
{
memset(retStats, 0, sizeof(UdpGlobalStatistics)); // out parameter must be initialized.
return -1;
}
retStats->DatagramsReceived = systemStats.udps_ipackets;
retStats->DatagramsSent = systemStats.udps_opackets;
retStats->IncomingDiscarded = systemStats.udps_noport;
retStats->IncomingErrors = systemStats.udps_hdrops + systemStats.udps_badsum + systemStats.udps_badlen;
#if defined(__FreeBSD__)
// FreeBSD does not have net.inet.udp.pcbcount
retStats->UdpListeners = 0;
#else
// This may contain both UDP4 and UDP6 listeners.
oldlenp = sizeof(retStats->UdpListeners);
if (sysctlbyname("net.inet.udp.pcbcount", &retStats->UdpListeners, &oldlenp, NULL, 0))
{
retStats->UdpListeners = 0;
return -1;
}
#endif
return 0;
#else
memset(retStats, 0, sizeof(UdpGlobalStatistics)); // out parameter must be initialized.
return -1;
#endif
}
int32_t SystemNative_GetIcmpv4GlobalStatistics(Icmpv4GlobalStatistics* retStats)
{
#if HAVE_NETINET_ICMP_VAR_H || HAVE_IOS_NETINET_ICMP_VAR_H
size_t oldlenp;
assert(retStats != NULL);
struct icmpstat systemStats;
oldlenp = sizeof(systemStats);
if (sysctlbyname("net.inet.icmp.stats", &systemStats, &oldlenp, NULL, 0))
{
memset(retStats, 0, sizeof(Icmpv4GlobalStatistics));
return -1;
}
TYPEOF(systemStats.icps_inhist[0])* inHist = systemStats.icps_inhist;
TYPEOF(systemStats.icps_outhist[0])* outHist = systemStats.icps_outhist;
retStats->AddressMaskRepliesReceived = inHist[ICMP_MASKREPLY];
retStats->AddressMaskRepliesSent = outHist[ICMP_MASKREPLY];
retStats->AddressMaskRequestsReceived = inHist[ICMP_MASKREQ];
retStats->AddressMaskRequestsSent = outHist[ICMP_MASKREQ];
retStats->DestinationUnreachableMessagesReceived = inHist[ICMP_UNREACH];
retStats->DestinationUnreachableMessagesSent = outHist[ICMP_UNREACH];
retStats->EchoRepliesReceived = inHist[ICMP_ECHOREPLY];
retStats->EchoRepliesSent = outHist[ICMP_ECHOREPLY];
retStats->EchoRequestsReceived = inHist[ICMP_ECHO];
retStats->EchoRequestsSent = outHist[ICMP_ECHO];
retStats->ParameterProblemsReceived = inHist[ICMP_PARAMPROB];
retStats->ParameterProblemsSent = outHist[ICMP_PARAMPROB];
retStats->RedirectsReceived = inHist[ICMP_REDIRECT];
retStats->RedirectsSent = outHist[ICMP_REDIRECT];
retStats->SourceQuenchesReceived = inHist[ICMP_SOURCEQUENCH];
retStats->SourceQuenchesSent = outHist[ICMP_SOURCEQUENCH];
retStats->TimeExceededMessagesReceived = inHist[ICMP_TIMXCEED];
retStats->TimeExceededMessagesSent = outHist[ICMP_TIMXCEED];
retStats->TimestampRepliesReceived = inHist[ICMP_TSTAMPREPLY];
retStats->TimestampRepliesSent = outHist[ICMP_TSTAMPREPLY];
retStats->TimestampRequestsReceived = inHist[ICMP_TSTAMP];
retStats->TimestampRequestsSent = outHist[ICMP_TSTAMP];
return 0;
#else
memset(retStats, 0, sizeof(Icmpv4GlobalStatistics)); // out parameter must be initialized.
return -1;
#endif
}
int32_t SystemNative_GetIcmpv6GlobalStatistics(Icmpv6GlobalStatistics* retStats)
{
assert(retStats != NULL);
size_t oldlenp = (size_t)atomic_load(&icmp6statSize);
const char* sysctlName = "net.inet6.icmp6.stats";
void* buffer = malloc(oldlenp);
if (!buffer)
{
memset(retStats, 0, sizeof(Icmpv6GlobalStatistics));
errno = ENOMEM;
return -1;
}
int result = sysctlbyname(sysctlName, buffer, &oldlenp, NULL, 0);
if (result && errno == ENOMEM)
{
// We did not provide enough memory.
        // macOS 11.0 added a new member to icmp6stat, and FreeBSD has likewise changed the struct between releases.
oldlenp = GetEstimatedSize(sysctlName);
free(buffer);
buffer = malloc(oldlenp);
if (!buffer)
{
memset(retStats, 0, sizeof(Icmpv6GlobalStatistics));
errno = ENOMEM;
return -1;
}
result = sysctlbyname(sysctlName, buffer, &oldlenp, NULL, 0);
if (result == 0)
{
// if the call succeeded, update icmp6statSize
atomic_store(&icmp6statSize, oldlenp);
}
}
if (result)
{
if (buffer)
{
free(buffer);
}
memset(retStats, 0, sizeof(Icmpv6GlobalStatistics));
return -1;
}
uint64_t* inHist = ((struct icmp6stat*)(buffer))->icp6s_inhist;
uint64_t* outHist = ((struct icmp6stat*)(buffer))->icp6s_outhist;
retStats->DestinationUnreachableMessagesReceived = inHist[ICMP6_DST_UNREACH];
retStats->DestinationUnreachableMessagesSent = outHist[ICMP6_DST_UNREACH];
retStats->EchoRepliesReceived = inHist[ICMP6_ECHO_REPLY];
retStats->EchoRepliesSent = outHist[ICMP6_ECHO_REPLY];
retStats->EchoRequestsReceived = inHist[ICMP6_ECHO_REQUEST];
retStats->EchoRequestsSent = outHist[ICMP6_ECHO_REQUEST];
retStats->MembershipQueriesReceived = inHist[ICMP6_MEMBERSHIP_QUERY];
retStats->MembershipQueriesSent = outHist[ICMP6_MEMBERSHIP_QUERY];
retStats->MembershipReductionsReceived = inHist[ICMP6_MEMBERSHIP_REDUCTION];
retStats->MembershipReductionsSent = outHist[ICMP6_MEMBERSHIP_REDUCTION];
retStats->MembershipReportsReceived = inHist[ICMP6_MEMBERSHIP_REPORT];
retStats->MembershipReportsSent = outHist[ICMP6_MEMBERSHIP_REPORT];
retStats->NeighborAdvertisementsReceived = inHist[ND_NEIGHBOR_ADVERT];
retStats->NeighborAdvertisementsSent = outHist[ND_NEIGHBOR_ADVERT];
retStats->NeighborSolicitsReceived = inHist[ND_NEIGHBOR_SOLICIT];
retStats->NeighborSolicitsSent = outHist[ND_NEIGHBOR_SOLICIT];
retStats->PacketTooBigMessagesReceived = inHist[ICMP6_PACKET_TOO_BIG];
retStats->PacketTooBigMessagesSent = outHist[ICMP6_PACKET_TOO_BIG];
retStats->ParameterProblemsReceived = inHist[ICMP6_PARAM_PROB];
retStats->ParameterProblemsSent = outHist[ICMP6_PARAM_PROB];
retStats->RedirectsReceived = inHist[ND_REDIRECT];
retStats->RedirectsSent = outHist[ND_REDIRECT];
retStats->RouterAdvertisementsReceived = inHist[ND_ROUTER_ADVERT];
retStats->RouterAdvertisementsSent = outHist[ND_ROUTER_ADVERT];
retStats->RouterSolicitsReceived = inHist[ND_ROUTER_SOLICIT];
retStats->RouterSolicitsSent = outHist[ND_ROUTER_SOLICIT];
retStats->TimeExceededMessagesReceived = inHist[ICMP6_TIME_EXCEEDED];
retStats->TimeExceededMessagesSent = outHist[ICMP6_TIME_EXCEEDED];
free(buffer);
return 0;
}
int32_t SystemNative_GetEstimatedTcpConnectionCount()
{
    int32_t count = 0;
size_t oldlenp = sizeof(count);
sysctlbyname("net.inet.tcp.pcbcount", &count, &oldlenp, NULL, 0);
return count;
}
#ifdef __FreeBSD__
int32_t SystemNative_GetActiveTcpConnectionInfos(__attribute__((unused)) NativeTcpConnectionInformation* infos, int32_t* infoCount)
{
*infoCount = 0;
return 0;
}
#else
int32_t SystemNative_GetActiveTcpConnectionInfos(NativeTcpConnectionInformation* infos, int32_t* infoCount)
{
assert(infos != NULL);
assert(infoCount != NULL);
const char* sysctlName = "net.inet.tcp.pcblist";
size_t estimatedSize = GetEstimatedSize(sysctlName);
uint8_t* buffer = (uint8_t*)malloc(estimatedSize * sizeof(uint8_t));
if (buffer == NULL)
{
errno = ENOMEM;
return -1;
}
void* newp = NULL;
size_t newlen = 0;
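    // The pcb list can grow between the size estimate and the fetch, so on failure double
    // the buffer (multiply_s guards the size_t doubling against overflow) and retry.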
while (sysctlbyname(sysctlName, buffer, &estimatedSize, newp, newlen) != 0)
{
free(buffer);
size_t tmpEstimatedSize;
if (!multiply_s(estimatedSize, (size_t)2, &tmpEstimatedSize) ||
            (buffer = (uint8_t*)malloc(tmpEstimatedSize * sizeof(uint8_t))) == NULL)
{
errno = ENOMEM;
return -1;
}
estimatedSize = tmpEstimatedSize;
}
int32_t count = (int32_t)(estimatedSize / sizeof(struct xtcpcb));
if (count > *infoCount)
{
// Not enough space in caller-supplied buffer.
free(buffer);
*infoCount = count;
return -1;
}
*infoCount = count;
// sizeof(struct xtcpcb) == 524
struct tcpcb tcp_pcb;
struct inpcb in_pcb;
struct xinpgen* xHeadPtr;
int32_t connectionIndex = -1;
xHeadPtr = (struct xinpgen*)buffer;
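    // The pcblist blob starts with a struct xinpgen header record, so iteration begins at
    // the second record; the kernel ends the list with a trailing xinpgen whose length is
    // smaller than a full xtcpcb, which terminates the loop.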
for (xHeadPtr = (struct xinpgen*)((uint8_t*)xHeadPtr + xHeadPtr->xig_len);
xHeadPtr->xig_len >= sizeof(struct xtcpcb);
xHeadPtr = (struct xinpgen*)((uint8_t*)xHeadPtr + xHeadPtr->xig_len))
{
connectionIndex++;
struct xtcpcb* head_xtcpb = (struct xtcpcb*)xHeadPtr;
tcp_pcb = head_xtcpb->xt_tp;
in_pcb = head_xtcpb->xt_inp;
NativeTcpConnectionInformation* ntci = &infos[connectionIndex];
ntci->State = SystemNative_MapTcpState(tcp_pcb.t_state);
uint8_t vflag = in_pcb.inp_vflag; // INP_IPV4 or INP_IPV6
if ((vflag & INP_IPV4) == INP_IPV4)
{
memcpy_s(&ntci->LocalEndPoint.AddressBytes, sizeof_member(IPEndPointInfo, AddressBytes), &in_pcb.inp_laddr.s_addr, 4);
memcpy_s(&ntci->RemoteEndPoint.AddressBytes, sizeof_member(IPEndPointInfo, AddressBytes), &in_pcb.inp_faddr.s_addr, 4);
ntci->LocalEndPoint.NumAddressBytes = 4;
ntci->RemoteEndPoint.NumAddressBytes = 4;
}
else
{
memcpy_s(&ntci->LocalEndPoint.AddressBytes, sizeof_member(IPEndPointInfo, AddressBytes), &in_pcb.in6p_laddr.s6_addr, 16);
memcpy_s(&ntci->RemoteEndPoint.AddressBytes, sizeof_member(IPEndPointInfo, AddressBytes), &in_pcb.in6p_faddr.s6_addr, 16);
ntci->LocalEndPoint.NumAddressBytes = 16;
ntci->RemoteEndPoint.NumAddressBytes = 16;
}
ntci->LocalEndPoint.Port = ntohs(in_pcb.inp_lport);
ntci->RemoteEndPoint.Port = ntohs(in_pcb.inp_fport);
}
free(buffer);
return 0;
}
#endif
int32_t SystemNative_GetEstimatedUdpListenerCount()
{
    int32_t count = 0;
size_t oldlenp = sizeof(count);
sysctlbyname("net.inet.udp.pcbcount", &count, &oldlenp, NULL, 0);
return count;
}
#ifdef __FreeBSD__
int32_t SystemNative_GetActiveUdpListeners(__attribute__((unused)) IPEndPointInfo* infos, int32_t* infoCount)
{
*infoCount = 0;
return 0;
}
#else
int32_t SystemNative_GetActiveUdpListeners(IPEndPointInfo* infos, int32_t* infoCount)
{
assert(infos != NULL);
assert(infoCount != NULL);
const char* sysctlName = "net.inet.udp.pcblist";
size_t estimatedSize = GetEstimatedSize(sysctlName);
uint8_t* buffer = (uint8_t*)malloc(estimatedSize * sizeof(uint8_t));
if (buffer == NULL)
{
errno = ENOMEM;
return -1;
}
void* newp = NULL;
size_t newlen = 0;
while (sysctlbyname(sysctlName, buffer, &estimatedSize, newp, newlen) != 0)
{
free(buffer);
size_t tmpEstimatedSize;
if (!multiply_s(estimatedSize, (size_t)2, &tmpEstimatedSize) ||
            (buffer = (uint8_t*)malloc(tmpEstimatedSize * sizeof(uint8_t))) == NULL)
{
errno = ENOMEM;
return -1;
}
estimatedSize = tmpEstimatedSize;
}
    int32_t count = (int32_t)(estimatedSize / sizeof(struct xinpcb));
if (count > *infoCount)
{
// Not enough space in caller-supplied buffer.
free(buffer);
*infoCount = count;
return -1;
}
*infoCount = count;
struct inpcb in_pcb;
struct xinpgen* xHeadPtr;
int32_t connectionIndex = -1;
xHeadPtr = (struct xinpgen*)buffer;
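    // The pcblist blob starts with a struct xinpgen header record, so iteration begins at
    // the second record; the kernel ends the list with a trailing xinpgen whose length is
    // smaller than a full xinpcb, which terminates the loop.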
for (xHeadPtr = (struct xinpgen*)((uint8_t*)xHeadPtr + xHeadPtr->xig_len);
xHeadPtr->xig_len >= sizeof(struct xinpcb);
xHeadPtr = (struct xinpgen*)((uint8_t*)xHeadPtr + xHeadPtr->xig_len))
{
connectionIndex++;
struct xinpcb* head_xinpcb = (struct xinpcb*)xHeadPtr;
in_pcb = head_xinpcb->xi_inp;
IPEndPointInfo* iepi = &infos[connectionIndex];
uint8_t vflag = in_pcb.inp_vflag; // INP_IPV4 or INP_IPV6
if ((vflag & INP_IPV4) == INP_IPV4)
{
memcpy_s(iepi->AddressBytes, sizeof_member(IPEndPointInfo, AddressBytes), &in_pcb.inp_laddr.s_addr, 4);
iepi->NumAddressBytes = 4;
}
else
{
memcpy_s(iepi->AddressBytes, sizeof_member(IPEndPointInfo, AddressBytes), &in_pcb.in6p_laddr.s6_addr, 16);
iepi->NumAddressBytes = 16;
}
iepi->Port = ntohs(in_pcb.inp_lport);
}
free(buffer);
return 0;
}
#endif
int32_t SystemNative_GetNativeIPInterfaceStatistics(char* interfaceName, NativeIPInterfaceStatistics* retStats)
{
assert(interfaceName != NULL && retStats != NULL);
unsigned int interfaceIndex = if_nametoindex(interfaceName);
if (interfaceIndex == 0)
{
// An invalid interface name was given (doesn't exist).
return -1;
}
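    // Ask the routing sysctl for the interface-information messages (RTM_IFINFO/RTM_IFINFO2)
    // of the single interface identified by interfaceIndex.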
#if HAVE_IF_MSGHDR2
int statisticsMib[] = {CTL_NET, PF_ROUTE, 0, 0, NET_RT_IFLIST2, (int)interfaceIndex};
#else
int statisticsMib[] = {CTL_NET, PF_ROUTE, 0, 0, NET_RT_IFLIST, (int)interfaceIndex};
#endif
size_t len;
// Get estimated data length
if (sysctl(statisticsMib, 6, NULL, &len, NULL, 0) == -1)
{
memset(retStats, 0, sizeof(NativeIPInterfaceStatistics));
return -1;
}
uint8_t* buffer = (uint8_t*)malloc(len * sizeof(uint8_t));
if (buffer == NULL)
{
errno = ENOMEM;
return -1;
}
if (sysctl(statisticsMib, 6, buffer, &len, NULL, 0) == -1)
{
// Not enough space.
free(buffer);
memset(retStats, 0, sizeof(NativeIPInterfaceStatistics));
return -1;
}
    for (uint8_t* headPtr = buffer; headPtr < buffer + len;
headPtr += ((struct if_msghdr*)headPtr)->ifm_msglen)
{
struct if_msghdr* ifHdr = (struct if_msghdr*)headPtr;
#if HAVE_IF_MSGHDR2
if (ifHdr->ifm_index == interfaceIndex && ifHdr->ifm_type == RTM_IFINFO2)
{
struct if_msghdr2* ifHdr2 = (struct if_msghdr2*)ifHdr;
retStats->SendQueueLength = (uint64_t)ifHdr2->ifm_snd_maxlen;
struct if_data64 systemStats = ifHdr2->ifm_data;
#else
if (ifHdr->ifm_index == interfaceIndex && ifHdr->ifm_type == RTM_IFINFO)
{
struct if_msghdr* ifHdr2 = (struct if_msghdr*)ifHdr;
retStats->SendQueueLength = 0;
struct if_data systemStats = ifHdr2->ifm_data;
#endif
retStats->Mtu = systemStats.ifi_mtu;
retStats->Speed = systemStats.ifi_baudrate; // bits per second.
retStats->InPackets = systemStats.ifi_ipackets;
retStats->InErrors = systemStats.ifi_ierrors;
retStats->OutPackets = systemStats.ifi_opackets;
retStats->OutErrors = systemStats.ifi_oerrors;
retStats->InBytes = systemStats.ifi_ibytes;
retStats->OutBytes = systemStats.ifi_obytes;
retStats->InMulticastPackets = systemStats.ifi_imcasts;
retStats->OutMulticastPackets = systemStats.ifi_omcasts;
retStats->InDrops = systemStats.ifi_iqdrops;
retStats->InNoProto = systemStats.ifi_noproto;
retStats->Flags = 0;
if (ifHdr->ifm_flags & IFF_UP)
{
retStats->Flags |= InterfaceUp;
#if HAVE_NET_IFMEDIA_H || HAVE_IOS_NET_IFMEDIA_H
int fd = socket(AF_INET, SOCK_DGRAM, 0);
if (fd < 0) {
retStats->Flags |= InterfaceError;
}
else
{
struct ifmediareq ifmr;
memset(&ifmr, 0, sizeof(ifmr));
strncpy(ifmr.ifm_name, interfaceName, sizeof(ifmr.ifm_name));
if (ioctl(fd, SIOCGIFMEDIA, (caddr_t)&ifmr) < 0)
{
if (errno == EOPNOTSUPP || errno == EINVAL)
{
// Virtual interfaces like loopback do not have media.
// Assume they are up when administrative state is up.
retStats->Flags |= InterfaceHasLink;
}
else
{
retStats->Flags |= InterfaceError;
}
}
else if ((ifmr.ifm_status & IFM_AVALID) == 0)
{
                        // Wi-Fi on macOS sometimes does not report link when the interface is disabled
                        // (it still has the _UP flag). For other interface types, report Unknown status.
if (IFM_TYPE(ifmr.ifm_current) != IFM_IEEE80211)
{
retStats->Flags |= InterfaceError;
}
}
else
{
if (ifmr.ifm_status & IFM_ACTIVE)
{
retStats->Flags |= InterfaceHasLink;
}
}
close(fd);
}
#else
retStats->Flags |= InterfaceError;
#endif
}
if (ifHdr->ifm_flags & (IFF_MULTICAST | IFF_ALLMULTI))
{
retStats->Flags |= InterfaceSupportsMulticast;
}
free(buffer);
return 0;
}
}
// No statistics were found with the given interface index; shouldn't happen.
free(buffer);
memset(retStats, 0, sizeof(NativeIPInterfaceStatistics));
return -1;
}
int32_t SystemNative_GetNumRoutes()
{
int32_t count = 0;
#if HAVE_RT_MSGHDR2
int routeDumpMib[] = {CTL_NET, PF_ROUTE, 0, 0, NET_RT_DUMP, 0};
size_t len;
if (sysctl(routeDumpMib, 6, NULL, &len, NULL, 0) == -1)
{
return -1;
}
uint8_t* buffer = (uint8_t*)malloc(len * sizeof(uint8_t));
if (buffer == NULL)
{
errno = ENOMEM;
return -1;
}
if (sysctl(routeDumpMib, 6, buffer, &len, NULL, 0) == -1)
{
free(buffer);
return -1;
}
    struct rt_msghdr2* rtmsg;
    for (size_t i = 0; i < len; i += rtmsg->rtm_msglen)
    {
        rtmsg = (struct rt_msghdr2*)&buffer[i];
        if (rtmsg->rtm_flags & RTF_UP)
        {
            count++;
        }
    }
free(buffer);
#endif // HAVE_RT_MSGHDR2
return count;
}
#else
int32_t SystemNative_GetTcpGlobalStatistics(TcpGlobalStatistics* retStats)
{
(void)retStats;
errno = ENOTSUP;
return -1;
}
int32_t SystemNative_GetIPv4GlobalStatistics(IPv4GlobalStatistics* retStats)
{
(void)retStats;
errno = ENOTSUP;
return -1;
}
int32_t SystemNative_GetUdpGlobalStatistics(UdpGlobalStatistics* retStats)
{
(void)retStats;
errno = ENOTSUP;
return -1;
}
int32_t SystemNative_GetIcmpv4GlobalStatistics(Icmpv4GlobalStatistics* retStats)
{
(void)retStats;
errno = ENOTSUP;
return -1;
}
int32_t SystemNative_GetIcmpv6GlobalStatistics(Icmpv6GlobalStatistics* retStats)
{
(void)retStats;
errno = ENOTSUP;
return -1;
}
int32_t SystemNative_GetEstimatedTcpConnectionCount(void)
{
errno = ENOTSUP;
return -1;
}
int32_t SystemNative_GetActiveTcpConnectionInfos(NativeTcpConnectionInformation* infos, int32_t* infoCount)
{
(void)infos;
(void)infoCount;
errno = ENOTSUP;
return -1;
}
int32_t SystemNative_GetEstimatedUdpListenerCount(void)
{
errno = ENOTSUP;
return -1;
}
int32_t SystemNative_GetActiveUdpListeners(IPEndPointInfo* infos, int32_t* infoCount)
{
(void)infos;
(void)infoCount;
errno = ENOTSUP;
return -1;
}
int32_t SystemNative_GetNativeIPInterfaceStatistics(char* interfaceName, NativeIPInterfaceStatistics* retStats)
{
(void)interfaceName;
(void)retStats;
errno = ENOTSUP;
return -1;
}
int32_t SystemNative_GetNumRoutes(void)
{
errno = ENOTSUP;
return -1;
}
#endif // HAVE_NETINET_TCP_VAR_H
| -1 |
dotnet/runtime | 66,007 | [monoapi] Add mono_method_get_unmanaged_callers_only_ftnptr | Like `RuntimeMethodHandle.GetFunctionPointer`, but callable from native code | lambdageek | 2022-03-01T15:38:09Z | 2022-03-03T15:38:37Z | faa272f860f83802a9cee65619e6e2e841b05433 | 1cfa6d6964d890f9673b25d2165532f9a43710a7 | [monoapi] Add mono_method_get_unmanaged_callers_only_ftnptr. Like `RuntimeMethodHandle.GetFunctionPointer`, but callable from native code | ./src/coreclr/unwinder/unwinder.h | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//
#ifndef __unwinder_h__
#define __unwinder_h__
//---------------------------------------------------------------------------------------
//
// OOPStackUnwinder is the abstract base class for unwinding stack frames. Each of the two 64-bit platforms
// has its own derived class. Although the name of this class and its derived classes have changed, they
// are actually borrowed from dbghelp.dll. (StackWalk64() is built on top of these classes.) We have ripped
// out everything we don't need such as symbol lookup and various state, and keep just enough code to support
// VirtualUnwind(). The managed debugging infrastructure can't call RtlVirtualUnwind() because it doesn't
// work from out-of-process.
//
// Notes:
// To see what we have changed in the borrowed source, you can diff the original version and our version.
// For example, on X64, you can diff clr\src\Debug\daccess\amd64\dbs_stack_x64.cpp (the original) and
// clr\src\Debug\daccess\amd64\unwinder_amd64.cpp.
//
class OOPStackUnwinder
{
protected:
// Given a control PC, return the base of the module it is in. For jitted managed code, this is the
// start of the code heap.
static HRESULT GetModuleBase( DWORD64 address,
_Out_ PDWORD64 pdwBase);
    // Given a control PC, return the function entry of the function it is in.
static HRESULT GetFunctionEntry( DWORD64 address,
_Out_writes_(cbBuffer) PVOID pBuffer,
DWORD cbBuffer);
};
#endif // __unwinder_h__
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//
#ifndef __unwinder_h__
#define __unwinder_h__
//---------------------------------------------------------------------------------------
//
// OOPStackUnwinder is the abstract base class for unwinding stack frames. Each of the two 64-bit platforms
// has its own derived class. Although the name of this class and its derived classes have changed, they
// are actually borrowed from dbghelp.dll. (StackWalk64() is built on top of these classes.) We have ripped
// out everything we don't need such as symbol lookup and various state, and keep just enough code to support
// VirtualUnwind(). The managed debugging infrastructure can't call RtlVirtualUnwind() because it doesn't
// work from out-of-process.
//
// Notes:
// To see what we have changed in the borrowed source, you can diff the original version and our version.
// For example, on X64, you can diff clr\src\Debug\daccess\amd64\dbs_stack_x64.cpp (the original) and
// clr\src\Debug\daccess\amd64\unwinder_amd64.cpp.
//
class OOPStackUnwinder
{
protected:
// Given a control PC, return the base of the module it is in. For jitted managed code, this is the
// start of the code heap.
static HRESULT GetModuleBase( DWORD64 address,
_Out_ PDWORD64 pdwBase);
    // Given a control PC, return the function entry of the function it is in.
static HRESULT GetFunctionEntry( DWORD64 address,
_Out_writes_(cbBuffer) PVOID pBuffer,
DWORD cbBuffer);
};
#endif // __unwinder_h__
| -1 |
dotnet/runtime | 66,007 | [monoapi] Add mono_method_get_unmanaged_callers_only_ftnptr | Like `RuntimeMethodHandle.GetFunctionPointer`, but callable from native code | lambdageek | 2022-03-01T15:38:09Z | 2022-03-03T15:38:37Z | faa272f860f83802a9cee65619e6e2e841b05433 | 1cfa6d6964d890f9673b25d2165532f9a43710a7 | [monoapi] Add mono_method_get_unmanaged_callers_only_ftnptr. Like `RuntimeMethodHandle.GetFunctionPointer`, but callable from native code | ./src/coreclr/pal/src/libunwind/src/ppc64/is_fpreg.c | /* libunwind - a platform-independent unwind library
Copyright (C) 2006-2007 IBM
Contributed by
Corey Ashford <[email protected]>
Jose Flavio Aguilar Paulino <[email protected]> <[email protected]>
This file is part of libunwind.
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
#include "libunwind_i.h"
int
unw_is_fpreg (int regnum)
{
return (regnum >= UNW_PPC64_F0 && regnum <= UNW_PPC64_F31);
}
| /* libunwind - a platform-independent unwind library
Copyright (C) 2006-2007 IBM
Contributed by
Corey Ashford <[email protected]>
Jose Flavio Aguilar Paulino <[email protected]> <[email protected]>
This file is part of libunwind.
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
#include "libunwind_i.h"
int
unw_is_fpreg (int regnum)
{
return (regnum >= UNW_PPC64_F0 && regnum <= UNW_PPC64_F31);
}
| -1 |
dotnet/runtime | 66,007 | [monoapi] Add mono_method_get_unmanaged_callers_only_ftnptr | Like `RuntimeMethodHandle.GetFunctionPointer`, but callable from native code | lambdageek | 2022-03-01T15:38:09Z | 2022-03-03T15:38:37Z | faa272f860f83802a9cee65619e6e2e841b05433 | 1cfa6d6964d890f9673b25d2165532f9a43710a7 | [monoapi] Add mono_method_get_unmanaged_callers_only_ftnptr. Like `RuntimeMethodHandle.GetFunctionPointer`, but callable from native code | ./src/coreclr/pal/src/libunwind/include/win/sys/ucontext.h | // This is an incomplete & imprecise implementation of the *nix file
// by the same name
// Since this is only intended for VC++ compilers
// use #pragma once instead of guard macros
#pragma once
#ifdef _MSC_VER // Only for cross compilation to windows
#include <inttypes.h>
#if defined(__linux__) && defined(__x86_64__)
# define SIZEOF_UCONTEXT 936
#elif defined(__linux__) && defined(__aarch64__)
# define SIZEOF_UCONTEXT 4560
#elif defined(__linux__) && defined(__arm__)
# define SIZEOF_UCONTEXT 744
#elif !defined(SIZEOF_UCONTEXT)
// It is not clear whether the sizeof(ucontext_t) is important
// While compiling on Windows the members are not referenced...
// However the size may be important during a cast or a memcpy
// Barring a full audit it could be important, so require the size to be defined
# error SIZEOF_UCONTEXT is unknown for this target
#endif
typedef struct ucontext
{
uint8_t content[SIZEOF_UCONTEXT];
} ucontext_t;
#ifdef __aarch64__
// These types are used in the definition of the aarch64 unw_tdep_context_t
// They are not used in UNW_REMOTE_ONLY, so typedef them as something
typedef long sigset_t;
typedef long stack_t;
// Windows SDK defines reserved. It conflicts with arm64 ucontext
// Undefine it
#undef __reserved
#endif
#endif // _MSC_VER
| // This is an incomplete & imprecise implementation of the *nix file
// by the same name
// Since this is only intended for VC++ compilers
// use #pragma once instead of guard macros
#pragma once
#ifdef _MSC_VER // Only for cross compilation to windows
#include <inttypes.h>
#if defined(__linux__) && defined(__x86_64__)
# define SIZEOF_UCONTEXT 936
#elif defined(__linux__) && defined(__aarch64__)
# define SIZEOF_UCONTEXT 4560
#elif defined(__linux__) && defined(__arm__)
# define SIZEOF_UCONTEXT 744
#elif !defined(SIZEOF_UCONTEXT)
// It is not clear whether the sizeof(ucontext_t) is important
// While compiling on Windows the members are not referenced...
// However the size may be important during a cast or a memcpy
// Barring a full audit it could be important, so require the size to be defined
# error SIZEOF_UCONTEXT is unknown for this target
#endif
typedef struct ucontext
{
uint8_t content[SIZEOF_UCONTEXT];
} ucontext_t;
#ifdef __aarch64__
// These types are used in the definition of the aarch64 unw_tdep_context_t
// They are not used in UNW_REMOTE_ONLY, so typedef them as something
typedef long sigset_t;
typedef long stack_t;
// Windows SDK defines reserved. It conflicts with arm64 ucontext
// Undefine it
#undef __reserved
#endif
#endif // _MSC_VER
| -1 |
dotnet/runtime | 66,007 | [monoapi] Add mono_method_get_unmanaged_callers_only_ftnptr | Like `RuntimeMethodHandle.GetFunctionPointer`, but callable from native code | lambdageek | 2022-03-01T15:38:09Z | 2022-03-03T15:38:37Z | faa272f860f83802a9cee65619e6e2e841b05433 | 1cfa6d6964d890f9673b25d2165532f9a43710a7 | [monoapi] Add mono_method_get_unmanaged_callers_only_ftnptr. Like `RuntimeMethodHandle.GetFunctionPointer`, but callable from native code | ./src/tests/JIT/HardwareIntrinsics/Arm/AdvSimd/SubtractRoundedHighNarrowingUpper.Vector128.UInt32.cs | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/******************************************************************************
* This file is auto-generated from a template file by the GenerateTests.csx *
* script in tests\src\JIT\HardwareIntrinsics.Arm\Shared. In order to make *
* changes, please update the corresponding template and run according to the *
* directions listed in the file. *
******************************************************************************/
using System;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.Arm;
namespace JIT.HardwareIntrinsics.Arm
{
public static partial class Program
{
private static void SubtractRoundedHighNarrowingUpper_Vector128_UInt32()
{
var test = new SimpleTernaryOpTest__SubtractRoundedHighNarrowingUpper_Vector128_UInt32();
if (test.IsSupported)
{
// Validates basic functionality works, using Unsafe.Read
test.RunBasicScenario_UnsafeRead();
if (AdvSimd.IsSupported)
{
// Validates basic functionality works, using Load
test.RunBasicScenario_Load();
}
// Validates calling via reflection works, using Unsafe.Read
test.RunReflectionScenario_UnsafeRead();
if (AdvSimd.IsSupported)
{
// Validates calling via reflection works, using Load
test.RunReflectionScenario_Load();
}
// Validates passing a static member works
test.RunClsVarScenario();
if (AdvSimd.IsSupported)
{
// Validates passing a static member works, using pinning and Load
test.RunClsVarScenario_Load();
}
// Validates passing a local works, using Unsafe.Read
test.RunLclVarScenario_UnsafeRead();
if (AdvSimd.IsSupported)
{
// Validates passing a local works, using Load
test.RunLclVarScenario_Load();
}
// Validates passing the field of a local class works
test.RunClassLclFldScenario();
if (AdvSimd.IsSupported)
{
// Validates passing the field of a local class works, using pinning and Load
test.RunClassLclFldScenario_Load();
}
// Validates passing an instance member of a class works
test.RunClassFldScenario();
if (AdvSimd.IsSupported)
{
// Validates passing an instance member of a class works, using pinning and Load
test.RunClassFldScenario_Load();
}
// Validates passing the field of a local struct works
test.RunStructLclFldScenario();
if (AdvSimd.IsSupported)
{
// Validates passing the field of a local struct works, using pinning and Load
test.RunStructLclFldScenario_Load();
}
// Validates passing an instance member of a struct works
test.RunStructFldScenario();
if (AdvSimd.IsSupported)
{
// Validates passing an instance member of a struct works, using pinning and Load
test.RunStructFldScenario_Load();
}
}
else
{
// Validates we throw on unsupported hardware
test.RunUnsupportedScenario();
}
if (!test.Succeeded)
{
throw new Exception("One or more scenarios did not complete as expected.");
}
}
}
public sealed unsafe class SimpleTernaryOpTest__SubtractRoundedHighNarrowingUpper_Vector128_UInt32
{
private struct DataTable
{
private byte[] inArray1;
private byte[] inArray2;
private byte[] inArray3;
private byte[] outArray;
private GCHandle inHandle1;
private GCHandle inHandle2;
private GCHandle inHandle3;
private GCHandle outHandle;
private ulong alignment;
public DataTable(UInt32[] inArray1, UInt64[] inArray2, UInt64[] inArray3, UInt32[] outArray, int alignment)
{
int sizeOfinArray1 = inArray1.Length * Unsafe.SizeOf<UInt32>();
int sizeOfinArray2 = inArray2.Length * Unsafe.SizeOf<UInt64>();
int sizeOfinArray3 = inArray3.Length * Unsafe.SizeOf<UInt64>();
int sizeOfoutArray = outArray.Length * Unsafe.SizeOf<UInt32>();
if ((alignment != 16 && alignment != 8) || (alignment * 2) < sizeOfinArray1 || (alignment * 2) < sizeOfinArray2 || (alignment * 2) < sizeOfinArray3 || (alignment * 2) < sizeOfoutArray)
{
throw new ArgumentException("Invalid value of alignment");
}
this.inArray1 = new byte[alignment * 2];
this.inArray2 = new byte[alignment * 2];
this.inArray3 = new byte[alignment * 2];
this.outArray = new byte[alignment * 2];
this.inHandle1 = GCHandle.Alloc(this.inArray1, GCHandleType.Pinned);
this.inHandle2 = GCHandle.Alloc(this.inArray2, GCHandleType.Pinned);
this.inHandle3 = GCHandle.Alloc(this.inArray3, GCHandleType.Pinned);
this.outHandle = GCHandle.Alloc(this.outArray, GCHandleType.Pinned);
this.alignment = (ulong)alignment;
Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray1Ptr), ref Unsafe.As<UInt32, byte>(ref inArray1[0]), (uint)sizeOfinArray1);
Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray2Ptr), ref Unsafe.As<UInt64, byte>(ref inArray2[0]), (uint)sizeOfinArray2);
Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray3Ptr), ref Unsafe.As<UInt64, byte>(ref inArray3[0]), (uint)sizeOfinArray3);
}
public void* inArray1Ptr => Align((byte*)(inHandle1.AddrOfPinnedObject().ToPointer()), alignment);
public void* inArray2Ptr => Align((byte*)(inHandle2.AddrOfPinnedObject().ToPointer()), alignment);
public void* inArray3Ptr => Align((byte*)(inHandle3.AddrOfPinnedObject().ToPointer()), alignment);
public void* outArrayPtr => Align((byte*)(outHandle.AddrOfPinnedObject().ToPointer()), alignment);
public void Dispose()
{
inHandle1.Free();
inHandle2.Free();
inHandle3.Free();
outHandle.Free();
}
private static unsafe void* Align(byte* buffer, ulong expectedAlignment)
{
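                // Round buffer up to the next multiple of expectedAlignment; this bit trick
                // assumes expectedAlignment is a power of two (8 or 16 here).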
return (void*)(((ulong)buffer + expectedAlignment - 1) & ~(expectedAlignment - 1));
}
}
private struct TestStruct
{
public Vector64<UInt32> _fld1;
public Vector128<UInt64> _fld2;
public Vector128<UInt64> _fld3;
public static TestStruct Create()
{
var testStruct = new TestStruct();
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetUInt32(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<UInt32>, byte>(ref testStruct._fld1), ref Unsafe.As<UInt32, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector64<UInt32>>());
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetUInt64(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<UInt64>, byte>(ref testStruct._fld2), ref Unsafe.As<UInt64, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<UInt64>>());
for (var i = 0; i < Op3ElementCount; i++) { _data3[i] = TestLibrary.Generator.GetUInt64(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<UInt64>, byte>(ref testStruct._fld3), ref Unsafe.As<UInt64, byte>(ref _data3[0]), (uint)Unsafe.SizeOf<Vector128<UInt64>>());
return testStruct;
}
public void RunStructFldScenario(SimpleTernaryOpTest__SubtractRoundedHighNarrowingUpper_Vector128_UInt32 testClass)
{
var result = AdvSimd.SubtractRoundedHighNarrowingUpper(_fld1, _fld2, _fld3);
Unsafe.Write(testClass._dataTable.outArrayPtr, result);
testClass.ValidateResult(_fld1, _fld2, _fld3, testClass._dataTable.outArrayPtr);
}
public void RunStructFldScenario_Load(SimpleTernaryOpTest__SubtractRoundedHighNarrowingUpper_Vector128_UInt32 testClass)
{
fixed (Vector64<UInt32>* pFld1 = &_fld1)
fixed (Vector128<UInt64>* pFld2 = &_fld2)
fixed (Vector128<UInt64>* pFld3 = &_fld3)
{
var result = AdvSimd.SubtractRoundedHighNarrowingUpper(
AdvSimd.LoadVector64((UInt32*)(pFld1)),
AdvSimd.LoadVector128((UInt64*)(pFld2)),
AdvSimd.LoadVector128((UInt64*)(pFld3))
);
Unsafe.Write(testClass._dataTable.outArrayPtr, result);
testClass.ValidateResult(_fld1, _fld2, _fld3, testClass._dataTable.outArrayPtr);
}
}
}
private static readonly int LargestVectorSize = 16;
private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector64<UInt32>>() / sizeof(UInt32);
private static readonly int Op2ElementCount = Unsafe.SizeOf<Vector128<UInt64>>() / sizeof(UInt64);
private static readonly int Op3ElementCount = Unsafe.SizeOf<Vector128<UInt64>>() / sizeof(UInt64);
private static readonly int RetElementCount = Unsafe.SizeOf<Vector128<UInt32>>() / sizeof(UInt32);
private static UInt32[] _data1 = new UInt32[Op1ElementCount];
private static UInt64[] _data2 = new UInt64[Op2ElementCount];
private static UInt64[] _data3 = new UInt64[Op3ElementCount];
private static Vector64<UInt32> _clsVar1;
private static Vector128<UInt64> _clsVar2;
private static Vector128<UInt64> _clsVar3;
private Vector64<UInt32> _fld1;
private Vector128<UInt64> _fld2;
private Vector128<UInt64> _fld3;
private DataTable _dataTable;
static SimpleTernaryOpTest__SubtractRoundedHighNarrowingUpper_Vector128_UInt32()
{
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetUInt32(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<UInt32>, byte>(ref _clsVar1), ref Unsafe.As<UInt32, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector64<UInt32>>());
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetUInt64(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<UInt64>, byte>(ref _clsVar2), ref Unsafe.As<UInt64, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<UInt64>>());
for (var i = 0; i < Op3ElementCount; i++) { _data3[i] = TestLibrary.Generator.GetUInt64(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<UInt64>, byte>(ref _clsVar3), ref Unsafe.As<UInt64, byte>(ref _data3[0]), (uint)Unsafe.SizeOf<Vector128<UInt64>>());
}
public SimpleTernaryOpTest__SubtractRoundedHighNarrowingUpper_Vector128_UInt32()
{
Succeeded = true;
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetUInt32(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<UInt32>, byte>(ref _fld1), ref Unsafe.As<UInt32, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector64<UInt32>>());
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetUInt64(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<UInt64>, byte>(ref _fld2), ref Unsafe.As<UInt64, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<UInt64>>());
for (var i = 0; i < Op3ElementCount; i++) { _data3[i] = TestLibrary.Generator.GetUInt64(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<UInt64>, byte>(ref _fld3), ref Unsafe.As<UInt64, byte>(ref _data3[0]), (uint)Unsafe.SizeOf<Vector128<UInt64>>());
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetUInt32(); }
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetUInt64(); }
for (var i = 0; i < Op3ElementCount; i++) { _data3[i] = TestLibrary.Generator.GetUInt64(); }
_dataTable = new DataTable(_data1, _data2, _data3, new UInt32[RetElementCount], LargestVectorSize);
}
public bool IsSupported => AdvSimd.IsSupported;
public bool Succeeded { get; set; }
public void RunBasicScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead));
var result = AdvSimd.SubtractRoundedHighNarrowingUpper(
Unsafe.Read<Vector64<UInt32>>(_dataTable.inArray1Ptr),
Unsafe.Read<Vector128<UInt64>>(_dataTable.inArray2Ptr),
Unsafe.Read<Vector128<UInt64>>(_dataTable.inArray3Ptr)
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.inArray3Ptr, _dataTable.outArrayPtr);
}
public void RunBasicScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_Load));
var result = AdvSimd.SubtractRoundedHighNarrowingUpper(
AdvSimd.LoadVector64((UInt32*)(_dataTable.inArray1Ptr)),
AdvSimd.LoadVector128((UInt64*)(_dataTable.inArray2Ptr)),
AdvSimd.LoadVector128((UInt64*)(_dataTable.inArray3Ptr))
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.inArray3Ptr, _dataTable.outArrayPtr);
}
public void RunReflectionScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead));
var result = typeof(AdvSimd).GetMethod(nameof(AdvSimd.SubtractRoundedHighNarrowingUpper), new Type[] { typeof(Vector64<UInt32>), typeof(Vector128<UInt64>), typeof(Vector128<UInt64>) })
.Invoke(null, new object[] {
Unsafe.Read<Vector64<UInt32>>(_dataTable.inArray1Ptr),
Unsafe.Read<Vector128<UInt64>>(_dataTable.inArray2Ptr),
Unsafe.Read<Vector128<UInt64>>(_dataTable.inArray3Ptr)
});
Unsafe.Write(_dataTable.outArrayPtr, (Vector128<UInt32>)(result));
ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.inArray3Ptr, _dataTable.outArrayPtr);
}
public void RunReflectionScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_Load));
var result = typeof(AdvSimd).GetMethod(nameof(AdvSimd.SubtractRoundedHighNarrowingUpper), new Type[] { typeof(Vector64<UInt32>), typeof(Vector128<UInt64>), typeof(Vector128<UInt64>) })
.Invoke(null, new object[] {
AdvSimd.LoadVector64((UInt32*)(_dataTable.inArray1Ptr)),
AdvSimd.LoadVector128((UInt64*)(_dataTable.inArray2Ptr)),
AdvSimd.LoadVector128((UInt64*)(_dataTable.inArray3Ptr))
});
Unsafe.Write(_dataTable.outArrayPtr, (Vector128<UInt32>)(result));
ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.inArray3Ptr, _dataTable.outArrayPtr);
}
public void RunClsVarScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario));
var result = AdvSimd.SubtractRoundedHighNarrowingUpper(
_clsVar1,
_clsVar2,
_clsVar3
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_clsVar1, _clsVar2, _clsVar3, _dataTable.outArrayPtr);
}
public void RunClsVarScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario_Load));
fixed (Vector64<UInt32>* pClsVar1 = &_clsVar1)
fixed (Vector128<UInt64>* pClsVar2 = &_clsVar2)
fixed (Vector128<UInt64>* pClsVar3 = &_clsVar3)
{
var result = AdvSimd.SubtractRoundedHighNarrowingUpper(
AdvSimd.LoadVector64((UInt32*)(pClsVar1)),
AdvSimd.LoadVector128((UInt64*)(pClsVar2)),
AdvSimd.LoadVector128((UInt64*)(pClsVar3))
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_clsVar1, _clsVar2, _clsVar3, _dataTable.outArrayPtr);
}
}
public void RunLclVarScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead));
var op1 = Unsafe.Read<Vector64<UInt32>>(_dataTable.inArray1Ptr);
var op2 = Unsafe.Read<Vector128<UInt64>>(_dataTable.inArray2Ptr);
var op3 = Unsafe.Read<Vector128<UInt64>>(_dataTable.inArray3Ptr);
var result = AdvSimd.SubtractRoundedHighNarrowingUpper(op1, op2, op3);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(op1, op2, op3, _dataTable.outArrayPtr);
}
public void RunLclVarScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_Load));
var op1 = AdvSimd.LoadVector64((UInt32*)(_dataTable.inArray1Ptr));
var op2 = AdvSimd.LoadVector128((UInt64*)(_dataTable.inArray2Ptr));
var op3 = AdvSimd.LoadVector128((UInt64*)(_dataTable.inArray3Ptr));
var result = AdvSimd.SubtractRoundedHighNarrowingUpper(op1, op2, op3);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(op1, op2, op3, _dataTable.outArrayPtr);
}
public void RunClassLclFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario));
var test = new SimpleTernaryOpTest__SubtractRoundedHighNarrowingUpper_Vector128_UInt32();
var result = AdvSimd.SubtractRoundedHighNarrowingUpper(test._fld1, test._fld2, test._fld3);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(test._fld1, test._fld2, test._fld3, _dataTable.outArrayPtr);
}
public void RunClassLclFldScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario_Load));
var test = new SimpleTernaryOpTest__SubtractRoundedHighNarrowingUpper_Vector128_UInt32();
fixed (Vector64<UInt32>* pFld1 = &test._fld1)
fixed (Vector128<UInt64>* pFld2 = &test._fld2)
fixed (Vector128<UInt64>* pFld3 = &test._fld3)
{
var result = AdvSimd.SubtractRoundedHighNarrowingUpper(
AdvSimd.LoadVector64((UInt32*)(pFld1)),
AdvSimd.LoadVector128((UInt64*)(pFld2)),
AdvSimd.LoadVector128((UInt64*)(pFld3))
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(test._fld1, test._fld2, test._fld3, _dataTable.outArrayPtr);
}
}
public void RunClassFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario));
var result = AdvSimd.SubtractRoundedHighNarrowingUpper(_fld1, _fld2, _fld3);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_fld1, _fld2, _fld3, _dataTable.outArrayPtr);
}
public void RunClassFldScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario_Load));
fixed (Vector64<UInt32>* pFld1 = &_fld1)
fixed (Vector128<UInt64>* pFld2 = &_fld2)
fixed (Vector128<UInt64>* pFld3 = &_fld3)
{
var result = AdvSimd.SubtractRoundedHighNarrowingUpper(
AdvSimd.LoadVector64((UInt32*)(pFld1)),
AdvSimd.LoadVector128((UInt64*)(pFld2)),
AdvSimd.LoadVector128((UInt64*)(pFld3))
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_fld1, _fld2, _fld3, _dataTable.outArrayPtr);
}
}
public void RunStructLclFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario));
var test = TestStruct.Create();
var result = AdvSimd.SubtractRoundedHighNarrowingUpper(test._fld1, test._fld2, test._fld3);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(test._fld1, test._fld2, test._fld3, _dataTable.outArrayPtr);
}
public void RunStructLclFldScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario_Load));
var test = TestStruct.Create();
var result = AdvSimd.SubtractRoundedHighNarrowingUpper(
AdvSimd.LoadVector64((UInt32*)(&test._fld1)),
AdvSimd.LoadVector128((UInt64*)(&test._fld2)),
AdvSimd.LoadVector128((UInt64*)(&test._fld3))
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(test._fld1, test._fld2, test._fld3, _dataTable.outArrayPtr);
}
public void RunStructFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario));
var test = TestStruct.Create();
test.RunStructFldScenario(this);
}
public void RunStructFldScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario_Load));
var test = TestStruct.Create();
test.RunStructFldScenario_Load(this);
}
public void RunUnsupportedScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunUnsupportedScenario));
bool succeeded = false;
try
{
RunBasicScenario_UnsafeRead();
}
catch (PlatformNotSupportedException)
{
succeeded = true;
}
if (!succeeded)
{
Succeeded = false;
}
}
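        // The three ValidateResult overloads below normalize the different input forms the
        // scenarios produce (typed vectors, raw pointers into the aligned DataTable buffers,
        // and plain arrays) before comparing each result element against the Helpers oracle.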
private void ValidateResult(Vector64<UInt32> op1, Vector128<UInt64> op2, Vector128<UInt64> op3, void* result, [CallerMemberName] string method = "")
{
UInt32[] inArray1 = new UInt32[Op1ElementCount];
UInt64[] inArray2 = new UInt64[Op2ElementCount];
UInt64[] inArray3 = new UInt64[Op3ElementCount];
UInt32[] outArray = new UInt32[RetElementCount];
Unsafe.WriteUnaligned(ref Unsafe.As<UInt32, byte>(ref inArray1[0]), op1);
Unsafe.WriteUnaligned(ref Unsafe.As<UInt64, byte>(ref inArray2[0]), op2);
Unsafe.WriteUnaligned(ref Unsafe.As<UInt64, byte>(ref inArray3[0]), op3);
Unsafe.CopyBlockUnaligned(ref Unsafe.As<UInt32, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector128<UInt32>>());
ValidateResult(inArray1, inArray2, inArray3, outArray, method);
}
private void ValidateResult(void* op1, void* op2, void* op3, void* result, [CallerMemberName] string method = "")
{
UInt32[] inArray1 = new UInt32[Op1ElementCount];
UInt64[] inArray2 = new UInt64[Op2ElementCount];
UInt64[] inArray3 = new UInt64[Op3ElementCount];
UInt32[] outArray = new UInt32[RetElementCount];
Unsafe.CopyBlockUnaligned(ref Unsafe.As<UInt32, byte>(ref inArray1[0]), ref Unsafe.AsRef<byte>(op1), (uint)Unsafe.SizeOf<Vector64<UInt32>>());
Unsafe.CopyBlockUnaligned(ref Unsafe.As<UInt64, byte>(ref inArray2[0]), ref Unsafe.AsRef<byte>(op2), (uint)Unsafe.SizeOf<Vector128<UInt64>>());
Unsafe.CopyBlockUnaligned(ref Unsafe.As<UInt64, byte>(ref inArray3[0]), ref Unsafe.AsRef<byte>(op3), (uint)Unsafe.SizeOf<Vector128<UInt64>>());
Unsafe.CopyBlockUnaligned(ref Unsafe.As<UInt32, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector128<UInt32>>());
ValidateResult(inArray1, inArray2, inArray3, outArray, method);
}
private void ValidateResult(UInt32[] firstOp, UInt64[] secondOp, UInt64[] thirdOp, UInt32[] result, [CallerMemberName] string method = "")
{
bool succeeded = true;
for (var i = 0; i < RetElementCount; i++)
{
if (Helpers.SubtractRoundedHighNarrowingUpper(firstOp, secondOp, thirdOp, i) != result[i])
{
succeeded = false;
break;
}
}
if (!succeeded)
{
TestLibrary.TestFramework.LogInformation($"{nameof(AdvSimd)}.{nameof(AdvSimd.SubtractRoundedHighNarrowingUpper)}<UInt32>(Vector64<UInt32>, Vector128<UInt64>, Vector128<UInt64>): {method} failed:");
TestLibrary.TestFramework.LogInformation($" firstOp: ({string.Join(", ", firstOp)})");
TestLibrary.TestFramework.LogInformation($"secondOp: ({string.Join(", ", secondOp)})");
TestLibrary.TestFramework.LogInformation($" thirdOp: ({string.Join(", ", thirdOp)})");
TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", result)})");
TestLibrary.TestFramework.LogInformation(string.Empty);
Succeeded = false;
}
}
}
}
| -1 |
dotnet/runtime | 66,007 | [monoapi] Add mono_method_get_unmanaged_callers_only_ftnptr | Like `RuntimeMethodHandle.GetFunctionPointer`, but callable from native code | lambdageek | 2022-03-01T15:38:09Z | 2022-03-03T15:38:37Z | faa272f860f83802a9cee65619e6e2e841b05433 | 1cfa6d6964d890f9673b25d2165532f9a43710a7 | [monoapi] Add mono_method_get_unmanaged_callers_only_ftnptr. Like `RuntimeMethodHandle.GetFunctionPointer`, but callable from native code | ./src/mono/mono/mini/image-writer.c | /**
* \file
* Creation of object files or assembly files using the same interface.
*
* Author:
* Dietmar Maurer ([email protected])
* Zoltan Varga ([email protected])
* Paolo Molaro ([email protected])
* Johan Lorensson ([email protected])
*
* (C) 2002 Ximian, Inc.
*/
#include "config.h"
#include <sys/types.h>
#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif
#ifdef HAVE_STDINT_H
#include <stdint.h>
#endif
#include <fcntl.h>
#include <ctype.h>
#include <string.h>
#include <errno.h>
#include <sys/stat.h>
#include "image-writer.h"
#include "mini.h"
/*
* The used assembler dialect
* TARGET_ASM_APPLE == apple assembler on OSX
* TARGET_ASM_GAS == GNU assembler
*/
#if !defined(TARGET_ASM_APPLE) && !defined(TARGET_ASM_GAS)
#if defined(TARGET_MACH)
#define TARGET_ASM_APPLE
#else
#define TARGET_ASM_GAS
#endif
#endif
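/*
 * Because of the !defined guard above, a build can also force a specific
 * dialect by predefining TARGET_ASM_APPLE or TARGET_ASM_GAS itself instead of
 * relying on the TARGET_MACH default.
 */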
/*
* Defines for the directives used by different assemblers
*/
#if defined(TARGET_POWERPC) || defined(TARGET_MACH)
#define AS_STRING_DIRECTIVE ".asciz"
#else
#define AS_STRING_DIRECTIVE ".string"
#endif
#define AS_INT32_DIRECTIVE ".long"
#define AS_INT64_DIRECTIVE ".quad"
#if (defined(TARGET_AMD64) || defined(TARGET_POWERPC64)) && !defined(MONO_ARCH_ILP32)
#define AS_POINTER_DIRECTIVE ".quad"
#elif defined(TARGET_ARM64)
#ifdef MONO_ARCH_ILP32
#define AS_POINTER_DIRECTIVE AS_INT32_DIRECTIVE
#else
#ifdef TARGET_ASM_APPLE
#define AS_POINTER_DIRECTIVE ".quad"
#else
#define AS_POINTER_DIRECTIVE ".xword"
#endif
#endif
#else
#define AS_POINTER_DIRECTIVE ".long"
#endif
#if defined(TARGET_ASM_APPLE)
#define AS_INT16_DIRECTIVE ".short"
#elif defined(TARGET_ASM_GAS) && defined(TARGET_WIN32)
#define AS_INT16_DIRECTIVE ".word"
#elif defined(TARGET_ASM_GAS)
#define AS_INT16_DIRECTIVE ".short"
#else
#define AS_INT16_DIRECTIVE ".word"
#endif
#if defined(TARGET_ASM_APPLE)
#define AS_SKIP_DIRECTIVE ".space"
#else
#define AS_SKIP_DIRECTIVE ".skip"
#endif
#if defined(TARGET_ASM_APPLE)
#define AS_GLOBAL_PREFIX "_"
#else
#define AS_GLOBAL_PREFIX ""
#endif
#ifdef TARGET_ASM_APPLE
#define AS_TEMP_LABEL_PREFIX "L"
#else
#define AS_TEMP_LABEL_PREFIX ".L"
#endif
#define ROUND_DOWN(VALUE,SIZE) ((VALUE) & ~((SIZE) - 1))
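/* For example ROUND_DOWN(0x1234, 0x10) == 0x1230; SIZE must be a power of two. */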
/* emit mode */
enum {
EMIT_NONE,
EMIT_BYTE,
EMIT_WORD,
EMIT_LONG
};
struct _MonoImageWriter {
MonoMemPool *mempool;
char *outfile;
const char *current_section;
int current_subsection;
const char *section_stack [16];
int subsection_stack [16];
int stack_pos;
FILE *fp;
/* Asm writer */
char *tmpfname;
int mode; /* emit mode */
int col_count; /* bytes emitted per .byte line */
int label_gen;
};
static G_GNUC_UNUSED int
ilog2(int value)
{
int count = -1;
while (value & ~0xf) count += 4, value >>= 4;
while (value) count++, value >>= 1;
return count;
}
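/*
 * Integer log2 by shifting, e.g. ilog2 (8) == 3 and ilog2 (4096) == 12; used to
 * convert byte alignments into the power-of-two argument some .align forms expect.
 */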
/* ASM WRITER */
static void
asm_writer_emit_start (MonoImageWriter *acfg)
{
#if defined(TARGET_ASM_APPLE)
fprintf (acfg->fp, ".subsections_via_symbols\n");
#endif
}
static int
asm_writer_emit_writeout (MonoImageWriter *acfg)
{
fclose (acfg->fp);
return 0;
}
static void
asm_writer_emit_unset_mode (MonoImageWriter *acfg)
{
if (acfg->mode == EMIT_NONE)
return;
fprintf (acfg->fp, "\n");
acfg->mode = EMIT_NONE;
}
static void
asm_writer_emit_section_change (MonoImageWriter *acfg, const char *section_name, int subsection_index)
{
asm_writer_emit_unset_mode (acfg);
#if defined(TARGET_ASM_APPLE)
if (strcmp(section_name, ".bss") == 0)
fprintf (acfg->fp, "%s\n", ".data");
else if (strstr (section_name, ".debug") == section_name) {
//g_assert (subsection_index == 0);
fprintf (acfg->fp, ".section __DWARF, __%s,regular,debug\n", section_name + 1);
} else
fprintf (acfg->fp, "%s\n", section_name);
#elif defined(TARGET_ARM) || defined(TARGET_ARM64) || defined(TARGET_POWERPC)
/* ARM gas doesn't seem to like subsections of .bss */
if (!strcmp (section_name, ".text") || !strcmp (section_name, ".data")) {
fprintf (acfg->fp, "%s %d\n", section_name, subsection_index);
} else {
fprintf (acfg->fp, ".section \"%s\"\n", section_name);
fprintf (acfg->fp, ".subsection %d\n", subsection_index);
}
#elif defined(HOST_WIN32)
fprintf (acfg->fp, ".section %s\n", section_name);
#else
if (!strcmp (section_name, ".text") || !strcmp (section_name, ".data") || !strcmp (section_name, ".bss")) {
fprintf (acfg->fp, "%s %d\n", section_name, subsection_index);
} else {
fprintf (acfg->fp, ".section \"%s\"\n", section_name);
fprintf (acfg->fp, ".subsection %d\n", subsection_index);
}
#endif
}
static
const char *get_label (const char *s)
{
#ifdef TARGET_ASM_APPLE
if (s [0] == '.' && s [1] == 'L')
/* apple uses "L" instead of ".L" to mark temporary labels */
s ++;
#endif
return s;
}
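/* For example get_label (".LDIFF_SYM0") returns "LDIFF_SYM0" when targeting the
 * Apple assembler; on other assemblers the name is returned unchanged. */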
#ifdef TARGET_WIN32
#define GLOBAL_SYMBOL_DEF_SCL 2
#define LOCAL_SYMBOL_DEF_SCL 3
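/* COFF storage classes: 2 == IMAGE_SYM_CLASS_EXTERNAL, 3 == IMAGE_SYM_CLASS_STATIC. */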
static gboolean
asm_writer_in_data_section (MonoImageWriter *acfg)
{
gboolean in_data_section = FALSE;
const char *data_sections [] = {".data", ".bss", ".rdata"};
for (guchar i = 0; i < G_N_ELEMENTS (data_sections); ++i) {
if (strcmp (acfg->current_section, data_sections [i]) == 0) {
in_data_section = TRUE;
break;
}
}
return in_data_section;
}
static void
asm_writer_emit_symbol_type (MonoImageWriter *acfg, const char *name, gboolean func, gboolean global)
{
asm_writer_emit_unset_mode (acfg);
if (func) {
fprintf (acfg->fp, "\t.def %s; .scl %d; .type 32; .endef\n", name, (global == TRUE ? GLOBAL_SYMBOL_DEF_SCL : LOCAL_SYMBOL_DEF_SCL));
} else {
if (!asm_writer_in_data_section (acfg))
fprintf (acfg->fp, "\t.data\n");
}
return;
}
#else
static void
asm_writer_emit_symbol_type (MonoImageWriter *acfg, const char *name, gboolean func, gboolean global)
{
const char *stype;
if (func)
stype = "function";
else
stype = "object";
asm_writer_emit_unset_mode (acfg);
#if defined(TARGET_ASM_APPLE)
#elif defined(TARGET_ARM)
fprintf (acfg->fp, "\t.type %s,#%s\n", name, stype);
#else
fprintf (acfg->fp, "\t.type %s,@%s\n", name, stype);
#endif
}
#endif /* TARGET_WIN32 */
static void
asm_writer_emit_global (MonoImageWriter *acfg, const char *name, gboolean func)
{
asm_writer_emit_unset_mode (acfg);
fprintf (acfg->fp, "\t.globl %s\n", name);
asm_writer_emit_symbol_type (acfg, name, func, TRUE);
}
static void
asm_writer_emit_local_symbol (MonoImageWriter *acfg, const char *name, const char *end_label, gboolean func)
{
asm_writer_emit_unset_mode (acfg);
#if !defined(TARGET_ASM_APPLE) && !defined(TARGET_WIN32)
fprintf (acfg->fp, "\t.local %s\n", name);
#endif
asm_writer_emit_symbol_type (acfg, name, func, FALSE);
}
static void
asm_writer_emit_symbol_size (MonoImageWriter *acfg, const char *name, const char *end_label)
{
asm_writer_emit_unset_mode (acfg);
#if !defined(TARGET_ASM_APPLE) && !defined(TARGET_WIN32)
fprintf (acfg->fp, "\t.size %s,%s-%s\n", name, end_label, name);
#endif
}
static void
asm_writer_emit_label (MonoImageWriter *acfg, const char *name)
{
asm_writer_emit_unset_mode (acfg);
fprintf (acfg->fp, "%s:\n", get_label (name));
}
static void
asm_writer_emit_string (MonoImageWriter *acfg, const char *value)
{
asm_writer_emit_unset_mode (acfg);
fprintf (acfg->fp, "\t%s \"%s\"\n", AS_STRING_DIRECTIVE, value);
}
static void
asm_writer_emit_line (MonoImageWriter *acfg)
{
asm_writer_emit_unset_mode (acfg);
fprintf (acfg->fp, "\n");
}
static void
asm_writer_emit_alignment (MonoImageWriter *acfg, int size)
{
asm_writer_emit_unset_mode (acfg);
#if defined(TARGET_ARM)
fprintf (acfg->fp, "\t.align %d\n", ilog2 (size));
#elif defined(__ppc__) && defined(TARGET_ASM_APPLE)
// the mach-o assembler specifies alignments as powers of 2.
fprintf (acfg->fp, "\t.align %d\t; ilog2\n", ilog2(size));
#elif defined(TARGET_ASM_GAS)
fprintf (acfg->fp, "\t.balign %d\n", size);
#elif defined(TARGET_ASM_APPLE)
fprintf (acfg->fp, "\t.align %d\n", ilog2 (size));
#else
fprintf (acfg->fp, "\t.align %d\n", size);
#endif
}
static void
asm_writer_emit_alignment_fill (MonoImageWriter *acfg, int size, int fill)
{
asm_writer_emit_unset_mode (acfg);
#if defined(TARGET_ASM_APPLE)
fprintf (acfg->fp, "\t.align %d, 0x%0x\n", ilog2 (size), fill);
#else
asm_writer_emit_alignment (acfg, size);
#endif
}
static void
asm_writer_emit_pointer_unaligned (MonoImageWriter *acfg, const char *target)
{
asm_writer_emit_unset_mode (acfg);
fprintf (acfg->fp, "\t%s %s\n", AS_POINTER_DIRECTIVE, target ? target : "0");
}
static void
asm_writer_emit_pointer (MonoImageWriter *acfg, const char *target)
{
asm_writer_emit_unset_mode (acfg);
asm_writer_emit_alignment (acfg, TARGET_SIZEOF_VOID_P);
asm_writer_emit_pointer_unaligned (acfg, target);
}
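/*
 * Lazily built lookup table holding the textual form ",N" of every byte value,
 * so asm_writer_emit_bytes can emit large blobs with one fputs per byte instead
 * of a formatted fprintf per byte.
 */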
static char *byte_to_str;
static void
asm_writer_emit_bytes (MonoImageWriter *acfg, const guint8* buf, int size)
{
int i;
if (acfg->mode != EMIT_BYTE) {
acfg->mode = EMIT_BYTE;
acfg->col_count = 0;
}
if (byte_to_str == NULL) {
byte_to_str = g_new0 (char, 256 * 8);
for (i = 0; i < 256; ++i) {
sprintf (byte_to_str + (i * 8), ",%d", i);
}
}
for (i = 0; i < size; ++i, ++acfg->col_count) {
if ((acfg->col_count % 32) == 0)
fprintf (acfg->fp, "\n\t.byte %d", buf [i]);
else
fputs (byte_to_str + (buf [i] * 8), acfg->fp);
}
}
static void
asm_writer_emit_int16 (MonoImageWriter *acfg, int value)
{
if (acfg->mode != EMIT_WORD) {
acfg->mode = EMIT_WORD;
acfg->col_count = 0;
}
if ((acfg->col_count++ % 8) == 0)
fprintf (acfg->fp, "\n\t%s ", AS_INT16_DIRECTIVE);
else
fprintf (acfg->fp, ", ");
fprintf (acfg->fp, "%d", value);
}
static void
asm_writer_emit_int32 (MonoImageWriter *acfg, int value)
{
if (acfg->mode != EMIT_LONG) {
acfg->mode = EMIT_LONG;
acfg->col_count = 0;
}
if ((acfg->col_count++ % 8) == 0)
fprintf (acfg->fp, "\n\t%s ", AS_INT32_DIRECTIVE);
else
fprintf (acfg->fp, ",");
fprintf (acfg->fp, "%d", value);
}
static void
asm_writer_emit_symbol (MonoImageWriter *acfg, const char *symbol)
{
if (acfg->mode != EMIT_LONG) {
acfg->mode = EMIT_LONG;
acfg->col_count = 0;
}
symbol = get_label (symbol);
if ((acfg->col_count++ % 8) == 0)
fprintf (acfg->fp, "\n\t%s ", AS_INT32_DIRECTIVE);
else
fprintf (acfg->fp, ",");
fprintf (acfg->fp, "%s", symbol);
}
static void
asm_writer_emit_symbol_diff (MonoImageWriter *acfg, const char *end, const char* start, int offset)
{
	if (acfg->mode != EMIT_LONG) {
		acfg->mode = EMIT_LONG;
		acfg->col_count = 0;
	}
	// FIXME: This doesn't seem to work on the iphone
#if 0
	//#ifdef TARGET_ASM_APPLE
	/* The apple assembler needs a separate symbol to be able to handle complex expressions */
	char symbol [128];
	sprintf (symbol, "LTMP_SYM%d", acfg->label_gen);
start = get_label (start);
end = get_label (end);
acfg->label_gen ++;
if (offset > 0)
fprintf (acfg->fp, "\n%s=%s - %s + %d", symbol, end, start, offset);
else if (offset < 0)
fprintf (acfg->fp, "\n%s=%s - %s %d", symbol, end, start, offset);
else
fprintf (acfg->fp, "\n%s=%s - %s", symbol, end, start);
fprintf (acfg->fp, "\n\t%s ", AS_INT32_DIRECTIVE);
fprintf (acfg->fp, "%s", symbol);
#else
start = get_label (start);
end = get_label (end);
if (offset == 0 && strcmp (start, ".") != 0) {
char symbol [128];
sprintf (symbol, "%sDIFF_SYM%d", AS_TEMP_LABEL_PREFIX, acfg->label_gen);
acfg->label_gen ++;
fprintf (acfg->fp, "\n%s=%s - %s", symbol, end, start);
fprintf (acfg->fp, "\n\t%s ", AS_INT32_DIRECTIVE);
fprintf (acfg->fp, "%s", symbol);
return;
}
if ((acfg->col_count++ % 8) == 0)
fprintf (acfg->fp, "\n\t%s ", AS_INT32_DIRECTIVE);
else
fprintf (acfg->fp, ",");
if (offset > 0)
fprintf (acfg->fp, "%s - %s + %d", end, start, offset);
else if (offset < 0)
fprintf (acfg->fp, "%s - %s %d", end, start, offset);
else
fprintf (acfg->fp, "%s - %s", end, start);
#endif
}
static void
asm_writer_emit_zero_bytes (MonoImageWriter *acfg, int num)
{
asm_writer_emit_unset_mode (acfg);
fprintf (acfg->fp, "\t%s %d\n", AS_SKIP_DIRECTIVE, num);
}
/* EMIT FUNCTIONS */
void
mono_img_writer_emit_start (MonoImageWriter *acfg)
{
asm_writer_emit_start (acfg);
}
void
mono_img_writer_emit_section_change (MonoImageWriter *acfg, const char *section_name, int subsection_index)
{
asm_writer_emit_section_change (acfg, section_name, subsection_index);
acfg->current_section = section_name;
acfg->current_subsection = subsection_index;
}
void
mono_img_writer_emit_push_section (MonoImageWriter *acfg, const char *section_name, int subsection)
{
g_assert (acfg->stack_pos < 16 - 1);
acfg->section_stack [acfg->stack_pos] = acfg->current_section;
acfg->subsection_stack [acfg->stack_pos] = acfg->current_subsection;
acfg->stack_pos ++;
mono_img_writer_emit_section_change (acfg, section_name, subsection);
}
void
mono_img_writer_emit_pop_section (MonoImageWriter *acfg)
{
g_assert (acfg->stack_pos > 0);
acfg->stack_pos --;
mono_img_writer_emit_section_change (acfg, acfg->section_stack [acfg->stack_pos], acfg->subsection_stack [acfg->stack_pos]);
}
void
mono_img_writer_set_section_addr (MonoImageWriter *acfg, guint64 addr)
{
NOT_IMPLEMENTED;
}
void
mono_img_writer_emit_global (MonoImageWriter *acfg, const char *name, gboolean func)
{
asm_writer_emit_global (acfg, name, func);
}
void
mono_img_writer_emit_local_symbol (MonoImageWriter *acfg, const char *name, const char *end_label, gboolean func)
{
asm_writer_emit_local_symbol (acfg, name, end_label, func);
}
void
mono_img_writer_emit_symbol_size (MonoImageWriter *acfg, const char *name, const char *end_label)
{
asm_writer_emit_symbol_size (acfg, name, end_label);
}
void
mono_img_writer_emit_label (MonoImageWriter *acfg, const char *name)
{
asm_writer_emit_label (acfg, name);
}
void
mono_img_writer_emit_bytes (MonoImageWriter *acfg, const guint8* buf, int size)
{
asm_writer_emit_bytes (acfg, buf, size);
}
void
mono_img_writer_emit_string (MonoImageWriter *acfg, const char *value)
{
asm_writer_emit_string (acfg, value);
}
void
mono_img_writer_emit_line (MonoImageWriter *acfg)
{
asm_writer_emit_line (acfg);
}
void
mono_img_writer_emit_alignment (MonoImageWriter *acfg, int size)
{
asm_writer_emit_alignment (acfg, size);
}
void
mono_img_writer_emit_alignment_fill (MonoImageWriter *acfg, int size, int fill)
{
asm_writer_emit_alignment_fill (acfg, size, fill);
}
void
mono_img_writer_emit_pointer_unaligned (MonoImageWriter *acfg, const char *target)
{
asm_writer_emit_pointer_unaligned (acfg, target);
}
void
mono_img_writer_emit_pointer (MonoImageWriter *acfg, const char *target)
{
asm_writer_emit_pointer (acfg, target);
}
void
mono_img_writer_emit_int16 (MonoImageWriter *acfg, int value)
{
asm_writer_emit_int16 (acfg, value);
}
void
mono_img_writer_emit_int32 (MonoImageWriter *acfg, int value)
{
asm_writer_emit_int32 (acfg, value);
}
void
mono_img_writer_emit_symbol (MonoImageWriter *acfg, const char *symbol)
{
asm_writer_emit_symbol (acfg, symbol);
}
void
mono_img_writer_emit_symbol_diff (MonoImageWriter *acfg, const char *end, const char* start, int offset)
{
asm_writer_emit_symbol_diff (acfg, end, start, offset);
}
void
mono_img_writer_emit_zero_bytes (MonoImageWriter *acfg, int num)
{
asm_writer_emit_zero_bytes (acfg, num);
}
int
mono_img_writer_emit_writeout (MonoImageWriter *acfg)
{
return asm_writer_emit_writeout (acfg);
}
void
mono_img_writer_emit_byte (MonoImageWriter *acfg, guint8 val)
{
mono_img_writer_emit_bytes (acfg, &val, 1);
}
/*
* Emit a relocation entry of type RELOC_TYPE against symbol SYMBOL at the current PC.
 * Do not advance PC. The assembly writer leaves relocations to the assembler,
 * so this entry point is never reached in this file.
*/
void
mono_img_writer_emit_reloc (MonoImageWriter *acfg, int reloc_type, const char *symbol, int addend)
{
g_assert_not_reached ();
}
/*
* mono_img_writer_emit_unset_mode:
*
* Flush buffered data so it is safe to write to the output file from outside this
* module. This is a nop for the binary writer.
*/
void
mono_img_writer_emit_unset_mode (MonoImageWriter *acfg)
{
asm_writer_emit_unset_mode (acfg);
}
/*
* mono_img_writer_get_output:
*
 * Return the output buffer of a binary writer emitting to memory. The returned
 * memory is from malloc, and it is owned by the caller. The assembly writer in
 * this file always emits to a FILE*, so this path is never reachable here.
*/
guint8*
mono_img_writer_get_output (MonoImageWriter *acfg, guint32 *size)
{
g_assert_not_reached ();
return NULL;
}
/*
* mono_img_writer_create:
*
* Create an image writer writing to FP.
*/
MonoImageWriter*
mono_img_writer_create (FILE *fp)
{
MonoImageWriter *w = g_new0 (MonoImageWriter, 1);
g_assert (fp);
w->fp = fp;
w->mempool = mono_mempool_new ();
return w;
}
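/*
 * Typical lifecycle, as an illustrative sketch only (the output path is made up):
 *
 *   FILE *fp = fopen ("output.s", "w");
 *   MonoImageWriter *w = mono_img_writer_create (fp);
 *   mono_img_writer_emit_start (w);
 *   mono_img_writer_emit_section_change (w, ".text", 0);
 *   mono_img_writer_emit_label (w, "my_data");
 *   mono_img_writer_emit_int32 (w, 42);
 *   mono_img_writer_emit_writeout (w); // also closes fp
 *   mono_img_writer_destroy (w);
 */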
void
mono_img_writer_destroy (MonoImageWriter *w)
{
// FIXME: Free all the stuff
mono_mempool_destroy (w->mempool);
g_free (w);
}
gboolean
mono_img_writer_subsections_supported (MonoImageWriter *acfg)
{
#ifdef TARGET_ASM_APPLE
return FALSE;
#else
return TRUE;
#endif
}
FILE *
mono_img_writer_get_fp (MonoImageWriter *acfg)
{
return acfg->fp;
}
const char *
mono_img_writer_get_temp_label_prefix (MonoImageWriter *acfg)
{
return AS_TEMP_LABEL_PREFIX;
}
| /**
* \file
* Creation of object files or assembly files using the same interface.
*
* Author:
* Dietmar Maurer ([email protected])
* Zoltan Varga ([email protected])
* Paolo Molaro ([email protected])
* Johan Lorensson ([email protected])
*
* (C) 2002 Ximian, Inc.
*/
#include "config.h"
#include <sys/types.h>
#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif
#ifdef HAVE_STDINT_H
#include <stdint.h>
#endif
#include <fcntl.h>
#include <ctype.h>
#include <string.h>
#include <errno.h>
#include <sys/stat.h>
#include "image-writer.h"
#include "mini.h"
/*
* The used assembler dialect
* TARGET_ASM_APPLE == apple assembler on OSX
* TARGET_ASM_GAS == GNU assembler
*/
#if !defined(TARGET_ASM_APPLE) && !defined(TARGET_ASM_GAS)
#if defined(TARGET_MACH)
#define TARGET_ASM_APPLE
#else
#define TARGET_ASM_GAS
#endif
#endif
/*
* Defines for the directives used by different assemblers
*/
#if defined(TARGET_POWERPC) || defined(TARGET_MACH)
#define AS_STRING_DIRECTIVE ".asciz"
#else
#define AS_STRING_DIRECTIVE ".string"
#endif
#define AS_INT32_DIRECTIVE ".long"
#define AS_INT64_DIRECTIVE ".quad"
#if (defined(TARGET_AMD64) || defined(TARGET_POWERPC64)) && !defined(MONO_ARCH_ILP32)
#define AS_POINTER_DIRECTIVE ".quad"
#elif defined(TARGET_ARM64)
#ifdef MONO_ARCH_ILP32
#define AS_POINTER_DIRECTIVE AS_INT32_DIRECTIVE
#else
#ifdef TARGET_ASM_APPLE
#define AS_POINTER_DIRECTIVE ".quad"
#else
#define AS_POINTER_DIRECTIVE ".xword"
#endif
#endif
#else
#define AS_POINTER_DIRECTIVE ".long"
#endif
#if defined(TARGET_ASM_APPLE)
#define AS_INT16_DIRECTIVE ".short"
#elif defined(TARGET_ASM_GAS) && defined(TARGET_WIN32)
#define AS_INT16_DIRECTIVE ".word"
#elif defined(TARGET_ASM_GAS)
#define AS_INT16_DIRECTIVE ".short"
#else
#define AS_INT16_DIRECTIVE ".word"
#endif
#if defined(TARGET_ASM_APPLE)
#define AS_SKIP_DIRECTIVE ".space"
#else
#define AS_SKIP_DIRECTIVE ".skip"
#endif
#if defined(TARGET_ASM_APPLE)
#define AS_GLOBAL_PREFIX "_"
#else
#define AS_GLOBAL_PREFIX ""
#endif
#ifdef TARGET_ASM_APPLE
#define AS_TEMP_LABEL_PREFIX "L"
#else
#define AS_TEMP_LABEL_PREFIX ".L"
#endif
#define ROUND_DOWN(VALUE,SIZE) ((VALUE) & ~((SIZE) - 1))
/* emit mode */
enum {
EMIT_NONE,
EMIT_BYTE,
EMIT_WORD,
EMIT_LONG
};
struct _MonoImageWriter {
MonoMemPool *mempool;
char *outfile;
const char *current_section;
int current_subsection;
const char *section_stack [16];
int subsection_stack [16];
int stack_pos;
FILE *fp;
/* Asm writer */
char *tmpfname;
int mode; /* emit mode */
int col_count; /* bytes emitted per .byte line */
int label_gen;
};
static G_GNUC_UNUSED int
ilog2(int value)
{
int count = -1;
while (value & ~0xf) count += 4, value >>= 4;
while (value) count++, value >>= 1;
return count;
}
/* ASM WRITER */
static void
asm_writer_emit_start (MonoImageWriter *acfg)
{
#if defined(TARGET_ASM_APPLE)
fprintf (acfg->fp, ".subsections_via_symbols\n");
#endif
}
static int
asm_writer_emit_writeout (MonoImageWriter *acfg)
{
fclose (acfg->fp);
return 0;
}
static void
asm_writer_emit_unset_mode (MonoImageWriter *acfg)
{
if (acfg->mode == EMIT_NONE)
return;
fprintf (acfg->fp, "\n");
acfg->mode = EMIT_NONE;
}
static void
asm_writer_emit_section_change (MonoImageWriter *acfg, const char *section_name, int subsection_index)
{
asm_writer_emit_unset_mode (acfg);
#if defined(TARGET_ASM_APPLE)
if (strcmp(section_name, ".bss") == 0)
fprintf (acfg->fp, "%s\n", ".data");
else if (strstr (section_name, ".debug") == section_name) {
//g_assert (subsection_index == 0);
fprintf (acfg->fp, ".section __DWARF, __%s,regular,debug\n", section_name + 1);
} else
fprintf (acfg->fp, "%s\n", section_name);
#elif defined(TARGET_ARM) || defined(TARGET_ARM64) || defined(TARGET_POWERPC)
/* ARM gas doesn't seem to like subsections of .bss */
if (!strcmp (section_name, ".text") || !strcmp (section_name, ".data")) {
fprintf (acfg->fp, "%s %d\n", section_name, subsection_index);
} else {
fprintf (acfg->fp, ".section \"%s\"\n", section_name);
fprintf (acfg->fp, ".subsection %d\n", subsection_index);
}
#elif defined(HOST_WIN32)
fprintf (acfg->fp, ".section %s\n", section_name);
#else
if (!strcmp (section_name, ".text") || !strcmp (section_name, ".data") || !strcmp (section_name, ".bss")) {
fprintf (acfg->fp, "%s %d\n", section_name, subsection_index);
} else {
fprintf (acfg->fp, ".section \"%s\"\n", section_name);
fprintf (acfg->fp, ".subsection %d\n", subsection_index);
}
#endif
}
static
const char *get_label (const char *s)
{
#ifdef TARGET_ASM_APPLE
if (s [0] == '.' && s [1] == 'L')
/* apple uses "L" instead of ".L" to mark temporary labels */
s ++;
#endif
return s;
}
#ifdef TARGET_WIN32
#define GLOBAL_SYMBOL_DEF_SCL 2
#define LOCAL_SYMBOL_DEF_SCL 3
static gboolean
asm_writer_in_data_section (MonoImageWriter *acfg)
{
gboolean in_data_section = FALSE;
const char *data_sections [] = {".data", ".bss", ".rdata"};
for (guchar i = 0; i < G_N_ELEMENTS (data_sections); ++i) {
if (strcmp (acfg->current_section, data_sections [i]) == 0) {
in_data_section = TRUE;
break;
}
}
return in_data_section;
}
static void
asm_writer_emit_symbol_type (MonoImageWriter *acfg, const char *name, gboolean func, gboolean global)
{
asm_writer_emit_unset_mode (acfg);
if (func) {
fprintf (acfg->fp, "\t.def %s; .scl %d; .type 32; .endef\n", name, (global == TRUE ? GLOBAL_SYMBOL_DEF_SCL : LOCAL_SYMBOL_DEF_SCL));
} else {
if (!asm_writer_in_data_section (acfg))
fprintf (acfg->fp, "\t.data\n");
}
return;
}
#else
static void
asm_writer_emit_symbol_type (MonoImageWriter *acfg, const char *name, gboolean func, gboolean global)
{
const char *stype;
if (func)
stype = "function";
else
stype = "object";
asm_writer_emit_unset_mode (acfg);
#if defined(TARGET_ASM_APPLE)
#elif defined(TARGET_ARM)
fprintf (acfg->fp, "\t.type %s,#%s\n", name, stype);
#else
fprintf (acfg->fp, "\t.type %s,@%s\n", name, stype);
#endif
}
#endif /* TARGET_WIN32 */
static void
asm_writer_emit_global (MonoImageWriter *acfg, const char *name, gboolean func)
{
asm_writer_emit_unset_mode (acfg);
fprintf (acfg->fp, "\t.globl %s\n", name);
asm_writer_emit_symbol_type (acfg, name, func, TRUE);
}
static void
asm_writer_emit_local_symbol (MonoImageWriter *acfg, const char *name, const char *end_label, gboolean func)
{
asm_writer_emit_unset_mode (acfg);
#if !defined(TARGET_ASM_APPLE) && !defined(TARGET_WIN32)
fprintf (acfg->fp, "\t.local %s\n", name);
#endif
asm_writer_emit_symbol_type (acfg, name, func, FALSE);
}
static void
asm_writer_emit_symbol_size (MonoImageWriter *acfg, const char *name, const char *end_label)
{
asm_writer_emit_unset_mode (acfg);
#if !defined(TARGET_ASM_APPLE) && !defined(TARGET_WIN32)
fprintf (acfg->fp, "\t.size %s,%s-%s\n", name, end_label, name);
#endif
}
static void
asm_writer_emit_label (MonoImageWriter *acfg, const char *name)
{
asm_writer_emit_unset_mode (acfg);
fprintf (acfg->fp, "%s:\n", get_label (name));
}
static void
asm_writer_emit_string (MonoImageWriter *acfg, const char *value)
{
asm_writer_emit_unset_mode (acfg);
fprintf (acfg->fp, "\t%s \"%s\"\n", AS_STRING_DIRECTIVE, value);
}
static void
asm_writer_emit_line (MonoImageWriter *acfg)
{
asm_writer_emit_unset_mode (acfg);
fprintf (acfg->fp, "\n");
}
static void
asm_writer_emit_alignment (MonoImageWriter *acfg, int size)
{
asm_writer_emit_unset_mode (acfg);
#if defined(TARGET_ARM)
fprintf (acfg->fp, "\t.align %d\n", ilog2 (size));
#elif defined(__ppc__) && defined(TARGET_ASM_APPLE)
// the mach-o assembler specifies alignments as powers of 2.
fprintf (acfg->fp, "\t.align %d\t; ilog2\n", ilog2(size));
#elif defined(TARGET_ASM_GAS)
fprintf (acfg->fp, "\t.balign %d\n", size);
#elif defined(TARGET_ASM_APPLE)
fprintf (acfg->fp, "\t.align %d\n", ilog2 (size));
#else
fprintf (acfg->fp, "\t.align %d\n", size);
#endif
}
static void
asm_writer_emit_alignment_fill (MonoImageWriter *acfg, int size, int fill)
{
asm_writer_emit_unset_mode (acfg);
#if defined(TARGET_ASM_APPLE)
fprintf (acfg->fp, "\t.align %d, 0x%0x\n", ilog2 (size), fill);
#else
asm_writer_emit_alignment (acfg, size);
#endif
}
static void
asm_writer_emit_pointer_unaligned (MonoImageWriter *acfg, const char *target)
{
asm_writer_emit_unset_mode (acfg);
fprintf (acfg->fp, "\t%s %s\n", AS_POINTER_DIRECTIVE, target ? target : "0");
}
static void
asm_writer_emit_pointer (MonoImageWriter *acfg, const char *target)
{
asm_writer_emit_unset_mode (acfg);
asm_writer_emit_alignment (acfg, TARGET_SIZEOF_VOID_P);
asm_writer_emit_pointer_unaligned (acfg, target);
}
static char *byte_to_str;
static void
asm_writer_emit_bytes (MonoImageWriter *acfg, const guint8* buf, int size)
{
int i;
if (acfg->mode != EMIT_BYTE) {
acfg->mode = EMIT_BYTE;
acfg->col_count = 0;
}
if (byte_to_str == NULL) {
byte_to_str = g_new0 (char, 256 * 8);
for (i = 0; i < 256; ++i) {
sprintf (byte_to_str + (i * 8), ",%d", i);
}
}
for (i = 0; i < size; ++i, ++acfg->col_count) {
if ((acfg->col_count % 32) == 0)
fprintf (acfg->fp, "\n\t.byte %d", buf [i]);
else
fputs (byte_to_str + (buf [i] * 8), acfg->fp);
}
}
static void
asm_writer_emit_int16 (MonoImageWriter *acfg, int value)
{
if (acfg->mode != EMIT_WORD) {
acfg->mode = EMIT_WORD;
acfg->col_count = 0;
}
if ((acfg->col_count++ % 8) == 0)
fprintf (acfg->fp, "\n\t%s ", AS_INT16_DIRECTIVE);
else
fprintf (acfg->fp, ", ");
fprintf (acfg->fp, "%d", value);
}
static void
asm_writer_emit_int32 (MonoImageWriter *acfg, int value)
{
if (acfg->mode != EMIT_LONG) {
acfg->mode = EMIT_LONG;
acfg->col_count = 0;
}
if ((acfg->col_count++ % 8) == 0)
fprintf (acfg->fp, "\n\t%s ", AS_INT32_DIRECTIVE);
else
fprintf (acfg->fp, ",");
fprintf (acfg->fp, "%d", value);
}
static void
asm_writer_emit_symbol (MonoImageWriter *acfg, const char *symbol)
{
if (acfg->mode != EMIT_LONG) {
acfg->mode = EMIT_LONG;
acfg->col_count = 0;
}
symbol = get_label (symbol);
if ((acfg->col_count++ % 8) == 0)
fprintf (acfg->fp, "\n\t%s ", AS_INT32_DIRECTIVE);
else
fprintf (acfg->fp, ",");
fprintf (acfg->fp, "%s", symbol);
}
static void
asm_writer_emit_symbol_diff (MonoImageWriter *acfg, const char *end, const char* start, int offset)
{
#ifdef TARGET_ASM_APPLE
//char symbol [128];
#endif
if (acfg->mode != EMIT_LONG) {
acfg->mode = EMIT_LONG;
acfg->col_count = 0;
}
// FIXME: This doesn't seem to work on the iphone
#if 0
//#ifdef TARGET_ASM_APPLE
/* The apple assembler needs a separate symbol to be able to handle complex expressions */
sprintf (symbol, "LTMP_SYM%d", acfg->label_gen);
start = get_label (start);
end = get_label (end);
acfg->label_gen ++;
if (offset > 0)
fprintf (acfg->fp, "\n%s=%s - %s + %d", symbol, end, start, offset);
else if (offset < 0)
fprintf (acfg->fp, "\n%s=%s - %s %d", symbol, end, start, offset);
else
fprintf (acfg->fp, "\n%s=%s - %s", symbol, end, start);
fprintf (acfg->fp, "\n\t%s ", AS_INT32_DIRECTIVE);
fprintf (acfg->fp, "%s", symbol);
#else
start = get_label (start);
end = get_label (end);
if (offset == 0 && strcmp (start, ".") != 0) {
char symbol [128];
sprintf (symbol, "%sDIFF_SYM%d", AS_TEMP_LABEL_PREFIX, acfg->label_gen);
acfg->label_gen ++;
fprintf (acfg->fp, "\n%s=%s - %s", symbol, end, start);
fprintf (acfg->fp, "\n\t%s ", AS_INT32_DIRECTIVE);
fprintf (acfg->fp, "%s", symbol);
return;
}
if ((acfg->col_count++ % 8) == 0)
fprintf (acfg->fp, "\n\t%s ", AS_INT32_DIRECTIVE);
else
fprintf (acfg->fp, ",");
if (offset > 0)
fprintf (acfg->fp, "%s - %s + %d", end, start, offset);
else if (offset < 0)
fprintf (acfg->fp, "%s - %s %d", end, start, offset);
else
fprintf (acfg->fp, "%s - %s", end, start);
#endif
}
static void
asm_writer_emit_zero_bytes (MonoImageWriter *acfg, int num)
{
asm_writer_emit_unset_mode (acfg);
fprintf (acfg->fp, "\t%s %d\n", AS_SKIP_DIRECTIVE, num);
}
/* EMIT FUNCTIONS */
void
mono_img_writer_emit_start (MonoImageWriter *acfg)
{
asm_writer_emit_start (acfg);
}
void
mono_img_writer_emit_section_change (MonoImageWriter *acfg, const char *section_name, int subsection_index)
{
asm_writer_emit_section_change (acfg, section_name, subsection_index);
acfg->current_section = section_name;
acfg->current_subsection = subsection_index;
}
void
mono_img_writer_emit_push_section (MonoImageWriter *acfg, const char *section_name, int subsection)
{
g_assert (acfg->stack_pos < 16 - 1);
acfg->section_stack [acfg->stack_pos] = acfg->current_section;
acfg->subsection_stack [acfg->stack_pos] = acfg->current_subsection;
acfg->stack_pos ++;
mono_img_writer_emit_section_change (acfg, section_name, subsection);
}
void
mono_img_writer_emit_pop_section (MonoImageWriter *acfg)
{
g_assert (acfg->stack_pos > 0);
acfg->stack_pos --;
mono_img_writer_emit_section_change (acfg, acfg->section_stack [acfg->stack_pos], acfg->subsection_stack [acfg->stack_pos]);
}
void
mono_img_writer_set_section_addr (MonoImageWriter *acfg, guint64 addr)
{
NOT_IMPLEMENTED;
}
void
mono_img_writer_emit_global (MonoImageWriter *acfg, const char *name, gboolean func)
{
asm_writer_emit_global (acfg, name, func);
}
void
mono_img_writer_emit_local_symbol (MonoImageWriter *acfg, const char *name, const char *end_label, gboolean func)
{
asm_writer_emit_local_symbol (acfg, name, end_label, func);
}
void
mono_img_writer_emit_symbol_size (MonoImageWriter *acfg, const char *name, const char *end_label)
{
asm_writer_emit_symbol_size (acfg, name, end_label);
}
void
mono_img_writer_emit_label (MonoImageWriter *acfg, const char *name)
{
asm_writer_emit_label (acfg, name);
}
void
mono_img_writer_emit_bytes (MonoImageWriter *acfg, const guint8* buf, int size)
{
asm_writer_emit_bytes (acfg, buf, size);
}
void
mono_img_writer_emit_string (MonoImageWriter *acfg, const char *value)
{
asm_writer_emit_string (acfg, value);
}
void
mono_img_writer_emit_line (MonoImageWriter *acfg)
{
asm_writer_emit_line (acfg);
}
void
mono_img_writer_emit_alignment (MonoImageWriter *acfg, int size)
{
asm_writer_emit_alignment (acfg, size);
}
void
mono_img_writer_emit_alignment_fill (MonoImageWriter *acfg, int size, int fill)
{
asm_writer_emit_alignment_fill (acfg, size, fill);
}
void
mono_img_writer_emit_pointer_unaligned (MonoImageWriter *acfg, const char *target)
{
asm_writer_emit_pointer_unaligned (acfg, target);
}
void
mono_img_writer_emit_pointer (MonoImageWriter *acfg, const char *target)
{
asm_writer_emit_pointer (acfg, target);
}
void
mono_img_writer_emit_int16 (MonoImageWriter *acfg, int value)
{
asm_writer_emit_int16 (acfg, value);
}
void
mono_img_writer_emit_int32 (MonoImageWriter *acfg, int value)
{
asm_writer_emit_int32 (acfg, value);
}
void
mono_img_writer_emit_symbol (MonoImageWriter *acfg, const char *symbol)
{
asm_writer_emit_symbol (acfg, symbol);
}
void
mono_img_writer_emit_symbol_diff (MonoImageWriter *acfg, const char *end, const char* start, int offset)
{
asm_writer_emit_symbol_diff (acfg, end, start, offset);
}
void
mono_img_writer_emit_zero_bytes (MonoImageWriter *acfg, int num)
{
asm_writer_emit_zero_bytes (acfg, num);
}
int
mono_img_writer_emit_writeout (MonoImageWriter *acfg)
{
return asm_writer_emit_writeout (acfg);
}
void
mono_img_writer_emit_byte (MonoImageWriter *acfg, guint8 val)
{
mono_img_writer_emit_bytes (acfg, &val, 1);
}
/*
* Emit a relocation entry of type RELOC_TYPE against symbol SYMBOL at the current PC.
* Do not advance PC.
*/
void
mono_img_writer_emit_reloc (MonoImageWriter *acfg, int reloc_type, const char *symbol, int addend)
{
g_assert_not_reached ();
}
/*
* mono_img_writer_emit_unset_mode:
*
* Flush buffered data so it is safe to write to the output file from outside this
* module. This is a nop for the binary writer.
*/
void
mono_img_writer_emit_unset_mode (MonoImageWriter *acfg)
{
asm_writer_emit_unset_mode (acfg);
}
/*
* mono_img_writer_get_output:
*
* Return the output buffer of a binary writer emitting to memory. The returned memory
 * is from malloc, and it is owned by the caller. Only the assembly writer is
 * compiled into this configuration, so this entry point is unreachable here.
*/
guint8*
mono_img_writer_get_output (MonoImageWriter *acfg, guint32 *size)
{
g_assert_not_reached ();
return NULL;
}
/*
* mono_img_writer_create:
*
* Create an image writer writing to FP.
*/
MonoImageWriter*
mono_img_writer_create (FILE *fp)
{
MonoImageWriter *w = g_new0 (MonoImageWriter, 1);
g_assert (fp);
w->fp = fp;
w->mempool = mono_mempool_new ();
return w;
}
void
mono_img_writer_destroy (MonoImageWriter *w)
{
// FIXME: Free all the stuff
mono_mempool_destroy (w->mempool);
g_free (w);
}
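/*
 * Illustrative sketch (an assumption for documentation, not original code):
 * a minimal lifecycle for a writer over a stdio stream. The output file name
 * is hypothetical.
 *
 *   FILE *fp = fopen ("out.s", "w");
 *   MonoImageWriter *w = mono_img_writer_create (fp);
 *   mono_img_writer_emit_section_change (w, ".text", 0);
 *   mono_img_writer_emit_label (w, "start");
 *   mono_img_writer_emit_byte (w, 0x90);
 *   mono_img_writer_emit_writeout (w);
 *   mono_img_writer_destroy (w);
 *   fclose (fp);
 */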
gboolean
mono_img_writer_subsections_supported (MonoImageWriter *acfg)
{
#ifdef TARGET_ASM_APPLE
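/* Apple's assembler does not support GAS-style .subsection directives. */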
return FALSE;
#else
return TRUE;
#endif
}
FILE *
mono_img_writer_get_fp (MonoImageWriter *acfg)
{
return acfg->fp;
}
const char *
mono_img_writer_get_temp_label_prefix (MonoImageWriter *acfg)
{
return AS_TEMP_LABEL_PREFIX;
}
| -1 |
dotnet/runtime | 66,007 | [monoapi] Add mono_method_get_unmanaged_callers_only_ftnptr | Like `RuntimeMethodHandle.GetFunctionPointer`, but callable from native code | lambdageek | 2022-03-01T15:38:09Z | 2022-03-03T15:38:37Z | faa272f860f83802a9cee65619e6e2e841b05433 | 1cfa6d6964d890f9673b25d2165532f9a43710a7 | [monoapi] Add mono_method_get_unmanaged_callers_only_ftnptr. Like `RuntimeMethodHandle.GetFunctionPointer`, but callable from native code | ./src/libraries/System.Private.CoreLib/src/System/Runtime/InteropServices/ComTypes/IRunningObjectTable.cs | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System.ComponentModel;
namespace System.Runtime.InteropServices.ComTypes
{
[Guid("00000010-0000-0000-C000-000000000046")]
[EditorBrowsable(EditorBrowsableState.Never)]
[InterfaceType(ComInterfaceType.InterfaceIsIUnknown)]
[ComImport]
public interface IRunningObjectTable
{
int Register(int grfFlags, [MarshalAs(UnmanagedType.Interface)] object punkObject, IMoniker pmkObjectName);
void Revoke(int dwRegister);
[PreserveSig]
int IsRunning(IMoniker pmkObjectName);
[PreserveSig]
int GetObject(IMoniker pmkObjectName, [MarshalAs(UnmanagedType.Interface)] out object ppunkObject);
void NoteChangeTime(int dwRegister, ref FILETIME pfiletime);
[PreserveSig]
int GetTimeOfLastChange(IMoniker pmkObjectName, out FILETIME pfiletime);
void EnumRunning(out IEnumMoniker ppenumMoniker);
}
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System.ComponentModel;
namespace System.Runtime.InteropServices.ComTypes
{
[Guid("00000010-0000-0000-C000-000000000046")]
[EditorBrowsable(EditorBrowsableState.Never)]
[InterfaceType(ComInterfaceType.InterfaceIsIUnknown)]
[ComImport]
public interface IRunningObjectTable
{
int Register(int grfFlags, [MarshalAs(UnmanagedType.Interface)] object punkObject, IMoniker pmkObjectName);
void Revoke(int dwRegister);
[PreserveSig]
int IsRunning(IMoniker pmkObjectName);
[PreserveSig]
int GetObject(IMoniker pmkObjectName, [MarshalAs(UnmanagedType.Interface)] out object ppunkObject);
void NoteChangeTime(int dwRegister, ref FILETIME pfiletime);
[PreserveSig]
int GetTimeOfLastChange(IMoniker pmkObjectName, out FILETIME pfiletime);
void EnumRunning(out IEnumMoniker ppenumMoniker);
}
}
| -1 |
dotnet/runtime | 66,007 | [monoapi] Add mono_method_get_unmanaged_callers_only_ftnptr | Like `RuntimeMethodHandle.GetFunctionPointer`, but callable from native code | lambdageek | 2022-03-01T15:38:09Z | 2022-03-03T15:38:37Z | faa272f860f83802a9cee65619e6e2e841b05433 | 1cfa6d6964d890f9673b25d2165532f9a43710a7 | [monoapi] Add mono_method_get_unmanaged_callers_only_ftnptr. Like `RuntimeMethodHandle.GetFunctionPointer`, but callable from native code | ./src/libraries/System.Private.Xml/tests/Xslt/TestFiles/TestData/XsltApiV2/AddParameterB1.xsl |
<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
<xsl:output method="xml" omit-xml-declaration="yes" />
<xsl:param name="param1" select="'default global'"/>
<xsl:template match="/">
<xsl:call-template name="Test" />
</xsl:template>
<xsl:template name="Test">
<xsl:param name="param1" select="'default local'"/>
<result><xsl:value-of select="$param1" /></result>
</xsl:template>
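<!-- The call-template above passes no with-param, so the local default wins;
     the expected serialized output is <result>default local</result>. -->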
</xsl:stylesheet> |
<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
<xsl:output method="xml" omit-xml-declaration="yes" />
<xsl:param name="param1" select="'default global'"/>
<xsl:template match="/">
<xsl:call-template name="Test" />
</xsl:template>
<xsl:template name="Test">
<xsl:param name="param1" select="'default local'"/>
<result><xsl:value-of select="$param1" /></result>
</xsl:template>
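<!-- The call-template above passes no with-param, so the local default wins;
     the expected serialized output is <result>default local</result>. -->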
</xsl:stylesheet> | -1 |
dotnet/runtime | 66,007 | [monoapi] Add mono_method_get_unmanaged_callers_only_ftnptr | Like `RuntimeMethodHandle.GetFunctionPointer`, but callable from native code | lambdageek | 2022-03-01T15:38:09Z | 2022-03-03T15:38:37Z | faa272f860f83802a9cee65619e6e2e841b05433 | 1cfa6d6964d890f9673b25d2165532f9a43710a7 | [monoapi] Add mono_method_get_unmanaged_callers_only_ftnptr. Like `RuntimeMethodHandle.GetFunctionPointer`, but callable from native code | ./src/libraries/System.Drawing.Common/src/System/Drawing/Drawing2D/CustomLineCap.Windows.cs | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System.Runtime.InteropServices;
using Gdip = System.Drawing.SafeNativeMethods.Gdip;
namespace System.Drawing.Drawing2D
{
public partial class CustomLineCap
{
internal static CustomLineCap CreateCustomLineCapObject(IntPtr cap)
{
int status = Gdip.GdipGetCustomLineCapType(cap, out CustomLineCapType capType);
if (status != Gdip.Ok)
{
Gdip.GdipDeleteCustomLineCap(cap);
throw Gdip.StatusException(status);
}
switch (capType)
{
case CustomLineCapType.Default:
return new CustomLineCap(cap);
case CustomLineCapType.AdjustableArrowCap:
return new AdjustableArrowCap(cap);
}
Gdip.GdipDeleteCustomLineCap(cap);
throw Gdip.StatusException(Gdip.NotImplemented);
}
}
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System.Runtime.InteropServices;
using Gdip = System.Drawing.SafeNativeMethods.Gdip;
namespace System.Drawing.Drawing2D
{
public partial class CustomLineCap
{
internal static CustomLineCap CreateCustomLineCapObject(IntPtr cap)
{
int status = Gdip.GdipGetCustomLineCapType(cap, out CustomLineCapType capType);
if (status != Gdip.Ok)
{
Gdip.GdipDeleteCustomLineCap(cap);
throw Gdip.StatusException(status);
}
switch (capType)
{
case CustomLineCapType.Default:
return new CustomLineCap(cap);
case CustomLineCapType.AdjustableArrowCap:
return new AdjustableArrowCap(cap);
}
Gdip.GdipDeleteCustomLineCap(cap);
throw Gdip.StatusException(Gdip.NotImplemented);
}
}
}
| -1 |
dotnet/runtime | 66,007 | [monoapi] Add mono_method_get_unmanaged_callers_only_ftnptr | Like `RuntimeMethodHandle.GetFunctionPointer`, but callable from native code | lambdageek | 2022-03-01T15:38:09Z | 2022-03-03T15:38:37Z | faa272f860f83802a9cee65619e6e2e841b05433 | 1cfa6d6964d890f9673b25d2165532f9a43710a7 | [monoapi] Add mono_method_get_unmanaged_callers_only_ftnptr. Like `RuntimeMethodHandle.GetFunctionPointer`, but callable from native code | ./src/tests/JIT/SIMD/CreateGeneric_r.csproj | <Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<OutputType>Exe</OutputType>
</PropertyGroup>
<PropertyGroup>
<DebugType>None</DebugType>
<Optimize />
</PropertyGroup>
<ItemGroup>
<Compile Include="CreateGeneric.cs" />
</ItemGroup>
</Project>
| <Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<OutputType>Exe</OutputType>
</PropertyGroup>
<PropertyGroup>
<DebugType>None</DebugType>
<Optimize />
</PropertyGroup>
<ItemGroup>
<Compile Include="CreateGeneric.cs" />
</ItemGroup>
</Project>
| -1 |
dotnet/runtime | 66,007 | [monoapi] Add mono_method_get_unmanaged_callers_only_ftnptr | Like `RuntimeMethodHandle.GetFunctionPointer`, but callable from native code | lambdageek | 2022-03-01T15:38:09Z | 2022-03-03T15:38:37Z | faa272f860f83802a9cee65619e6e2e841b05433 | 1cfa6d6964d890f9673b25d2165532f9a43710a7 | [monoapi] Add mono_method_get_unmanaged_callers_only_ftnptr. Like `RuntimeMethodHandle.GetFunctionPointer`, but callable from native code | ./src/coreclr/tools/aot/ILCompiler.ReadyToRun/Compiler/SingleMethodCompilationModuleGroup.cs | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System.Collections.Generic;
using Internal.ReadyToRunConstants;
using Internal.TypeSystem;
using Internal.TypeSystem.Ecma;
namespace ILCompiler
{
/// <summary>
/// A compilation group that only contains a single method. Useful for development purposes when investigating
/// code generation issues.
/// </summary>
public class SingleMethodCompilationModuleGroup : ReadyToRunCompilationModuleGroupBase
{
private MethodDesc _method;
public SingleMethodCompilationModuleGroup(
CompilerTypeSystemContext context,
bool isCompositeBuildMode,
bool isInputBubble,
IEnumerable<EcmaModule> compilationModuleSet,
IEnumerable<ModuleDesc> versionBubbleModuleSet,
bool compileGenericDependenciesFromVersionBubbleModuleSet,
MethodDesc method) :
base(context,
isCompositeBuildMode,
isInputBubble,
compilationModuleSet,
versionBubbleModuleSet,
compileGenericDependenciesFromVersionBubbleModuleSet)
{
_method = method;
}
public override bool ContainsMethodBody(MethodDesc method, bool unboxingStub)
{
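// Match the requested method exactly or via its canonical form: for shared
// generics the compiler may query the canonical method body rather than the
// specific instantiation.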
return (method == _method) || (method == _method.GetCanonMethodTarget(CanonicalFormKind.Specific));
}
public override void ApplyProfilerGuidedCompilationRestriction(ProfileDataManager profileGuidedCompileRestriction)
{
// Profiler guided restrictions are ignored for single method compilation
return;
}
public override ReadyToRunFlags GetReadyToRunFlags()
{
// Partial by definition.
return ReadyToRunFlags.READYTORUN_FLAG_Partial;
}
}
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System.Collections.Generic;
using Internal.ReadyToRunConstants;
using Internal.TypeSystem;
using Internal.TypeSystem.Ecma;
namespace ILCompiler
{
/// <summary>
/// A compilation group that only contains a single method. Useful for development purposes when investigating
/// code generation issues.
/// </summary>
public class SingleMethodCompilationModuleGroup : ReadyToRunCompilationModuleGroupBase
{
private MethodDesc _method;
public SingleMethodCompilationModuleGroup(
CompilerTypeSystemContext context,
bool isCompositeBuildMode,
bool isInputBubble,
IEnumerable<EcmaModule> compilationModuleSet,
IEnumerable<ModuleDesc> versionBubbleModuleSet,
bool compileGenericDependenciesFromVersionBubbleModuleSet,
MethodDesc method) :
base(context,
isCompositeBuildMode,
isInputBubble,
compilationModuleSet,
versionBubbleModuleSet,
compileGenericDependenciesFromVersionBubbleModuleSet)
{
_method = method;
}
public override bool ContainsMethodBody(MethodDesc method, bool unboxingStub)
{
return (method == _method) || (method == _method.GetCanonMethodTarget(CanonicalFormKind.Specific));
}
public override void ApplyProfilerGuidedCompilationRestriction(ProfileDataManager profileGuidedCompileRestriction)
{
// Profiler guided restrictions are ignored for single method compilation
return;
}
public override ReadyToRunFlags GetReadyToRunFlags()
{
// Partial by definition.
return ReadyToRunFlags.READYTORUN_FLAG_Partial;
}
}
}
| -1 |
dotnet/runtime | 66,007 | [monoapi] Add mono_method_get_unmanaged_callers_only_ftnptr | Like `RuntimeMethodHandle.GetFunctionPointer`, but callable from native code | lambdageek | 2022-03-01T15:38:09Z | 2022-03-03T15:38:37Z | faa272f860f83802a9cee65619e6e2e841b05433 | 1cfa6d6964d890f9673b25d2165532f9a43710a7 | [monoapi] Add mono_method_get_unmanaged_callers_only_ftnptr. Like `RuntimeMethodHandle.GetFunctionPointer`, but callable from native code | ./src/tests/JIT/IL_Conformance/Old/directed/ldarg_s_i2.il | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
.assembly extern legacy library mscorlib {}
.assembly ldarg_s_i2 {}
.class ldarg_s_i2 {
.method public static int32 test_int16()
{
.locals()
.maxstack 256
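// Pushes the constants 0x00..0xff and passes them as 256 int16 arguments; the
// callee below reloads each argument with ldarg.s and compares it against the
// same constant, branching to FAIL on any corrupted argument slot.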
ldc.i4.0
ldc.i4.1
ldc.i4.2
ldc.i4.3
ldc.i4.4
ldc.i4.5
ldc.i4.6
ldc.i4.7
ldc.i4.8
ldc.i4.s 0x09
ldc.i4.s 0x0a
ldc.i4.s 0x0b
ldc.i4.s 0x0c
ldc.i4.s 0x0d
ldc.i4.s 0x0e
ldc.i4.s 0x0f
ldc.i4.s 0x10
ldc.i4.s 0x11
ldc.i4.s 0x12
ldc.i4.s 0x13
ldc.i4.s 0x14
ldc.i4.s 0x15
ldc.i4.s 0x16
ldc.i4.s 0x17
ldc.i4.s 0x18
ldc.i4.s 0x19
ldc.i4.s 0x1a
ldc.i4.s 0x1b
ldc.i4.s 0x1c
ldc.i4.s 0x1d
ldc.i4.s 0x1e
ldc.i4.s 0x1f
ldc.i4.s 0x20
ldc.i4.s 0x21
ldc.i4.s 0x22
ldc.i4.s 0x23
ldc.i4.s 0x24
ldc.i4.s 0x25
ldc.i4.s 0x26
ldc.i4.s 0x27
ldc.i4.s 0x28
ldc.i4.s 0x29
ldc.i4.s 0x2a
ldc.i4.s 0x2b
ldc.i4.s 0x2c
ldc.i4.s 0x2d
ldc.i4.s 0x2e
ldc.i4.s 0x2f
ldc.i4.s 0x30
ldc.i4.s 0x31
ldc.i4.s 0x32
ldc.i4.s 0x33
ldc.i4.s 0x34
ldc.i4.s 0x35
ldc.i4.s 0x36
ldc.i4.s 0x37
ldc.i4.s 0x38
ldc.i4.s 0x39
ldc.i4.s 0x3a
ldc.i4.s 0x3b
ldc.i4.s 0x3c
ldc.i4.s 0x3d
ldc.i4.s 0x3e
ldc.i4.s 0x3f
ldc.i4.s 0x40
ldc.i4.s 0x41
ldc.i4.s 0x42
ldc.i4.s 0x43
ldc.i4.s 0x44
ldc.i4.s 0x45
ldc.i4.s 0x46
ldc.i4.s 0x47
ldc.i4.s 0x48
ldc.i4.s 0x49
ldc.i4.s 0x4a
ldc.i4.s 0x4b
ldc.i4.s 0x4c
ldc.i4.s 0x4d
ldc.i4.s 0x4e
ldc.i4.s 0x4f
ldc.i4.s 0x50
ldc.i4.s 0x51
ldc.i4.s 0x52
ldc.i4.s 0x53
ldc.i4.s 0x54
ldc.i4.s 0x55
ldc.i4.s 0x56
ldc.i4.s 0x57
ldc.i4.s 0x58
ldc.i4.s 0x59
ldc.i4.s 0x5a
ldc.i4.s 0x5b
ldc.i4.s 0x5c
ldc.i4.s 0x5d
ldc.i4.s 0x5e
ldc.i4.s 0x5f
ldc.i4.s 0x60
ldc.i4.s 0x61
ldc.i4.s 0x62
ldc.i4.s 0x63
ldc.i4.s 0x64
ldc.i4.s 0x65
ldc.i4.s 0x66
ldc.i4.s 0x67
ldc.i4.s 0x68
ldc.i4.s 0x69
ldc.i4.s 0x6a
ldc.i4.s 0x6b
ldc.i4.s 0x6c
ldc.i4.s 0x6d
ldc.i4.s 0x6e
ldc.i4.s 0x6f
ldc.i4.s 0x70
ldc.i4.s 0x71
ldc.i4.s 0x72
ldc.i4.s 0x73
ldc.i4.s 0x74
ldc.i4.s 0x75
ldc.i4.s 0x76
ldc.i4.s 0x77
ldc.i4.s 0x78
ldc.i4.s 0x79
ldc.i4.s 0x7a
ldc.i4.s 0x7b
ldc.i4.s 0x7c
ldc.i4.s 0x7d
ldc.i4.s 0x7e
ldc.i4.s 0x7f
ldc.i4 0x80
ldc.i4 0x81
ldc.i4 0x82
ldc.i4 0x83
ldc.i4 0x84
ldc.i4 0x85
ldc.i4 0x86
ldc.i4 0x87
ldc.i4 0x88
ldc.i4 0x89
ldc.i4 0x8a
ldc.i4 0x8b
ldc.i4 0x8c
ldc.i4 0x8d
ldc.i4 0x8e
ldc.i4 0x8f
ldc.i4 0x90
ldc.i4 0x91
ldc.i4 0x92
ldc.i4 0x93
ldc.i4 0x94
ldc.i4 0x95
ldc.i4 0x96
ldc.i4 0x97
ldc.i4 0x98
ldc.i4 0x99
ldc.i4 0x9a
ldc.i4 0x9b
ldc.i4 0x9c
ldc.i4 0x9d
ldc.i4 0x9e
ldc.i4 0x9f
ldc.i4 0xa0
ldc.i4 0xa1
ldc.i4 0xa2
ldc.i4 0xa3
ldc.i4 0xa4
ldc.i4 0xa5
ldc.i4 0xa6
ldc.i4 0xa7
ldc.i4 0xa8
ldc.i4 0xa9
ldc.i4 0xaa
ldc.i4 0xab
ldc.i4 0xac
ldc.i4 0xad
ldc.i4 0xae
ldc.i4 0xaf
ldc.i4 0xb0
ldc.i4 0xb1
ldc.i4 0xb2
ldc.i4 0xb3
ldc.i4 0xb4
ldc.i4 0xb5
ldc.i4 0xb6
ldc.i4 0xb7
ldc.i4 0xb8
ldc.i4 0xb9
ldc.i4 0xba
ldc.i4 0xbb
ldc.i4 0xbc
ldc.i4 0xbd
ldc.i4 0xbe
ldc.i4 0xbf
ldc.i4 0xc0
ldc.i4 0xc1
ldc.i4 0xc2
ldc.i4 0xc3
ldc.i4 0xc4
ldc.i4 0xc5
ldc.i4 0xc6
ldc.i4 0xc7
ldc.i4 0xc8
ldc.i4 0xc9
ldc.i4 0xca
ldc.i4 0xcb
ldc.i4 0xcc
ldc.i4 0xcd
ldc.i4 0xce
ldc.i4 0xcf
ldc.i4 0xd0
ldc.i4 0xd1
ldc.i4 0xd2
ldc.i4 0xd3
ldc.i4 0xd4
ldc.i4 0xd5
ldc.i4 0xd6
ldc.i4 0xd7
ldc.i4 0xd8
ldc.i4 0xd9
ldc.i4 0xda
ldc.i4 0xdb
ldc.i4 0xdc
ldc.i4 0xdd
ldc.i4 0xde
ldc.i4 0xdf
ldc.i4 0xe0
ldc.i4 0xe1
ldc.i4 0xe2
ldc.i4 0xe3
ldc.i4 0xe4
ldc.i4 0xe5
ldc.i4 0xe6
ldc.i4 0xe7
ldc.i4 0xe8
ldc.i4 0xe9
ldc.i4 0xea
ldc.i4 0xeb
ldc.i4 0xec
ldc.i4 0xed
ldc.i4 0xee
ldc.i4 0xef
ldc.i4 0xf0
ldc.i4 0xf1
ldc.i4 0xf2
ldc.i4 0xf3
ldc.i4 0xf4
ldc.i4 0xf5
ldc.i4 0xf6
ldc.i4 0xf7
ldc.i4 0xf8
ldc.i4 0xf9
ldc.i4 0xfa
ldc.i4 0xfb
ldc.i4 0xfc
ldc.i4 0xfd
ldc.i4 0xfe
ldc.i4 0xff
call int32 ldarg_s_i2::test_int16(
int16, int16, int16, int16, int16, int16, int16, int16,
int16, int16, int16, int16, int16, int16, int16, int16,
int16, int16, int16, int16, int16, int16, int16, int16,
int16, int16, int16, int16, int16, int16, int16, int16,
int16, int16, int16, int16, int16, int16, int16, int16,
int16, int16, int16, int16, int16, int16, int16, int16,
int16, int16, int16, int16, int16, int16, int16, int16,
int16, int16, int16, int16, int16, int16, int16, int16,
int16, int16, int16, int16, int16, int16, int16, int16,
int16, int16, int16, int16, int16, int16, int16, int16,
int16, int16, int16, int16, int16, int16, int16, int16,
int16, int16, int16, int16, int16, int16, int16, int16,
int16, int16, int16, int16, int16, int16, int16, int16,
int16, int16, int16, int16, int16, int16, int16, int16,
int16, int16, int16, int16, int16, int16, int16, int16,
int16, int16, int16, int16, int16, int16, int16, int16,
int16, int16, int16, int16, int16, int16, int16, int16,
int16, int16, int16, int16, int16, int16, int16, int16,
int16, int16, int16, int16, int16, int16, int16, int16,
int16, int16, int16, int16, int16, int16, int16, int16,
int16, int16, int16, int16, int16, int16, int16, int16,
int16, int16, int16, int16, int16, int16, int16, int16,
int16, int16, int16, int16, int16, int16, int16, int16,
int16, int16, int16, int16, int16, int16, int16, int16,
int16, int16, int16, int16, int16, int16, int16, int16,
int16, int16, int16, int16, int16, int16, int16, int16,
int16, int16, int16, int16, int16, int16, int16, int16,
int16, int16, int16, int16, int16, int16, int16, int16,
int16, int16, int16, int16, int16, int16, int16, int16,
int16, int16, int16, int16, int16, int16, int16, int16,
int16, int16, int16, int16, int16, int16, int16, int16,
int16, int16, int16, int16, int16, int16, int16, int16)
ret
}
.method public static int32 test_int16(
int16, int16, int16, int16, int16, int16, int16, int16,
int16, int16, int16, int16, int16, int16, int16, int16,
int16, int16, int16, int16, int16, int16, int16, int16,
int16, int16, int16, int16, int16, int16, int16, int16,
int16, int16, int16, int16, int16, int16, int16, int16,
int16, int16, int16, int16, int16, int16, int16, int16,
int16, int16, int16, int16, int16, int16, int16, int16,
int16, int16, int16, int16, int16, int16, int16, int16,
int16, int16, int16, int16, int16, int16, int16, int16,
int16, int16, int16, int16, int16, int16, int16, int16,
int16, int16, int16, int16, int16, int16, int16, int16,
int16, int16, int16, int16, int16, int16, int16, int16,
int16, int16, int16, int16, int16, int16, int16, int16,
int16, int16, int16, int16, int16, int16, int16, int16,
int16, int16, int16, int16, int16, int16, int16, int16,
int16, int16, int16, int16, int16, int16, int16, int16,
int16, int16, int16, int16, int16, int16, int16, int16,
int16, int16, int16, int16, int16, int16, int16, int16,
int16, int16, int16, int16, int16, int16, int16, int16,
int16, int16, int16, int16, int16, int16, int16, int16,
int16, int16, int16, int16, int16, int16, int16, int16,
int16, int16, int16, int16, int16, int16, int16, int16,
int16, int16, int16, int16, int16, int16, int16, int16,
int16, int16, int16, int16, int16, int16, int16, int16,
int16, int16, int16, int16, int16, int16, int16, int16,
int16, int16, int16, int16, int16, int16, int16, int16,
int16, int16, int16, int16, int16, int16, int16, int16,
int16, int16, int16, int16, int16, int16, int16, int16,
int16, int16, int16, int16, int16, int16, int16, int16,
int16, int16, int16, int16, int16, int16, int16, int16,
int16, int16, int16, int16, int16, int16, int16, int16,
int16, int16, int16, int16, int16, int16, int16, int16)
{
.locals()
.maxstack 2
ldarg.s 0x00
ldc.i4 0x00
conv.i2
ceq
brfalse FAIL
ldarg.s 0x01
ldc.i4 0x01
conv.i2
ceq
brfalse FAIL
ldarg.s 0x02
ldc.i4 0x02
conv.i2
ceq
brfalse FAIL
ldarg.s 0x03
ldc.i4 0x03
conv.i2
ceq
brfalse FAIL
ldarg.s 0x04
ldc.i4 0x04
conv.i2
ceq
brfalse FAIL
ldarg.s 0x05
ldc.i4 0x05
conv.i2
ceq
brfalse FAIL
ldarg.s 0x06
ldc.i4 0x06
conv.i2
ceq
brfalse FAIL
ldarg.s 0x07
ldc.i4 0x07
conv.i2
ceq
brfalse FAIL
ldarg.s 0x08
ldc.i4 0x08
conv.i2
ceq
brfalse FAIL
ldarg.s 0x09
ldc.i4 0x09
conv.i2
ceq
brfalse FAIL
ldarg.s 0x0a
ldc.i4 0x0a
conv.i2
ceq
brfalse FAIL
ldarg.s 0x0b
ldc.i4 0x0b
conv.i2
ceq
brfalse FAIL
ldarg.s 0x0c
ldc.i4 0x0c
conv.i2
ceq
brfalse FAIL
ldarg.s 0x0d
ldc.i4 0x0d
conv.i2
ceq
brfalse FAIL
ldarg.s 0x0e
ldc.i4 0x0e
conv.i2
ceq
brfalse FAIL
ldarg.s 0x0f
ldc.i4 0x0f
conv.i2
ceq
brfalse FAIL
ldarg.s 0x10
ldc.i4 0x10
conv.i2
ceq
brfalse FAIL
ldarg.s 0x11
ldc.i4 0x11
conv.i2
ceq
brfalse FAIL
ldarg.s 0x12
ldc.i4 0x12
conv.i2
ceq
brfalse FAIL
ldarg.s 0x13
ldc.i4 0x13
conv.i2
ceq
brfalse FAIL
ldarg.s 0x14
ldc.i4 0x14
conv.i2
ceq
brfalse FAIL
ldarg.s 0x15
ldc.i4 0x15
conv.i2
ceq
brfalse FAIL
ldarg.s 0x16
ldc.i4 0x16
conv.i2
ceq
brfalse FAIL
ldarg.s 0x17
ldc.i4 0x17
conv.i2
ceq
brfalse FAIL
ldarg.s 0x18
ldc.i4 0x18
conv.i2
ceq
brfalse FAIL
ldarg.s 0x19
ldc.i4 0x19
conv.i2
ceq
brfalse FAIL
ldarg.s 0x1a
ldc.i4 0x1a
conv.i2
ceq
brfalse FAIL
ldarg.s 0x1b
ldc.i4 0x1b
conv.i2
ceq
brfalse FAIL
ldarg.s 0x1c
ldc.i4 0x1c
conv.i2
ceq
brfalse FAIL
ldarg.s 0x1d
ldc.i4 0x1d
conv.i2
ceq
brfalse FAIL
ldarg.s 0x1e
ldc.i4 0x1e
conv.i2
ceq
brfalse FAIL
ldarg.s 0x1f
ldc.i4 0x1f
conv.i2
ceq
brfalse FAIL
ldarg.s 0x20
ldc.i4 0x20
conv.i2
ceq
brfalse FAIL
ldarg.s 0x21
ldc.i4 0x21
conv.i2
ceq
brfalse FAIL
ldarg.s 0x22
ldc.i4 0x22
conv.i2
ceq
brfalse FAIL
ldarg.s 0x23
ldc.i4 0x23
conv.i2
ceq
brfalse FAIL
ldarg.s 0x24
ldc.i4 0x24
conv.i2
ceq
brfalse FAIL
ldarg.s 0x25
ldc.i4 0x25
conv.i2
ceq
brfalse FAIL
ldarg.s 0x26
ldc.i4 0x26
conv.i2
ceq
brfalse FAIL
ldarg.s 0x27
ldc.i4 0x27
conv.i2
ceq
brfalse FAIL
ldarg.s 0x28
ldc.i4 0x28
conv.i2
ceq
brfalse FAIL
ldarg.s 0x29
ldc.i4 0x29
conv.i2
ceq
brfalse FAIL
ldarg.s 0x2a
ldc.i4 0x2a
conv.i2
ceq
brfalse FAIL
ldarg.s 0x2b
ldc.i4 0x2b
conv.i2
ceq
brfalse FAIL
ldarg.s 0x2c
ldc.i4 0x2c
conv.i2
ceq
brfalse FAIL
ldarg.s 0x2d
ldc.i4 0x2d
conv.i2
ceq
brfalse FAIL
ldarg.s 0x2e
ldc.i4 0x2e
conv.i2
ceq
brfalse FAIL
ldarg.s 0x2f
ldc.i4 0x2f
conv.i2
ceq
brfalse FAIL
ldarg.s 0x30
ldc.i4 0x30
conv.i2
ceq
brfalse FAIL
ldarg.s 0x31
ldc.i4 0x31
conv.i2
ceq
brfalse FAIL
ldarg.s 0x32
ldc.i4 0x32
conv.i2
ceq
brfalse FAIL
ldarg.s 0x33
ldc.i4 0x33
conv.i2
ceq
brfalse FAIL
ldarg.s 0x34
ldc.i4 0x34
conv.i2
ceq
brfalse FAIL
ldarg.s 0x35
ldc.i4 0x35
conv.i2
ceq
brfalse FAIL
ldarg.s 0x36
ldc.i4 0x36
conv.i2
ceq
brfalse FAIL
ldarg.s 0x37
ldc.i4 0x37
conv.i2
ceq
brfalse FAIL
ldarg.s 0x38
ldc.i4 0x38
conv.i2
ceq
brfalse FAIL
ldarg.s 0x39
ldc.i4 0x39
conv.i2
ceq
brfalse FAIL
ldarg.s 0x3a
ldc.i4 0x3a
conv.i2
ceq
brfalse FAIL
ldarg.s 0x3b
ldc.i4 0x3b
conv.i2
ceq
brfalse FAIL
ldarg.s 0x3c
ldc.i4 0x3c
conv.i2
ceq
brfalse FAIL
ldarg.s 0x3d
ldc.i4 0x3d
conv.i2
ceq
brfalse FAIL
ldarg.s 0x3e
ldc.i4 0x3e
conv.i2
ceq
brfalse FAIL
ldarg.s 0x3f
ldc.i4 0x3f
conv.i2
ceq
brfalse FAIL
ldarg.s 0x40
ldc.i4 0x40
conv.i2
ceq
brfalse FAIL
ldarg.s 0x41
ldc.i4 0x41
conv.i2
ceq
brfalse FAIL
ldarg.s 0x42
ldc.i4 0x42
conv.i2
ceq
brfalse FAIL
ldarg.s 0x43
ldc.i4 0x43
conv.i2
ceq
brfalse FAIL
ldarg.s 0x44
ldc.i4 0x44
conv.i2
ceq
brfalse FAIL
ldarg.s 0x45
ldc.i4 0x45
conv.i2
ceq
brfalse FAIL
ldarg.s 0x46
ldc.i4 0x46
conv.i2
ceq
brfalse FAIL
ldarg.s 0x47
ldc.i4 0x47
conv.i2
ceq
brfalse FAIL
ldarg.s 0x48
ldc.i4 0x48
conv.i2
ceq
brfalse FAIL
ldarg.s 0x49
ldc.i4 0x49
conv.i2
ceq
brfalse FAIL
ldarg.s 0x4a
ldc.i4 0x4a
conv.i2
ceq
brfalse FAIL
ldarg.s 0x4b
ldc.i4 0x4b
conv.i2
ceq
brfalse FAIL
ldarg.s 0x4c
ldc.i4 0x4c
conv.i2
ceq
brfalse FAIL
ldarg.s 0x4d
ldc.i4 0x4d
conv.i2
ceq
brfalse FAIL
ldarg.s 0x4e
ldc.i4 0x4e
conv.i2
ceq
brfalse FAIL
ldarg.s 0x4f
ldc.i4 0x4f
conv.i2
ceq
brfalse FAIL
ldarg.s 0x50
ldc.i4 0x50
conv.i2
ceq
brfalse FAIL
ldarg.s 0x51
ldc.i4 0x51
conv.i2
ceq
brfalse FAIL
ldarg.s 0x52
ldc.i4 0x52
conv.i2
ceq
brfalse FAIL
ldarg.s 0x53
ldc.i4 0x53
conv.i2
ceq
brfalse FAIL
ldarg.s 0x54
ldc.i4 0x54
conv.i2
ceq
brfalse FAIL
ldarg.s 0x55
ldc.i4 0x55
conv.i2
ceq
brfalse FAIL
ldarg.s 0x56
ldc.i4 0x56
conv.i2
ceq
brfalse FAIL
ldarg.s 0x57
ldc.i4 0x57
conv.i2
ceq
brfalse FAIL
ldarg.s 0x58
ldc.i4 0x58
conv.i2
ceq
brfalse FAIL
ldarg.s 0x59
ldc.i4 0x59
conv.i2
ceq
brfalse FAIL
ldarg.s 0x5a
ldc.i4 0x5a
conv.i2
ceq
brfalse FAIL
ldarg.s 0x5b
ldc.i4 0x5b
conv.i2
ceq
brfalse FAIL
ldarg.s 0x5c
ldc.i4 0x5c
conv.i2
ceq
brfalse FAIL
ldarg.s 0x5d
ldc.i4 0x5d
conv.i2
ceq
brfalse FAIL
ldarg.s 0x5e
ldc.i4 0x5e
conv.i2
ceq
brfalse FAIL
ldarg.s 0x5f
ldc.i4 0x5f
conv.i2
ceq
brfalse FAIL
ldarg.s 0x60
ldc.i4 0x60
conv.i2
ceq
brfalse FAIL
ldarg.s 0x61
ldc.i4 0x61
conv.i2
ceq
brfalse FAIL
ldarg.s 0x62
ldc.i4 0x62
conv.i2
ceq
brfalse FAIL
ldarg.s 0x63
ldc.i4 0x63
conv.i2
ceq
brfalse FAIL
ldarg.s 0x64
ldc.i4 0x64
conv.i2
ceq
brfalse FAIL
ldarg.s 0x65
ldc.i4 0x65
conv.i2
ceq
brfalse FAIL
ldarg.s 0x66
ldc.i4 0x66
conv.i2
ceq
brfalse FAIL
ldarg.s 0x67
ldc.i4 0x67
conv.i2
ceq
brfalse FAIL
ldarg.s 0x68
ldc.i4 0x68
conv.i2
ceq
brfalse FAIL
ldarg.s 0x69
ldc.i4 0x69
conv.i2
ceq
brfalse FAIL
ldarg.s 0x6a
ldc.i4 0x6a
conv.i2
ceq
brfalse FAIL
ldarg.s 0x6b
ldc.i4 0x6b
conv.i2
ceq
brfalse FAIL
ldarg.s 0x6c
ldc.i4 0x6c
conv.i2
ceq
brfalse FAIL
ldarg.s 0x6d
ldc.i4 0x6d
conv.i2
ceq
brfalse FAIL
ldarg.s 0x6e
ldc.i4 0x6e
conv.i2
ceq
brfalse FAIL
ldarg.s 0x6f
ldc.i4 0x6f
conv.i2
ceq
brfalse FAIL
ldarg.s 0x70
ldc.i4 0x70
conv.i2
ceq
brfalse FAIL
ldarg.s 0x71
ldc.i4 0x71
conv.i2
ceq
brfalse FAIL
ldarg.s 0x72
ldc.i4 0x72
conv.i2
ceq
brfalse FAIL
ldarg.s 0x73
ldc.i4 0x73
conv.i2
ceq
brfalse FAIL
ldarg.s 0x74
ldc.i4 0x74
conv.i2
ceq
brfalse FAIL
ldarg.s 0x75
ldc.i4 0x75
conv.i2
ceq
brfalse FAIL
ldarg.s 0x76
ldc.i4 0x76
conv.i2
ceq
brfalse FAIL
ldarg.s 0x77
ldc.i4 0x77
conv.i2
ceq
brfalse FAIL
ldarg.s 0x78
ldc.i4 0x78
conv.i2
ceq
brfalse FAIL
ldarg.s 0x79
ldc.i4 0x79
conv.i2
ceq
brfalse FAIL
ldarg.s 0x7a
ldc.i4 0x7a
conv.i2
ceq
brfalse FAIL
ldarg.s 0x7b
ldc.i4 0x7b
conv.i2
ceq
brfalse FAIL
ldarg.s 0x7c
ldc.i4 0x7c
conv.i2
ceq
brfalse FAIL
ldarg.s 0x7d
ldc.i4 0x7d
conv.i2
ceq
brfalse FAIL
ldarg.s 0x7e
ldc.i4 0x7e
conv.i2
ceq
brfalse FAIL
ldarg.s 0x7f
ldc.i4 0x7f
conv.i2
ceq
brfalse FAIL
ldarg.s 0x80
ldc.i4 0x80
conv.i2
ceq
brfalse FAIL
ldarg.s 0x81
ldc.i4 0x81
conv.i2
ceq
brfalse FAIL
ldarg.s 0x82
ldc.i4 0x82
conv.i2
ceq
brfalse FAIL
ldarg.s 0x83
ldc.i4 0x83
conv.i2
ceq
brfalse FAIL
ldarg.s 0x84
ldc.i4 0x84
conv.i2
ceq
brfalse FAIL
ldarg.s 0x85
ldc.i4 0x85
conv.i2
ceq
brfalse FAIL
ldarg.s 0x86
ldc.i4 0x86
conv.i2
ceq
brfalse FAIL
ldarg.s 0x87
ldc.i4 0x87
conv.i2
ceq
brfalse FAIL
ldarg.s 0x88
ldc.i4 0x88
conv.i2
ceq
brfalse FAIL
ldarg.s 0x89
ldc.i4 0x89
conv.i2
ceq
brfalse FAIL
ldarg.s 0x8a
ldc.i4 0x8a
conv.i2
ceq
brfalse FAIL
ldarg.s 0x8b
ldc.i4 0x8b
conv.i2
ceq
brfalse FAIL
ldarg.s 0x8c
ldc.i4 0x8c
conv.i2
ceq
brfalse FAIL
ldarg.s 0x8d
ldc.i4 0x8d
conv.i2
ceq
brfalse FAIL
ldarg.s 0x8e
ldc.i4 0x8e
conv.i2
ceq
brfalse FAIL
ldarg.s 0x8f
ldc.i4 0x8f
conv.i2
ceq
brfalse FAIL
ldarg.s 0x90
ldc.i4 0x90
conv.i2
ceq
brfalse FAIL
ldarg.s 0x91
ldc.i4 0x91
conv.i2
ceq
brfalse FAIL
ldarg.s 0x92
ldc.i4 0x92
conv.i2
ceq
brfalse FAIL
ldarg.s 0x93
ldc.i4 0x93
conv.i2
ceq
brfalse FAIL
ldarg.s 0x94
ldc.i4 0x94
conv.i2
ceq
brfalse FAIL
ldarg.s 0x95
ldc.i4 0x95
conv.i2
ceq
brfalse FAIL
ldarg.s 0x96
ldc.i4 0x96
conv.i2
ceq
brfalse FAIL
ldarg.s 0x97
ldc.i4 0x97
conv.i2
ceq
brfalse FAIL
ldarg.s 0x98
ldc.i4 0x98
conv.i2
ceq
brfalse FAIL
ldarg.s 0x99
ldc.i4 0x99
conv.i2
ceq
brfalse FAIL
ldarg.s 0x9a
ldc.i4 0x9a
conv.i2
ceq
brfalse FAIL
ldarg.s 0x9b
ldc.i4 0x9b
conv.i2
ceq
brfalse FAIL
ldarg.s 0x9c
ldc.i4 0x9c
conv.i2
ceq
brfalse FAIL
ldarg.s 0x9d
ldc.i4 0x9d
conv.i2
ceq
brfalse FAIL
ldarg.s 0x9e
ldc.i4 0x9e
conv.i2
ceq
brfalse FAIL
ldarg.s 0x9f
ldc.i4 0x9f
conv.i2
ceq
brfalse FAIL
ldarg.s 0xa0
ldc.i4 0xa0
conv.i2
ceq
brfalse FAIL
ldarg.s 0xa1
ldc.i4 0xa1
conv.i2
ceq
brfalse FAIL
ldarg.s 0xa2
ldc.i4 0xa2
conv.i2
ceq
brfalse FAIL
ldarg.s 0xa3
ldc.i4 0xa3
conv.i2
ceq
brfalse FAIL
ldarg.s 0xa4
ldc.i4 0xa4
conv.i2
ceq
brfalse FAIL
ldarg.s 0xa5
ldc.i4 0xa5
conv.i2
ceq
brfalse FAIL
ldarg.s 0xa6
ldc.i4 0xa6
conv.i2
ceq
brfalse FAIL
ldarg.s 0xa7
ldc.i4 0xa7
conv.i2
ceq
brfalse FAIL
ldarg.s 0xa8
ldc.i4 0xa8
conv.i2
ceq
brfalse FAIL
ldarg.s 0xa9
ldc.i4 0xa9
conv.i2
ceq
brfalse FAIL
ldarg.s 0xaa
ldc.i4 0xaa
conv.i2
ceq
brfalse FAIL
ldarg.s 0xab
ldc.i4 0xab
conv.i2
ceq
brfalse FAIL
ldarg.s 0xac
ldc.i4 0xac
conv.i2
ceq
brfalse FAIL
ldarg.s 0xad
ldc.i4 0xad
conv.i2
ceq
brfalse FAIL
ldarg.s 0xae
ldc.i4 0xae
conv.i2
ceq
brfalse FAIL
ldarg.s 0xaf
ldc.i4 0xaf
conv.i2
ceq
brfalse FAIL
ldarg.s 0xb0
ldc.i4 0xb0
conv.i2
ceq
brfalse FAIL
ldarg.s 0xb1
ldc.i4 0xb1
conv.i2
ceq
brfalse FAIL
ldarg.s 0xb2
ldc.i4 0xb2
conv.i2
ceq
brfalse FAIL
ldarg.s 0xb3
ldc.i4 0xb3
conv.i2
ceq
brfalse FAIL
ldarg.s 0xb4
ldc.i4 0xb4
conv.i2
ceq
brfalse FAIL
ldarg.s 0xb5
ldc.i4 0xb5
conv.i2
ceq
brfalse FAIL
ldarg.s 0xb6
ldc.i4 0xb6
conv.i2
ceq
brfalse FAIL
ldarg.s 0xb7
ldc.i4 0xb7
conv.i2
ceq
brfalse FAIL
ldarg.s 0xb8
ldc.i4 0xb8
conv.i2
ceq
brfalse FAIL
ldarg.s 0xb9
ldc.i4 0xb9
conv.i2
ceq
brfalse FAIL
ldarg.s 0xba
ldc.i4 0xba
conv.i2
ceq
brfalse FAIL
ldarg.s 0xbb
ldc.i4 0xbb
conv.i2
ceq
brfalse FAIL
ldarg.s 0xbc
ldc.i4 0xbc
conv.i2
ceq
brfalse FAIL
ldarg.s 0xbd
ldc.i4 0xbd
conv.i2
ceq
brfalse FAIL
ldarg.s 0xbe
ldc.i4 0xbe
conv.i2
ceq
brfalse FAIL
ldarg.s 0xbf
ldc.i4 0xbf
conv.i2
ceq
brfalse FAIL
ldarg.s 0xc0
ldc.i4 0xc0
conv.i2
ceq
brfalse FAIL
ldarg.s 0xc1
ldc.i4 0xc1
conv.i2
ceq
brfalse FAIL
ldarg.s 0xc2
ldc.i4 0xc2
conv.i2
ceq
brfalse FAIL
ldarg.s 0xc3
ldc.i4 0xc3
conv.i2
ceq
brfalse FAIL
ldarg.s 0xc4
ldc.i4 0xc4
conv.i2
ceq
brfalse FAIL
ldarg.s 0xc5
ldc.i4 0xc5
conv.i2
ceq
brfalse FAIL
ldarg.s 0xc6
ldc.i4 0xc6
conv.i2
ceq
brfalse FAIL
ldarg.s 0xc7
ldc.i4 0xc7
conv.i2
ceq
brfalse FAIL
ldarg.s 0xc8
ldc.i4 0xc8
conv.i2
ceq
brfalse FAIL
ldarg.s 0xc9
ldc.i4 0xc9
conv.i2
ceq
brfalse FAIL
ldarg.s 0xca
ldc.i4 0xca
conv.i2
ceq
brfalse FAIL
ldarg.s 0xcb
ldc.i4 0xcb
conv.i2
ceq
brfalse FAIL
ldarg.s 0xcc
ldc.i4 0xcc
conv.i2
ceq
brfalse FAIL
ldarg.s 0xcd
ldc.i4 0xcd
conv.i2
ceq
brfalse FAIL
ldarg.s 0xce
ldc.i4 0xce
conv.i2
ceq
brfalse FAIL
ldarg.s 0xcf
ldc.i4 0xcf
conv.i2
ceq
brfalse FAIL
ldarg.s 0xd0
ldc.i4 0xd0
conv.i2
ceq
brfalse FAIL
ldarg.s 0xd1
ldc.i4 0xd1
conv.i2
ceq
brfalse FAIL
ldarg.s 0xd2
ldc.i4 0xd2
conv.i2
ceq
brfalse FAIL
ldarg.s 0xd3
ldc.i4 0xd3
conv.i2
ceq
brfalse FAIL
ldarg.s 0xd4
ldc.i4 0xd4
conv.i2
ceq
brfalse FAIL
ldarg.s 0xd5
ldc.i4 0xd5
conv.i2
ceq
brfalse FAIL
ldarg.s 0xd6
ldc.i4 0xd6
conv.i2
ceq
brfalse FAIL
ldarg.s 0xd7
ldc.i4 0xd7
conv.i2
ceq
brfalse FAIL
ldarg.s 0xd8
ldc.i4 0xd8
conv.i2
ceq
brfalse FAIL
ldarg.s 0xd9
ldc.i4 0xd9
conv.i2
ceq
brfalse FAIL
ldarg.s 0xda
ldc.i4 0xda
conv.i2
ceq
brfalse FAIL
ldarg.s 0xdb
ldc.i4 0xdb
conv.i2
ceq
brfalse FAIL
ldarg.s 0xdc
ldc.i4 0xdc
conv.i2
ceq
brfalse FAIL
ldarg.s 0xdd
ldc.i4 0xdd
conv.i2
ceq
brfalse FAIL
ldarg.s 0xde
ldc.i4 0xde
conv.i2
ceq
brfalse FAIL
ldarg.s 0xdf
ldc.i4 0xdf
conv.i2
ceq
brfalse FAIL
ldarg.s 0xe0
ldc.i4 0xe0
conv.i2
ceq
brfalse FAIL
ldarg.s 0xe1
ldc.i4 0xe1
conv.i2
ceq
brfalse FAIL
ldarg.s 0xe2
ldc.i4 0xe2
conv.i2
ceq
brfalse FAIL
ldarg.s 0xe3
ldc.i4 0xe3
conv.i2
ceq
brfalse FAIL
ldarg.s 0xe4
ldc.i4 0xe4
conv.i2
ceq
brfalse FAIL
ldarg.s 0xe5
ldc.i4 0xe5
conv.i2
ceq
brfalse FAIL
ldarg.s 0xe6
ldc.i4 0xe6
conv.i2
ceq
brfalse FAIL
ldarg.s 0xe7
ldc.i4 0xe7
conv.i2
ceq
brfalse FAIL
ldarg.s 0xe8
ldc.i4 0xe8
conv.i2
ceq
brfalse FAIL
ldarg.s 0xe9
ldc.i4 0xe9
conv.i2
ceq
brfalse FAIL
ldarg.s 0xea
ldc.i4 0xea
conv.i2
ceq
brfalse FAIL
ldarg.s 0xeb
ldc.i4 0xeb
conv.i2
ceq
brfalse FAIL
ldarg.s 0xec
ldc.i4 0xec
conv.i2
ceq
brfalse FAIL
ldarg.s 0xed
ldc.i4 0xed
conv.i2
ceq
brfalse FAIL
ldarg.s 0xee
ldc.i4 0xee
conv.i2
ceq
brfalse FAIL
ldarg.s 0xef
ldc.i4 0xef
conv.i2
ceq
brfalse FAIL
ldarg.s 0xf0
ldc.i4 0xf0
conv.i2
ceq
brfalse FAIL
ldarg.s 0xf1
ldc.i4 0xf1
conv.i2
ceq
brfalse FAIL
ldarg.s 0xf2
ldc.i4 0xf2
conv.i2
ceq
brfalse FAIL
ldarg.s 0xf3
ldc.i4 0xf3
conv.i2
ceq
brfalse FAIL
ldarg.s 0xf4
ldc.i4 0xf4
conv.i2
ceq
brfalse FAIL
ldarg.s 0xf5
ldc.i4 0xf5
conv.i2
ceq
brfalse FAIL
ldarg.s 0xf6
ldc.i4 0xf6
conv.i2
ceq
brfalse FAIL
ldarg.s 0xf7
ldc.i4 0xf7
conv.i2
ceq
brfalse FAIL
ldarg.s 0xf8
ldc.i4 0xf8
conv.i2
ceq
brfalse FAIL
ldarg.s 0xf9
ldc.i4 0xf9
conv.i2
ceq
brfalse FAIL
ldarg.s 0xfa
ldc.i4 0xfa
conv.i2
ceq
brfalse FAIL
ldarg.s 0xfb
ldc.i4 0xfb
conv.i2
ceq
brfalse FAIL
ldarg.s 0xfc
ldc.i4 0xfc
conv.i2
ceq
brfalse FAIL
ldarg.s 0xfd
ldc.i4 0xfd
conv.i2
ceq
brfalse FAIL
ldarg.s 0xfe
ldc.i4 0xfe
conv.i2
ceq
brfalse FAIL
ldarg.s 0xff
ldc.i4 0xff
conv.i2
ceq
brfalse FAIL
ldc.i4.1
ret
FAIL:
ldc.i4.0
ret
}
.method public static int32 main(class [mscorlib]System.String[]) {
.entrypoint
.maxstack 2
call int32 ldarg_s_i2::test_int16()
ldc.i4.1
ceq
brfalse FAIL
ldc.i4 100
ret
FAIL:
ldc.i4.0
ret
}
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
.assembly extern legacy library mscorlib {}
.assembly ldarg_s_i2 {}
.class ldarg_s_i2 {
.method public static int32 test_int16()
{
.locals()
.maxstack 256
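// Pushes the constants 0x00..0xff and passes them as 256 int16 arguments; the
// callee below reloads each argument with ldarg.s and compares it against the
// same constant, branching to FAIL on any corrupted argument slot.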
ldc.i4.0
ldc.i4.1
ldc.i4.2
ldc.i4.3
ldc.i4.4
ldc.i4.5
ldc.i4.6
ldc.i4.7
ldc.i4.8
ldc.i4.s 0x09
ldc.i4.s 0x0a
ldc.i4.s 0x0b
ldc.i4.s 0x0c
ldc.i4.s 0x0d
ldc.i4.s 0x0e
ldc.i4.s 0x0f
ldc.i4.s 0x10
ldc.i4.s 0x11
ldc.i4.s 0x12
ldc.i4.s 0x13
ldc.i4.s 0x14
ldc.i4.s 0x15
ldc.i4.s 0x16
ldc.i4.s 0x17
ldc.i4.s 0x18
ldc.i4.s 0x19
ldc.i4.s 0x1a
ldc.i4.s 0x1b
ldc.i4.s 0x1c
ldc.i4.s 0x1d
ldc.i4.s 0x1e
ldc.i4.s 0x1f
ldc.i4.s 0x20
ldc.i4.s 0x21
ldc.i4.s 0x22
ldc.i4.s 0x23
ldc.i4.s 0x24
ldc.i4.s 0x25
ldc.i4.s 0x26
ldc.i4.s 0x27
ldc.i4.s 0x28
ldc.i4.s 0x29
ldc.i4.s 0x2a
ldc.i4.s 0x2b
ldc.i4.s 0x2c
ldc.i4.s 0x2d
ldc.i4.s 0x2e
ldc.i4.s 0x2f
ldc.i4.s 0x30
ldc.i4.s 0x31
ldc.i4.s 0x32
ldc.i4.s 0x33
ldc.i4.s 0x34
ldc.i4.s 0x35
ldc.i4.s 0x36
ldc.i4.s 0x37
ldc.i4.s 0x38
ldc.i4.s 0x39
ldc.i4.s 0x3a
ldc.i4.s 0x3b
ldc.i4.s 0x3c
ldc.i4.s 0x3d
ldc.i4.s 0x3e
ldc.i4.s 0x3f
ldc.i4.s 0x40
ldc.i4.s 0x41
ldc.i4.s 0x42
ldc.i4.s 0x43
ldc.i4.s 0x44
ldc.i4.s 0x45
ldc.i4.s 0x46
ldc.i4.s 0x47
ldc.i4.s 0x48
ldc.i4.s 0x49
ldc.i4.s 0x4a
ldc.i4.s 0x4b
ldc.i4.s 0x4c
ldc.i4.s 0x4d
ldc.i4.s 0x4e
ldc.i4.s 0x4f
ldc.i4.s 0x50
ldc.i4.s 0x51
ldc.i4.s 0x52
ldc.i4.s 0x53
ldc.i4.s 0x54
ldc.i4.s 0x55
ldc.i4.s 0x56
ldc.i4.s 0x57
ldc.i4.s 0x58
ldc.i4.s 0x59
ldc.i4.s 0x5a
ldc.i4.s 0x5b
ldc.i4.s 0x5c
ldc.i4.s 0x5d
ldc.i4.s 0x5e
ldc.i4.s 0x5f
ldc.i4.s 0x60
ldc.i4.s 0x61
ldc.i4.s 0x62
ldc.i4.s 0x63
ldc.i4.s 0x64
ldc.i4.s 0x65
ldc.i4.s 0x66
ldc.i4.s 0x67
ldc.i4.s 0x68
ldc.i4.s 0x69
ldc.i4.s 0x6a
ldc.i4.s 0x6b
ldc.i4.s 0x6c
ldc.i4.s 0x6d
ldc.i4.s 0x6e
ldc.i4.s 0x6f
ldc.i4.s 0x70
ldc.i4.s 0x71
ldc.i4.s 0x72
ldc.i4.s 0x73
ldc.i4.s 0x74
ldc.i4.s 0x75
ldc.i4.s 0x76
ldc.i4.s 0x77
ldc.i4.s 0x78
ldc.i4.s 0x79
ldc.i4.s 0x7a
ldc.i4.s 0x7b
ldc.i4.s 0x7c
ldc.i4.s 0x7d
ldc.i4.s 0x7e
ldc.i4.s 0x7f
ldc.i4 0x80
ldc.i4 0x81
ldc.i4 0x82
ldc.i4 0x83
ldc.i4 0x84
ldc.i4 0x85
ldc.i4 0x86
ldc.i4 0x87
ldc.i4 0x88
ldc.i4 0x89
ldc.i4 0x8a
ldc.i4 0x8b
ldc.i4 0x8c
ldc.i4 0x8d
ldc.i4 0x8e
ldc.i4 0x8f
ldc.i4 0x90
ldc.i4 0x91
ldc.i4 0x92
ldc.i4 0x93
ldc.i4 0x94
ldc.i4 0x95
ldc.i4 0x96
ldc.i4 0x97
ldc.i4 0x98
ldc.i4 0x99
ldc.i4 0x9a
ldc.i4 0x9b
ldc.i4 0x9c
ldc.i4 0x9d
ldc.i4 0x9e
ldc.i4 0x9f
ldc.i4 0xa0
ldc.i4 0xa1
ldc.i4 0xa2
ldc.i4 0xa3
ldc.i4 0xa4
ldc.i4 0xa5
ldc.i4 0xa6
ldc.i4 0xa7
ldc.i4 0xa8
ldc.i4 0xa9
ldc.i4 0xaa
ldc.i4 0xab
ldc.i4 0xac
ldc.i4 0xad
ldc.i4 0xae
ldc.i4 0xaf
ldc.i4 0xb0
ldc.i4 0xb1
ldc.i4 0xb2
ldc.i4 0xb3
ldc.i4 0xb4
ldc.i4 0xb5
ldc.i4 0xb6
ldc.i4 0xb7
ldc.i4 0xb8
ldc.i4 0xb9
ldc.i4 0xba
ldc.i4 0xbb
ldc.i4 0xbc
ldc.i4 0xbd
ldc.i4 0xbe
ldc.i4 0xbf
ldc.i4 0xc0
ldc.i4 0xc1
ldc.i4 0xc2
ldc.i4 0xc3
ldc.i4 0xc4
ldc.i4 0xc5
ldc.i4 0xc6
ldc.i4 0xc7
ldc.i4 0xc8
ldc.i4 0xc9
ldc.i4 0xca
ldc.i4 0xcb
ldc.i4 0xcc
ldc.i4 0xcd
ldc.i4 0xce
ldc.i4 0xcf
ldc.i4 0xd0
ldc.i4 0xd1
ldc.i4 0xd2
ldc.i4 0xd3
ldc.i4 0xd4
ldc.i4 0xd5
ldc.i4 0xd6
ldc.i4 0xd7
ldc.i4 0xd8
ldc.i4 0xd9
ldc.i4 0xda
ldc.i4 0xdb
ldc.i4 0xdc
ldc.i4 0xdd
ldc.i4 0xde
ldc.i4 0xdf
ldc.i4 0xe0
ldc.i4 0xe1
ldc.i4 0xe2
ldc.i4 0xe3
ldc.i4 0xe4
ldc.i4 0xe5
ldc.i4 0xe6
ldc.i4 0xe7
ldc.i4 0xe8
ldc.i4 0xe9
ldc.i4 0xea
ldc.i4 0xeb
ldc.i4 0xec
ldc.i4 0xed
ldc.i4 0xee
ldc.i4 0xef
ldc.i4 0xf0
ldc.i4 0xf1
ldc.i4 0xf2
ldc.i4 0xf3
ldc.i4 0xf4
ldc.i4 0xf5
ldc.i4 0xf6
ldc.i4 0xf7
ldc.i4 0xf8
ldc.i4 0xf9
ldc.i4 0xfa
ldc.i4 0xfb
ldc.i4 0xfc
ldc.i4 0xfd
ldc.i4 0xfe
ldc.i4 0xff
call int32 ldarg_s_i2::test_int16(
int16, int16, int16, int16, int16, int16, int16, int16,
int16, int16, int16, int16, int16, int16, int16, int16,
int16, int16, int16, int16, int16, int16, int16, int16,
int16, int16, int16, int16, int16, int16, int16, int16,
int16, int16, int16, int16, int16, int16, int16, int16,
int16, int16, int16, int16, int16, int16, int16, int16,
int16, int16, int16, int16, int16, int16, int16, int16,
int16, int16, int16, int16, int16, int16, int16, int16,
int16, int16, int16, int16, int16, int16, int16, int16,
int16, int16, int16, int16, int16, int16, int16, int16,
int16, int16, int16, int16, int16, int16, int16, int16,
int16, int16, int16, int16, int16, int16, int16, int16,
int16, int16, int16, int16, int16, int16, int16, int16,
int16, int16, int16, int16, int16, int16, int16, int16,
int16, int16, int16, int16, int16, int16, int16, int16,
int16, int16, int16, int16, int16, int16, int16, int16,
int16, int16, int16, int16, int16, int16, int16, int16,
int16, int16, int16, int16, int16, int16, int16, int16,
int16, int16, int16, int16, int16, int16, int16, int16,
int16, int16, int16, int16, int16, int16, int16, int16,
int16, int16, int16, int16, int16, int16, int16, int16,
int16, int16, int16, int16, int16, int16, int16, int16,
int16, int16, int16, int16, int16, int16, int16, int16,
int16, int16, int16, int16, int16, int16, int16, int16,
int16, int16, int16, int16, int16, int16, int16, int16,
int16, int16, int16, int16, int16, int16, int16, int16,
int16, int16, int16, int16, int16, int16, int16, int16,
int16, int16, int16, int16, int16, int16, int16, int16,
int16, int16, int16, int16, int16, int16, int16, int16,
int16, int16, int16, int16, int16, int16, int16, int16,
int16, int16, int16, int16, int16, int16, int16, int16,
int16, int16, int16, int16, int16, int16, int16, int16)
ret
}
.method public static int32 test_int16(
int16, int16, int16, int16, int16, int16, int16, int16,
int16, int16, int16, int16, int16, int16, int16, int16,
int16, int16, int16, int16, int16, int16, int16, int16,
int16, int16, int16, int16, int16, int16, int16, int16,
int16, int16, int16, int16, int16, int16, int16, int16,
int16, int16, int16, int16, int16, int16, int16, int16,
int16, int16, int16, int16, int16, int16, int16, int16,
int16, int16, int16, int16, int16, int16, int16, int16,
int16, int16, int16, int16, int16, int16, int16, int16,
int16, int16, int16, int16, int16, int16, int16, int16,
int16, int16, int16, int16, int16, int16, int16, int16,
int16, int16, int16, int16, int16, int16, int16, int16,
int16, int16, int16, int16, int16, int16, int16, int16,
int16, int16, int16, int16, int16, int16, int16, int16,
int16, int16, int16, int16, int16, int16, int16, int16,
int16, int16, int16, int16, int16, int16, int16, int16,
int16, int16, int16, int16, int16, int16, int16, int16,
int16, int16, int16, int16, int16, int16, int16, int16,
int16, int16, int16, int16, int16, int16, int16, int16,
int16, int16, int16, int16, int16, int16, int16, int16,
int16, int16, int16, int16, int16, int16, int16, int16,
int16, int16, int16, int16, int16, int16, int16, int16,
int16, int16, int16, int16, int16, int16, int16, int16,
int16, int16, int16, int16, int16, int16, int16, int16,
int16, int16, int16, int16, int16, int16, int16, int16,
int16, int16, int16, int16, int16, int16, int16, int16,
int16, int16, int16, int16, int16, int16, int16, int16,
int16, int16, int16, int16, int16, int16, int16, int16,
int16, int16, int16, int16, int16, int16, int16, int16,
int16, int16, int16, int16, int16, int16, int16, int16,
int16, int16, int16, int16, int16, int16, int16, int16,
int16, int16, int16, int16, int16, int16, int16, int16)
{
.locals()
.maxstack 2
ldarg.s 0x00
ldc.i4 0x00
conv.i2
ceq
brfalse FAIL
ldarg.s 0x01
ldc.i4 0x01
conv.i2
ceq
brfalse FAIL
ldarg.s 0x02
ldc.i4 0x02
conv.i2
ceq
brfalse FAIL
ldarg.s 0x03
ldc.i4 0x03
conv.i2
ceq
brfalse FAIL
ldarg.s 0x04
ldc.i4 0x04
conv.i2
ceq
brfalse FAIL
ldarg.s 0x05
ldc.i4 0x05
conv.i2
ceq
brfalse FAIL
ldarg.s 0x06
ldc.i4 0x06
conv.i2
ceq
brfalse FAIL
ldarg.s 0x07
ldc.i4 0x07
conv.i2
ceq
brfalse FAIL
ldarg.s 0x08
ldc.i4 0x08
conv.i2
ceq
brfalse FAIL
ldarg.s 0x09
ldc.i4 0x09
conv.i2
ceq
brfalse FAIL
ldarg.s 0x0a
ldc.i4 0x0a
conv.i2
ceq
brfalse FAIL
ldarg.s 0x0b
ldc.i4 0x0b
conv.i2
ceq
brfalse FAIL
ldarg.s 0x0c
ldc.i4 0x0c
conv.i2
ceq
brfalse FAIL
ldarg.s 0x0d
ldc.i4 0x0d
conv.i2
ceq
brfalse FAIL
ldarg.s 0x0e
ldc.i4 0x0e
conv.i2
ceq
brfalse FAIL
ldarg.s 0x0f
ldc.i4 0x0f
conv.i2
ceq
brfalse FAIL
ldarg.s 0x10
ldc.i4 0x10
conv.i2
ceq
brfalse FAIL
ldarg.s 0x11
ldc.i4 0x11
conv.i2
ceq
brfalse FAIL
ldarg.s 0x12
ldc.i4 0x12
conv.i2
ceq
brfalse FAIL
ldarg.s 0x13
ldc.i4 0x13
conv.i2
ceq
brfalse FAIL
ldarg.s 0x14
ldc.i4 0x14
conv.i2
ceq
brfalse FAIL
ldarg.s 0x15
ldc.i4 0x15
conv.i2
ceq
brfalse FAIL
ldarg.s 0x16
ldc.i4 0x16
conv.i2
ceq
brfalse FAIL
ldarg.s 0x17
ldc.i4 0x17
conv.i2
ceq
brfalse FAIL
ldarg.s 0x18
ldc.i4 0x18
conv.i2
ceq
brfalse FAIL
ldarg.s 0x19
ldc.i4 0x19
conv.i2
ceq
brfalse FAIL
ldarg.s 0x1a
ldc.i4 0x1a
conv.i2
ceq
brfalse FAIL
ldarg.s 0x1b
ldc.i4 0x1b
conv.i2
ceq
brfalse FAIL
ldarg.s 0x1c
ldc.i4 0x1c
conv.i2
ceq
brfalse FAIL
ldarg.s 0x1d
ldc.i4 0x1d
conv.i2
ceq
brfalse FAIL
ldarg.s 0x1e
ldc.i4 0x1e
conv.i2
ceq
brfalse FAIL
ldarg.s 0x1f
ldc.i4 0x1f
conv.i2
ceq
brfalse FAIL
ldarg.s 0x20
ldc.i4 0x20
conv.i2
ceq
brfalse FAIL
ldarg.s 0x21
ldc.i4 0x21
conv.i2
ceq
brfalse FAIL
ldarg.s 0x22
ldc.i4 0x22
conv.i2
ceq
brfalse FAIL
ldarg.s 0x23
ldc.i4 0x23
conv.i2
ceq
brfalse FAIL
ldarg.s 0x24
ldc.i4 0x24
conv.i2
ceq
brfalse FAIL
ldarg.s 0x25
ldc.i4 0x25
conv.i2
ceq
brfalse FAIL
ldarg.s 0x26
ldc.i4 0x26
conv.i2
ceq
brfalse FAIL
ldarg.s 0x27
ldc.i4 0x27
conv.i2
ceq
brfalse FAIL
ldarg.s 0x28
ldc.i4 0x28
conv.i2
ceq
brfalse FAIL
ldarg.s 0x29
ldc.i4 0x29
conv.i2
ceq
brfalse FAIL
ldarg.s 0x2a
ldc.i4 0x2a
conv.i2
ceq
brfalse FAIL
ldarg.s 0x2b
ldc.i4 0x2b
conv.i2
ceq
brfalse FAIL
ldarg.s 0x2c
ldc.i4 0x2c
conv.i2
ceq
brfalse FAIL
ldarg.s 0x2d
ldc.i4 0x2d
conv.i2
ceq
brfalse FAIL
ldarg.s 0x2e
ldc.i4 0x2e
conv.i2
ceq
brfalse FAIL
ldarg.s 0x2f
ldc.i4 0x2f
conv.i2
ceq
brfalse FAIL
ldarg.s 0x30
ldc.i4 0x30
conv.i2
ceq
brfalse FAIL
ldarg.s 0x31
ldc.i4 0x31
conv.i2
ceq
brfalse FAIL
ldarg.s 0x32
ldc.i4 0x32
conv.i2
ceq
brfalse FAIL
ldarg.s 0x33
ldc.i4 0x33
conv.i2
ceq
brfalse FAIL
ldarg.s 0x34
ldc.i4 0x34
conv.i2
ceq
brfalse FAIL
ldarg.s 0x35
ldc.i4 0x35
conv.i2
ceq
brfalse FAIL
ldarg.s 0x36
ldc.i4 0x36
conv.i2
ceq
brfalse FAIL
ldarg.s 0x37
ldc.i4 0x37
conv.i2
ceq
brfalse FAIL
ldarg.s 0x38
ldc.i4 0x38
conv.i2
ceq
brfalse FAIL
ldarg.s 0x39
ldc.i4 0x39
conv.i2
ceq
brfalse FAIL
ldarg.s 0x3a
ldc.i4 0x3a
conv.i2
ceq
brfalse FAIL
ldarg.s 0x3b
ldc.i4 0x3b
conv.i2
ceq
brfalse FAIL
ldarg.s 0x3c
ldc.i4 0x3c
conv.i2
ceq
brfalse FAIL
ldarg.s 0x3d
ldc.i4 0x3d
conv.i2
ceq
brfalse FAIL
ldarg.s 0x3e
ldc.i4 0x3e
conv.i2
ceq
brfalse FAIL
ldarg.s 0x3f
ldc.i4 0x3f
conv.i2
ceq
brfalse FAIL
ldarg.s 0x40
ldc.i4 0x40
conv.i2
ceq
brfalse FAIL
ldarg.s 0x41
ldc.i4 0x41
conv.i2
ceq
brfalse FAIL
ldarg.s 0x42
ldc.i4 0x42
conv.i2
ceq
brfalse FAIL
ldarg.s 0x43
ldc.i4 0x43
conv.i2
ceq
brfalse FAIL
ldarg.s 0x44
ldc.i4 0x44
conv.i2
ceq
brfalse FAIL
ldarg.s 0x45
ldc.i4 0x45
conv.i2
ceq
brfalse FAIL
ldarg.s 0x46
ldc.i4 0x46
conv.i2
ceq
brfalse FAIL
ldarg.s 0x47
ldc.i4 0x47
conv.i2
ceq
brfalse FAIL
ldarg.s 0x48
ldc.i4 0x48
conv.i2
ceq
brfalse FAIL
ldarg.s 0x49
ldc.i4 0x49
conv.i2
ceq
brfalse FAIL
ldarg.s 0x4a
ldc.i4 0x4a
conv.i2
ceq
brfalse FAIL
ldarg.s 0x4b
ldc.i4 0x4b
conv.i2
ceq
brfalse FAIL
ldarg.s 0x4c
ldc.i4 0x4c
conv.i2
ceq
brfalse FAIL
ldarg.s 0x4d
ldc.i4 0x4d
conv.i2
ceq
brfalse FAIL
ldarg.s 0x4e
ldc.i4 0x4e
conv.i2
ceq
brfalse FAIL
ldarg.s 0x4f
ldc.i4 0x4f
conv.i2
ceq
brfalse FAIL
ldarg.s 0x50
ldc.i4 0x50
conv.i2
ceq
brfalse FAIL
ldarg.s 0x51
ldc.i4 0x51
conv.i2
ceq
brfalse FAIL
ldarg.s 0x52
ldc.i4 0x52
conv.i2
ceq
brfalse FAIL
ldarg.s 0x53
ldc.i4 0x53
conv.i2
ceq
brfalse FAIL
ldarg.s 0x54
ldc.i4 0x54
conv.i2
ceq
brfalse FAIL
ldarg.s 0x55
ldc.i4 0x55
conv.i2
ceq
brfalse FAIL
ldarg.s 0x56
ldc.i4 0x56
conv.i2
ceq
brfalse FAIL
ldarg.s 0x57
ldc.i4 0x57
conv.i2
ceq
brfalse FAIL
ldarg.s 0x58
ldc.i4 0x58
conv.i2
ceq
brfalse FAIL
ldarg.s 0x59
ldc.i4 0x59
conv.i2
ceq
brfalse FAIL
ldarg.s 0x5a
ldc.i4 0x5a
conv.i2
ceq
brfalse FAIL
ldarg.s 0x5b
ldc.i4 0x5b
conv.i2
ceq
brfalse FAIL
ldarg.s 0x5c
ldc.i4 0x5c
conv.i2
ceq
brfalse FAIL
ldarg.s 0x5d
ldc.i4 0x5d
conv.i2
ceq
brfalse FAIL
ldarg.s 0x5e
ldc.i4 0x5e
conv.i2
ceq
brfalse FAIL
ldarg.s 0x5f
ldc.i4 0x5f
conv.i2
ceq
brfalse FAIL
ldarg.s 0x60
ldc.i4 0x60
conv.i2
ceq
brfalse FAIL
ldarg.s 0x61
ldc.i4 0x61
conv.i2
ceq
brfalse FAIL
ldarg.s 0x62
ldc.i4 0x62
conv.i2
ceq
brfalse FAIL
ldarg.s 0x63
ldc.i4 0x63
conv.i2
ceq
brfalse FAIL
ldarg.s 0x64
ldc.i4 0x64
conv.i2
ceq
brfalse FAIL
ldarg.s 0x65
ldc.i4 0x65
conv.i2
ceq
brfalse FAIL
ldarg.s 0x66
ldc.i4 0x66
conv.i2
ceq
brfalse FAIL
ldarg.s 0x67
ldc.i4 0x67
conv.i2
ceq
brfalse FAIL
ldarg.s 0x68
ldc.i4 0x68
conv.i2
ceq
brfalse FAIL
ldarg.s 0x69
ldc.i4 0x69
conv.i2
ceq
brfalse FAIL
ldarg.s 0x6a
ldc.i4 0x6a
conv.i2
ceq
brfalse FAIL
ldarg.s 0x6b
ldc.i4 0x6b
conv.i2
ceq
brfalse FAIL
ldarg.s 0x6c
ldc.i4 0x6c
conv.i2
ceq
brfalse FAIL
ldarg.s 0x6d
ldc.i4 0x6d
conv.i2
ceq
brfalse FAIL
ldarg.s 0x6e
ldc.i4 0x6e
conv.i2
ceq
brfalse FAIL
ldarg.s 0x6f
ldc.i4 0x6f
conv.i2
ceq
brfalse FAIL
ldarg.s 0x70
ldc.i4 0x70
conv.i2
ceq
brfalse FAIL
ldarg.s 0x71
ldc.i4 0x71
conv.i2
ceq
brfalse FAIL
ldarg.s 0x72
ldc.i4 0x72
conv.i2
ceq
brfalse FAIL
ldarg.s 0x73
ldc.i4 0x73
conv.i2
ceq
brfalse FAIL
ldarg.s 0x74
ldc.i4 0x74
conv.i2
ceq
brfalse FAIL
ldarg.s 0x75
ldc.i4 0x75
conv.i2
ceq
brfalse FAIL
ldarg.s 0x76
ldc.i4 0x76
conv.i2
ceq
brfalse FAIL
ldarg.s 0x77
ldc.i4 0x77
conv.i2
ceq
brfalse FAIL
ldarg.s 0x78
ldc.i4 0x78
conv.i2
ceq
brfalse FAIL
ldarg.s 0x79
ldc.i4 0x79
conv.i2
ceq
brfalse FAIL
ldarg.s 0x7a
ldc.i4 0x7a
conv.i2
ceq
brfalse FAIL
ldarg.s 0x7b
ldc.i4 0x7b
conv.i2
ceq
brfalse FAIL
ldarg.s 0x7c
ldc.i4 0x7c
conv.i2
ceq
brfalse FAIL
ldarg.s 0x7d
ldc.i4 0x7d
conv.i2
ceq
brfalse FAIL
ldarg.s 0x7e
ldc.i4 0x7e
conv.i2
ceq
brfalse FAIL
ldarg.s 0x7f
ldc.i4 0x7f
conv.i2
ceq
brfalse FAIL
ldarg.s 0x80
ldc.i4 0x80
conv.i2
ceq
brfalse FAIL
ldarg.s 0x81
ldc.i4 0x81
conv.i2
ceq
brfalse FAIL
ldarg.s 0x82
ldc.i4 0x82
conv.i2
ceq
brfalse FAIL
ldarg.s 0x83
ldc.i4 0x83
conv.i2
ceq
brfalse FAIL
ldarg.s 0x84
ldc.i4 0x84
conv.i2
ceq
brfalse FAIL
ldarg.s 0x85
ldc.i4 0x85
conv.i2
ceq
brfalse FAIL
ldarg.s 0x86
ldc.i4 0x86
conv.i2
ceq
brfalse FAIL
ldarg.s 0x87
ldc.i4 0x87
conv.i2
ceq
brfalse FAIL
ldarg.s 0x88
ldc.i4 0x88
conv.i2
ceq
brfalse FAIL
ldarg.s 0x89
ldc.i4 0x89
conv.i2
ceq
brfalse FAIL
ldarg.s 0x8a
ldc.i4 0x8a
conv.i2
ceq
brfalse FAIL
ldarg.s 0x8b
ldc.i4 0x8b
conv.i2
ceq
brfalse FAIL
ldarg.s 0x8c
ldc.i4 0x8c
conv.i2
ceq
brfalse FAIL
ldarg.s 0x8d
ldc.i4 0x8d
conv.i2
ceq
brfalse FAIL
ldarg.s 0x8e
ldc.i4 0x8e
conv.i2
ceq
brfalse FAIL
ldarg.s 0x8f
ldc.i4 0x8f
conv.i2
ceq
brfalse FAIL
ldarg.s 0x90
ldc.i4 0x90
conv.i2
ceq
brfalse FAIL
ldarg.s 0x91
ldc.i4 0x91
conv.i2
ceq
brfalse FAIL
ldarg.s 0x92
ldc.i4 0x92
conv.i2
ceq
brfalse FAIL
ldarg.s 0x93
ldc.i4 0x93
conv.i2
ceq
brfalse FAIL
ldarg.s 0x94
ldc.i4 0x94
conv.i2
ceq
brfalse FAIL
ldarg.s 0x95
ldc.i4 0x95
conv.i2
ceq
brfalse FAIL
ldarg.s 0x96
ldc.i4 0x96
conv.i2
ceq
brfalse FAIL
ldarg.s 0x97
ldc.i4 0x97
conv.i2
ceq
brfalse FAIL
ldarg.s 0x98
ldc.i4 0x98
conv.i2
ceq
brfalse FAIL
ldarg.s 0x99
ldc.i4 0x99
conv.i2
ceq
brfalse FAIL
ldarg.s 0x9a
ldc.i4 0x9a
conv.i2
ceq
brfalse FAIL
ldarg.s 0x9b
ldc.i4 0x9b
conv.i2
ceq
brfalse FAIL
ldarg.s 0x9c
ldc.i4 0x9c
conv.i2
ceq
brfalse FAIL
ldarg.s 0x9d
ldc.i4 0x9d
conv.i2
ceq
brfalse FAIL
ldarg.s 0x9e
ldc.i4 0x9e
conv.i2
ceq
brfalse FAIL
ldarg.s 0x9f
ldc.i4 0x9f
conv.i2
ceq
brfalse FAIL
ldarg.s 0xa0
ldc.i4 0xa0
conv.i2
ceq
brfalse FAIL
ldarg.s 0xa1
ldc.i4 0xa1
conv.i2
ceq
brfalse FAIL
ldarg.s 0xa2
ldc.i4 0xa2
conv.i2
ceq
brfalse FAIL
ldarg.s 0xa3
ldc.i4 0xa3
conv.i2
ceq
brfalse FAIL
ldarg.s 0xa4
ldc.i4 0xa4
conv.i2
ceq
brfalse FAIL
ldarg.s 0xa5
ldc.i4 0xa5
conv.i2
ceq
brfalse FAIL
ldarg.s 0xa6
ldc.i4 0xa6
conv.i2
ceq
brfalse FAIL
ldarg.s 0xa7
ldc.i4 0xa7
conv.i2
ceq
brfalse FAIL
ldarg.s 0xa8
ldc.i4 0xa8
conv.i2
ceq
brfalse FAIL
ldarg.s 0xa9
ldc.i4 0xa9
conv.i2
ceq
brfalse FAIL
ldarg.s 0xaa
ldc.i4 0xaa
conv.i2
ceq
brfalse FAIL
ldarg.s 0xab
ldc.i4 0xab
conv.i2
ceq
brfalse FAIL
ldarg.s 0xac
ldc.i4 0xac
conv.i2
ceq
brfalse FAIL
ldarg.s 0xad
ldc.i4 0xad
conv.i2
ceq
brfalse FAIL
ldarg.s 0xae
ldc.i4 0xae
conv.i2
ceq
brfalse FAIL
ldarg.s 0xaf
ldc.i4 0xaf
conv.i2
ceq
brfalse FAIL
ldarg.s 0xb0
ldc.i4 0xb0
conv.i2
ceq
brfalse FAIL
ldarg.s 0xb1
ldc.i4 0xb1
conv.i2
ceq
brfalse FAIL
ldarg.s 0xb2
ldc.i4 0xb2
conv.i2
ceq
brfalse FAIL
ldarg.s 0xb3
ldc.i4 0xb3
conv.i2
ceq
brfalse FAIL
ldarg.s 0xb4
ldc.i4 0xb4
conv.i2
ceq
brfalse FAIL
ldarg.s 0xb5
ldc.i4 0xb5
conv.i2
ceq
brfalse FAIL
ldarg.s 0xb6
ldc.i4 0xb6
conv.i2
ceq
brfalse FAIL
ldarg.s 0xb7
ldc.i4 0xb7
conv.i2
ceq
brfalse FAIL
ldarg.s 0xb8
ldc.i4 0xb8
conv.i2
ceq
brfalse FAIL
ldarg.s 0xb9
ldc.i4 0xb9
conv.i2
ceq
brfalse FAIL
ldarg.s 0xba
ldc.i4 0xba
conv.i2
ceq
brfalse FAIL
ldarg.s 0xbb
ldc.i4 0xbb
conv.i2
ceq
brfalse FAIL
ldarg.s 0xbc
ldc.i4 0xbc
conv.i2
ceq
brfalse FAIL
ldarg.s 0xbd
ldc.i4 0xbd
conv.i2
ceq
brfalse FAIL
ldarg.s 0xbe
ldc.i4 0xbe
conv.i2
ceq
brfalse FAIL
ldarg.s 0xbf
ldc.i4 0xbf
conv.i2
ceq
brfalse FAIL
ldarg.s 0xc0
ldc.i4 0xc0
conv.i2
ceq
brfalse FAIL
ldarg.s 0xc1
ldc.i4 0xc1
conv.i2
ceq
brfalse FAIL
ldarg.s 0xc2
ldc.i4 0xc2
conv.i2
ceq
brfalse FAIL
ldarg.s 0xc3
ldc.i4 0xc3
conv.i2
ceq
brfalse FAIL
ldarg.s 0xc4
ldc.i4 0xc4
conv.i2
ceq
brfalse FAIL
ldarg.s 0xc5
ldc.i4 0xc5
conv.i2
ceq
brfalse FAIL
ldarg.s 0xc6
ldc.i4 0xc6
conv.i2
ceq
brfalse FAIL
ldarg.s 0xc7
ldc.i4 0xc7
conv.i2
ceq
brfalse FAIL
ldarg.s 0xc8
ldc.i4 0xc8
conv.i2
ceq
brfalse FAIL
ldarg.s 0xc9
ldc.i4 0xc9
conv.i2
ceq
brfalse FAIL
ldarg.s 0xca
ldc.i4 0xca
conv.i2
ceq
brfalse FAIL
ldarg.s 0xcb
ldc.i4 0xcb
conv.i2
ceq
brfalse FAIL
ldarg.s 0xcc
ldc.i4 0xcc
conv.i2
ceq
brfalse FAIL
ldarg.s 0xcd
ldc.i4 0xcd
conv.i2
ceq
brfalse FAIL
ldarg.s 0xce
ldc.i4 0xce
conv.i2
ceq
brfalse FAIL
ldarg.s 0xcf
ldc.i4 0xcf
conv.i2
ceq
brfalse FAIL
ldarg.s 0xd0
ldc.i4 0xd0
conv.i2
ceq
brfalse FAIL
ldarg.s 0xd1
ldc.i4 0xd1
conv.i2
ceq
brfalse FAIL
ldarg.s 0xd2
ldc.i4 0xd2
conv.i2
ceq
brfalse FAIL
ldarg.s 0xd3
ldc.i4 0xd3
conv.i2
ceq
brfalse FAIL
ldarg.s 0xd4
ldc.i4 0xd4
conv.i2
ceq
brfalse FAIL
ldarg.s 0xd5
ldc.i4 0xd5
conv.i2
ceq
brfalse FAIL
ldarg.s 0xd6
ldc.i4 0xd6
conv.i2
ceq
brfalse FAIL
ldarg.s 0xd7
ldc.i4 0xd7
conv.i2
ceq
brfalse FAIL
ldarg.s 0xd8
ldc.i4 0xd8
conv.i2
ceq
brfalse FAIL
ldarg.s 0xd9
ldc.i4 0xd9
conv.i2
ceq
brfalse FAIL
ldarg.s 0xda
ldc.i4 0xda
conv.i2
ceq
brfalse FAIL
ldarg.s 0xdb
ldc.i4 0xdb
conv.i2
ceq
brfalse FAIL
ldarg.s 0xdc
ldc.i4 0xdc
conv.i2
ceq
brfalse FAIL
ldarg.s 0xdd
ldc.i4 0xdd
conv.i2
ceq
brfalse FAIL
ldarg.s 0xde
ldc.i4 0xde
conv.i2
ceq
brfalse FAIL
ldarg.s 0xdf
ldc.i4 0xdf
conv.i2
ceq
brfalse FAIL
ldarg.s 0xe0
ldc.i4 0xe0
conv.i2
ceq
brfalse FAIL
ldarg.s 0xe1
ldc.i4 0xe1
conv.i2
ceq
brfalse FAIL
ldarg.s 0xe2
ldc.i4 0xe2
conv.i2
ceq
brfalse FAIL
ldarg.s 0xe3
ldc.i4 0xe3
conv.i2
ceq
brfalse FAIL
ldarg.s 0xe4
ldc.i4 0xe4
conv.i2
ceq
brfalse FAIL
ldarg.s 0xe5
ldc.i4 0xe5
conv.i2
ceq
brfalse FAIL
ldarg.s 0xe6
ldc.i4 0xe6
conv.i2
ceq
brfalse FAIL
ldarg.s 0xe7
ldc.i4 0xe7
conv.i2
ceq
brfalse FAIL
ldarg.s 0xe8
ldc.i4 0xe8
conv.i2
ceq
brfalse FAIL
ldarg.s 0xe9
ldc.i4 0xe9
conv.i2
ceq
brfalse FAIL
ldarg.s 0xea
ldc.i4 0xea
conv.i2
ceq
brfalse FAIL
ldarg.s 0xeb
ldc.i4 0xeb
conv.i2
ceq
brfalse FAIL
ldarg.s 0xec
ldc.i4 0xec
conv.i2
ceq
brfalse FAIL
ldarg.s 0xed
ldc.i4 0xed
conv.i2
ceq
brfalse FAIL
ldarg.s 0xee
ldc.i4 0xee
conv.i2
ceq
brfalse FAIL
ldarg.s 0xef
ldc.i4 0xef
conv.i2
ceq
brfalse FAIL
ldarg.s 0xf0
ldc.i4 0xf0
conv.i2
ceq
brfalse FAIL
ldarg.s 0xf1
ldc.i4 0xf1
conv.i2
ceq
brfalse FAIL
ldarg.s 0xf2
ldc.i4 0xf2
conv.i2
ceq
brfalse FAIL
ldarg.s 0xf3
ldc.i4 0xf3
conv.i2
ceq
brfalse FAIL
ldarg.s 0xf4
ldc.i4 0xf4
conv.i2
ceq
brfalse FAIL
ldarg.s 0xf5
ldc.i4 0xf5
conv.i2
ceq
brfalse FAIL
ldarg.s 0xf6
ldc.i4 0xf6
conv.i2
ceq
brfalse FAIL
ldarg.s 0xf7
ldc.i4 0xf7
conv.i2
ceq
brfalse FAIL
ldarg.s 0xf8
ldc.i4 0xf8
conv.i2
ceq
brfalse FAIL
ldarg.s 0xf9
ldc.i4 0xf9
conv.i2
ceq
brfalse FAIL
ldarg.s 0xfa
ldc.i4 0xfa
conv.i2
ceq
brfalse FAIL
ldarg.s 0xfb
ldc.i4 0xfb
conv.i2
ceq
brfalse FAIL
ldarg.s 0xfc
ldc.i4 0xfc
conv.i2
ceq
brfalse FAIL
ldarg.s 0xfd
ldc.i4 0xfd
conv.i2
ceq
brfalse FAIL
ldarg.s 0xfe
ldc.i4 0xfe
conv.i2
ceq
brfalse FAIL
ldarg.s 0xff
ldc.i4 0xff
conv.i2
ceq
brfalse FAIL
ldc.i4.1
ret
FAIL:
ldc.i4.0
ret
}
.method public static int32 main(class [mscorlib]System.String[]) {
.entrypoint
.maxstack 2
call int32 ldarg_s_i2::test_int16()
ldc.i4.1
ceq
brfalse FAIL
ldc.i4 100
ret
FAIL:
ldc.i4.0
ret
}
}
| -1 |
dotnet/runtime | 66,007 | [monoapi] Add mono_method_get_unmanaged_callers_only_ftnptr | Like `RuntimeMethodHandle.GetFunctionPointer`, but callable from native code | lambdageek | 2022-03-01T15:38:09Z | 2022-03-03T15:38:37Z | faa272f860f83802a9cee65619e6e2e841b05433 | 1cfa6d6964d890f9673b25d2165532f9a43710a7 | [monoapi] Add mono_method_get_unmanaged_callers_only_ftnptr. Like `RuntimeMethodHandle.GetFunctionPointer`, but callable from native code | ./src/tests/Interop/PInvoke/SizeParamIndex/PInvoke/PassingByOut/PassingByOutTest.cs | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System;
using System.Runtime.InteropServices;
using Xunit;
/// <summary>
/// Passing the array size via an out parameter using the SizeParamIndex attribute
/// </summary>
public class ClientMarshalArrayAsSizeParamIndexByOutTest
{
#region ByOut
[DllImport("PInvokePassingByOutNative")]
private static extern bool MarshalCStyleArrayByte_AsByOut_AsSizeParamIndex(
out byte arrSize, [MarshalAs(UnmanagedType.LPArray, SizeParamIndex = 0)] out byte[] arrByte);
[DllImport("PInvokePassingByOutNative")]
private static extern bool MarshalCStyleArraySbyte_AsByOut_AsSizeParamIndex(
out sbyte arrSize, [MarshalAs(UnmanagedType.LPArray, SizeParamIndex = 0)] out sbyte[] arrSbyte);
[DllImport("PInvokePassingByOutNative")]
private static extern bool MarshalCStyleArrayShort_AsByOut_AsSizeParamIndex(
out short arrSize, [MarshalAs(UnmanagedType.LPArray, SizeParamIndex = 0)] out short[] arrShort);
[DllImport("PInvokePassingByOutNative")]
private static extern bool MarshalCStyleArrayShortReturnNegative_AsByOut_AsSizeParamIndex(
out short arrSize, [MarshalAs(UnmanagedType.LPArray, SizeParamIndex = 0)] out short[] arrShort);
[DllImport("PInvokePassingByOutNative")]
private static extern bool MarshalCStyleArrayUshort_AsByOut_AsSizeParamIndex(
[MarshalAs(UnmanagedType.LPArray, SizeParamIndex = 1)] out ushort[] arrUshort, out ushort arrSize);
[DllImport("PInvokePassingByOutNative")]
private static extern bool MarshalCStyleArrayInt_AsByOut_AsSizeParamIndex(
out Int32 arrSize, [MarshalAs(UnmanagedType.LPArray, SizeParamIndex = 0)] out Int32[] arrInt32);
[DllImport("PInvokePassingByOutNative")]
private static extern bool MarshalCStyleArrayUInt_AsByOut_AsSizeParamIndex(
out UInt32 arrSize, [MarshalAs(UnmanagedType.LPArray, SizeParamIndex = 0)] out UInt32[] arrUInt32);
[DllImport("PInvokePassingByOutNative")]
private static extern bool MarshalCStyleArrayLong_AsByOut_AsSizeParamIndex(
out long arrSize, [MarshalAs(UnmanagedType.LPArray, SizeParamIndex = 0)] out long[] arrLong);
[DllImport("PInvokePassingByOutNative")]
private static extern bool MarshalCStyleArrayUlong_AsByOut_AsSizeParamIndex(
[MarshalAs(UnmanagedType.LPArray, SizeParamIndex = 1)] out ulong[] arrUlong, out ulong arrSize, ulong unused);
[DllImport("PInvokePassingByOutNative")]
private static extern bool MarshalCStyleArrayString_AsByOut_AsSizeParamIndex(
[MarshalAs(UnmanagedType.LPArray, SizeParamIndex = 1, ArraySubType = UnmanagedType.BStr)] out string[] arrInt32, out int arrSize);
#endregion
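// Sketch of the contract these imports assume (the native signature below is an
// illustration, not the actual test export): an `out byte[]` paired with
// SizeParamIndex maps on the native side to a pointer-to-pointer plus a size
// out-parameter, e.g.
//
// extern "C" BOOL MarshalCStyleArrayByte_AsByOut_AsSizeParamIndex(BYTE* arrSize, BYTE** ppArrByte);
//
// The native side allocates the buffer with CoTaskMemAlloc and writes the element
// count through arrSize; the marshaller then surfaces a managed array of that
// length and frees the native buffer.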
static void SizeParamTypeIsByte()
{
string strDescription = "Scenario(byte ==> BYTE): Array_Size(N->M) = 1";
Console.WriteLine();
Console.WriteLine(strDescription + " Starts!");
byte byte_Array_Size;
byte[] arrByte;
Assert.True(MarshalCStyleArrayByte_AsByOut_AsSizeParamIndex(out byte_Array_Size, out arrByte));
//Construct Expected array
int expected_ByteArray_Size = 1;
byte[] expectedArrByte = Helper.GetExpChangeArray<byte>(expected_ByteArray_Size);
Assert.True(Helper.EqualArray<byte>(arrByte, (int)byte_Array_Size, expectedArrByte, (int)expectedArrByte.Length));
Console.WriteLine(strDescription + " Ends!");
}
static void SizeParamTypeIsSByte()
{
string strDescription = "Scenario(sbyte ==> CHAR):Array_Size(N->M) = sbyte.Max";
Console.WriteLine();
Console.WriteLine(strDescription + " Starts!");
sbyte sbyte_Array_Size;
sbyte[] arrSbyte;
Assert.True(MarshalCStyleArraySbyte_AsByOut_AsSizeParamIndex(out sbyte_Array_Size, out arrSbyte));
sbyte[] expectedArrSbyte = Helper.GetExpChangeArray<sbyte>(sbyte.MaxValue);
Assert.True(Helper.EqualArray<sbyte>(arrSbyte, (int)sbyte_Array_Size, expectedArrSbyte, (int)expectedArrSbyte.Length));
Console.WriteLine(strDescription + " Ends!");
}
static void SizeParamTypeIsShort1()
{
string strDescription = "Scenario(short ==> SHORT)1,Array_Size(M->N) = -1, Array_Size(N->M)=(ShortMax+1)/2";
Console.WriteLine();
Console.WriteLine(strDescription + " Starts!");
short shortArray_Size = (short)-1;
short[] arrShort = Helper.InitArray<short>(10);
Assert.True(MarshalCStyleArrayShort_AsByOut_AsSizeParamIndex(out shortArray_Size, out arrShort));
//Construct Expected Array
int expected_ShortArray_Size = 16384;//(SHRT_MAX+1)/2
short[] expectedArrShort = Helper.GetExpChangeArray<short>(expected_ShortArray_Size);
Assert.True(Helper.EqualArray<short>(arrShort, (int)shortArray_Size, expectedArrShort, (int)expectedArrShort.Length));
Console.WriteLine(strDescription + " Ends!");
}
static void SizeParamTypeIsShort2()
{
string strDescription = "Scenario(short ==> SHORT)2, Array_Size = 10, Array_Size(N->M) = -1";
Console.WriteLine();
Console.WriteLine(strDescription + " Starts!");
short short_Array_Size = (short)10;
short[] arrShort = Helper.InitArray<short>(short_Array_Size);
Assert.Throws<OverflowException>(() => MarshalCStyleArrayShortReturnNegative_AsByOut_AsSizeParamIndex(out short_Array_Size, out arrShort));
Console.WriteLine(strDescription + " Ends!");
}
static void SizeParamTypeIsUShort()
{
string strDescription = "Scenario(ushort==>USHORT): Array_Size(N->M) = ushort.MaxValue";
Console.WriteLine();
Console.WriteLine(strDescription + " Starts!");
ushort ushort_Array_Size;
ushort[] arrUshort;
Assert.True(MarshalCStyleArrayUshort_AsByOut_AsSizeParamIndex(out arrUshort, out ushort_Array_Size));
//Expected Array
ushort[] expectedArrUshort = Helper.GetExpChangeArray<ushort>(ushort.MaxValue);
Assert.True(Helper.EqualArray<ushort>(arrUshort, (int)ushort_Array_Size, expectedArrUshort, (ushort)expectedArrUshort.Length));
Console.WriteLine(strDescription + " Ends!");
}
static void SizeParamTypeIsInt32()
{
string strDescription = "Scenario(Int32 ==> LONG): Array_Size(N->M) = 0 ";
Console.WriteLine();
Console.WriteLine(strDescription + " Starts!");
Int32 Int32_Array_Size;
Int32[] arrInt32;
Assert.True(MarshalCStyleArrayInt_AsByOut_AsSizeParamIndex(out Int32_Array_Size, out arrInt32));
//Expected Array
Int32[] expectedArrInt32 = Helper.GetExpChangeArray<Int32>(0);
Assert.True(Helper.EqualArray<Int32>(arrInt32, Int32_Array_Size, expectedArrInt32, expectedArrInt32.Length));
Console.WriteLine(strDescription + " Ends!");
}
static void SizeParamTypeIsUInt32()
{
string strDescription = "Scenario(UInt32 ==> ULONG): Array_Size(N->M) = 20";
Console.WriteLine();
Console.WriteLine(strDescription + " Starts!");
int expected_UInt32ArraySize = 20;
UInt32 UInt32_Array_Size = (UInt32)10;
UInt32[] arrUInt32 = Helper.InitArray<UInt32>((Int32)UInt32_Array_Size);
Assert.True(MarshalCStyleArrayUInt_AsByOut_AsSizeParamIndex(out UInt32_Array_Size, out arrUInt32));
//Construct expected
UInt32[] expectedArrUInt32 = Helper.GetExpChangeArray<UInt32>(expected_UInt32ArraySize);
Assert.True(Helper.EqualArray<UInt32>(arrUInt32, (Int32)UInt32_Array_Size, expectedArrUInt32, (Int32)expectedArrUInt32.Length));
Console.WriteLine(strDescription + " Ends!");
}
static void SizeParamTypeIsLong()
{
string strDescription = "Scenario(long ==> LONGLONG): Array_Size(N->M) = 20";
Console.WriteLine();
Console.WriteLine(strDescription + " Starts!");
int expected_LongArraySize = 20;
long long_Array_Size = (long)10;
long[] arrLong = Helper.InitArray<long>((Int32)long_Array_Size);
Assert.True(MarshalCStyleArrayLong_AsByOut_AsSizeParamIndex(out long_Array_Size, out arrLong));
long[] expectedArrLong = Helper.GetExpChangeArray<long>(expected_LongArraySize);
Assert.True(Helper.EqualArray<long>(arrLong, (Int32)long_Array_Size, expectedArrLong, (Int32)expectedArrLong.Length));
Console.WriteLine(strDescription + " Ends!");
}
static void SizeParamTypeIsULong()
{
string strDescription = "Scenario(ulong ==> ULONGLONG): Array_Size(N->M) = 1000";
Console.WriteLine();
Console.WriteLine(strDescription + " Starts!");
int expected_ULongArraySize = 1000;
ulong ulong_Array_Size = (ulong)10;
ulong[] arrUlong = Helper.InitArray<ulong>((Int32)ulong_Array_Size);
Assert.True(MarshalCStyleArrayUlong_AsByOut_AsSizeParamIndex(out arrUlong, out ulong_Array_Size, ulong_Array_Size));
ulong[] expectedArrUlong = Helper.GetExpChangeArray<ulong>(expected_ULongArraySize);
Assert.True(Helper.EqualArray<ulong>(arrUlong, (Int32)ulong_Array_Size, expectedArrUlong, (Int32)expectedArrUlong.Length));
Console.WriteLine(strDescription + " Ends!");
}
static void SizeParamTypeIsString()
{
string strDescription = "Scenario(String ==> BSTR): Array_Size(N->M) = 20";
Console.WriteLine();
Console.WriteLine(strDescription + " Starts!");
int expected_StringArraySize = 20;
int string_Array_Size = 10;
String[] arrString = Helper.InitArray<String>(string_Array_Size);
Assert.True(MarshalCStyleArrayString_AsByOut_AsSizeParamIndex(out arrString, out string_Array_Size));
String[] expArrString = Helper.GetExpChangeArray<String>(expected_StringArraySize);
Assert.True(Helper.EqualArray<String>(arrString, string_Array_Size, expArrString, expArrString.Length));
Console.WriteLine(strDescription + " Ends!");
}
static int Main()
{
        try
        {
SizeParamTypeIsByte();
SizeParamTypeIsSByte();
SizeParamTypeIsShort1();
SizeParamTypeIsShort2();
SizeParamTypeIsUShort();
SizeParamTypeIsInt32();
SizeParamTypeIsUInt32();
SizeParamTypeIsLong();
SizeParamTypeIsULong();
if (OperatingSystem.IsWindows())
{
SizeParamTypeIsString();
}
return 100;
}
catch (Exception e)
{
Console.WriteLine($"Test Failure: {e}");
return 101;
}
}
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System;
using System.Runtime.InteropServices;
using Xunit;
/// <summary>
/// Pass the array size back through an out parameter using the SizeParamIndex attribute
/// </summary>
public class ClientMarshalArrayAsSizeParamIndexByOutTest
{
#region ByOut
[DllImport("PInvokePassingByOutNative")]
private static extern bool MarshalCStyleArrayByte_AsByOut_AsSizeParamIndex(
out byte arrSize, [MarshalAs(UnmanagedType.LPArray, SizeParamIndex = 0)] out byte[] arrByte);
[DllImport("PInvokePassingByOutNative")]
private static extern bool MarshalCStyleArraySbyte_AsByOut_AsSizeParamIndex(
out sbyte arrSize, [MarshalAs(UnmanagedType.LPArray, SizeParamIndex = 0)] out sbyte[] arrSbyte);
[DllImport("PInvokePassingByOutNative")]
private static extern bool MarshalCStyleArrayShort_AsByOut_AsSizeParamIndex(
out short arrSize, [MarshalAs(UnmanagedType.LPArray, SizeParamIndex = 0)] out short[] arrShort);
[DllImport("PInvokePassingByOutNative")]
private static extern bool MarshalCStyleArrayShortReturnNegative_AsByOut_AsSizeParamIndex(
out short arrSize, [MarshalAs(UnmanagedType.LPArray, SizeParamIndex = 0)] out short[] arrShort);
[DllImport("PInvokePassingByOutNative")]
private static extern bool MarshalCStyleArrayUshort_AsByOut_AsSizeParamIndex(
[MarshalAs(UnmanagedType.LPArray, SizeParamIndex = 1)] out ushort[] arrUshort, out ushort arrSize);
[DllImport("PInvokePassingByOutNative")]
private static extern bool MarshalCStyleArrayInt_AsByOut_AsSizeParamIndex(
out Int32 arrSize, [MarshalAs(UnmanagedType.LPArray, SizeParamIndex = 0)] out Int32[] arrInt32);
[DllImport("PInvokePassingByOutNative")]
private static extern bool MarshalCStyleArrayUInt_AsByOut_AsSizeParamIndex(
out UInt32 arrSize, [MarshalAs(UnmanagedType.LPArray, SizeParamIndex = 0)] out UInt32[] arrUInt32);
[DllImport("PInvokePassingByOutNative")]
private static extern bool MarshalCStyleArrayLong_AsByOut_AsSizeParamIndex(
out long arrSize, [MarshalAs(UnmanagedType.LPArray, SizeParamIndex = 0)] out long[] arrLong);
[DllImport("PInvokePassingByOutNative")]
private static extern bool MarshalCStyleArrayUlong_AsByOut_AsSizeParamIndex(
[MarshalAs(UnmanagedType.LPArray, SizeParamIndex = 1)] out ulong[] arrUlong, out ulong arrSize, ulong unused);
[DllImport("PInvokePassingByOutNative")]
private static extern bool MarshalCStyleArrayString_AsByOut_AsSizeParamIndex(
        [MarshalAs(UnmanagedType.LPArray, SizeParamIndex = 1, ArraySubType = UnmanagedType.BStr)] out string[] arrString, out int arrSize);
#endregion
static void SizeParamTypeIsByte()
{
string strDescription = "Scenario(byte ==> BYTE): Array_Size(N->M) = 1";
Console.WriteLine();
Console.WriteLine(strDescription + " Starts!");
byte byte_Array_Size;
byte[] arrByte;
Assert.True(MarshalCStyleArrayByte_AsByOut_AsSizeParamIndex(out byte_Array_Size, out arrByte));
//Construct Expected array
int expected_ByteArray_Size = 1;
byte[] expectedArrByte = Helper.GetExpChangeArray<byte>(expected_ByteArray_Size);
Assert.True(Helper.EqualArray<byte>(arrByte, (int)byte_Array_Size, expectedArrByte, (int)expectedArrByte.Length));
Console.WriteLine(strDescription + " Ends!");
}
static void SizeParamTypeIsSByte()
{
string strDescription = "Scenario(sbyte ==> CHAR):Array_Size(N->M) = sbyte.Max";
Console.WriteLine();
Console.WriteLine(strDescription + " Starts!");
sbyte sbyte_Array_Size;
sbyte[] arrSbyte;
Assert.True(MarshalCStyleArraySbyte_AsByOut_AsSizeParamIndex(out sbyte_Array_Size, out arrSbyte));
sbyte[] expectedArrSbyte = Helper.GetExpChangeArray<sbyte>(sbyte.MaxValue);
Assert.True(Helper.EqualArray<sbyte>(arrSbyte, (int)sbyte_Array_Size, expectedArrSbyte, (int)expectedArrSbyte.Length));
Console.WriteLine(strDescription + " Ends!");
}
static void SizeParamTypeIsShort1()
{
string strDescription = "Scenario(short ==> SHORT)1,Array_Size(M->N) = -1, Array_Size(N->M)=(ShortMax+1)/2";
Console.WriteLine();
Console.WriteLine(strDescription + " Starts!");
short shortArray_Size = (short)-1;
short[] arrShort = Helper.InitArray<short>(10);
Assert.True(MarshalCStyleArrayShort_AsByOut_AsSizeParamIndex(out shortArray_Size, out arrShort));
//Construct Expected Array
int expected_ShortArray_Size = 16384;//(SHRT_MAX+1)/2
short[] expectedArrShort = Helper.GetExpChangeArray<short>(expected_ShortArray_Size);
Assert.True(Helper.EqualArray<short>(arrShort, (int)shortArray_Size, expectedArrShort, (int)expectedArrShort.Length));
Console.WriteLine(strDescription + " Ends!");
}
static void SizeParamTypeIsShort2()
{
string strDescription = "Scenario(short ==> SHORT)2, Array_Size = 10, Array_Size(N->M) = -1";
Console.WriteLine();
Console.WriteLine(strDescription + " Starts!");
short short_Array_Size = (short)10;
short[] arrShort = Helper.InitArray<short>(short_Array_Size);
Assert.Throws<OverflowException>(() => MarshalCStyleArrayShortReturnNegative_AsByOut_AsSizeParamIndex(out short_Array_Size, out arrShort));
Console.WriteLine(strDescription + " Ends!");
}
static void SizeParamTypeIsUShort()
{
string strDescription = "Scenario(ushort==>USHORT): Array_Size(N->M) = ushort.MaxValue";
Console.WriteLine();
Console.WriteLine(strDescription + " Starts!");
ushort ushort_Array_Size;
ushort[] arrUshort;
Assert.True(MarshalCStyleArrayUshort_AsByOut_AsSizeParamIndex(out arrUshort, out ushort_Array_Size));
//Expected Array
ushort[] expectedArrUshort = Helper.GetExpChangeArray<ushort>(ushort.MaxValue);
Assert.True(Helper.EqualArray<ushort>(arrUshort, (int)ushort_Array_Size, expectedArrUshort, (ushort)expectedArrUshort.Length));
Console.WriteLine(strDescription + " Ends!");
}
static void SizeParamTypeIsInt32()
{
string strDescription = "Scenario(Int32 ==> LONG): Array_Size(N->M) = 0 ";
Console.WriteLine();
Console.WriteLine(strDescription + " Starts!");
Int32 Int32_Array_Size;
Int32[] arrInt32;
Assert.True(MarshalCStyleArrayInt_AsByOut_AsSizeParamIndex(out Int32_Array_Size, out arrInt32));
//Expected Array
Int32[] expectedArrInt32 = Helper.GetExpChangeArray<Int32>(0);
Assert.True(Helper.EqualArray<Int32>(arrInt32, Int32_Array_Size, expectedArrInt32, expectedArrInt32.Length));
Console.WriteLine(strDescription + " Ends!");
}
static void SizeParamTypeIsUInt32()
{
string strDescription = "Scenario(UInt32 ==> ULONG): Array_Size(N->M) = 20";
Console.WriteLine();
Console.WriteLine(strDescription + " Starts!");
int expected_UInt32ArraySize = 20;
UInt32 UInt32_Array_Size = (UInt32)10;
UInt32[] arrUInt32 = Helper.InitArray<UInt32>((Int32)UInt32_Array_Size);
Assert.True(MarshalCStyleArrayUInt_AsByOut_AsSizeParamIndex(out UInt32_Array_Size, out arrUInt32));
//Construct expected
UInt32[] expectedArrUInt32 = Helper.GetExpChangeArray<UInt32>(expected_UInt32ArraySize);
Assert.True(Helper.EqualArray<UInt32>(arrUInt32, (Int32)UInt32_Array_Size, expectedArrUInt32, (Int32)expectedArrUInt32.Length));
Console.WriteLine(strDescription + " Ends!");
}
static void SizeParamTypeIsLong()
{
string strDescription = "Scenario(long ==> LONGLONG): Array_Size(N->M) = 20";
Console.WriteLine();
Console.WriteLine(strDescription + " Starts!");
int expected_LongArraySize = 20;
long long_Array_Size = (long)10;
long[] arrLong = Helper.InitArray<long>((Int32)long_Array_Size);
Assert.True(MarshalCStyleArrayLong_AsByOut_AsSizeParamIndex(out long_Array_Size, out arrLong));
long[] expectedArrLong = Helper.GetExpChangeArray<long>(expected_LongArraySize);
Assert.True(Helper.EqualArray<long>(arrLong, (Int32)long_Array_Size, expectedArrLong, (Int32)expectedArrLong.Length));
Console.WriteLine(strDescription + " Ends!");
}
static void SizeParamTypeIsULong()
{
string strDescription = "Scenario(ulong ==> ULONGLONG): Array_Size(N->M) = 1000";
Console.WriteLine();
Console.WriteLine(strDescription + " Starts!");
int expected_ULongArraySize = 1000;
ulong ulong_Array_Size = (ulong)10;
ulong[] arrUlong = Helper.InitArray<ulong>((Int32)ulong_Array_Size);
Assert.True(MarshalCStyleArrayUlong_AsByOut_AsSizeParamIndex(out arrUlong, out ulong_Array_Size, ulong_Array_Size));
ulong[] expectedArrUlong = Helper.GetExpChangeArray<ulong>(expected_ULongArraySize);
Assert.True(Helper.EqualArray<ulong>(arrUlong, (Int32)ulong_Array_Size, expectedArrUlong, (Int32)expectedArrUlong.Length));
Console.WriteLine(strDescription + " Ends!");
}
static void SizeParamTypeIsString()
{
string strDescription = "Scenario(String ==> BSTR): Array_Size(N->M) = 20";
Console.WriteLine();
Console.WriteLine(strDescription + " Starts!");
int expected_StringArraySize = 20;
int string_Array_Size = 10;
String[] arrString = Helper.InitArray<String>(string_Array_Size);
Assert.True(MarshalCStyleArrayString_AsByOut_AsSizeParamIndex(out arrString, out string_Array_Size));
String[] expArrString = Helper.GetExpChangeArray<String>(expected_StringArraySize);
Assert.True(Helper.EqualArray<String>(arrString, string_Array_Size, expArrString, expArrString.Length));
Console.WriteLine(strDescription + " Ends!");
}
static int Main()
{
        try
        {
SizeParamTypeIsByte();
SizeParamTypeIsSByte();
SizeParamTypeIsShort1();
SizeParamTypeIsShort2();
SizeParamTypeIsUShort();
SizeParamTypeIsInt32();
SizeParamTypeIsUInt32();
SizeParamTypeIsLong();
SizeParamTypeIsULong();
if (OperatingSystem.IsWindows())
{
SizeParamTypeIsString();
}
return 100;
}
catch (Exception e)
{
Console.WriteLine($"Test Failure: {e}");
return 101;
}
}
}
| -1 |
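For context on the row above: the DllImport declarations only work against a native export that allocates the out array itself. Below is a minimal C sketch (not part of the dataset row) of one such export, assuming the standard out-LPArray contract in which the interop marshaller takes ownership of and frees the returned buffer; the real PInvokePassingByOutNative source, its allocator helper, and the exact fill pattern expected by Helper.GetExpChangeArray are not shown here, so the body is illustrative only.
#include <stdint.h>
#include <stddef.h>
/* Assumed helper: allocates with the allocator the marshaller pairs with free
 * (CoTaskMemAlloc on Windows); the real test library's name may differ.
 * Export/calling-convention decoration is also omitted for brevity. */
extern void *CoreClrAlloc(size_t cb);
/* Counterpart of: out byte arrSize,
 *   [MarshalAs(UnmanagedType.LPArray, SizeParamIndex = 0)] out byte[] arrByte.
 * The default bool return marshals as a 4-byte BOOL, hence int32_t. */
int32_t MarshalCStyleArrayByte_AsByOut_AsSizeParamIndex(uint8_t *arrSize, uint8_t **arrByte)
{
    *arrSize = 1; /* the managed test expects a one-element array back */
    *arrByte = (uint8_t *)CoreClrAlloc(*arrSize * sizeof(uint8_t));
    for (uint8_t i = 0; i < *arrSize; ++i)
        (*arrByte)[i] = i; /* placeholder values; real contents must match Helper.GetExpChangeArray */
    return 1; /* marshals back as true */
}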
dotnet/runtime | 66,007 | [monoapi] Add mono_method_get_unmanaged_callers_only_ftnptr | Like `RuntimeMethodHandle.GetFunctionPointer`, but callable from native code | lambdageek | 2022-03-01T15:38:09Z | 2022-03-03T15:38:37Z | faa272f860f83802a9cee65619e6e2e841b05433 | 1cfa6d6964d890f9673b25d2165532f9a43710a7 | [monoapi] Add mono_method_get_unmanaged_callers_only_ftnptr. Like `RuntimeMethodHandle.GetFunctionPointer`, but callable from native code | ./src/tests/JIT/Regression/CLR-x86-JIT/v2.1/b609280/b609280.csproj | <Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<OutputType>Exe</OutputType>
<CLRTestPriority>1</CLRTestPriority>
</PropertyGroup>
<PropertyGroup>
<DebugType>PdbOnly</DebugType>
<Optimize>True</Optimize>
</PropertyGroup>
<ItemGroup>
<Compile Include="$(MSBuildProjectName).cs" />
</ItemGroup>
</Project>
| <Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<OutputType>Exe</OutputType>
<CLRTestPriority>1</CLRTestPriority>
</PropertyGroup>
<PropertyGroup>
<DebugType>PdbOnly</DebugType>
<Optimize>True</Optimize>
</PropertyGroup>
<ItemGroup>
<Compile Include="$(MSBuildProjectName).cs" />
</ItemGroup>
</Project>
| -1 |
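The PR recorded in this row adds a native entry point that, per its description, hands back a C-callable function pointer the way RuntimeMethodHandle.GetFunctionPointer does for managed callers. A hedged C sketch of embedder usage follows; the parameter list is inferred from the PR description rather than shown in this row, Adder:Add(int,int) is a hypothetical [UnmanagedCallersOnly] method, and MonoError setup/cleanup follows mono's embedding conventions and is elided here.
#include <mono/jit/jit.h>
#include <mono/metadata/debug-helpers.h>
typedef int (*add_fn)(int, int);
static add_fn
get_native_add(MonoImage *image)
{
    /* Locate a hypothetical static method Adder:Add(int,int). */
    MonoMethodDesc *desc = mono_method_desc_new("Adder:Add(int,int)", 0);
    MonoMethod *method = mono_method_desc_search_in_image(desc, image);
    mono_method_desc_free(desc);
    /* Assumed shape: void* mono_method_get_unmanaged_callers_only_ftnptr (MonoMethod*, MonoError*). */
    MonoError error;
    void *ftnptr = mono_method_get_unmanaged_callers_only_ftnptr(method, &error);
    return (add_fn)ftnptr;
}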
dotnet/runtime | 66,007 | [monoapi] Add mono_method_get_unmanaged_callers_only_ftnptr | Like `RuntimeMethodHandle.GetFunctionPointer`, but callable from native code | lambdageek | 2022-03-01T15:38:09Z | 2022-03-03T15:38:37Z | faa272f860f83802a9cee65619e6e2e841b05433 | 1cfa6d6964d890f9673b25d2165532f9a43710a7 | [monoapi] Add mono_method_get_unmanaged_callers_only_ftnptr. Like `RuntimeMethodHandle.GetFunctionPointer`, but callable from native code | ./src/mono/mono/tests/bug-348522.2.cs | //
// From test: Bug 348522
//
using System;
using System.Reflection;
using System.Globalization;
public struct SimpleStruct {
public int a;
public int b;
public SimpleStruct (int a, int b)
{
this.a = a;
this.b = b;
}
}
class NullableTestClass
{
public bool hasValue;
public int bVal;
public void F (SimpleStruct? code)
{
if (hasValue = code.HasValue)
bVal = code.Value.b;
}
}
class PrimitiveTestClass
{
public int val;
public void i4 (int code) {
val = code;
}
}
struct GenericStruct<T>
{
T t;
}
class GenericClass<T>
{
T t;
}
class Driver
{
public static GenericStruct<T> StructTest <T> (GenericStruct <T> t)
{
return t;
}
public static GenericClass<T> ReferenceTest <T> (GenericClass <T> t)
{
return t;
}
static int Main ()
{
BindingFlags flags = BindingFlags.Instance | BindingFlags.Public | BindingFlags.InvokeMethod;
MethodInfo mi = typeof (NullableTestClass).GetMethod ("F");
NullableTestClass nullable = new NullableTestClass ();
SimpleStruct? test = new SimpleStruct (90, 90);
mi.Invoke (nullable, flags, new PassesStuffBinder (null), new object [] {null}, null);
if (nullable.hasValue) {
Console.WriteLine ("invoked nullabled with null arg but did not get a null in the method");
return 1;
}
nullable = new NullableTestClass ();
mi.Invoke (nullable, flags, new PassesStuffBinder (new SimpleStruct (10, 20)), new object [] {200}, null);
if (!nullable.hasValue || nullable.bVal != 20) {
Console.WriteLine ("invoked nullabled with boxed struct, but did not get it");
return 2;
}
nullable = new NullableTestClass ();
mi.Invoke (nullable, flags, new PassesStuffBinder (test), new object [] {200}, null);
if (!nullable.hasValue || nullable.bVal != 90) {
Console.WriteLine ("invoked nullabled with nullable literal, but did not get it");
return 3;
}
mi = typeof (PrimitiveTestClass).GetMethod ("i4");
PrimitiveTestClass prim = new PrimitiveTestClass ();
mi.Invoke (prim, flags, new PassesStuffBinder ((byte)10), new object [] {88}, null);
if (prim.val != 10) {
Console.WriteLine ("invoked primitive with byte, it should be widened to int "+ prim.val);
return 4;
}
try {
mi.Invoke (prim, flags, new PassesStuffBinder (Missing.Value), new object [] {null}, null);
Console.WriteLine ("invoked literal with reference value");
return 5;
} catch (Exception) {
}
try {
MethodInfo method = typeof (Driver).GetMethod ("StructTest");
MethodInfo generic_method = method.MakeGenericMethod (typeof (int));
generic_method.Invoke (null, new object [] { new GenericStruct<int>() });
method = typeof (Driver).GetMethod ("ReferenceTest");
generic_method = method.MakeGenericMethod (typeof (int));
generic_method.Invoke (null, new object [] { new GenericClass<int>() });
} catch (Exception e) {
Console.WriteLine ("calling with generic arg failed "+e);
return 6;
}
return 0;
}
}
class PassesStuffBinder : BaseBinder
{
object stuff = null;
public PassesStuffBinder (object stuff)
{
this.stuff = stuff;
}
public override object ChangeType (object value, Type type1, CultureInfo culture)
{
return stuff;
}
}
class BaseBinder : Binder {
public override MethodBase BindToMethod (BindingFlags bindingAttr, MethodBase [] match, ref object [] args,
ParameterModifier [] modifiers, CultureInfo culture, string [] names,
out object state)
{
state = null;
return match [0];
}
public override object ChangeType (object value, Type type1, CultureInfo culture)
{
return (ulong) 0xdeadbeefcafebabe;
}
// The rest is just to please the compiler
public override FieldInfo BindToField (System.Reflection.BindingFlags a,
System.Reflection.FieldInfo[] b, object c, System.Globalization.CultureInfo d)
{
return null;
}
public override void ReorderArgumentArray(ref object[] a, object b) {
}
public override MethodBase SelectMethod(System.Reflection.BindingFlags
a, System.Reflection.MethodBase[] b, System.Type[] c,
System.Reflection.ParameterModifier[] d) {
return null;
}
public override PropertyInfo
SelectProperty(System.Reflection.BindingFlags a,
System.Reflection.PropertyInfo[] b, System.Type c, System.Type[] d,
System.Reflection.ParameterModifier[] e) {
return null;
}
}
| //
// From test: Bug 348522
//
using System;
using System.Reflection;
using System.Globalization;
public struct SimpleStruct {
public int a;
public int b;
public SimpleStruct (int a, int b)
{
this.a = a;
this.b = b;
}
}
class NullableTestClass
{
public bool hasValue;
public int bVal;
public void F (SimpleStruct? code)
{
if (hasValue = code.HasValue)
bVal = code.Value.b;
}
}
class PrimitiveTestClass
{
public int val;
public void i4 (int code) {
val = code;
}
}
struct GenericStruct<T>
{
T t;
}
class GenericClass<T>
{
T t;
}
class Driver
{
public static GenericStruct<T> StructTest <T> (GenericStruct <T> t)
{
return t;
}
public static GenericClass<T> ReferenceTest <T> (GenericClass <T> t)
{
return t;
}
static int Main ()
{
BindingFlags flags = BindingFlags.Instance | BindingFlags.Public | BindingFlags.InvokeMethod;
MethodInfo mi = typeof (NullableTestClass).GetMethod ("F");
NullableTestClass nullable = new NullableTestClass ();
SimpleStruct? test = new SimpleStruct (90, 90);
mi.Invoke (nullable, flags, new PassesStuffBinder (null), new object [] {null}, null);
if (nullable.hasValue) {
Console.WriteLine ("invoked nullabled with null arg but did not get a null in the method");
return 1;
}
nullable = new NullableTestClass ();
mi.Invoke (nullable, flags, new PassesStuffBinder (new SimpleStruct (10, 20)), new object [] {200}, null);
if (!nullable.hasValue || nullable.bVal != 20) {
Console.WriteLine ("invoked nullabled with boxed struct, but did not get it");
return 2;
}
nullable = new NullableTestClass ();
mi.Invoke (nullable, flags, new PassesStuffBinder (test), new object [] {200}, null);
if (!nullable.hasValue || nullable.bVal != 90) {
Console.WriteLine ("invoked nullabled with nullable literal, but did not get it");
return 3;
}
mi = typeof (PrimitiveTestClass).GetMethod ("i4");
PrimitiveTestClass prim = new PrimitiveTestClass ();
mi.Invoke (prim, flags, new PassesStuffBinder ((byte)10), new object [] {88}, null);
if (prim.val != 10) {
Console.WriteLine ("invoked primitive with byte, it should be widened to int "+ prim.val);
return 4;
}
try {
mi.Invoke (prim, flags, new PassesStuffBinder (Missing.Value), new object [] {null}, null);
Console.WriteLine ("invoked literal with reference value");
return 5;
} catch (Exception) {
}
try {
MethodInfo method = typeof (Driver).GetMethod ("StructTest");
MethodInfo generic_method = method.MakeGenericMethod (typeof (int));
generic_method.Invoke (null, new object [] { new GenericStruct<int>() });
method = typeof (Driver).GetMethod ("ReferenceTest");
generic_method = method.MakeGenericMethod (typeof (int));
generic_method.Invoke (null, new object [] { new GenericClass<int>() });
} catch (Exception e) {
Console.WriteLine ("calling with generic arg failed "+e);
return 6;
}
return 0;
}
}
class PassesStuffBinder : BaseBinder
{
object stuff = null;
public PassesStuffBinder (object stuff)
{
this.stuff = stuff;
}
public override object ChangeType (object value, Type type1, CultureInfo culture)
{
return stuff;
}
}
class BaseBinder : Binder {
public override MethodBase BindToMethod (BindingFlags bindingAttr, MethodBase [] match, ref object [] args,
ParameterModifier [] modifiers, CultureInfo culture, string [] names,
out object state)
{
state = null;
return match [0];
}
public override object ChangeType (object value, Type type1, CultureInfo culture)
{
return (ulong) 0xdeadbeefcafebabe;
}
// The rest is just to please the compiler
public override FieldInfo BindToField (System.Reflection.BindingFlags a,
System.Reflection.FieldInfo[] b, object c, System.Globalization.CultureInfo d)
{
return null;
}
public override void ReorderArgumentArray(ref object[] a, object b) {
}
public override MethodBase SelectMethod(System.Reflection.BindingFlags
a, System.Reflection.MethodBase[] b, System.Type[] c,
System.Reflection.ParameterModifier[] d) {
return null;
}
public override PropertyInfo
SelectProperty(System.Reflection.BindingFlags a,
System.Reflection.PropertyInfo[] b, System.Type c, System.Type[] d,
System.Reflection.ParameterModifier[] e) {
return null;
}
}
| -1 |
dotnet/runtime | 66,007 | [monoapi] Add mono_method_get_unmanaged_callers_only_ftnptr | Like `RuntimeMethodHandle.GetFunctionPointer`, but callable from native code | lambdageek | 2022-03-01T15:38:09Z | 2022-03-03T15:38:37Z | faa272f860f83802a9cee65619e6e2e841b05433 | 1cfa6d6964d890f9673b25d2165532f9a43710a7 | [monoapi] Add mono_method_get_unmanaged_callers_only_ftnptr. Like `RuntimeMethodHandle.GetFunctionPointer`, but callable from native code | ./src/tests/JIT/Regression/CLR-x86-JIT/V1-M10/b06464/b06464.cs | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//
using System;
class Test_b06464
{
static int[] a = new int[10];
static int[] A()
{
Console.WriteLine("A");
return a;
}
static int F()
{
Console.WriteLine("F");
return 1;
}
static int G()
{
Console.WriteLine("G");
return 1;
}
public static int Main()
{
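        // The element assignment evaluates left to right: A() (array), then F() (index),
        // then G() (value), so the output order is "A", "F", "G".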
A()[F()] = G();
return 100;
}
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//
using System;
class Test_b06464
{
static int[] a = new int[10];
static int[] A()
{
Console.WriteLine("A");
return a;
}
static int F()
{
Console.WriteLine("F");
return 1;
}
static int G()
{
Console.WriteLine("G");
return 1;
}
public static int Main()
{
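        // The element assignment evaluates left to right: A() (array), then F() (index),
        // then G() (value), so the output order is "A", "F", "G".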
A()[F()] = G();
return 100;
}
}
| -1 |
dotnet/runtime | 66,007 | [monoapi] Add mono_method_get_unmanaged_callers_only_ftnptr | Like `RuntimeMethodHandle.GetFunctionPointer`, but callable from native code | lambdageek | 2022-03-01T15:38:09Z | 2022-03-03T15:38:37Z | faa272f860f83802a9cee65619e6e2e841b05433 | 1cfa6d6964d890f9673b25d2165532f9a43710a7 | [monoapi] Add mono_method_get_unmanaged_callers_only_ftnptr. Like `RuntimeMethodHandle.GetFunctionPointer`, but callable from native code | ./src/libraries/System.Security.Cryptography.Xml/src/System/Security/Cryptography/Xml/CanonicalXmlDocument.cs | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System.Xml;
using System.Text;
namespace System.Security.Cryptography.Xml
{
    // All input types eventually lead to the creation of an XmlDocument of this type;
    // it maintains the node-subset state and performs output rendering during canonicalization.
internal sealed class CanonicalXmlDocument : XmlDocument, ICanonicalizableNode
{
private readonly bool _defaultNodeSetInclusionState;
private readonly bool _includeComments;
private bool _isInNodeSet;
public CanonicalXmlDocument(bool defaultNodeSetInclusionState, bool includeComments) : base()
{
PreserveWhitespace = true;
_includeComments = includeComments;
_isInNodeSet = _defaultNodeSetInclusionState = defaultNodeSetInclusionState;
}
public bool IsInNodeSet
{
get { return _isInNodeSet; }
set { _isInNodeSet = value; }
}
public void Write(StringBuilder strBuilder, DocPosition docPos, AncestralNamespaceContextManager anc)
{
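            // The caller-supplied docPos is ignored; rendering a whole document always restarts at BeforeRootElement.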
docPos = DocPosition.BeforeRootElement;
foreach (XmlNode childNode in ChildNodes)
{
if (childNode.NodeType == XmlNodeType.Element)
{
CanonicalizationDispatcher.Write(childNode, strBuilder, DocPosition.InRootElement, anc);
docPos = DocPosition.AfterRootElement;
}
else
{
CanonicalizationDispatcher.Write(childNode, strBuilder, docPos, anc);
}
}
}
public void WriteHash(HashAlgorithm hash, DocPosition docPos, AncestralNamespaceContextManager anc)
{
docPos = DocPosition.BeforeRootElement;
foreach (XmlNode childNode in ChildNodes)
{
if (childNode.NodeType == XmlNodeType.Element)
{
CanonicalizationDispatcher.WriteHash(childNode, hash, DocPosition.InRootElement, anc);
docPos = DocPosition.AfterRootElement;
}
else
{
CanonicalizationDispatcher.WriteHash(childNode, hash, docPos, anc);
}
}
}
public override XmlElement CreateElement(string prefix, string localName, string namespaceURI)
{
return new CanonicalXmlElement(prefix, localName, namespaceURI, this, _defaultNodeSetInclusionState);
}
public override XmlAttribute CreateAttribute(string prefix, string localName, string namespaceURI)
{
return new CanonicalXmlAttribute(prefix, localName, namespaceURI, this, _defaultNodeSetInclusionState);
}
protected override XmlAttribute CreateDefaultAttribute(string prefix, string localName, string namespaceURI)
{
return new CanonicalXmlAttribute(prefix, localName, namespaceURI, this, _defaultNodeSetInclusionState);
}
public override XmlText CreateTextNode(string text)
{
return new CanonicalXmlText(text, this, _defaultNodeSetInclusionState);
}
public override XmlWhitespace CreateWhitespace(string prefix)
{
return new CanonicalXmlWhitespace(prefix, this, _defaultNodeSetInclusionState);
}
public override XmlSignificantWhitespace CreateSignificantWhitespace(string text)
{
return new CanonicalXmlSignificantWhitespace(text, this, _defaultNodeSetInclusionState);
}
public override XmlProcessingInstruction CreateProcessingInstruction(string target, string data)
{
return new CanonicalXmlProcessingInstruction(target, data, this, _defaultNodeSetInclusionState);
}
public override XmlComment CreateComment(string data)
{
return new CanonicalXmlComment(data, this, _defaultNodeSetInclusionState, _includeComments);
}
public override XmlEntityReference CreateEntityReference(string name)
{
return new CanonicalXmlEntityReference(name, this, _defaultNodeSetInclusionState);
}
public override XmlCDataSection CreateCDataSection(string data)
{
return new CanonicalXmlCDataSection(data, this, _defaultNodeSetInclusionState);
}
}
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System.Xml;
using System.Text;
namespace System.Security.Cryptography.Xml
{
    // All input types eventually lead to the creation of an XmlDocument of this type;
    // it maintains the node-subset state and performs output rendering during canonicalization.
internal sealed class CanonicalXmlDocument : XmlDocument, ICanonicalizableNode
{
private readonly bool _defaultNodeSetInclusionState;
private readonly bool _includeComments;
private bool _isInNodeSet;
public CanonicalXmlDocument(bool defaultNodeSetInclusionState, bool includeComments) : base()
{
PreserveWhitespace = true;
_includeComments = includeComments;
_isInNodeSet = _defaultNodeSetInclusionState = defaultNodeSetInclusionState;
}
public bool IsInNodeSet
{
get { return _isInNodeSet; }
set { _isInNodeSet = value; }
}
public void Write(StringBuilder strBuilder, DocPosition docPos, AncestralNamespaceContextManager anc)
{
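            // The caller-supplied docPos is ignored; rendering a whole document always restarts at BeforeRootElement.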
docPos = DocPosition.BeforeRootElement;
foreach (XmlNode childNode in ChildNodes)
{
if (childNode.NodeType == XmlNodeType.Element)
{
CanonicalizationDispatcher.Write(childNode, strBuilder, DocPosition.InRootElement, anc);
docPos = DocPosition.AfterRootElement;
}
else
{
CanonicalizationDispatcher.Write(childNode, strBuilder, docPos, anc);
}
}
}
public void WriteHash(HashAlgorithm hash, DocPosition docPos, AncestralNamespaceContextManager anc)
{
docPos = DocPosition.BeforeRootElement;
foreach (XmlNode childNode in ChildNodes)
{
if (childNode.NodeType == XmlNodeType.Element)
{
CanonicalizationDispatcher.WriteHash(childNode, hash, DocPosition.InRootElement, anc);
docPos = DocPosition.AfterRootElement;
}
else
{
CanonicalizationDispatcher.WriteHash(childNode, hash, docPos, anc);
}
}
}
public override XmlElement CreateElement(string prefix, string localName, string namespaceURI)
{
return new CanonicalXmlElement(prefix, localName, namespaceURI, this, _defaultNodeSetInclusionState);
}
public override XmlAttribute CreateAttribute(string prefix, string localName, string namespaceURI)
{
return new CanonicalXmlAttribute(prefix, localName, namespaceURI, this, _defaultNodeSetInclusionState);
}
protected override XmlAttribute CreateDefaultAttribute(string prefix, string localName, string namespaceURI)
{
return new CanonicalXmlAttribute(prefix, localName, namespaceURI, this, _defaultNodeSetInclusionState);
}
public override XmlText CreateTextNode(string text)
{
return new CanonicalXmlText(text, this, _defaultNodeSetInclusionState);
}
public override XmlWhitespace CreateWhitespace(string prefix)
{
return new CanonicalXmlWhitespace(prefix, this, _defaultNodeSetInclusionState);
}
public override XmlSignificantWhitespace CreateSignificantWhitespace(string text)
{
return new CanonicalXmlSignificantWhitespace(text, this, _defaultNodeSetInclusionState);
}
public override XmlProcessingInstruction CreateProcessingInstruction(string target, string data)
{
return new CanonicalXmlProcessingInstruction(target, data, this, _defaultNodeSetInclusionState);
}
public override XmlComment CreateComment(string data)
{
return new CanonicalXmlComment(data, this, _defaultNodeSetInclusionState, _includeComments);
}
public override XmlEntityReference CreateEntityReference(string name)
{
return new CanonicalXmlEntityReference(name, this, _defaultNodeSetInclusionState);
}
public override XmlCDataSection CreateCDataSection(string data)
{
return new CanonicalXmlCDataSection(data, this, _defaultNodeSetInclusionState);
}
}
}
| -1 |
dotnet/runtime | 66,007 | [monoapi] Add mono_method_get_unmanaged_callers_only_ftnptr | Like `RuntimeMethodHandle.GetFunctionPointer`, but callable from native code | lambdageek | 2022-03-01T15:38:09Z | 2022-03-03T15:38:37Z | faa272f860f83802a9cee65619e6e2e841b05433 | 1cfa6d6964d890f9673b25d2165532f9a43710a7 | [monoapi] Add mono_method_get_unmanaged_callers_only_ftnptr. Like `RuntimeMethodHandle.GetFunctionPointer`, but callable from native code | ./src/tests/JIT/Generics/Parameters/instance_passing_class01.cs | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//
using System;
public struct ValX0 { }
public struct ValY0 { }
public struct ValX1<T> { }
public struct ValY1<T> { }
public struct ValX2<T, U> { }
public struct ValY2<T, U> { }
public struct ValX3<T, U, V> { }
public struct ValY3<T, U, V> { }
public class RefX0 { }
public class RefY0 { }
public class RefX1<T> { }
public class RefY1<T> { }
public class RefX2<T, U> { }
public class RefY2<T, U> { }
public class RefX3<T, U, V> { }
public class RefY3<T, U, V> { }
public class Gen<T>
{
public T PassAsIn(T t)
{
return t;
}
public T PassAsRef(ref T t, T Fld2)
{
T temp = t;
t = Fld2;
return temp;
}
public void PassAsOut(out T t, T Fld2)
{
t = Fld2;
}
public void PassAsParameter(T t1, T t2)
{
T temp = t1;
Test_instance_passing_class01.Eval(t1.Equals(PassAsIn(temp)));
Test_instance_passing_class01.Eval(t1.Equals(PassAsRef(ref temp, t2)));
Test_instance_passing_class01.Eval(t2.Equals(temp));
temp = t1;
PassAsOut(out temp, t2);
Test_instance_passing_class01.Eval(t2.Equals(temp));
}
}
public class Test_instance_passing_class01
{
public static int counter = 0;
public static bool result = true;
public static void Eval(bool exp)
{
counter++;
if (!exp)
{
result = exp;
Console.WriteLine("Test Failed at location: " + counter);
}
}
public static int Main()
{
int _int1 = 1;
int _int2 = -1;
new Gen<int>().PassAsParameter(_int1, _int2);
double _double1 = 1;
double _double2 = -1;
new Gen<double>().PassAsParameter(_double1, _double2);
string _string1 = "string1";
string _string2 = "string2";
new Gen<string>().PassAsParameter(_string1, _string2);
object _object1 = (object)_string1;
object _object2 = (object)_string2;
new Gen<object>().PassAsParameter(_object1, _object2);
Guid _Guid1 = new Guid(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1);
Guid _Guid2 = new Guid(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11);
new Gen<Guid>().PassAsParameter(_Guid1, _Guid2);
if (result)
{
Console.WriteLine("Test Passed");
return 100;
}
else
{
Console.WriteLine("Test Failed");
return 1;
}
}
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//
using System;
public struct ValX0 { }
public struct ValY0 { }
public struct ValX1<T> { }
public struct ValY1<T> { }
public struct ValX2<T, U> { }
public struct ValY2<T, U> { }
public struct ValX3<T, U, V> { }
public struct ValY3<T, U, V> { }
public class RefX0 { }
public class RefY0 { }
public class RefX1<T> { }
public class RefY1<T> { }
public class RefX2<T, U> { }
public class RefY2<T, U> { }
public class RefX3<T, U, V> { }
public class RefY3<T, U, V> { }
public class Gen<T>
{
public T PassAsIn(T t)
{
return t;
}
public T PassAsRef(ref T t, T Fld2)
{
T temp = t;
t = Fld2;
return temp;
}
public void PassAsOut(out T t, T Fld2)
{
t = Fld2;
}
public void PassAsParameter(T t1, T t2)
{
T temp = t1;
Test_instance_passing_class01.Eval(t1.Equals(PassAsIn(temp)));
Test_instance_passing_class01.Eval(t1.Equals(PassAsRef(ref temp, t2)));
Test_instance_passing_class01.Eval(t2.Equals(temp));
temp = t1;
PassAsOut(out temp, t2);
Test_instance_passing_class01.Eval(t2.Equals(temp));
}
}
public class Test_instance_passing_class01
{
public static int counter = 0;
public static bool result = true;
public static void Eval(bool exp)
{
counter++;
if (!exp)
{
result = exp;
Console.WriteLine("Test Failed at location: " + counter);
}
}
public static int Main()
{
int _int1 = 1;
int _int2 = -1;
new Gen<int>().PassAsParameter(_int1, _int2);
double _double1 = 1;
double _double2 = -1;
new Gen<double>().PassAsParameter(_double1, _double2);
string _string1 = "string1";
string _string2 = "string2";
new Gen<string>().PassAsParameter(_string1, _string2);
object _object1 = (object)_string1;
object _object2 = (object)_string2;
new Gen<object>().PassAsParameter(_object1, _object2);
Guid _Guid1 = new Guid(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1);
Guid _Guid2 = new Guid(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11);
new Gen<Guid>().PassAsParameter(_Guid1, _Guid2);
if (result)
{
Console.WriteLine("Test Passed");
return 100;
}
else
{
Console.WriteLine("Test Failed");
return 1;
}
}
}
| -1 |
dotnet/runtime | 66,007 | [monoapi] Add mono_method_get_unmanaged_callers_only_ftnptr | Like `RuntimeMethodHandle.GetFunctionPointer`, but callable from native code | lambdageek | 2022-03-01T15:38:09Z | 2022-03-03T15:38:37Z | faa272f860f83802a9cee65619e6e2e841b05433 | 1cfa6d6964d890f9673b25d2165532f9a43710a7 | [monoapi] Add mono_method_get_unmanaged_callers_only_ftnptr. Like `RuntimeMethodHandle.GetFunctionPointer`, but callable from native code | ./src/tests/Loader/classloader/TypeGeneratorTests/TypeGeneratorTest812/Generated812.ilproj | <Project Sdk="Microsoft.NET.Sdk.IL">
<PropertyGroup>
<CLRTestPriority>1</CLRTestPriority>
</PropertyGroup>
<ItemGroup>
<Compile Include="Generated812.il" />
</ItemGroup>
<ItemGroup>
<ProjectReference Include="..\TestFramework\TestFramework.csproj" />
</ItemGroup>
</Project>
| <Project Sdk="Microsoft.NET.Sdk.IL">
<PropertyGroup>
<CLRTestPriority>1</CLRTestPriority>
</PropertyGroup>
<ItemGroup>
<Compile Include="Generated812.il" />
</ItemGroup>
<ItemGroup>
<ProjectReference Include="..\TestFramework\TestFramework.csproj" />
</ItemGroup>
</Project>
| -1 |
dotnet/runtime | 66,007 | [monoapi] Add mono_method_get_unmanaged_callers_only_ftnptr | Like `RuntimeMethodHandle.GetFunctionPointer`, but callable from native code | lambdageek | 2022-03-01T15:38:09Z | 2022-03-03T15:38:37Z | faa272f860f83802a9cee65619e6e2e841b05433 | 1cfa6d6964d890f9673b25d2165532f9a43710a7 | [monoapi] Add mono_method_get_unmanaged_callers_only_ftnptr. Like `RuntimeMethodHandle.GetFunctionPointer`, but callable from native code | ./src/tests/JIT/Methodical/VT/etc/nested.il | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
.assembly extern mscorlib
{
}
.assembly 'nested' // as "nested"
{
}
.assembly extern xunit.core {}
.namespace JitTest
{
.class value private auto ansi sealed Struct1
extends [mscorlib]System.ValueType
{
.class value auto ansi sealed nested public Struct1$Struct2
extends [mscorlib]System.ValueType
{
.field private int32 m_i2
.field private int64 m_l2
.method public hidebysig instance void
Verify() il managed
{
.maxstack 8
IL_0000: ldarg.0
IL_0001: ldfld int32 JitTest.Struct1/Struct1$Struct2::m_i2
IL_0006: brtrue.s IL_0012
IL_0008: ldarg.0
IL_0009: ldfld int64 JitTest.Struct1/Struct1$Struct2::m_l2
IL_000e: ldc.i4.0
IL_000f: conv.i8
IL_0010: beq.s IL_0018
IL_0012: newobj instance void [mscorlib]System.Exception::.ctor()
IL_0017: throw
IL_0018: ret
} // end of method Struct1$Struct2::Verify
} // end of class Struct1$Struct2
.field private int32 m_i1
.field private int64 m_l1
.field public value class JitTest.Struct1/Struct1$Struct2 m_str2
.method public hidebysig instance void
Verify() il managed
{
.maxstack 8
IL_0000: ldarg.0
IL_0001: ldfld int32 JitTest.Struct1::m_i1
IL_0006: brtrue.s IL_0012
IL_0008: ldarg.0
IL_0009: ldfld int64 JitTest.Struct1::m_l1
IL_000e: ldc.i4.0
IL_000f: conv.i8
IL_0010: beq.s IL_0018
IL_0012: newobj instance void [mscorlib]System.Exception::.ctor()
IL_0017: throw
IL_0018: ldarg.0
IL_0019: ldflda value class JitTest.Struct1/Struct1$Struct2 JitTest.Struct1::m_str2
IL_001e: call instance void JitTest.Struct1/Struct1$Struct2::Verify()
IL_0023: ret
} // end of method Struct1::Verify
} // end of class Struct1
.class private auto ansi Test
extends [mscorlib]System.Object
{
.method private hidebysig static int32
Main() il managed
{
.custom instance void [xunit.core]Xunit.FactAttribute::.ctor() = (
01 00 00 00
)
.entrypoint
.maxstack 1
.locals (value class JitTest.Struct1 V_0,
typedref V_1,
value class JitTest.Struct1/Struct1$Struct2 V_2)
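        // Round-trip V_0 through a typedref (mkrefany/refanyval), copy it back, and Verify();
        // then do the same for the nested Struct1$Struct2 field.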
IL_0000: ldloca.s V_0
IL_0002: initobj JitTest.Struct1
IL_0008: ldloca.s V_0
IL_000a: mkrefany JitTest.Struct1
IL_0011: refanyval JitTest.Struct1
IL_0016: ldobj JitTest.Struct1
IL_001b: stloc.0
IL_001c: ldloca.s V_0
IL_001e: call instance void JitTest.Struct1::Verify()
IL_0023: ldloca.s V_0
IL_0025: ldflda value class JitTest.Struct1/Struct1$Struct2 JitTest.Struct1::m_str2
IL_002a: mkrefany JitTest.Struct1/Struct1$Struct2
IL_0031: refanyval JitTest.Struct1/Struct1$Struct2
IL_0036: ldobj JitTest.Struct1/Struct1$Struct2
IL_003b: stloc.2
IL_003c: ldloca.s V_2
IL_003e: call instance void JitTest.Struct1/Struct1$Struct2::Verify()
ldc.i4 100
IL_0043: ret
} // end of method Test::Main
.method public hidebysig specialname rtspecialname
instance void .ctor() il managed
{
.maxstack 8
IL_0000: ldarg.0
IL_0001: call instance void [mscorlib]System.Object::.ctor()
IL_0006: ret
} // end of method Test::.ctor
} // end of class Test
} // end of namespace JitTest
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
.assembly extern mscorlib
{
}
.assembly 'nested' // as "nested"
{
}
.assembly extern xunit.core {}
.namespace JitTest
{
.class value private auto ansi sealed Struct1
extends [mscorlib]System.ValueType
{
.class value auto ansi sealed nested public Struct1$Struct2
extends [mscorlib]System.ValueType
{
.field private int32 m_i2
.field private int64 m_l2
.method public hidebysig instance void
Verify() il managed
{
.maxstack 8
IL_0000: ldarg.0
IL_0001: ldfld int32 JitTest.Struct1/Struct1$Struct2::m_i2
IL_0006: brtrue.s IL_0012
IL_0008: ldarg.0
IL_0009: ldfld int64 JitTest.Struct1/Struct1$Struct2::m_l2
IL_000e: ldc.i4.0
IL_000f: conv.i8
IL_0010: beq.s IL_0018
IL_0012: newobj instance void [mscorlib]System.Exception::.ctor()
IL_0017: throw
IL_0018: ret
} // end of method Struct1$Struct2::Verify
} // end of class Struct1$Struct2
.field private int32 m_i1
.field private int64 m_l1
.field public value class JitTest.Struct1/Struct1$Struct2 m_str2
.method public hidebysig instance void
Verify() il managed
{
.maxstack 8
IL_0000: ldarg.0
IL_0001: ldfld int32 JitTest.Struct1::m_i1
IL_0006: brtrue.s IL_0012
IL_0008: ldarg.0
IL_0009: ldfld int64 JitTest.Struct1::m_l1
IL_000e: ldc.i4.0
IL_000f: conv.i8
IL_0010: beq.s IL_0018
IL_0012: newobj instance void [mscorlib]System.Exception::.ctor()
IL_0017: throw
IL_0018: ldarg.0
IL_0019: ldflda value class JitTest.Struct1/Struct1$Struct2 JitTest.Struct1::m_str2
IL_001e: call instance void JitTest.Struct1/Struct1$Struct2::Verify()
IL_0023: ret
} // end of method Struct1::Verify
} // end of class Struct1
.class private auto ansi Test
extends [mscorlib]System.Object
{
.method private hidebysig static int32
Main() il managed
{
.custom instance void [xunit.core]Xunit.FactAttribute::.ctor() = (
01 00 00 00
)
.entrypoint
.maxstack 1
.locals (value class JitTest.Struct1 V_0,
typedref V_1,
value class JitTest.Struct1/Struct1$Struct2 V_2)
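        // Round-trip V_0 through a typedref (mkrefany/refanyval), copy it back, and Verify();
        // then do the same for the nested Struct1$Struct2 field.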
IL_0000: ldloca.s V_0
IL_0002: initobj JitTest.Struct1
IL_0008: ldloca.s V_0
IL_000a: mkrefany JitTest.Struct1
IL_0011: refanyval JitTest.Struct1
IL_0016: ldobj JitTest.Struct1
IL_001b: stloc.0
IL_001c: ldloca.s V_0
IL_001e: call instance void JitTest.Struct1::Verify()
IL_0023: ldloca.s V_0
IL_0025: ldflda value class JitTest.Struct1/Struct1$Struct2 JitTest.Struct1::m_str2
IL_002a: mkrefany JitTest.Struct1/Struct1$Struct2
IL_0031: refanyval JitTest.Struct1/Struct1$Struct2
IL_0036: ldobj JitTest.Struct1/Struct1$Struct2
IL_003b: stloc.2
IL_003c: ldloca.s V_2
IL_003e: call instance void JitTest.Struct1/Struct1$Struct2::Verify()
ldc.i4 100
IL_0043: ret
} // end of method Test::Main
.method public hidebysig specialname rtspecialname
instance void .ctor() il managed
{
.maxstack 8
IL_0000: ldarg.0
IL_0001: call instance void [mscorlib]System.Object::.ctor()
IL_0006: ret
} // end of method Test::.ctor
} // end of class Test
} // end of namespace JitTest
| -1 |
dotnet/runtime | 66,006 | Revert "[mono][jit] Remove support for -O=-float32, i.e. treating r4 values a…" | Reverts this as x86 still uses it. | vargaz | 2022-03-01T15:15:53Z | 2022-03-01T20:09:47Z | af71404d9a3b38e63408af47cc847ac1a1fcf4e1 | 2dd232a53c38ac874b15fe504df275b660988294 | Revert "[mono][jit] Remove support for -O=-float32, i.e. treating r4 values a…". Reverts this as x86 still uses it. | ./src/mono/mono/mini/calls.c | /**
* \file
*/
#include <config.h>
#include <mono/utils/mono-compiler.h>
#ifndef DISABLE_JIT
#include "mini.h"
#include "ir-emit.h"
#include "mini-runtime.h"
#include "llvmonly-runtime.h"
#include "mini-llvm.h"
#include "jit-icalls.h"
#include "aot-compiler.h"
#include <mono/metadata/abi-details.h>
#include <mono/metadata/class-abi-details.h>
#include <mono/utils/mono-utils-debug.h>
#include "mono/metadata/icall-signatures.h"
static const gboolean debug_tailcall_break_compile = FALSE; // break in method_to_ir
static const gboolean debug_tailcall_break_run = FALSE; // insert breakpoint in generated code
MonoJumpInfoTarget
mono_call_to_patch (MonoCallInst *call)
{
MonoJumpInfoTarget patch;
MonoJitICallId jit_icall_id;
// This is similar to amd64 emit_call.
if (call->inst.flags & MONO_INST_HAS_METHOD) {
patch.type = MONO_PATCH_INFO_METHOD;
patch.target = call->method;
} else if ((jit_icall_id = call->jit_icall_id)) {
patch.type = MONO_PATCH_INFO_JIT_ICALL_ID;
patch.target = GUINT_TO_POINTER (jit_icall_id);
} else {
patch.type = MONO_PATCH_INFO_ABS;
patch.target = call->fptr;
}
return patch;
}
void
mono_call_add_patch_info (MonoCompile *cfg, MonoCallInst *call, int ip)
{
const MonoJumpInfoTarget patch = mono_call_to_patch (call);
mono_add_patch_info (cfg, ip, patch.type, patch.target);
}
void
mini_test_tailcall (MonoCompile *cfg, gboolean tailcall)
{
// A lot of tests say "tailcall" throughout their verbose output.
// "tailcalllog" is more searchable.
//
// Do not change "tailcalllog" here without changing other places, e.g. tests that search for it.
//
g_assertf (tailcall || !mini_debug_options.test_tailcall_require, "tailcalllog fail from %s", cfg->method->name);
mono_tailcall_print ("tailcalllog %s from %s\n", tailcall ? "success" : "fail", cfg->method->name);
}
void
mini_emit_tailcall_parameters (MonoCompile *cfg, MonoMethodSignature *sig)
{
// OP_TAILCALL_PARAMETER helps compute the size of code, in order
// to size branches around OP_TAILCALL_[REG,MEMBASE].
//
// The actual bytes are output from OP_TAILCALL_[REG,MEMBASE].
// OP_TAILCALL_PARAMETER is an overestimate because typically
// many parameters are in registers.
const int n = sig->param_count + (sig->hasthis ? 1 : 0);
for (int i = 0; i < n; ++i) {
MonoInst *ins;
MONO_INST_NEW (cfg, ins, OP_TAILCALL_PARAMETER);
MONO_ADD_INS (cfg->cbb, ins);
}
}
static int
ret_type_to_call_opcode (MonoCompile *cfg, MonoType *type, int calli, int virt)
{
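	/* Map a managed return type onto the matching call opcode family
	 * (VOIDCALL/CALL/LCALL/RCALL/FCALL/VCALL); the _REG variants are used
	 * for indirect (calli) calls and the _MEMBASE variants for virtual calls. */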
handle_enum:
type = mini_get_underlying_type (type);
switch (type->type) {
case MONO_TYPE_VOID:
return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALL_MEMBASE: OP_VOIDCALL;
case MONO_TYPE_I1:
case MONO_TYPE_U1:
case MONO_TYPE_I2:
case MONO_TYPE_U2:
case MONO_TYPE_I4:
case MONO_TYPE_U4:
return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
case MONO_TYPE_I:
case MONO_TYPE_U:
case MONO_TYPE_PTR:
case MONO_TYPE_FNPTR:
return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
case MONO_TYPE_CLASS:
case MONO_TYPE_STRING:
case MONO_TYPE_OBJECT:
case MONO_TYPE_SZARRAY:
case MONO_TYPE_ARRAY:
return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
case MONO_TYPE_I8:
case MONO_TYPE_U8:
return calli? OP_LCALL_REG: virt? OP_LCALL_MEMBASE: OP_LCALL;
case MONO_TYPE_R4:
return calli? OP_RCALL_REG: virt? OP_RCALL_MEMBASE: OP_RCALL;
case MONO_TYPE_R8:
return calli? OP_FCALL_REG: virt? OP_FCALL_MEMBASE: OP_FCALL;
case MONO_TYPE_VALUETYPE:
if (m_class_is_enumtype (type->data.klass)) {
type = mono_class_enum_basetype_internal (type->data.klass);
goto handle_enum;
} else
return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
case MONO_TYPE_TYPEDBYREF:
return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
case MONO_TYPE_GENERICINST:
type = m_class_get_byval_arg (type->data.generic_class->container_class);
goto handle_enum;
case MONO_TYPE_VAR:
case MONO_TYPE_MVAR:
/* gsharedvt */
return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
default:
g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
}
return -1;
}
MonoCallInst *
mini_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
MonoInst **args, gboolean calli, gboolean virtual_, gboolean tailcall,
gboolean rgctx, gboolean unbox_trampoline, MonoMethod *target)
{
MonoType *sig_ret;
MonoCallInst *call;
cfg->has_calls = TRUE;
if (tailcall && cfg->llvm_only) {
// FIXME tailcall should not be changed this late.
// FIXME It really should not be changed due to llvm_only.
// Accuracy is presently available MONO_IS_TAILCALL_OPCODE (call).
tailcall = FALSE;
mono_tailcall_print ("losing tailcall in %s due to llvm_only\n", cfg->method->name);
mini_test_tailcall (cfg, FALSE);
}
if (tailcall && (debug_tailcall_break_compile || debug_tailcall_break_run)
&& mono_is_usermode_native_debugger_present ()) {
if (debug_tailcall_break_compile)
G_BREAKPOINT ();
if (tailcall && debug_tailcall_break_run) { // Can change tailcall in debugger.
MonoInst *brk;
MONO_INST_NEW (cfg, brk, OP_BREAK);
MONO_ADD_INS (cfg->cbb, brk);
}
}
if (tailcall) {
mini_profiler_emit_tail_call (cfg, target);
mini_emit_tailcall_parameters (cfg, sig);
MONO_INST_NEW_CALL (cfg, call, calli ? OP_TAILCALL_REG : virtual_ ? OP_TAILCALL_MEMBASE : OP_TAILCALL);
} else
MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (cfg, sig->ret, calli, virtual_));
call->args = args;
call->signature = sig;
call->rgctx_reg = rgctx;
sig_ret = mini_get_underlying_type (sig->ret);
mini_type_to_eval_stack_type ((cfg), sig_ret, &call->inst);
if (tailcall) {
if (mini_type_is_vtype (sig_ret)) {
call->vret_var = cfg->vret_addr;
//g_assert_not_reached ();
}
} else if (mini_type_is_vtype (sig_ret)) {
MonoInst *temp = mono_compile_create_var (cfg, sig_ret, OP_LOCAL);
MonoInst *loada;
temp->backend.is_pinvoke = sig->pinvoke && !sig->marshalling_disabled;
/*
* We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
* address of return value to increase optimization opportunities.
* Before vtype decomposition, the dreg of the call ins itself represents the
* fact the call modifies the return value. After decomposition, the call will
* be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
* will be transformed into an LDADDR.
*/
MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
loada->dreg = alloc_preg (cfg);
loada->inst_p0 = temp;
/* We reference the call too since call->dreg could change during optimization */
loada->inst_p1 = call;
MONO_ADD_INS (cfg->cbb, loada);
call->inst.dreg = temp->dreg;
call->vret_var = loada;
} else if (!MONO_TYPE_IS_VOID (sig_ret))
call->inst.dreg = alloc_dreg (cfg, (MonoStackType)call->inst.type);
#ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
if (COMPILE_SOFT_FLOAT (cfg)) {
/*
* If the call has a float argument, we would need to do an r8->r4 conversion using
* an icall, but that cannot be done during the call sequence since it would clobber
* the call registers + the stack. So we do it before emitting the call.
*/
for (int i = 0; i < sig->param_count + sig->hasthis; ++i) {
MonoType *t;
MonoInst *in = call->args [i];
if (i >= sig->hasthis)
t = sig->params [i - sig->hasthis];
else
t = mono_get_int_type ();
t = mono_type_get_underlying_type (t);
if (!m_type_is_byref (t) && t->type == MONO_TYPE_R4) {
MonoInst *iargs [1];
MonoInst *conv;
iargs [0] = in;
conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
/* The result will be in an int vreg */
call->args [i] = conv;
}
}
}
#endif
call->need_unbox_trampoline = unbox_trampoline;
#ifdef ENABLE_LLVM
if (COMPILE_LLVM (cfg))
mono_llvm_emit_call (cfg, call);
else
mono_arch_emit_call (cfg, call);
#else
mono_arch_emit_call (cfg, call);
#endif
cfg->param_area = MAX (cfg->param_area, call->stack_usage);
cfg->flags |= MONO_CFG_HAS_CALLS;
return call;
}
gboolean
mini_should_check_stack_pointer (MonoCompile *cfg)
{
// This logic is shared by mini_emit_calli_full and is_supported_tailcall,
// in order to compute tailcall_supported earlier. Alternatively it could be passed
// out from mini_emit_calli_full -- if it has not been copied around
// or decisions made based on it.
WrapperInfo *info;
return cfg->check_pinvoke_callconv &&
cfg->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE &&
((info = mono_marshal_get_wrapper_info (cfg->method))) &&
info->subtype == WRAPPER_SUBTYPE_PINVOKE;
}
static void
set_rgctx_arg (MonoCompile *cfg, MonoCallInst *call, int rgctx_reg, MonoInst *rgctx_arg)
{
mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
cfg->uses_rgctx_reg = TRUE;
call->rgctx_reg = TRUE;
#ifdef ENABLE_LLVM
call->rgctx_arg_reg = rgctx_reg;
#endif
}
/* Either METHOD or IMT_ARG needs to be set */
static void
emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoMethod *method, MonoInst *imt_arg)
{
int method_reg;
g_assert (method || imt_arg);
if (COMPILE_LLVM (cfg)) {
if (imt_arg) {
method_reg = alloc_preg (cfg);
MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
} else {
MonoInst *ins = mini_emit_runtime_constant (cfg, MONO_PATCH_INFO_METHODCONST, method);
method_reg = ins->dreg;
}
#ifdef ENABLE_LLVM
call->imt_arg_reg = method_reg;
#endif
mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
return;
}
if (imt_arg) {
method_reg = alloc_preg (cfg);
MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
} else {
MonoInst *ins = mini_emit_runtime_constant (cfg, MONO_PATCH_INFO_METHODCONST, method);
method_reg = ins->dreg;
}
mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
}
MonoInst*
mini_emit_calli_full (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr,
MonoInst *imt_arg, MonoInst *rgctx_arg, gboolean tailcall)
{
MonoCallInst *call;
MonoInst *ins;
int rgctx_reg = -1;
g_assert (!rgctx_arg || !imt_arg);
if (rgctx_arg) {
rgctx_reg = mono_alloc_preg (cfg);
MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
}
const gboolean check_sp = mini_should_check_stack_pointer (cfg);
// Checking stack pointer requires running code after a function call, prevents tailcall.
// Caller needs to have decided that earlier.
g_assert (!check_sp || !tailcall);
if (check_sp) {
if (!cfg->stack_inbalance_var)
cfg->stack_inbalance_var = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL);
MONO_INST_NEW (cfg, ins, OP_GET_SP);
ins->dreg = cfg->stack_inbalance_var->dreg;
MONO_ADD_INS (cfg->cbb, ins);
}
call = mini_emit_call_args (cfg, sig, args, TRUE, FALSE, tailcall, rgctx_arg ? TRUE : FALSE, FALSE, NULL);
call->inst.sreg1 = addr->dreg;
if (imt_arg)
emit_imt_argument (cfg, call, NULL, imt_arg);
MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
if (check_sp) {
int sp_reg;
sp_reg = mono_alloc_preg (cfg);
MONO_INST_NEW (cfg, ins, OP_GET_SP);
ins->dreg = sp_reg;
MONO_ADD_INS (cfg->cbb, ins);
/* Restore the stack so we don't crash when throwing the exception */
MONO_INST_NEW (cfg, ins, OP_SET_SP);
ins->sreg1 = cfg->stack_inbalance_var->dreg;
MONO_ADD_INS (cfg->cbb, ins);
MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, cfg->stack_inbalance_var->dreg, sp_reg);
MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ExecutionEngineException");
}
if (rgctx_arg)
set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
return (MonoInst*)call;
}
MonoInst*
mini_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *imt_arg, MonoInst *rgctx_arg)
// Historical version without gboolean tailcall parameter.
{
return mini_emit_calli_full (cfg, sig, args, addr, imt_arg, rgctx_arg, FALSE);
}
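/* Map a *_MEMBASE (vtable dispatch) call opcode to its direct-call counterpart, used when a virtual call can be dispatched statically. */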
static int
callvirt_to_call (int opcode)
{
switch (opcode) {
case OP_TAILCALL_MEMBASE:
return OP_TAILCALL;
case OP_CALL_MEMBASE:
return OP_CALL;
case OP_VOIDCALL_MEMBASE:
return OP_VOIDCALL;
case OP_FCALL_MEMBASE:
return OP_FCALL;
case OP_RCALL_MEMBASE:
return OP_RCALL;
case OP_VCALL_MEMBASE:
return OP_VCALL;
case OP_LCALL_MEMBASE:
return OP_LCALL;
default:
g_assert_not_reached ();
}
return -1;
}
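/*
 * Return whether a call to METHOD from the method being compiled might end up
 * executing in the interpreter in mixed mode; callers then emit an indirect
 * call through a function descriptor instead of a direct call.
 */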
static gboolean
can_enter_interp (MonoCompile *cfg, MonoMethod *method, gboolean virtual_)
{
if (method->wrapper_type)
return FALSE;
if (m_class_get_image (method->klass) == m_class_get_image (cfg->method->klass)) {
/* When using AOT profiling, the method might not be AOTed */
if (cfg->compile_aot && mono_aot_can_enter_interp (method))
return TRUE;
/* Virtual calls from corlib can go outside corlib */
if (!virtual_)
return FALSE;
}
/* See needs_extra_arg () in mini-llvm.c */
if (method->string_ctor)
return FALSE;
if (method->klass == mono_get_string_class () && (strstr (method->name, "memcpy") || strstr (method->name, "bzero")))
return FALSE;
/* Assume all calls outside the assembly can enter the interpreter */
return TRUE;
}
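/*
 * mini_emit_method_call_full:
 *
 *   Emit a call to METHOD, virtual if THIS_INS is set, devirtualizing it when
 * the target is nonvirtual or final, and handling delegate Invoke specially.
 */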
MonoInst*
mini_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig, gboolean tailcall,
MonoInst **args, MonoInst *this_ins, MonoInst *imt_arg, MonoInst *rgctx_arg)
{
gboolean virtual_ = this_ins != NULL;
MonoCallInst *call;
int rgctx_reg = 0;
gboolean need_unbox_trampoline;
if (!sig)
sig = mono_method_signature_internal (method);
if (rgctx_arg) {
rgctx_reg = mono_alloc_preg (cfg);
MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
}
if (method->string_ctor) {
/* Create the real signature */
/* FIXME: Cache these */
MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
ctor_sig->ret = m_class_get_byval_arg (mono_defaults.string_class);
sig = ctor_sig;
}
mini_method_check_context_used (cfg, method);
if (cfg->llvm_only && virtual_ && (method->flags & METHOD_ATTRIBUTE_VIRTUAL))
return mini_emit_llvmonly_virtual_call (cfg, method, sig, 0, args);
if (cfg->llvm_only && cfg->interp && !virtual_ && !tailcall && can_enter_interp (cfg, method, FALSE)) {
MonoInst *ftndesc = mini_emit_get_rgctx_method (cfg, -1, method, MONO_RGCTX_INFO_METHOD_FTNDESC);
/* Need wrappers for this signature to be able to enter interpreter */
cfg->interp_in_signatures = g_slist_prepend_mempool (cfg->mempool, cfg->interp_in_signatures, sig);
/* This call might need to enter the interpreter so make it indirect */
return mini_emit_llvmonly_calli (cfg, sig, args, ftndesc);
}
need_unbox_trampoline = method->klass == mono_defaults.object_class || mono_class_is_interface (method->klass);
call = mini_emit_call_args (cfg, sig, args, FALSE, virtual_, tailcall, rgctx_arg ? TRUE : FALSE, need_unbox_trampoline, method);
call->method = method;
call->inst.flags |= MONO_INST_HAS_METHOD;
call->inst.inst_left = this_ins;
// FIXME This has already been read in amd64 parameter construction.
// Fixing it generates incorrect code. CEE_JMP needs attention.
call->tailcall = tailcall;
if (virtual_) {
int vtable_reg, slot_reg, this_reg;
int offset;
this_reg = this_ins->dreg;
if (!cfg->llvm_only && (m_class_get_parent (method->klass) == mono_defaults.multicastdelegate_class) && !strcmp (method->name, "Invoke")) {
MonoInst *dummy_use;
MONO_EMIT_NULL_CHECK (cfg, this_reg, FALSE);
/* Make a call to delegate->invoke_impl */
call->inst.inst_basereg = this_reg;
call->inst.inst_offset = MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl);
MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
/* We must emit a dummy use here because the delegate trampoline will
replace the 'this' argument with the delegate target making this activation
no longer a root for the delegate.
This is an issue for delegates that target collectible code such as dynamic
methods of GC'able assemblies.
For a test case look into #667921.
FIXME: a dummy use is not the best way to do it as the local register allocator
will put it on a caller-save register and spill it around the call.
Ideally, we would either put it on a callee-save register or only do the store part.
*/
EMIT_NEW_DUMMY_USE (cfg, dummy_use, args [0]);
return (MonoInst*)call;
}
if ((!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
(MONO_METHOD_IS_FINAL (method)))) {
/*
* the method is not virtual, so we just need to ensure 'this' is not null
* and then we can call the method directly.
*/
virtual_ = FALSE;
} else if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
/*
* the method is virtual, but we can statically dispatch since either
* its class or the method itself is sealed.
* But first we need to ensure 'this' is not a null reference.
*/
virtual_ = FALSE;
}
if (!virtual_) {
if (!method->string_ctor)
MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
}
if (!virtual_ && cfg->llvm_only && cfg->interp && !tailcall && can_enter_interp (cfg, method, FALSE)) {
MonoInst *ftndesc = mini_emit_get_rgctx_method (cfg, -1, method, MONO_RGCTX_INFO_METHOD_FTNDESC);
/* Need wrappers for this signature to be able to enter interpreter */
cfg->interp_in_signatures = g_slist_prepend_mempool (cfg->mempool, cfg->interp_in_signatures, sig);
/* This call might need to enter the interpreter so make it indirect */
return mini_emit_llvmonly_calli (cfg, sig, args, ftndesc);
} else if (!virtual_) {
call->inst.opcode = callvirt_to_call (call->inst.opcode);
} else {
vtable_reg = alloc_preg (cfg);
MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
if (mono_class_is_interface (method->klass)) {
guint32 imt_slot = mono_method_get_imt_slot (method);
emit_imt_argument (cfg, call, call->method, imt_arg);
slot_reg = vtable_reg;
offset = ((gint32)imt_slot - MONO_IMT_SIZE) * TARGET_SIZEOF_VOID_P;
} else {
slot_reg = vtable_reg;
offset = MONO_STRUCT_OFFSET (MonoVTable, vtable) +
((mono_method_get_vtable_index (method)) * (TARGET_SIZEOF_VOID_P));
if (imt_arg) {
g_assert (mono_method_signature_internal (method)->generic_param_count);
emit_imt_argument (cfg, call, call->method, imt_arg);
}
}
call->inst.sreg1 = slot_reg;
call->inst.inst_offset = offset;
call->is_virtual = TRUE;
}
}
MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
if (rgctx_arg)
set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
return (MonoInst*)call;
}
MonoInst*
mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this_ins)
{
return mini_emit_method_call_full (cfg, method, mono_method_signature_internal (method), FALSE, args, this_ins, NULL, NULL);
}
static
MonoInst*
mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
MonoInst **args)
{
MonoCallInst *call;
g_assert (sig);
call = mini_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE, FALSE, FALSE, NULL);
call->fptr = func;
MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
return (MonoInst*)call;
}
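/* Emit a call to the JIT icall identified by JIT_ICALL_ID, going through its wrapper. */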
MonoInst*
mono_emit_jit_icall_id (MonoCompile *cfg, MonoJitICallId jit_icall_id, MonoInst **args)
{
MonoJitICallInfo *info = mono_find_jit_icall_info (jit_icall_id);
MonoCallInst *call = (MonoCallInst *)mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
call->jit_icall_id = jit_icall_id;
return (MonoInst*)call;
}
/*
* mini_emit_abs_call:
*
* Emit a call to the runtime function described by PATCH_TYPE and DATA.
*/
MonoInst*
mini_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
MonoMethodSignature *sig, MonoInst **args)
{
MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
MonoInst *ins;
/*
* We pass ji as the call address, the PATCH_INFO_ABS resolving code will
* handle it.
* FIXME: Is the abs_patches hashtable avoidable?
* Such as by putting the patch info in the call instruction?
*/
if (cfg->abs_patches == NULL)
cfg->abs_patches = g_hash_table_new (NULL, NULL);
g_hash_table_insert (cfg->abs_patches, ji, ji);
ins = mono_emit_native_call (cfg, ji, sig, args);
((MonoCallInst*)ins)->fptr_is_patch = TRUE;
return ins;
}
MonoInst*
mini_emit_llvmonly_virtual_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, int context_used, MonoInst **sp)
{
static MonoMethodSignature *helper_sig_llvmonly_imt_trampoline = NULL;
MonoInst *icall_args [16];
MonoInst *call_target, *ins, *vtable_ins;
int arg_reg, this_reg, vtable_reg;
gboolean is_iface = mono_class_is_interface (cmethod->klass);
gboolean is_gsharedvt = cfg->gsharedvt && mini_is_gsharedvt_variable_signature (fsig);
gboolean variant_iface = FALSE;
guint32 slot;
int offset;
gboolean special_array_interface = m_class_is_array_special_interface (cmethod->klass);
if (cfg->interp && can_enter_interp (cfg, cmethod, TRUE)) {
/* Need wrappers for this signature to be able to enter interpreter */
cfg->interp_in_signatures = g_slist_prepend_mempool (cfg->mempool, cfg->interp_in_signatures, fsig);
if (m_class_is_delegate (cmethod->klass) && !strcmp (cmethod->name, "Invoke")) {
/* To support dynamically generated code, add a signature for the actual method called by the delegate as well. */
MonoMethodSignature *nothis_sig = mono_metadata_signature_dup_add_this (m_class_get_image (cmethod->klass), fsig, mono_get_object_class ());
cfg->interp_in_signatures = g_slist_prepend_mempool (cfg->mempool, cfg->interp_in_signatures, nothis_sig);
}
}
/*
* In llvm-only mode, vtables contain function descriptors instead of
* method addresses/trampolines.
*/
MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg, FALSE);
if (is_iface)
slot = mono_method_get_imt_slot (cmethod);
else
slot = mono_method_get_vtable_index (cmethod);
this_reg = sp [0]->dreg;
if (is_iface && mono_class_has_variant_generic_params (cmethod->klass))
variant_iface = TRUE;
if (!helper_sig_llvmonly_imt_trampoline) {
MonoMethodSignature *tmp = mono_icall_sig_ptr_ptr_ptr;
mono_memory_barrier ();
helper_sig_llvmonly_imt_trampoline = tmp;
}
if (!cfg->gsharedvt && (m_class_get_parent (cmethod->klass) == mono_defaults.multicastdelegate_class) && !strcmp (cmethod->name, "Invoke")) {
/* Delegate invokes */
MONO_EMIT_NULL_CHECK (cfg, this_reg, FALSE);
/* Make a call to delegate->invoke_impl */
int invoke_reg = alloc_preg (cfg);
MONO_EMIT_NEW_LOAD_MEMBASE (cfg, invoke_reg, this_reg, MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl));
int addr_reg = alloc_preg (cfg);
int arg_reg = alloc_preg (cfg);
EMIT_NEW_LOAD_MEMBASE (cfg, call_target, OP_LOAD_MEMBASE, addr_reg, invoke_reg, 0);
MONO_EMIT_NEW_LOAD_MEMBASE (cfg, arg_reg, invoke_reg, TARGET_SIZEOF_VOID_P);
return mini_emit_extra_arg_calli (cfg, fsig, sp, arg_reg, call_target);
}
if (!is_gsharedvt && !fsig->generic_param_count && !is_iface) {
/*
* The simplest case, a normal virtual call.
*/
int slot_reg = alloc_preg (cfg);
int addr_reg = alloc_preg (cfg);
int arg_reg = alloc_preg (cfg);
MonoBasicBlock *non_null_bb;
vtable_reg = alloc_preg (cfg);
EMIT_NEW_LOAD_MEMBASE (cfg, vtable_ins, OP_LOAD_MEMBASE, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
offset = MONO_STRUCT_OFFSET (MonoVTable, vtable) + (slot * TARGET_SIZEOF_VOID_P);
/* Load the vtable slot, which contains a function descriptor. */
MONO_EMIT_NEW_LOAD_MEMBASE (cfg, slot_reg, vtable_reg, offset);
NEW_BBLOCK (cfg, non_null_bb);
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, slot_reg, 0);
cfg->cbb->last_ins->flags |= MONO_INST_LIKELY;
MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, non_null_bb);
/* Slow path */
// FIXME: Make the wrapper use the preserveall cconv
// FIXME: Use one icall per slot for small slot numbers ?
icall_args [0] = vtable_ins;
EMIT_NEW_ICONST (cfg, icall_args [1], slot);
/* Make the icall return the vtable slot value to save some code space */
ins = mono_emit_jit_icall (cfg, mini_llvmonly_init_vtable_slot, icall_args);
ins->dreg = slot_reg;
MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, non_null_bb);
/* Fastpath */
MONO_START_BB (cfg, non_null_bb);
/* Load the address + arg from the vtable slot */
EMIT_NEW_LOAD_MEMBASE (cfg, call_target, OP_LOAD_MEMBASE, addr_reg, slot_reg, 0);
MONO_EMIT_NEW_LOAD_MEMBASE (cfg, arg_reg, slot_reg, TARGET_SIZEOF_VOID_P);
return mini_emit_extra_arg_calli (cfg, fsig, sp, arg_reg, call_target);
}
if (!is_gsharedvt && !fsig->generic_param_count && is_iface && !variant_iface && !special_array_interface) {
/*
* A simple interface call
*
* We make a call through an imt slot to obtain the function descriptor we need to call.
* The imt slot contains a function descriptor for a runtime function + arg.
*/
int slot_reg = alloc_preg (cfg);
int addr_reg = alloc_preg (cfg);
int arg_reg = alloc_preg (cfg);
MonoInst *thunk_addr_ins, *thunk_arg_ins, *ftndesc_ins;
vtable_reg = alloc_preg (cfg);
EMIT_NEW_LOAD_MEMBASE (cfg, vtable_ins, OP_LOAD_MEMBASE, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
offset = ((gint32)slot - MONO_IMT_SIZE) * TARGET_SIZEOF_VOID_P;
/*
* The slot is already initialized when the vtable is created so there is no need
* to check it here.
*/
/* Load the imt slot, which contains a function descriptor. */
MONO_EMIT_NEW_LOAD_MEMBASE (cfg, slot_reg, vtable_reg, offset);
/* Load the address + arg of the imt thunk from the imt slot */
EMIT_NEW_LOAD_MEMBASE (cfg, thunk_addr_ins, OP_LOAD_MEMBASE, addr_reg, slot_reg, 0);
EMIT_NEW_LOAD_MEMBASE (cfg, thunk_arg_ins, OP_LOAD_MEMBASE, arg_reg, slot_reg, TARGET_SIZEOF_VOID_P);
/*
* IMT thunks in llvm-only mode are C functions which take an info argument
* plus the imt method and return the ftndesc to call.
*/
icall_args [0] = thunk_arg_ins;
icall_args [1] = mini_emit_get_rgctx_method (cfg, context_used,
cmethod, MONO_RGCTX_INFO_METHOD);
ftndesc_ins = mini_emit_calli (cfg, helper_sig_llvmonly_imt_trampoline, icall_args, thunk_addr_ins, NULL, NULL);
return mini_emit_llvmonly_calli (cfg, fsig, sp, ftndesc_ins);
}
if (!is_gsharedvt && (fsig->generic_param_count || variant_iface || special_array_interface)) {
/*
* This is similar to the interface case, the vtable slot points to an imt thunk which is
* dynamically extended as more instantiations are discovered.
* This handles generic virtual methods both on classes and interfaces.
*/
int slot_reg = alloc_preg (cfg);
int addr_reg = alloc_preg (cfg);
int arg_reg = alloc_preg (cfg);
int ftndesc_reg = alloc_preg (cfg);
MonoInst *thunk_addr_ins, *thunk_arg_ins, *ftndesc_ins;
MonoBasicBlock *slowpath_bb, *end_bb;
NEW_BBLOCK (cfg, slowpath_bb);
NEW_BBLOCK (cfg, end_bb);
vtable_reg = alloc_preg (cfg);
EMIT_NEW_LOAD_MEMBASE (cfg, vtable_ins, OP_LOAD_MEMBASE, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
if (is_iface)
offset = ((gint32)slot - MONO_IMT_SIZE) * TARGET_SIZEOF_VOID_P;
else
offset = MONO_STRUCT_OFFSET (MonoVTable, vtable) + (slot * TARGET_SIZEOF_VOID_P);
/* Load the slot, which contains a function descriptor. */
MONO_EMIT_NEW_LOAD_MEMBASE (cfg, slot_reg, vtable_reg, offset);
/* These slots are not initialized, so fall back to the slow path until they are initialized */
/* That happens when mono_method_add_generic_virtual_invocation () creates an IMT thunk */
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, slot_reg, 0);
MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, slowpath_bb);
/* Fastpath */
/* Same as with iface calls */
EMIT_NEW_LOAD_MEMBASE (cfg, thunk_addr_ins, OP_LOAD_MEMBASE, addr_reg, slot_reg, 0);
EMIT_NEW_LOAD_MEMBASE (cfg, thunk_arg_ins, OP_LOAD_MEMBASE, arg_reg, slot_reg, TARGET_SIZEOF_VOID_P);
icall_args [0] = thunk_arg_ins;
icall_args [1] = mini_emit_get_rgctx_method (cfg, context_used,
cmethod, MONO_RGCTX_INFO_METHOD);
ftndesc_ins = mini_emit_calli (cfg, helper_sig_llvmonly_imt_trampoline, icall_args, thunk_addr_ins, NULL, NULL);
ftndesc_ins->dreg = ftndesc_reg;
/*
* Unlike normal iface calls, these imt thunks can return NULL, e.g. when they are passed an instantiation
* they don't know about yet. Fall back to the slowpath in that case.
*/
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, ftndesc_reg, 0);
MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, slowpath_bb);
MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Slowpath */
MONO_START_BB (cfg, slowpath_bb);
icall_args [0] = vtable_ins;
EMIT_NEW_ICONST (cfg, icall_args [1], slot);
icall_args [2] = mini_emit_get_rgctx_method (cfg, context_used,
cmethod, MONO_RGCTX_INFO_METHOD);
if (is_iface)
ftndesc_ins = mono_emit_jit_icall (cfg, mini_llvmonly_resolve_generic_virtual_iface_call, icall_args);
else
ftndesc_ins = mono_emit_jit_icall (cfg, mini_llvmonly_resolve_generic_virtual_call, icall_args);
ftndesc_ins->dreg = ftndesc_reg;
MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Common case */
MONO_START_BB (cfg, end_bb);
return mini_emit_llvmonly_calli (cfg, fsig, sp, ftndesc_ins);
}
if (is_gsharedvt && !(is_iface || fsig->generic_param_count || variant_iface || special_array_interface)) {
MonoInst *ftndesc_ins;
/* Normal virtual call using a gsharedvt calling conv */
icall_args [0] = sp [0];
EMIT_NEW_ICONST (cfg, icall_args [1], slot);
ftndesc_ins = mono_emit_jit_icall (cfg, mini_llvmonly_resolve_vcall_gsharedvt_fast, icall_args);
return mini_emit_llvmonly_calli (cfg, fsig, sp, ftndesc_ins);
}
/*
* Non-optimized cases
*/
icall_args [0] = sp [0];
EMIT_NEW_ICONST (cfg, icall_args [1], slot);
icall_args [2] = mini_emit_get_rgctx_method (cfg, context_used,
cmethod, MONO_RGCTX_INFO_METHOD);
arg_reg = alloc_preg (cfg);
MONO_EMIT_NEW_PCONST (cfg, arg_reg, NULL);
EMIT_NEW_VARLOADA_VREG (cfg, icall_args [3], arg_reg, mono_get_int_type ());
g_assert (is_gsharedvt);
if (is_iface)
call_target = mono_emit_jit_icall (cfg, mini_llvmonly_resolve_iface_call_gsharedvt, icall_args);
else
call_target = mono_emit_jit_icall (cfg, mini_llvmonly_resolve_vcall_gsharedvt, icall_args);
/*
* Pass the extra argument even if the callee doesn't receive it; most
* calling conventions allow this.
*/
return mini_emit_extra_arg_calli (cfg, fsig, sp, arg_reg, call_target);
}
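/* Return a copy of SIG with one extra pointer parameter appended, which carries the hidden rgctx/extra argument. */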
static MonoMethodSignature*
sig_to_rgctx_sig (MonoMethodSignature *sig)
{
// FIXME: memory allocation
MonoMethodSignature *res;
int i;
res = (MonoMethodSignature *)g_malloc (MONO_SIZEOF_METHOD_SIGNATURE + (sig->param_count + 1) * sizeof (MonoType*));
memcpy (res, sig, MONO_SIZEOF_METHOD_SIGNATURE);
res->param_count = sig->param_count + 1;
for (i = 0; i < sig->param_count; ++i)
res->params [i] = sig->params [i];
res->params [sig->param_count] = mono_class_get_byref_type (mono_defaults.int_class);
return res;
}
/* Make an indirect call to FSIG passing an additional argument */
MonoInst*
mini_emit_extra_arg_calli (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **orig_args, int arg_reg, MonoInst *call_target)
{
MonoMethodSignature *csig;
MonoInst *args_buf [16];
MonoInst **args;
int i, pindex, tmp_reg;
/* Make a call with an rgctx/extra arg */
if (fsig->param_count + 2 < 16)
args = args_buf;
else
args = (MonoInst **)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (fsig->param_count + 2));
pindex = 0;
if (fsig->hasthis)
args [pindex ++] = orig_args [0];
for (i = 0; i < fsig->param_count; ++i)
args [pindex ++] = orig_args [fsig->hasthis + i];
tmp_reg = alloc_preg (cfg);
EMIT_NEW_UNALU (cfg, args [pindex], OP_MOVE, tmp_reg, arg_reg);
csig = sig_to_rgctx_sig (fsig);
return mini_emit_calli (cfg, csig, args, call_target, NULL, NULL);
}
/* Emit an indirect call to the function descriptor ADDR */
MonoInst*
mini_emit_llvmonly_calli (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, MonoInst *addr)
// FIXME no tailcall support
{
int addr_reg, arg_reg;
MonoInst *call_target;
g_assert (cfg->llvm_only);
/*
* addr points to a <addr, arg> pair, load both of them, and
* make a call to addr, passing arg as an extra arg.
*/
addr_reg = alloc_preg (cfg);
EMIT_NEW_LOAD_MEMBASE (cfg, call_target, OP_LOAD_MEMBASE, addr_reg, addr->dreg, 0);
arg_reg = alloc_preg (cfg);
MONO_EMIT_NEW_LOAD_MEMBASE (cfg, arg_reg, addr->dreg, TARGET_SIZEOF_VOID_P);
return mini_emit_extra_arg_calli (cfg, fsig, args, arg_reg, call_target);
}
#else
MONO_EMPTY_SOURCE_FILE (calls);
#endif
| /**
* \file
*/
#include <config.h>
#include <mono/utils/mono-compiler.h>
#ifndef DISABLE_JIT
#include "mini.h"
#include "ir-emit.h"
#include "mini-runtime.h"
#include "llvmonly-runtime.h"
#include "mini-llvm.h"
#include "jit-icalls.h"
#include "aot-compiler.h"
#include <mono/metadata/abi-details.h>
#include <mono/metadata/class-abi-details.h>
#include <mono/utils/mono-utils-debug.h>
#include "mono/metadata/icall-signatures.h"
static const gboolean debug_tailcall_break_compile = FALSE; // break in method_to_ir
static const gboolean debug_tailcall_break_run = FALSE; // insert breakpoint in generated code
MonoJumpInfoTarget
mono_call_to_patch (MonoCallInst *call)
{
MonoJumpInfoTarget patch;
MonoJitICallId jit_icall_id;
// This is similar to amd64 emit_call.
if (call->inst.flags & MONO_INST_HAS_METHOD) {
patch.type = MONO_PATCH_INFO_METHOD;
patch.target = call->method;
} else if ((jit_icall_id = call->jit_icall_id)) {
patch.type = MONO_PATCH_INFO_JIT_ICALL_ID;
patch.target = GUINT_TO_POINTER (jit_icall_id);
} else {
patch.type = MONO_PATCH_INFO_ABS;
patch.target = call->fptr;
}
return patch;
}
void
mono_call_add_patch_info (MonoCompile *cfg, MonoCallInst *call, int ip)
{
const MonoJumpInfoTarget patch = mono_call_to_patch (call);
mono_add_patch_info (cfg, ip, patch.type, patch.target);
}
void
mini_test_tailcall (MonoCompile *cfg, gboolean tailcall)
{
// A lot of tests say "tailcall" throughout their verbose output.
// "tailcalllog" is more searchable.
//
// Do not change "tailcalllog" here without changing other places, e.g. tests that search for it.
//
g_assertf (tailcall || !mini_debug_options.test_tailcall_require, "tailcalllog fail from %s", cfg->method->name);
mono_tailcall_print ("tailcalllog %s from %s\n", tailcall ? "success" : "fail", cfg->method->name);
}
void
mini_emit_tailcall_parameters (MonoCompile *cfg, MonoMethodSignature *sig)
{
// OP_TAILCALL_PARAMETER helps compute the size of code, in order
// to size branches around OP_TAILCALL_[REG,MEMBASE].
//
// The actual bytes are output from OP_TAILCALL_[REG,MEMBASE].
// OP_TAILCALL_PARAMETER is an overestimate because typically
// many parameters are in registers.
const int n = sig->param_count + (sig->hasthis ? 1 : 0);
for (int i = 0; i < n; ++i) {
MonoInst *ins;
MONO_INST_NEW (cfg, ins, OP_TAILCALL_PARAMETER);
MONO_ADD_INS (cfg->cbb, ins);
}
}
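/* Select the call opcode variant matching the return type: plain for direct calls, _REG for calli, _MEMBASE for virtual calls. */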
static int
ret_type_to_call_opcode (MonoCompile *cfg, MonoType *type, int calli, int virt)
{
handle_enum:
type = mini_get_underlying_type (type);
switch (type->type) {
case MONO_TYPE_VOID:
return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALL_MEMBASE: OP_VOIDCALL;
case MONO_TYPE_I1:
case MONO_TYPE_U1:
case MONO_TYPE_I2:
case MONO_TYPE_U2:
case MONO_TYPE_I4:
case MONO_TYPE_U4:
return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
case MONO_TYPE_I:
case MONO_TYPE_U:
case MONO_TYPE_PTR:
case MONO_TYPE_FNPTR:
return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
case MONO_TYPE_CLASS:
case MONO_TYPE_STRING:
case MONO_TYPE_OBJECT:
case MONO_TYPE_SZARRAY:
case MONO_TYPE_ARRAY:
return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
case MONO_TYPE_I8:
case MONO_TYPE_U8:
return calli? OP_LCALL_REG: virt? OP_LCALL_MEMBASE: OP_LCALL;
case MONO_TYPE_R4:
if (cfg->r4fp)
return calli? OP_RCALL_REG: virt? OP_RCALL_MEMBASE: OP_RCALL;
else
return calli? OP_FCALL_REG: virt? OP_FCALL_MEMBASE: OP_FCALL;
case MONO_TYPE_R8:
return calli? OP_FCALL_REG: virt? OP_FCALL_MEMBASE: OP_FCALL;
case MONO_TYPE_VALUETYPE:
if (m_class_is_enumtype (type->data.klass)) {
type = mono_class_enum_basetype_internal (type->data.klass);
goto handle_enum;
} else
return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
case MONO_TYPE_TYPEDBYREF:
return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
case MONO_TYPE_GENERICINST:
type = m_class_get_byval_arg (type->data.generic_class->container_class);
goto handle_enum;
case MONO_TYPE_VAR:
case MONO_TYPE_MVAR:
/* gsharedvt */
return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
default:
g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
}
return -1;
}
MonoCallInst *
mini_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
MonoInst **args, gboolean calli, gboolean virtual_, gboolean tailcall,
gboolean rgctx, gboolean unbox_trampoline, MonoMethod *target)
{
MonoType *sig_ret;
MonoCallInst *call;
cfg->has_calls = TRUE;
if (tailcall && cfg->llvm_only) {
// FIXME tailcall should not be changed this late.
// FIXME It really should not be changed due to llvm_only.
// Accuracy is presently available via MONO_IS_TAILCALL_OPCODE (call).
tailcall = FALSE;
mono_tailcall_print ("losing tailcall in %s due to llvm_only\n", cfg->method->name);
mini_test_tailcall (cfg, FALSE);
}
if (tailcall && (debug_tailcall_break_compile || debug_tailcall_break_run)
&& mono_is_usermode_native_debugger_present ()) {
if (debug_tailcall_break_compile)
G_BREAKPOINT ();
if (tailcall && debug_tailcall_break_run) { // Can change tailcall in debugger.
MonoInst *brk;
MONO_INST_NEW (cfg, brk, OP_BREAK);
MONO_ADD_INS (cfg->cbb, brk);
}
}
if (tailcall) {
mini_profiler_emit_tail_call (cfg, target);
mini_emit_tailcall_parameters (cfg, sig);
MONO_INST_NEW_CALL (cfg, call, calli ? OP_TAILCALL_REG : virtual_ ? OP_TAILCALL_MEMBASE : OP_TAILCALL);
} else
MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (cfg, sig->ret, calli, virtual_));
call->args = args;
call->signature = sig;
call->rgctx_reg = rgctx;
sig_ret = mini_get_underlying_type (sig->ret);
mini_type_to_eval_stack_type ((cfg), sig_ret, &call->inst);
if (tailcall) {
if (mini_type_is_vtype (sig_ret)) {
call->vret_var = cfg->vret_addr;
//g_assert_not_reached ();
}
} else if (mini_type_is_vtype (sig_ret)) {
MonoInst *temp = mono_compile_create_var (cfg, sig_ret, OP_LOCAL);
MonoInst *loada;
temp->backend.is_pinvoke = sig->pinvoke && !sig->marshalling_disabled;
/*
* We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
* address of the return value, to increase optimization opportunities.
* Before vtype decomposition, the dreg of the call ins itself represents the
* fact that the call modifies the return value. After decomposition, the call will
* be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
* will be transformed into an LDADDR.
*/
MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
loada->dreg = alloc_preg (cfg);
loada->inst_p0 = temp;
/* We reference the call too since call->dreg could change during optimization */
loada->inst_p1 = call;
MONO_ADD_INS (cfg->cbb, loada);
call->inst.dreg = temp->dreg;
call->vret_var = loada;
} else if (!MONO_TYPE_IS_VOID (sig_ret))
call->inst.dreg = alloc_dreg (cfg, (MonoStackType)call->inst.type);
#ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
if (COMPILE_SOFT_FLOAT (cfg)) {
/*
* If the call has a float argument, we would need to do an r8->r4 conversion using
* an icall, but that cannot be done during the call sequence since it would clobber
* the call registers + the stack. So we do it before emitting the call.
*/
for (int i = 0; i < sig->param_count + sig->hasthis; ++i) {
MonoType *t;
MonoInst *in = call->args [i];
if (i >= sig->hasthis)
t = sig->params [i - sig->hasthis];
else
t = mono_get_int_type ();
t = mono_type_get_underlying_type (t);
if (!m_type_is_byref (t) && t->type == MONO_TYPE_R4) {
MonoInst *iargs [1];
MonoInst *conv;
iargs [0] = in;
conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
/* The result will be in an int vreg */
call->args [i] = conv;
}
}
}
#endif
call->need_unbox_trampoline = unbox_trampoline;
#ifdef ENABLE_LLVM
if (COMPILE_LLVM (cfg))
mono_llvm_emit_call (cfg, call);
else
mono_arch_emit_call (cfg, call);
#else
mono_arch_emit_call (cfg, call);
#endif
cfg->param_area = MAX (cfg->param_area, call->stack_usage);
cfg->flags |= MONO_CFG_HAS_CALLS;
return call;
}
gboolean
mini_should_check_stack_pointer (MonoCompile *cfg)
{
// This logic is shared by mini_emit_calli_full and is_supported_tailcall,
// in order to compute tailcall_supported earlier. Alternatively it could be passed
// out from mini_emit_calli_full -- provided it has not been copied around
// or had decisions made based on it in the meantime.
WrapperInfo *info;
return cfg->check_pinvoke_callconv &&
cfg->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE &&
((info = mono_marshal_get_wrapper_info (cfg->method))) &&
info->subtype == WRAPPER_SUBTYPE_PINVOKE;
}
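/* Mark CALL as passing the rgctx argument in RGCTX_REG, wired to the architecture's MONO_ARCH_RGCTX_REG. */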
static void
set_rgctx_arg (MonoCompile *cfg, MonoCallInst *call, int rgctx_reg, MonoInst *rgctx_arg)
{
mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
cfg->uses_rgctx_reg = TRUE;
call->rgctx_reg = TRUE;
#ifdef ENABLE_LLVM
call->rgctx_arg_reg = rgctx_reg;
#endif
}
/* Either METHOD or IMT_ARG needs to be set */
static void
emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoMethod *method, MonoInst *imt_arg)
{
int method_reg;
g_assert (method || imt_arg);
if (COMPILE_LLVM (cfg)) {
if (imt_arg) {
method_reg = alloc_preg (cfg);
MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
} else {
MonoInst *ins = mini_emit_runtime_constant (cfg, MONO_PATCH_INFO_METHODCONST, method);
method_reg = ins->dreg;
}
#ifdef ENABLE_LLVM
call->imt_arg_reg = method_reg;
#endif
mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
return;
}
if (imt_arg) {
method_reg = alloc_preg (cfg);
MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
} else {
MonoInst *ins = mini_emit_runtime_constant (cfg, MONO_PATCH_INFO_METHODCONST, method);
method_reg = ins->dreg;
}
mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
}
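/*
 * mini_emit_calli_full:
 *
 *   Emit an indirect call to ADDR with signature SIG, optionally passing an
 * imt or rgctx argument, and optionally as a tailcall.
 */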
MonoInst*
mini_emit_calli_full (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr,
MonoInst *imt_arg, MonoInst *rgctx_arg, gboolean tailcall)
{
MonoCallInst *call;
MonoInst *ins;
int rgctx_reg = -1;
g_assert (!rgctx_arg || !imt_arg);
if (rgctx_arg) {
rgctx_reg = mono_alloc_preg (cfg);
MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
}
const gboolean check_sp = mini_should_check_stack_pointer (cfg);
// Checking the stack pointer requires running code after the function call, which prevents tailcalls.
// The caller needs to have decided that earlier.
g_assert (!check_sp || !tailcall);
if (check_sp) {
if (!cfg->stack_inbalance_var)
cfg->stack_inbalance_var = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL);
MONO_INST_NEW (cfg, ins, OP_GET_SP);
ins->dreg = cfg->stack_inbalance_var->dreg;
MONO_ADD_INS (cfg->cbb, ins);
}
call = mini_emit_call_args (cfg, sig, args, TRUE, FALSE, tailcall, rgctx_arg ? TRUE : FALSE, FALSE, NULL);
call->inst.sreg1 = addr->dreg;
if (imt_arg)
emit_imt_argument (cfg, call, NULL, imt_arg);
MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
if (check_sp) {
int sp_reg;
sp_reg = mono_alloc_preg (cfg);
MONO_INST_NEW (cfg, ins, OP_GET_SP);
ins->dreg = sp_reg;
MONO_ADD_INS (cfg->cbb, ins);
/* Restore the stack so we don't crash when throwing the exception */
MONO_INST_NEW (cfg, ins, OP_SET_SP);
ins->sreg1 = cfg->stack_inbalance_var->dreg;
MONO_ADD_INS (cfg->cbb, ins);
MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, cfg->stack_inbalance_var->dreg, sp_reg);
MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ExecutionEngineException");
}
if (rgctx_arg)
set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
return (MonoInst*)call;
}
MonoInst*
mini_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *imt_arg, MonoInst *rgctx_arg)
// Historical version without gboolean tailcall parameter.
{
return mini_emit_calli_full (cfg, sig, args, addr, imt_arg, rgctx_arg, FALSE);
}
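/* Map a *_MEMBASE (vtable dispatch) call opcode to its direct-call counterpart, used when a virtual call can be dispatched statically. */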
static int
callvirt_to_call (int opcode)
{
switch (opcode) {
case OP_TAILCALL_MEMBASE:
return OP_TAILCALL;
case OP_CALL_MEMBASE:
return OP_CALL;
case OP_VOIDCALL_MEMBASE:
return OP_VOIDCALL;
case OP_FCALL_MEMBASE:
return OP_FCALL;
case OP_RCALL_MEMBASE:
return OP_RCALL;
case OP_VCALL_MEMBASE:
return OP_VCALL;
case OP_LCALL_MEMBASE:
return OP_LCALL;
default:
g_assert_not_reached ();
}
return -1;
}
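/*
 * Return whether a call to METHOD from the method being compiled might end up
 * executing in the interpreter in mixed mode; callers then emit an indirect
 * call through a function descriptor instead of a direct call.
 */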
static gboolean
can_enter_interp (MonoCompile *cfg, MonoMethod *method, gboolean virtual_)
{
if (method->wrapper_type)
return FALSE;
if (m_class_get_image (method->klass) == m_class_get_image (cfg->method->klass)) {
/* When using AOT profiling, the method might not be AOTed */
if (cfg->compile_aot && mono_aot_can_enter_interp (method))
return TRUE;
/* Virtual calls from corlib can go outside corlib */
if (!virtual_)
return FALSE;
}
/* See needs_extra_arg () in mini-llvm.c */
if (method->string_ctor)
return FALSE;
if (method->klass == mono_get_string_class () && (strstr (method->name, "memcpy") || strstr (method->name, "bzero")))
return FALSE;
/* Assume all calls outside the assembly can enter the interpreter */
return TRUE;
}
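/*
 * mini_emit_method_call_full:
 *
 *   Emit a call to METHOD, virtual if THIS_INS is set, devirtualizing it when
 * the target is nonvirtual or final, and handling delegate Invoke specially.
 */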
MonoInst*
mini_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig, gboolean tailcall,
MonoInst **args, MonoInst *this_ins, MonoInst *imt_arg, MonoInst *rgctx_arg)
{
gboolean virtual_ = this_ins != NULL;
MonoCallInst *call;
int rgctx_reg = 0;
gboolean need_unbox_trampoline;
if (!sig)
sig = mono_method_signature_internal (method);
if (rgctx_arg) {
rgctx_reg = mono_alloc_preg (cfg);
MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
}
if (method->string_ctor) {
/* Create the real signature */
/* FIXME: Cache these */
MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
ctor_sig->ret = m_class_get_byval_arg (mono_defaults.string_class);
sig = ctor_sig;
}
mini_method_check_context_used (cfg, method);
if (cfg->llvm_only && virtual_ && (method->flags & METHOD_ATTRIBUTE_VIRTUAL))
return mini_emit_llvmonly_virtual_call (cfg, method, sig, 0, args);
if (cfg->llvm_only && cfg->interp && !virtual_ && !tailcall && can_enter_interp (cfg, method, FALSE)) {
MonoInst *ftndesc = mini_emit_get_rgctx_method (cfg, -1, method, MONO_RGCTX_INFO_METHOD_FTNDESC);
/* Need wrappers for this signature to be able to enter interpreter */
cfg->interp_in_signatures = g_slist_prepend_mempool (cfg->mempool, cfg->interp_in_signatures, sig);
/* This call might need to enter the interpreter so make it indirect */
return mini_emit_llvmonly_calli (cfg, sig, args, ftndesc);
}
need_unbox_trampoline = method->klass == mono_defaults.object_class || mono_class_is_interface (method->klass);
call = mini_emit_call_args (cfg, sig, args, FALSE, virtual_, tailcall, rgctx_arg ? TRUE : FALSE, need_unbox_trampoline, method);
call->method = method;
call->inst.flags |= MONO_INST_HAS_METHOD;
call->inst.inst_left = this_ins;
// FIXME This has already been read in amd64 parameter construction.
// Fixing it generates incorrect code. CEE_JMP needs attention.
call->tailcall = tailcall;
if (virtual_) {
int vtable_reg, slot_reg, this_reg;
int offset;
this_reg = this_ins->dreg;
if (!cfg->llvm_only && (m_class_get_parent (method->klass) == mono_defaults.multicastdelegate_class) && !strcmp (method->name, "Invoke")) {
MonoInst *dummy_use;
MONO_EMIT_NULL_CHECK (cfg, this_reg, FALSE);
/* Make a call to delegate->invoke_impl */
call->inst.inst_basereg = this_reg;
call->inst.inst_offset = MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl);
MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
/* We must emit a dummy use here because the delegate trampoline will
replace the 'this' argument with the delegate target making this activation
no longer a root for the delegate.
This is an issue for delegates that target collectible code such as dynamic
methods of GC'able assemblies.
For a test case look into #667921.
FIXME: a dummy use is not the best way to do it as the local register allocator
will put it on a caller-save register and spill it around the call.
Ideally, we would either put it on a callee-save register or only do the store part.
*/
EMIT_NEW_DUMMY_USE (cfg, dummy_use, args [0]);
return (MonoInst*)call;
}
if ((!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
(MONO_METHOD_IS_FINAL (method)))) {
/*
* the method is not virtual, so we just need to ensure 'this' is not null
* and then we can call the method directly.
*/
virtual_ = FALSE;
} else if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
/*
* the method is virtual, but we can statically dispatch since either
* its class or the method itself is sealed.
* But first we need to ensure 'this' is not a null reference.
*/
virtual_ = FALSE;
}
if (!virtual_) {
if (!method->string_ctor)
MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
}
if (!virtual_ && cfg->llvm_only && cfg->interp && !tailcall && can_enter_interp (cfg, method, FALSE)) {
MonoInst *ftndesc = mini_emit_get_rgctx_method (cfg, -1, method, MONO_RGCTX_INFO_METHOD_FTNDESC);
/* Need wrappers for this signature to be able to enter interpreter */
cfg->interp_in_signatures = g_slist_prepend_mempool (cfg->mempool, cfg->interp_in_signatures, sig);
/* This call might need to enter the interpreter so make it indirect */
return mini_emit_llvmonly_calli (cfg, sig, args, ftndesc);
} else if (!virtual_) {
call->inst.opcode = callvirt_to_call (call->inst.opcode);
} else {
vtable_reg = alloc_preg (cfg);
MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
if (mono_class_is_interface (method->klass)) {
guint32 imt_slot = mono_method_get_imt_slot (method);
emit_imt_argument (cfg, call, call->method, imt_arg);
slot_reg = vtable_reg;
offset = ((gint32)imt_slot - MONO_IMT_SIZE) * TARGET_SIZEOF_VOID_P;
} else {
slot_reg = vtable_reg;
offset = MONO_STRUCT_OFFSET (MonoVTable, vtable) +
((mono_method_get_vtable_index (method)) * (TARGET_SIZEOF_VOID_P));
if (imt_arg) {
g_assert (mono_method_signature_internal (method)->generic_param_count);
emit_imt_argument (cfg, call, call->method, imt_arg);
}
}
call->inst.sreg1 = slot_reg;
call->inst.inst_offset = offset;
call->is_virtual = TRUE;
}
}
MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
if (rgctx_arg)
set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
return (MonoInst*)call;
}
MonoInst*
mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this_ins)
{
return mini_emit_method_call_full (cfg, method, mono_method_signature_internal (method), FALSE, args, this_ins, NULL, NULL);
}
static
MonoInst*
mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
MonoInst **args)
{
MonoCallInst *call;
g_assert (sig);
call = mini_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE, FALSE, FALSE, NULL);
call->fptr = func;
MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
return (MonoInst*)call;
}
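/* Emit a call to the JIT icall identified by JIT_ICALL_ID, going through its wrapper. */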
MonoInst*
mono_emit_jit_icall_id (MonoCompile *cfg, MonoJitICallId jit_icall_id, MonoInst **args)
{
MonoJitICallInfo *info = mono_find_jit_icall_info (jit_icall_id);
MonoCallInst *call = (MonoCallInst *)mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
call->jit_icall_id = jit_icall_id;
return (MonoInst*)call;
}
/*
* mini_emit_abs_call:
*
* Emit a call to the runtime function described by PATCH_TYPE and DATA.
*/
MonoInst*
mini_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
MonoMethodSignature *sig, MonoInst **args)
{
MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
MonoInst *ins;
/*
* We pass ji as the call address, the PATCH_INFO_ABS resolving code will
* handle it.
* FIXME: Is the abs_patches hashtable avoidable?
* Such as by putting the patch info in the call instruction?
*/
if (cfg->abs_patches == NULL)
cfg->abs_patches = g_hash_table_new (NULL, NULL);
g_hash_table_insert (cfg->abs_patches, ji, ji);
ins = mono_emit_native_call (cfg, ji, sig, args);
((MonoCallInst*)ins)->fptr_is_patch = TRUE;
return ins;
}
MonoInst*
mini_emit_llvmonly_virtual_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, int context_used, MonoInst **sp)
{
static MonoMethodSignature *helper_sig_llvmonly_imt_trampoline = NULL;
MonoInst *icall_args [16];
MonoInst *call_target, *ins, *vtable_ins;
int arg_reg, this_reg, vtable_reg;
gboolean is_iface = mono_class_is_interface (cmethod->klass);
gboolean is_gsharedvt = cfg->gsharedvt && mini_is_gsharedvt_variable_signature (fsig);
gboolean variant_iface = FALSE;
guint32 slot;
int offset;
gboolean special_array_interface = m_class_is_array_special_interface (cmethod->klass);
if (cfg->interp && can_enter_interp (cfg, cmethod, TRUE)) {
/* Need wrappers for this signature to be able to enter interpreter */
cfg->interp_in_signatures = g_slist_prepend_mempool (cfg->mempool, cfg->interp_in_signatures, fsig);
if (m_class_is_delegate (cmethod->klass) && !strcmp (cmethod->name, "Invoke")) {
/* To support dynamically generated code, add a signature for the actual method called by the delegate as well. */
MonoMethodSignature *nothis_sig = mono_metadata_signature_dup_add_this (m_class_get_image (cmethod->klass), fsig, mono_get_object_class ());
cfg->interp_in_signatures = g_slist_prepend_mempool (cfg->mempool, cfg->interp_in_signatures, nothis_sig);
}
}
/*
* In llvm-only mode, vtables contain function descriptors instead of
* method addresses/trampolines.
*/
MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg, FALSE);
if (is_iface)
slot = mono_method_get_imt_slot (cmethod);
else
slot = mono_method_get_vtable_index (cmethod);
this_reg = sp [0]->dreg;
if (is_iface && mono_class_has_variant_generic_params (cmethod->klass))
variant_iface = TRUE;
if (!helper_sig_llvmonly_imt_trampoline) {
MonoMethodSignature *tmp = mono_icall_sig_ptr_ptr_ptr;
mono_memory_barrier ();
helper_sig_llvmonly_imt_trampoline = tmp;
}
if (!cfg->gsharedvt && (m_class_get_parent (cmethod->klass) == mono_defaults.multicastdelegate_class) && !strcmp (cmethod->name, "Invoke")) {
/* Delegate invokes */
MONO_EMIT_NULL_CHECK (cfg, this_reg, FALSE);
/* Make a call to delegate->invoke_impl */
int invoke_reg = alloc_preg (cfg);
MONO_EMIT_NEW_LOAD_MEMBASE (cfg, invoke_reg, this_reg, MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl));
int addr_reg = alloc_preg (cfg);
int arg_reg = alloc_preg (cfg);
EMIT_NEW_LOAD_MEMBASE (cfg, call_target, OP_LOAD_MEMBASE, addr_reg, invoke_reg, 0);
MONO_EMIT_NEW_LOAD_MEMBASE (cfg, arg_reg, invoke_reg, TARGET_SIZEOF_VOID_P);
return mini_emit_extra_arg_calli (cfg, fsig, sp, arg_reg, call_target);
}
if (!is_gsharedvt && !fsig->generic_param_count && !is_iface) {
/*
* The simplest case, a normal virtual call.
*/
int slot_reg = alloc_preg (cfg);
int addr_reg = alloc_preg (cfg);
int arg_reg = alloc_preg (cfg);
MonoBasicBlock *non_null_bb;
vtable_reg = alloc_preg (cfg);
EMIT_NEW_LOAD_MEMBASE (cfg, vtable_ins, OP_LOAD_MEMBASE, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
offset = MONO_STRUCT_OFFSET (MonoVTable, vtable) + (slot * TARGET_SIZEOF_VOID_P);
/* Load the vtable slot, which contains a function descriptor. */
MONO_EMIT_NEW_LOAD_MEMBASE (cfg, slot_reg, vtable_reg, offset);
NEW_BBLOCK (cfg, non_null_bb);
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, slot_reg, 0);
cfg->cbb->last_ins->flags |= MONO_INST_LIKELY;
MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, non_null_bb);
/* Slow path */
// FIXME: Make the wrapper use the preserveall cconv
// FIXME: Use one icall per slot for small slot numbers ?
icall_args [0] = vtable_ins;
EMIT_NEW_ICONST (cfg, icall_args [1], slot);
/* Make the icall return the vtable slot value to save some code space */
ins = mono_emit_jit_icall (cfg, mini_llvmonly_init_vtable_slot, icall_args);
ins->dreg = slot_reg;
MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, non_null_bb);
/* Fastpath */
MONO_START_BB (cfg, non_null_bb);
/* Load the address + arg from the vtable slot */
EMIT_NEW_LOAD_MEMBASE (cfg, call_target, OP_LOAD_MEMBASE, addr_reg, slot_reg, 0);
MONO_EMIT_NEW_LOAD_MEMBASE (cfg, arg_reg, slot_reg, TARGET_SIZEOF_VOID_P);
return mini_emit_extra_arg_calli (cfg, fsig, sp, arg_reg, call_target);
}
if (!is_gsharedvt && !fsig->generic_param_count && is_iface && !variant_iface && !special_array_interface) {
/*
* A simple interface call
*
* We make a call through an imt slot to obtain the function descriptor we need to call.
* The imt slot contains a function descriptor for a runtime function + arg.
*/
int slot_reg = alloc_preg (cfg);
int addr_reg = alloc_preg (cfg);
int arg_reg = alloc_preg (cfg);
MonoInst *thunk_addr_ins, *thunk_arg_ins, *ftndesc_ins;
vtable_reg = alloc_preg (cfg);
EMIT_NEW_LOAD_MEMBASE (cfg, vtable_ins, OP_LOAD_MEMBASE, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
offset = ((gint32)slot - MONO_IMT_SIZE) * TARGET_SIZEOF_VOID_P;
/*
* The slot is already initialized when the vtable is created so there is no need
* to check it here.
*/
/* Load the imt slot, which contains a function descriptor. */
MONO_EMIT_NEW_LOAD_MEMBASE (cfg, slot_reg, vtable_reg, offset);
/* Load the address + arg of the imt thunk from the imt slot */
EMIT_NEW_LOAD_MEMBASE (cfg, thunk_addr_ins, OP_LOAD_MEMBASE, addr_reg, slot_reg, 0);
EMIT_NEW_LOAD_MEMBASE (cfg, thunk_arg_ins, OP_LOAD_MEMBASE, arg_reg, slot_reg, TARGET_SIZEOF_VOID_P);
/*
* IMT thunks in llvm-only mode are C functions which take an info argument
* plus the imt method and return the ftndesc to call.
*/
icall_args [0] = thunk_arg_ins;
icall_args [1] = mini_emit_get_rgctx_method (cfg, context_used,
cmethod, MONO_RGCTX_INFO_METHOD);
ftndesc_ins = mini_emit_calli (cfg, helper_sig_llvmonly_imt_trampoline, icall_args, thunk_addr_ins, NULL, NULL);
return mini_emit_llvmonly_calli (cfg, fsig, sp, ftndesc_ins);
}
if (!is_gsharedvt && (fsig->generic_param_count || variant_iface || special_array_interface)) {
/*
* This is similar to the interface case, the vtable slot points to an imt thunk which is
* dynamically extended as more instantiations are discovered.
* This handles generic virtual methods both on classes and interfaces.
*/
int slot_reg = alloc_preg (cfg);
int addr_reg = alloc_preg (cfg);
int arg_reg = alloc_preg (cfg);
int ftndesc_reg = alloc_preg (cfg);
MonoInst *thunk_addr_ins, *thunk_arg_ins, *ftndesc_ins;
MonoBasicBlock *slowpath_bb, *end_bb;
NEW_BBLOCK (cfg, slowpath_bb);
NEW_BBLOCK (cfg, end_bb);
vtable_reg = alloc_preg (cfg);
EMIT_NEW_LOAD_MEMBASE (cfg, vtable_ins, OP_LOAD_MEMBASE, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
if (is_iface)
offset = ((gint32)slot - MONO_IMT_SIZE) * TARGET_SIZEOF_VOID_P;
else
offset = MONO_STRUCT_OFFSET (MonoVTable, vtable) + (slot * TARGET_SIZEOF_VOID_P);
/* Load the slot, which contains a function descriptor. */
MONO_EMIT_NEW_LOAD_MEMBASE (cfg, slot_reg, vtable_reg, offset);
/* These slots are not initialized, so fall back to the slow path until they are initialized */
/* That happens when mono_method_add_generic_virtual_invocation () creates an IMT thunk */
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, slot_reg, 0);
MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, slowpath_bb);
/* Fastpath */
/* Same as with iface calls */
EMIT_NEW_LOAD_MEMBASE (cfg, thunk_addr_ins, OP_LOAD_MEMBASE, addr_reg, slot_reg, 0);
EMIT_NEW_LOAD_MEMBASE (cfg, thunk_arg_ins, OP_LOAD_MEMBASE, arg_reg, slot_reg, TARGET_SIZEOF_VOID_P);
icall_args [0] = thunk_arg_ins;
icall_args [1] = mini_emit_get_rgctx_method (cfg, context_used,
cmethod, MONO_RGCTX_INFO_METHOD);
ftndesc_ins = mini_emit_calli (cfg, helper_sig_llvmonly_imt_trampoline, icall_args, thunk_addr_ins, NULL, NULL);
ftndesc_ins->dreg = ftndesc_reg;
/*
* Unlike normal iface calls, these imt thunks can return NULL, e.g. when they are passed an instantiation
* they don't know about yet. Fall back to the slowpath in that case.
*/
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, ftndesc_reg, 0);
MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, slowpath_bb);
MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Slowpath */
MONO_START_BB (cfg, slowpath_bb);
icall_args [0] = vtable_ins;
EMIT_NEW_ICONST (cfg, icall_args [1], slot);
icall_args [2] = mini_emit_get_rgctx_method (cfg, context_used,
cmethod, MONO_RGCTX_INFO_METHOD);
if (is_iface)
ftndesc_ins = mono_emit_jit_icall (cfg, mini_llvmonly_resolve_generic_virtual_iface_call, icall_args);
else
ftndesc_ins = mono_emit_jit_icall (cfg, mini_llvmonly_resolve_generic_virtual_call, icall_args);
ftndesc_ins->dreg = ftndesc_reg;
MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Common case */
MONO_START_BB (cfg, end_bb);
return mini_emit_llvmonly_calli (cfg, fsig, sp, ftndesc_ins);
}
if (is_gsharedvt && !(is_iface || fsig->generic_param_count || variant_iface || special_array_interface)) {
MonoInst *ftndesc_ins;
/* Normal virtual call using a gsharedvt calling conv */
icall_args [0] = sp [0];
EMIT_NEW_ICONST (cfg, icall_args [1], slot);
ftndesc_ins = mono_emit_jit_icall (cfg, mini_llvmonly_resolve_vcall_gsharedvt_fast, icall_args);
return mini_emit_llvmonly_calli (cfg, fsig, sp, ftndesc_ins);
}
/*
* Non-optimized cases
*/
icall_args [0] = sp [0];
EMIT_NEW_ICONST (cfg, icall_args [1], slot);
icall_args [2] = mini_emit_get_rgctx_method (cfg, context_used,
cmethod, MONO_RGCTX_INFO_METHOD);
arg_reg = alloc_preg (cfg);
MONO_EMIT_NEW_PCONST (cfg, arg_reg, NULL);
EMIT_NEW_VARLOADA_VREG (cfg, icall_args [3], arg_reg, mono_get_int_type ());
g_assert (is_gsharedvt);
if (is_iface)
call_target = mono_emit_jit_icall (cfg, mini_llvmonly_resolve_iface_call_gsharedvt, icall_args);
else
call_target = mono_emit_jit_icall (cfg, mini_llvmonly_resolve_vcall_gsharedvt, icall_args);
/*
* Pass the extra argument even if the callee doesn't receive it; most
* calling conventions allow this.
*/
return mini_emit_extra_arg_calli (cfg, fsig, sp, arg_reg, call_target);
}
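/* Return a copy of SIG with one extra pointer parameter appended, which carries the hidden rgctx/extra argument. */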
static MonoMethodSignature*
sig_to_rgctx_sig (MonoMethodSignature *sig)
{
// FIXME: memory allocation
MonoMethodSignature *res;
int i;
res = (MonoMethodSignature *)g_malloc (MONO_SIZEOF_METHOD_SIGNATURE + (sig->param_count + 1) * sizeof (MonoType*));
memcpy (res, sig, MONO_SIZEOF_METHOD_SIGNATURE);
res->param_count = sig->param_count + 1;
for (i = 0; i < sig->param_count; ++i)
res->params [i] = sig->params [i];
res->params [sig->param_count] = mono_class_get_byref_type (mono_defaults.int_class);
return res;
}
/* Make an indirect call to FSIG passing an additional argument */
MonoInst*
mini_emit_extra_arg_calli (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **orig_args, int arg_reg, MonoInst *call_target)
{
MonoMethodSignature *csig;
MonoInst *args_buf [16];
MonoInst **args;
int i, pindex, tmp_reg;
/* Make a call with an rgctx/extra arg */
if (fsig->param_count + 2 < 16)
args = args_buf;
else
args = (MonoInst **)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (fsig->param_count + 2));
pindex = 0;
if (fsig->hasthis)
args [pindex ++] = orig_args [0];
for (i = 0; i < fsig->param_count; ++i)
args [pindex ++] = orig_args [fsig->hasthis + i];
tmp_reg = alloc_preg (cfg);
EMIT_NEW_UNALU (cfg, args [pindex], OP_MOVE, tmp_reg, arg_reg);
csig = sig_to_rgctx_sig (fsig);
return mini_emit_calli (cfg, csig, args, call_target, NULL, NULL);
}
/* Emit an indirect call to the function descriptor ADDR */
MonoInst*
mini_emit_llvmonly_calli (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, MonoInst *addr)
// FIXME no tailcall support
{
int addr_reg, arg_reg;
MonoInst *call_target;
g_assert (cfg->llvm_only);
/*
* addr points to a <addr, arg> pair, load both of them, and
* make a call to addr, passing arg as an extra arg.
*/
addr_reg = alloc_preg (cfg);
EMIT_NEW_LOAD_MEMBASE (cfg, call_target, OP_LOAD_MEMBASE, addr_reg, addr->dreg, 0);
arg_reg = alloc_preg (cfg);
MONO_EMIT_NEW_LOAD_MEMBASE (cfg, arg_reg, addr->dreg, TARGET_SIZEOF_VOID_P);
return mini_emit_extra_arg_calli (cfg, fsig, args, arg_reg, call_target);
}
#else
MONO_EMPTY_SOURCE_FILE (calls);
#endif
| 1 |
dotnet/runtime | 66,006 | Revert "[mono][jit] Remove support for -O=-float32, i.e. treating r4 values a…" | Reverts this as x86 still uses it.
| vargaz | 2022-03-01T15:15:53Z | 2022-03-01T20:09:47Z | af71404d9a3b38e63408af47cc847ac1a1fcf4e1 | 2dd232a53c38ac874b15fe504df275b660988294 | Revert "[mono][jit] Remove support for -O=-float32, i.e. treating r4 values a…". Reverts this as x86 still uses it.
| ./src/mono/mono/mini/driver.c | /**
* \file
* The new mono JIT compiler.
*
* Author:
* Paolo Molaro ([email protected])
* Dietmar Maurer ([email protected])
*
* (C) 2002-2003 Ximian, Inc.
* (C) 2003-2006 Novell, Inc.
* Licensed under the MIT license. See LICENSE file in the project root for full license information.
*/
#include <config.h>
#include <signal.h>
#if HAVE_SCHED_SETAFFINITY
#include <sched.h>
#endif
#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif
#include <mono/metadata/assembly-internals.h>
#include <mono/metadata/image-internals.h>
#include <mono/metadata/loader.h>
#include <mono/metadata/tabledefs.h>
#include <mono/metadata/class.h>
#include <mono/metadata/object.h>
#include <mono/metadata/exception.h>
#include <mono/metadata/opcodes.h>
#include <mono/metadata/mono-endian.h>
#include <mono/metadata/tokentype.h>
#include <mono/metadata/reflection-internals.h>
#include <mono/metadata/tabledefs.h>
#include <mono/metadata/threads.h>
#include <mono/metadata/marshal.h>
#include <mono/metadata/appdomain.h>
#include <mono/metadata/debug-helpers.h>
#include <mono/metadata/profiler-private.h>
#include <mono/metadata/mono-config.h>
#include <mono/metadata/environment.h>
#include <mono/metadata/environment-internals.h>
#include <mono/metadata/verify.h>
#include <mono/metadata/mono-debug.h>
#include <mono/metadata/gc-internals.h>
#include <mono/metadata/coree.h>
#include <mono/metadata/w32process.h>
#include "mono/utils/mono-counters.h"
#include "mono/utils/mono-hwcap.h"
#include "mono/utils/mono-logger-internals.h"
#include "mono/utils/options.h"
#include "mono/metadata/w32handle.h"
#include "mono/metadata/callspec.h"
#include "mono/metadata/custom-attrs-internals.h"
#include <mono/utils/w32subset.h>
#include <mono/metadata/components.h>
#include <mono/mini/debugger-agent-external.h>
#include "mini.h"
#include <mono/jit/jit.h>
#include "aot-compiler.h"
#include "aot-runtime.h"
#include "mini-runtime.h"
#include "interp/interp.h"
#include <string.h>
#include <ctype.h>
#include <locale.h>
#if TARGET_OSX
# include <sys/resource.h>
#endif
static FILE *mini_stats_fd;
static void mini_usage (void);
static void mono_runtime_set_execution_mode (int mode);
static void mono_runtime_set_execution_mode_full (int mode, gboolean override);
static int mono_jit_exec_internal (MonoDomain *domain, MonoAssembly *assembly, int argc, char *argv[]);
#ifdef HOST_WIN32
/* Need this to determine whether to detach console */
#include <mono/metadata/cil-coff.h>
/* This turns off command line globbing under win32 */
int _CRT_glob = 0;
#endif
typedef void (*OptFunc) (const char *p);
#undef OPTFLAG
// A single struct of inline strings is used, instead of an array of pointers, to optimize away a pointer and a relocation per string.
#define MSGSTRFIELD(line) MSGSTRFIELD1(line)
#define MSGSTRFIELD1(line) str##line
static const struct msgstr_t {
#define OPTFLAG(id,shift,name,desc) char MSGSTRFIELD(__LINE__) [sizeof (name) + sizeof (desc)];
#include "optflags-def.h"
#undef OPTFLAG
} opstr = {
#define OPTFLAG(id,shift,name,desc) name "\0" desc,
#include "optflags-def.h"
#undef OPTFLAG
};
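/*
 * For illustration (the actual entries live in optflags-def.h): an entry such as
 * OPTFLAG (PEEPHOLE, 0, "peephole", "Peephole postpass") expands to a char field
 * holding "peephole\0Peephole postpass", so optflag_get_name ()/optflag_get_desc ()
 * below index into one contiguous blob of strings.
 */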
static const gint16 opt_names [] = {
#define OPTFLAG(id,shift,name,desc) offsetof (struct msgstr_t, MSGSTRFIELD(__LINE__)),
#include "optflags-def.h"
#undef OPTFLAG
};
#define optflag_get_name(id) ((const char*)&opstr + opt_names [(id)])
#define optflag_get_desc(id) (optflag_get_name(id) + 1 + strlen (optflag_get_name(id)))
#define DEFAULT_OPTIMIZATIONS ( \
MONO_OPT_PEEPHOLE | \
MONO_OPT_CFOLD | \
MONO_OPT_INLINE | \
MONO_OPT_CONSPROP | \
MONO_OPT_COPYPROP | \
MONO_OPT_DEADCE | \
MONO_OPT_BRANCH | \
MONO_OPT_LINEARS | \
MONO_OPT_INTRINS | \
MONO_OPT_LOOP | \
MONO_OPT_EXCEPTION | \
MONO_OPT_CMOV | \
MONO_OPT_GSHARED | \
MONO_OPT_SIMD | \
MONO_OPT_ALIAS_ANALYSIS | \
MONO_OPT_AOT)
#define EXCLUDED_FROM_ALL (MONO_OPT_PRECOMP | MONO_OPT_UNSAFE | MONO_OPT_GSHAREDVT)
static char *mono_parse_options (const char *options, int *ref_argc, char **ref_argv [], gboolean prepend);
static char *mono_parse_response_options (const char *options, int *ref_argc, char **ref_argv [], gboolean prepend);
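/*
 * Parse the comma-separated optimization list P on top of the flags in OPT:
 * "name" enables a flag, "-name" disables it, and "all" enables everything
 * except EXCLUDED_FROM_ALL. If CPU_OPTS is set, architecture-specific
 * defaults and exclusions are applied first.
 */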
static guint32
parse_optimizations (guint32 opt, const char* p, gboolean cpu_opts)
{
guint32 exclude = 0;
const char *n;
int i, invert;
char **parts, **ptr;
/* Initialize the hwcap module if necessary. */
mono_hwcap_init ();
/* call out to cpu detection code here that sets the defaults ... */
if (cpu_opts) {
#ifndef MONO_CROSS_COMPILE
opt |= mono_arch_cpu_optimizations (&exclude);
opt &= ~exclude;
#endif
}
if (!p)
return opt;
parts = g_strsplit (p, ",", -1);
for (ptr = parts; ptr && *ptr; ptr ++) {
char *arg = *ptr;
char *p = arg;
if (*p == '-') {
p++;
invert = TRUE;
} else {
invert = FALSE;
}
for (i = 0; i < G_N_ELEMENTS (opt_names) && optflag_get_name (i); ++i) {
n = optflag_get_name (i);
if (!strcmp (p, n)) {
if (invert)
opt &= ~ (1 << i);
else
opt |= 1 << i;
break;
}
}
if (i == G_N_ELEMENTS (opt_names) || !optflag_get_name (i)) {
if (strncmp (p, "all", 3) == 0) {
if (invert)
opt = 0;
else
opt = ~(EXCLUDED_FROM_ALL | exclude);
} else {
fprintf (stderr, "Invalid optimization name `%s'\n", p);
exit (1);
}
}
g_free (arg);
}
g_free (parts);
return opt;
}
static gboolean
parse_debug_options (const char* p)
{
MonoDebugOptions *opt = mini_get_debug_options ();
opt->enabled = TRUE;
do {
if (!*p) {
fprintf (stderr, "Syntax error; expected debug option name\n");
return FALSE;
}
if (!strncmp (p, "casts", 5)) {
opt->better_cast_details = TRUE;
p += 5;
} else if (!strncmp (p, "mdb-optimizations", 17)) {
opt->mdb_optimizations = TRUE;
p += 17;
} else if (!strncmp (p, "ignore", 6)) {
opt->enabled = FALSE;
p += 6;
} else {
fprintf (stderr, "Invalid debug option `%s', use --help-debug for details\n", p);
return FALSE;
}
if (*p == ',') {
p++;
if (!*p) {
fprintf (stderr, "Syntax error; expected debug option name\n");
return FALSE;
}
}
} while (*p);
return TRUE;
}
typedef struct {
char name [6];
char desc [18];
MonoGraphOptions value;
} GraphName;
static const GraphName
graph_names [] = {
{"cfg", "Control Flow", MONO_GRAPH_CFG},
{"dtree", "Dominator Tree", MONO_GRAPH_DTREE},
{"code", "CFG showing code", MONO_GRAPH_CFG_CODE},
{"ssa", "CFG after SSA", MONO_GRAPH_CFG_SSA},
{"optc", "CFG after IR opts", MONO_GRAPH_CFG_OPTCODE}
};
static MonoGraphOptions
mono_parse_graph_options (const char* p)
{
const char *n;
int i, len;
for (i = 0; i < G_N_ELEMENTS (graph_names); ++i) {
n = graph_names [i].name;
len = strlen (n);
if (strncmp (p, n, len) == 0)
return graph_names [i].value;
}
fprintf (stderr, "Invalid graph name provided: %s\n", p);
exit (1);
}
/**
* mono_parse_default_optimizations:
*/
int
mono_parse_default_optimizations (const char* p)
{
guint32 opt;
opt = parse_optimizations (DEFAULT_OPTIMIZATIONS, p, TRUE);
return opt;
}
char*
mono_opt_descr (guint32 flags)
{
GString *str = g_string_new ("");
int i;
gboolean need_comma;
need_comma = FALSE;
for (i = 0; i < G_N_ELEMENTS (opt_names); ++i) {
if (flags & (1 << i) && optflag_get_name (i)) {
if (need_comma)
g_string_append_c (str, ',');
g_string_append (str, optflag_get_name (i));
need_comma = TRUE;
}
}
return g_string_free (str, FALSE);
}
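/*
 * Optimization flag combinations exercised by the JIT regression harness:
 * each test image is compiled and run once per entry, so a failure can be
 * attributed to a particular combination of passes.
 */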
static const guint32
opt_sets [] = {
0,
MONO_OPT_PEEPHOLE,
MONO_OPT_BRANCH,
MONO_OPT_CFOLD,
MONO_OPT_FCMOV,
MONO_OPT_ALIAS_ANALYSIS,
#ifdef MONO_ARCH_SIMD_INTRINSICS
MONO_OPT_SIMD | MONO_OPT_INTRINS,
MONO_OPT_SSE2,
MONO_OPT_SIMD | MONO_OPT_SSE2 | MONO_OPT_INTRINS,
#endif
MONO_OPT_BRANCH | MONO_OPT_PEEPHOLE | MONO_OPT_INTRINS,
MONO_OPT_BRANCH | MONO_OPT_PEEPHOLE | MONO_OPT_INTRINS | MONO_OPT_ALIAS_ANALYSIS,
MONO_OPT_BRANCH | MONO_OPT_PEEPHOLE | MONO_OPT_LINEARS,
MONO_OPT_BRANCH | MONO_OPT_PEEPHOLE | MONO_OPT_LINEARS | MONO_OPT_COPYPROP,
MONO_OPT_BRANCH | MONO_OPT_PEEPHOLE | MONO_OPT_LINEARS | MONO_OPT_CFOLD,
MONO_OPT_BRANCH | MONO_OPT_PEEPHOLE | MONO_OPT_LINEARS | MONO_OPT_COPYPROP | MONO_OPT_CONSPROP | MONO_OPT_DEADCE,
MONO_OPT_BRANCH | MONO_OPT_PEEPHOLE | MONO_OPT_LINEARS | MONO_OPT_COPYPROP | MONO_OPT_CONSPROP | MONO_OPT_DEADCE | MONO_OPT_ALIAS_ANALYSIS,
MONO_OPT_BRANCH | MONO_OPT_PEEPHOLE | MONO_OPT_LINEARS | MONO_OPT_COPYPROP | MONO_OPT_CONSPROP | MONO_OPT_DEADCE | MONO_OPT_LOOP | MONO_OPT_INLINE | MONO_OPT_INTRINS,
MONO_OPT_BRANCH | MONO_OPT_PEEPHOLE | MONO_OPT_LINEARS | MONO_OPT_COPYPROP | MONO_OPT_CONSPROP | MONO_OPT_DEADCE | MONO_OPT_LOOP | MONO_OPT_INLINE | MONO_OPT_INTRINS | MONO_OPT_TAILCALL,
MONO_OPT_BRANCH | MONO_OPT_PEEPHOLE | MONO_OPT_LINEARS | MONO_OPT_COPYPROP | MONO_OPT_CONSPROP | MONO_OPT_DEADCE | MONO_OPT_LOOP | MONO_OPT_INLINE | MONO_OPT_INTRINS | MONO_OPT_SSA,
MONO_OPT_BRANCH | MONO_OPT_PEEPHOLE | MONO_OPT_LINEARS | MONO_OPT_COPYPROP | MONO_OPT_CONSPROP | MONO_OPT_DEADCE | MONO_OPT_LOOP | MONO_OPT_INLINE | MONO_OPT_INTRINS | MONO_OPT_EXCEPTION,
MONO_OPT_BRANCH | MONO_OPT_PEEPHOLE | MONO_OPT_LINEARS | MONO_OPT_COPYPROP | MONO_OPT_CONSPROP | MONO_OPT_DEADCE | MONO_OPT_LOOP | MONO_OPT_INLINE | MONO_OPT_INTRINS | MONO_OPT_EXCEPTION | MONO_OPT_CMOV,
MONO_OPT_BRANCH | MONO_OPT_PEEPHOLE | MONO_OPT_LINEARS | MONO_OPT_COPYPROP | MONO_OPT_CONSPROP | MONO_OPT_DEADCE | MONO_OPT_LOOP | MONO_OPT_INLINE | MONO_OPT_INTRINS | MONO_OPT_EXCEPTION | MONO_OPT_ABCREM,
MONO_OPT_BRANCH | MONO_OPT_PEEPHOLE | MONO_OPT_LINEARS | MONO_OPT_COPYPROP | MONO_OPT_CONSPROP | MONO_OPT_DEADCE | MONO_OPT_LOOP | MONO_OPT_INLINE | MONO_OPT_INTRINS | MONO_OPT_ABCREM,
MONO_OPT_BRANCH | MONO_OPT_PEEPHOLE | MONO_OPT_COPYPROP | MONO_OPT_CONSPROP | MONO_OPT_DEADCE | MONO_OPT_LOOP | MONO_OPT_INLINE | MONO_OPT_INTRINS | MONO_OPT_EXCEPTION | MONO_OPT_CMOV,
DEFAULT_OPTIMIZATIONS,
};
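/* Interpreter optimization combinations tried by the interpreter regression harness. */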
static const guint32
interp_opt_sets [] = {
INTERP_OPT_NONE,
INTERP_OPT_INLINE,
INTERP_OPT_CPROP,
INTERP_OPT_SUPER_INSTRUCTIONS,
INTERP_OPT_INLINE | INTERP_OPT_CPROP,
INTERP_OPT_INLINE | INTERP_OPT_SUPER_INSTRUCTIONS,
INTERP_OPT_CPROP | INTERP_OPT_SUPER_INSTRUCTIONS,
INTERP_OPT_INLINE | INTERP_OPT_CPROP | INTERP_OPT_SUPER_INSTRUCTIONS | INTERP_OPT_BBLOCKS,
};
static const char* const
interp_opflags_names [] = {
"inline",
"cprop",
"super-insn",
"bblocks"
};
static const char*
interp_optflag_get_name (guint32 i)
{
g_assert (i < G_N_ELEMENTS (interp_opflags_names));
return interp_opflags_names [i];
}
static char*
interp_opt_descr (guint32 flags)
{
GString *str = g_string_new ("");
int i;
gboolean need_comma;
need_comma = FALSE;
for (i = 0; i < G_N_ELEMENTS (interp_opflags_names); ++i) {
if (flags & (1 << i) && interp_optflag_get_name (i)) {
if (need_comma)
g_string_append_c (str, ',');
g_string_append (str, interp_optflag_get_name (i));
need_comma = TRUE;
}
}
return g_string_free (str, FALSE);
}
typedef int (*TestMethod) (void);
#if 0
static void
domain_dump_native_code (MonoDomain *domain) {
// need to poke into the domain, move to metadata/domain.c
// need to empty jit_info_table and code_mp
}
#endif
static gboolean do_regression_retries;
static int regression_test_skip_index;
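/*
 * A regression test is any method named test_N, where N is the expected
 * return value. The REGRESSION_FILTER_METHOD environment variable narrows
 * the run to a single method, and a [Category] attribute such as
 * "!INTERPRETER" or "!FULLAOT" excludes a test from the matching configuration.
 */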
static gboolean
method_should_be_regression_tested (MonoMethod *method, gboolean interp)
{
ERROR_DECL (error);
if (strncmp (method->name, "test_", 5) != 0)
return FALSE;
static gboolean filter_method_init = FALSE;
static const char *filter_method = NULL;
if (!filter_method_init) {
filter_method = g_getenv ("REGRESSION_FILTER_METHOD");
filter_method_init = TRUE;
}
if (filter_method) {
const char *name = filter_method;
if ((strchr (name, '.') > name) || strchr (name, ':')) {
MonoMethodDesc *desc = mono_method_desc_new (name, TRUE);
gboolean res = mono_method_desc_full_match (desc, method);
mono_method_desc_free (desc);
return res;
} else {
return strcmp (method->name, name) == 0;
}
}
MonoCustomAttrInfo* ainfo = mono_custom_attrs_from_method_checked (method, error);
mono_error_cleanup (error);
if (!ainfo)
return TRUE;
int j;
for (j = 0; j < ainfo->num_attrs; ++j) {
MonoCustomAttrEntry *centry = &ainfo->attrs [j];
if (centry->ctor == NULL)
continue;
MonoClass *klass = centry->ctor->klass;
if (strcmp (m_class_get_name (klass), "CategoryAttribute") || mono_method_signature_internal (centry->ctor)->param_count != 1)
continue;
gpointer *typed_args, *named_args;
int num_named_args;
CattrNamedArg *arginfo;
mono_reflection_create_custom_attr_data_args_noalloc (
mono_defaults.corlib, centry->ctor, centry->data, centry->data_size,
&typed_args, &named_args, &num_named_args, &arginfo, error);
if (!is_ok (error))
continue;
const char *arg = (const char*)typed_args [0];
mono_metadata_decode_value (arg, &arg);
char *utf8_str = (char*)arg; // points into image memory, which is constant; must not be freed
g_free (typed_args);
g_free (named_args);
g_free (arginfo);
if (interp && !strcmp (utf8_str, "!INTERPRETER")) {
g_print ("skip %s...\n", method->name);
return FALSE;
}
#if HOST_WASM
if (!strcmp (utf8_str, "!WASM")) {
g_print ("skip %s...\n", method->name);
return FALSE;
}
#endif
if (mono_aot_mode == MONO_AOT_MODE_FULL && !strcmp (utf8_str, "!FULLAOT")) {
g_print ("skip %s...\n", method->name);
return FALSE;
}
if ((mono_aot_mode == MONO_AOT_MODE_INTERP_LLVMONLY || mono_aot_mode == MONO_AOT_MODE_LLVMONLY) && !strcmp (utf8_str, "!BITCODE")) {
g_print ("skip %s...\n", method->name);
return FALSE;
}
}
return TRUE;
}
static void
mini_regression_step (MonoImage *image, int verbose, int *total_run, int *total,
guint32 opt_flags, GTimer *timer)
{
int result, expected, failed, cfailed, run, code_size;
double elapsed, comp_time, start_time;
char *n;
int i;
mono_set_defaults (verbose, opt_flags);
n = mono_opt_descr (opt_flags);
g_print ("Test run: image=%s, opts=%s\n", mono_image_get_filename (image), n);
g_free (n);
cfailed = failed = run = code_size = 0;
comp_time = elapsed = 0.0;
int local_skip_index = 0;
MonoJitMemoryManager *jit_mm = get_default_jit_mm ();
g_hash_table_destroy (jit_mm->jit_trampoline_hash);
jit_mm->jit_trampoline_hash = g_hash_table_new (mono_aligned_addr_hash, NULL);
mono_internal_hash_table_destroy (&(jit_mm->jit_code_hash));
mono_jit_code_hash_init (&(jit_mm->jit_code_hash));
g_timer_start (timer);
if (mini_stats_fd)
fprintf (mini_stats_fd, "[");
for (i = 0; i < mono_image_get_table_rows (image, MONO_TABLE_METHOD); ++i) {
ERROR_DECL (error);
MonoMethod *method = mono_get_method_checked (image, MONO_TOKEN_METHOD_DEF | (i + 1), NULL, NULL, error);
if (!method) {
mono_error_cleanup (error); /* FIXME don't swallow the error */
continue;
}
if (method_should_be_regression_tested (method, FALSE)) {
MonoCompile *cfg = NULL;
TestMethod func = NULL;
expected = atoi (method->name + 5);
run++;
start_time = g_timer_elapsed (timer, NULL);
#ifdef DISABLE_JIT
#ifdef MONO_USE_AOT_COMPILER
ERROR_DECL (error);
func = (TestMethod)mono_aot_get_method (method, error);
mono_error_cleanup (error);
#else
g_error ("No JIT or AOT available, regression testing not possible!");
#endif
#else
comp_time -= start_time;
cfg = mini_method_compile (method, mono_get_optimizations_for_method (method, opt_flags), JIT_FLAG_RUN_CCTORS, 0, -1);
comp_time += g_timer_elapsed (timer, NULL);
if (cfg->exception_type == MONO_EXCEPTION_NONE) {
#ifdef MONO_USE_AOT_COMPILER
ERROR_DECL (error);
func = (TestMethod)mono_aot_get_method (method, error);
mono_error_cleanup (error);
if (!func) {
func = (TestMethod)MINI_ADDR_TO_FTNPTR (cfg->native_code);
}
#else
func = (TestMethod)(gpointer)cfg->native_code;
func = MINI_ADDR_TO_FTNPTR (func);
#endif
func = (TestMethod)mono_create_ftnptr ((gpointer)func);
}
#endif
if (func) {
if (do_regression_retries) {
++local_skip_index;
if (local_skip_index <= regression_test_skip_index)
continue;
++regression_test_skip_index;
}
if (verbose >= 2)
g_print ("Running '%s' ...\n", method->name);
#if HOST_WASM
// WASM AOT injects dummy args, so we must call with exact signatures
int (*func_2)(int) = (int (*)(int))(void*)func;
result = func_2 (-1);
#else
result = func ();
#endif
if (result != expected) {
failed++;
g_print ("Test '%s' failed result (got %d, expected %d).\n", method->name, result, expected);
}
if (cfg) {
code_size += cfg->code_len;
mono_destroy_compile (cfg);
}
} else {
cfailed++;
g_print ("Test '%s' failed compilation.\n", method->name);
}
if (mini_stats_fd)
fprintf (mini_stats_fd, "%f, ",
g_timer_elapsed (timer, NULL) - start_time);
}
}
if (mini_stats_fd)
fprintf (mini_stats_fd, "],\n");
g_timer_stop (timer);
elapsed = g_timer_elapsed (timer, NULL);
if (failed > 0 || cfailed > 0){
g_print ("Results: total tests: %d, failed: %d, cfailed: %d (pass: %.2f%%)\n",
run, failed, cfailed, 100.0*(run-failed-cfailed)/run);
} else {
g_print ("Results: total tests: %d, all pass \n", run);
}
g_print ("Elapsed time: %f secs (%f, %f), Code size: %d\n\n", elapsed,
elapsed - comp_time, comp_time, code_size);
*total += failed + cfailed;
*total_run += run;
}
static int
mini_regression (MonoImage *image, int verbose, int *total_run)
{
guint32 i, opt;
MonoMethod *method;
char *n;
GTimer *timer = g_timer_new ();
guint32 exclude = 0;
int total;
/* Note: mono_hwcap_init () called in mono_init () before we get here. */
mono_arch_cpu_optimizations (&exclude);
if (mini_stats_fd) {
fprintf (mini_stats_fd, "$stattitle = \'Mono Benchmark Results (various optimizations)\';\n");
fprintf (mini_stats_fd, "$graph->set_legend(qw(");
for (opt = 0; opt < G_N_ELEMENTS (opt_sets); opt++) {
guint32 opt_flags = opt_sets [opt];
n = mono_opt_descr (opt_flags);
if (!n [0])
n = (char *)"none";
if (opt)
fprintf (mini_stats_fd, " ");
fprintf (mini_stats_fd, "%s", n);
}
fprintf (mini_stats_fd, "));\n");
fprintf (mini_stats_fd, "@data = (\n");
fprintf (mini_stats_fd, "[");
}
/* load the metadata */
for (i = 0; i < mono_image_get_table_rows (image, MONO_TABLE_METHOD); ++i) {
ERROR_DECL (error);
method = mono_get_method_checked (image, MONO_TOKEN_METHOD_DEF | (i + 1), NULL, NULL, error);
if (!method) {
mono_error_cleanup (error);
continue;
}
mono_class_init_internal (method->klass);
if (!strncmp (method->name, "test_", 5) && mini_stats_fd) {
fprintf (mini_stats_fd, "\"%s\",", method->name);
}
}
if (mini_stats_fd)
fprintf (mini_stats_fd, "],\n");
total = 0;
*total_run = 0;
if (mono_do_single_method_regression) {
GSList *iter;
mini_regression_step (image, verbose, total_run, &total,
0, timer);
if (total)
return total;
g_print ("Single method regression: %d methods\n", g_slist_length (mono_single_method_list));
for (iter = mono_single_method_list; iter; iter = g_slist_next (iter)) {
char *method_name;
mono_current_single_method = (MonoMethod *)iter->data;
method_name = mono_method_full_name (mono_current_single_method, TRUE);
g_print ("Current single method: %s\n", method_name);
g_free (method_name);
mini_regression_step (image, verbose, total_run, &total,
0, timer);
if (total)
return total;
}
} else {
for (opt = 0; opt < G_N_ELEMENTS (opt_sets); ++opt) {
/* builtin-types.cs & aot-tests.cs need OPT_INTRINS enabled */
if (!strcmp ("builtin-types", image->assembly_name) || !strcmp ("aot-tests", image->assembly_name))
if (!(opt_sets [opt] & MONO_OPT_INTRINS))
continue;
// We are running in AOT-only mode, so it makes no sense to try multiple opt combinations
if ((mono_aot_mode == MONO_AOT_MODE_FULL || mono_aot_mode == MONO_AOT_MODE_LLVMONLY) && opt_sets [opt] != DEFAULT_OPTIMIZATIONS) {
continue;
}
mini_regression_step (image, verbose, total_run, &total,
opt_sets [opt] & ~exclude, timer);
}
}
if (mini_stats_fd) {
fprintf (mini_stats_fd, ");\n");
fflush (mini_stats_fd);
}
g_timer_destroy (timer);
return total;
}
static int
mini_regression_list (int verbose, int count, char *images [])
{
int i, total, total_run, run;
MonoAssembly *ass;
total_run = total = 0;
for (i = 0; i < count; ++i) {
MonoAssemblyOpenRequest req;
mono_assembly_request_prepare_open (&req, mono_alc_get_default ());
ass = mono_assembly_request_open (images [i], &req, NULL);
if (!ass) {
g_warning ("failed to load assembly: %s", images [i]);
continue;
}
total += mini_regression (mono_assembly_get_image_internal (ass), verbose, &run);
total_run += run;
}
if (total > 0){
g_print ("Overall results: tests: %d, failed: %d, opt combinations: %d (pass: %.2f%%)\n",
total_run, total, (int)G_N_ELEMENTS (opt_sets), 100.0*(total_run-total)/total_run);
} else {
g_print ("Overall results: tests: %d, 100%% pass, opt combinations: %d\n",
total_run, (int)G_N_ELEMENTS (opt_sets));
}
return total;
}
static void
interp_regression_step (MonoImage *image, int verbose, int *total_run, int *total, const guint32 *opt_flags, GTimer *timer)
{
int result, expected, failed, cfailed, run;
double elapsed, transform_time;
int i;
MonoObject *result_obj;
int local_skip_index = 0;
const char *n = NULL;
if (opt_flags) {
mini_get_interp_callbacks ()->set_optimizations (*opt_flags);
n = interp_opt_descr (*opt_flags);
} else {
n = mono_interp_opts_string;
}
g_print ("Test run: image=%s, opts=%s\n", mono_image_get_filename (image), n);
cfailed = failed = run = 0;
transform_time = elapsed = 0.0;
mini_get_interp_callbacks ()->invalidate_transformed ();
g_timer_start (timer);
for (i = 0; i < mono_image_get_table_rows (image, MONO_TABLE_METHOD); ++i) {
ERROR_DECL (error);
MonoMethod *method = mono_get_method_checked (image, MONO_TOKEN_METHOD_DEF | (i + 1), NULL, NULL, error);
if (!method) {
mono_error_cleanup (error); /* FIXME don't swallow the error */
continue;
}
if (method_should_be_regression_tested (method, TRUE)) {
ERROR_DECL (interp_error);
MonoObject *exc = NULL;
if (do_regression_retries) {
++local_skip_index;
if (local_skip_index <= regression_test_skip_index)
continue;
++regression_test_skip_index;
}
result_obj = mini_get_interp_callbacks ()->runtime_invoke (method, NULL, NULL, &exc, interp_error);
if (!is_ok (interp_error)) {
cfailed++;
g_print ("Test '%s' execution failed.\n", method->name);
} else if (exc != NULL) {
g_print ("Exception in Test '%s' occurred:\n", method->name);
mono_object_describe (exc);
run++;
failed++;
} else {
result = *(gint32 *) mono_object_unbox_internal (result_obj);
expected = atoi (method->name + 5); // FIXME: oh no.
run++;
if (result != expected) {
failed++;
g_print ("Test '%s' failed result (got %d, expected %d).\n", method->name, result, expected);
}
}
}
}
g_timer_stop (timer);
elapsed = g_timer_elapsed (timer, NULL);
if (failed > 0 || cfailed > 0){
g_print ("Results: total tests: %d, failed: %d, cfailed: %d (pass: %.2f%%)\n",
run, failed, cfailed, 100.0*(run-failed-cfailed)/run);
} else {
g_print ("Results: total tests: %d, all pass \n", run);
}
g_print ("Elapsed time: %f secs (%f, %f)\n\n", elapsed,
elapsed - transform_time, transform_time);
*total += failed + cfailed;
*total_run += run;
}
static int
interp_regression (MonoImage *image, int verbose, int *total_run)
{
MonoMethod *method;
GTimer *timer = g_timer_new ();
guint32 i;
int total;
/* load the metadata */
for (i = 0; i < mono_image_get_table_rows (image, MONO_TABLE_METHOD); ++i) {
ERROR_DECL (error);
method = mono_get_method_checked (image, MONO_TOKEN_METHOD_DEF | (i + 1), NULL, NULL, error);
if (!method) {
mono_error_cleanup (error);
continue;
}
mono_class_init_internal (method->klass);
}
total = 0;
*total_run = 0;
if (mono_interp_opts_string) {
/* explicit option requested */
interp_regression_step (image, verbose, total_run, &total, NULL, timer);
} else {
for (int opt = 0; opt < G_N_ELEMENTS (interp_opt_sets); ++opt)
interp_regression_step (image, verbose, total_run, &total, &interp_opt_sets [opt], timer);
}
g_timer_destroy (timer);
return total;
}
/* TODO: merge this code with the regression harness of the JIT */
static int
mono_interp_regression_list (int verbose, int count, char *images [])
{
int i, total, total_run, run;
total_run = total = 0;
for (i = 0; i < count; ++i) {
MonoAssemblyOpenRequest req;
mono_assembly_request_prepare_open (&req, mono_alc_get_default ());
MonoAssembly *ass = mono_assembly_request_open (images [i], &req, NULL);
if (!ass) {
g_warning ("failed to load assembly: %s", images [i]);
continue;
}
total += interp_regression (mono_assembly_get_image_internal (ass), verbose, &run);
total_run += run;
}
if (total > 0) {
g_print ("Overall results: tests: %d, failed: %d (pass: %.2f%%)\n", total_run, total, 100.0*(total_run-total)/total_run);
} else {
g_print ("Overall results: tests: %d, 100%% pass\n", total_run);
}
return total;
}
#ifdef MONO_JIT_INFO_TABLE_TEST
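/*
 * Randomized stress test for the JIT info table: a set of threads keeps
 * adding, removing and looking up fake MonoJitInfo entries in disjoint
 * address regions, asserting that every lookup that hits returns an entry
 * covering the probed address.
 */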
typedef struct _JitInfoData
{
guint start;
guint length;
MonoJitInfo *ji;
struct _JitInfoData *next;
} JitInfoData;
typedef struct
{
guint start;
guint length;
int num_datas;
JitInfoData *data;
} Region;
typedef struct
{
int num_datas;
int num_regions;
Region *regions;
int num_frees;
JitInfoData *frees;
} ThreadData;
static int num_threads;
static ThreadData *thread_datas;
static MonoDomain *test_domain;
static JitInfoData*
alloc_random_data (Region *region)
{
JitInfoData **data;
JitInfoData *prev;
guint prev_end;
guint next_start;
guint max_len;
JitInfoData *d;
int num_retries = 0;
int pos, i;
restart:
prev = NULL;
data = &region->data;
pos = random () % (region->num_datas + 1);
i = 0;
while (*data != NULL) {
if (i++ == pos)
break;
prev = *data;
data = &(*data)->next;
}
if (prev == NULL)
g_assert (*data == region->data);
else
g_assert (prev->next == *data);
if (prev == NULL)
prev_end = region->start;
else
prev_end = prev->start + prev->length;
if (*data == NULL)
next_start = region->start + region->length;
else
next_start = (*data)->start;
g_assert (prev_end <= next_start);
max_len = next_start - prev_end;
if (max_len < 128) {
if (++num_retries >= 10)
return NULL;
goto restart;
}
if (max_len > 1024)
max_len = 1024;
d = g_new0 (JitInfoData, 1);
d->start = prev_end + random () % (max_len / 2);
d->length = random () % MIN (max_len, next_start - d->start) + 1;
g_assert (d->start >= prev_end && d->start + d->length <= next_start);
d->ji = g_new0 (MonoJitInfo, 1);
d->ji->d.method = (MonoMethod*) 0xABadBabe;
d->ji->code_start = (gpointer)(gulong) d->start;
d->ji->code_size = d->length;
d->ji->cas_inited = 1; /* marks an allocated jit info */
d->next = *data;
*data = d;
++region->num_datas;
return d;
}
static JitInfoData**
choose_random_data (Region *region)
{
int n;
int i;
JitInfoData **d;
g_assert (region->num_datas > 0);
n = random () % region->num_datas;
for (d = &region->data, i = 0;
i < n;
d = &(*d)->next, ++i)
;
return d;
}
static Region*
choose_random_region (ThreadData *td)
{
return &td->regions [random () % td->num_regions];
}
static ThreadData*
choose_random_thread (void)
{
return &thread_datas [random () % num_threads];
}
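/*
 * Defer frees: entries are pushed onto a per-thread free list, and once
 * 1000 have accumulated the older half is released in bulk, giving
 * concurrent lookups a grace period before the memory disappears.
 */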
static void
free_jit_info_data (ThreadData *td, JitInfoData *free)
{
free->next = td->frees;
td->frees = free;
if (++td->num_frees >= 1000) {
int i;
for (i = 0; i < 500; ++i)
free = free->next;
while (free->next != NULL) {
JitInfoData *next = free->next->next;
//g_free (free->next->ji);
g_free (free->next);
free->next = next;
--td->num_frees;
}
}
}
#define NUM_THREADS 8
#define REGIONS_PER_THREAD 10
#define REGION_SIZE 0x10000
#define MAX_ADDR (REGION_SIZE * REGIONS_PER_THREAD * NUM_THREADS)
#define MODE_ALLOC 1
#define MODE_FREE 2
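/*
 * Each modifying thread oscillates between MODE_ALLOC (roughly 70% adds)
 * and MODE_FREE (roughly 70% removes) depending on how many entries it
 * owns; the other half of the threads only perform lookups.
 */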
static void
test_thread_func (gpointer void_arg)
{
ThreadData* td = (ThreadData*)void_arg;
int mode = MODE_ALLOC;
int i = 0;
gulong lookup_successes = 0, lookup_failures = 0;
int thread_num = (int)(td - thread_datas);
gboolean modify_thread = thread_num < NUM_THREADS / 2; /* only half of the threads modify the table */
for (;;) {
int alloc;
int lookup = 1;
if (td->num_datas == 0) {
lookup = 0;
alloc = 1;
} else if (modify_thread && random () % 1000 < 5) {
lookup = 0;
if (mode == MODE_ALLOC)
alloc = (random () % 100) < 70;
else if (mode == MODE_FREE)
alloc = (random () % 100) < 30;
}
if (lookup) {
/* modify threads sometimes look up their own jit infos */
if (modify_thread && random () % 10 < 5) {
Region *region = choose_random_region (td);
if (region->num_datas > 0) {
JitInfoData **data = choose_random_data (region);
guint pos = (*data)->start + random () % (*data)->length;
MonoJitInfo *ji;
ji = mono_jit_info_table_find_internal ((char*)(gsize)pos, TRUE, FALSE);
g_assert (ji->cas_inited);
g_assert ((*data)->ji == ji);
}
} else {
int pos = random () % MAX_ADDR;
char *addr = (char*)(uintptr_t)pos;
MonoJitInfo *ji;
ji = mono_jit_info_table_find_internal (addr, TRUE, FALSE);
/*
* FIXME: We are actually not allowed
* to do this. By the time we examine
* the ji another thread might already
* have removed it.
*/
if (ji != NULL) {
g_assert (addr >= (char*)ji->code_start && addr < (char*)ji->code_start + ji->code_size);
++lookup_successes;
} else
++lookup_failures;
}
} else if (alloc) {
JitInfoData *data = alloc_random_data (choose_random_region (td));
if (data != NULL) {
mono_jit_info_table_add (test_domain, data->ji);
++td->num_datas;
}
} else {
Region *region = choose_random_region (td);
if (region->num_datas > 0) {
JitInfoData **data = choose_random_data (region);
JitInfoData *free;
mono_jit_info_table_remove (test_domain, (*data)->ji);
//(*data)->ji->cas_inited = 0; /* marks a free jit info */
free = *data;
*data = (*data)->next;
free_jit_info_data (td, free);
--region->num_datas;
--td->num_datas;
}
}
if (++i % 100000 == 0) {
int j;
g_print ("num datas %d (%ld - %ld): %d", (int)(td - thread_datas),
lookup_successes, lookup_failures, td->num_datas);
for (j = 0; j < td->num_regions; ++j)
g_print (" %d", td->regions [j].num_datas);
g_print ("\n");
}
if (td->num_datas < 100)
mode = MODE_ALLOC;
else if (td->num_datas > 2000)
mode = MODE_FREE;
}
}
/*
static void
small_id_thread_func (gpointer arg)
{
MonoThread *thread = mono_thread_current ();
MonoThreadHazardPointers *hp = mono_hazard_pointer_get ();
g_print ("my small id is %d\n", (int)thread->small_id);
mono_hazard_pointer_clear (hp, 1);
sleep (3);
g_print ("done %d\n", (int)thread->small_id);
}
*/
static void
jit_info_table_test (MonoDomain *domain)
{
ERROR_DECL (error);
int i;
g_print ("testing jit_info_table\n");
num_threads = NUM_THREADS;
thread_datas = g_new0 (ThreadData, num_threads);
for (i = 0; i < num_threads; ++i) {
int j;
thread_datas [i].num_regions = REGIONS_PER_THREAD;
thread_datas [i].regions = g_new0 (Region, REGIONS_PER_THREAD);
for (j = 0; j < REGIONS_PER_THREAD; ++j) {
thread_datas [i].regions [j].start = (num_threads * j + i) * REGION_SIZE;
thread_datas [i].regions [j].length = REGION_SIZE;
}
}
test_domain = domain;
/*
for (i = 0; i < 72; ++i)
mono_thread_create (small_id_thread_func, NULL);
sleep (2);
*/
for (i = 0; i < num_threads; ++i) {
mono_thread_create_checked ((MonoThreadStart)test_thread_func, &thread_datas [i], error);
mono_error_assert_ok (error);
}
}
#endif
enum {
DO_BENCH,
DO_REGRESSION,
DO_SINGLE_METHOD_REGRESSION,
DO_COMPILE,
DO_EXEC,
DO_DRAW,
DO_DEBUGGER
};
typedef struct CompileAllThreadArgs {
MonoAssembly *ass;
int verbose;
guint32 opts;
guint32 recompilation_times;
} CompileAllThreadArgs;
static void
compile_all_methods_thread_main_inner (CompileAllThreadArgs *args)
{
MonoAssembly *ass = args->ass;
int verbose = args->verbose;
MonoImage *image = mono_assembly_get_image_internal (ass);
MonoMethod *method;
MonoCompile *cfg;
int i, count = 0, fail_count = 0;
for (i = 0; i < mono_image_get_table_rows (image, MONO_TABLE_METHOD); ++i) {
ERROR_DECL (error);
guint32 token = MONO_TOKEN_METHOD_DEF | (i + 1);
MonoMethodSignature *sig;
if (mono_metadata_has_generic_params (image, token))
continue;
method = mono_get_method_checked (image, token, NULL, NULL, error);
if (!method) {
mono_error_cleanup (error); /* FIXME don't swallow the error */
continue;
}
if ((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
(method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL) ||
(method->iflags & METHOD_IMPL_ATTRIBUTE_RUNTIME) ||
(method->flags & METHOD_ATTRIBUTE_ABSTRACT))
continue;
if (mono_class_is_gtd (method->klass))
continue;
sig = mono_method_signature_internal (method);
if (!sig) {
char * desc = mono_method_full_name (method, TRUE);
g_print ("Could not retrieve method signature for %s\n", desc);
g_free (desc);
fail_count ++;
continue;
}
if (sig->has_type_parameters)
continue;
count++;
if (verbose) {
char * desc = mono_method_full_name (method, TRUE);
g_print ("Compiling %d %s\n", count, desc);
g_free (desc);
}
if (mono_use_interpreter) {
mini_get_interp_callbacks ()->create_method_pointer (method, TRUE, error);
// FIXME There are a few failures due to DllNotFoundException related to System.Native
if (verbose && !is_ok (error))
g_print ("Compilation of %s failed\n", mono_method_full_name (method, TRUE));
} else {
cfg = mini_method_compile (method, mono_get_optimizations_for_method (method, args->opts), (JitFlags)JIT_FLAG_DISCARD_RESULTS, 0, -1);
if (cfg->exception_type != MONO_EXCEPTION_NONE) {
const char *msg = cfg->exception_message;
if (cfg->exception_type == MONO_EXCEPTION_MONO_ERROR)
msg = mono_error_get_message (cfg->error);
g_print ("Compilation of %s failed with exception '%s':\n", mono_method_full_name (cfg->method, TRUE), msg);
fail_count ++;
}
mono_destroy_compile (cfg);
}
}
if (fail_count)
exit (1);
}
static void
compile_all_methods_thread_main (gpointer void_args)
{
CompileAllThreadArgs *args = (CompileAllThreadArgs*)void_args;
guint32 i;
for (i = 0; i < args->recompilation_times; ++i)
compile_all_methods_thread_main_inner (args);
}
static void
compile_all_methods (MonoAssembly *ass, int verbose, guint32 opts, guint32 recompilation_times)
{
ERROR_DECL (error);
CompileAllThreadArgs args;
args.ass = ass;
args.verbose = verbose;
args.opts = opts;
args.recompilation_times = recompilation_times;
/*
* Need to create a mono thread since compilation might trigger
* running of managed code.
*/
mono_thread_create_checked ((MonoThreadStart)compile_all_methods_thread_main, &args, error);
mono_error_assert_ok (error);
mono_thread_manage_internal ();
}
/**
* mono_jit_exec:
* \param assembly reference to an assembly
* \param argc argument count
* \param argv argument vector
* Start execution of a program.
*/
int
mono_jit_exec (MonoDomain *domain, MonoAssembly *assembly, int argc, char *argv[])
{
int rv;
MONO_ENTER_GC_UNSAFE;
rv = mono_jit_exec_internal (domain, assembly, argc, argv);
MONO_EXIT_GC_UNSAFE;
return rv;
}
int
mono_jit_exec_internal (MonoDomain *domain, MonoAssembly *assembly, int argc, char *argv[])
{
MONO_REQ_GC_UNSAFE_MODE;
ERROR_DECL (error);
MonoImage *image = mono_assembly_get_image_internal (assembly);
// We need to ensure that any module cctor for this image
// is run *before* we invoke the entry point
// For more information, see https://blogs.msdn.microsoft.com/junfeng/2005/11/19/module-initializer-a-k-a-module-constructor/
//
// This is required in order for tools like Costura
// (https://github.com/Fody/Costura) to work properly, as they inject
// a module initializer which sets up event handlers (e.g. AssemblyResolve)
// that allow the main method to run properly
if (!mono_runtime_run_module_cctor(image, error)) {
g_print ("Failed to run module constructor due to %s\n", mono_error_get_message (error));
return 1;
}
MonoMethod *method;
guint32 entry = mono_image_get_entry_point (image);
if (!entry) {
g_print ("Assembly '%s' doesn't have an entry point.\n", mono_image_get_filename (image));
/* FIXME: remove this silly requirement. */
mono_environment_exitcode_set (1);
return 1;
}
method = mono_get_method_checked (image, entry, NULL, NULL, error);
if (method == NULL){
g_print ("The entry point method could not be loaded due to %s\n", mono_error_get_message (error));
mono_error_cleanup (error);
mono_environment_exitcode_set (1);
return 1;
}
if (mono_llvm_only) {
MonoObject *exc = NULL;
int res;
res = mono_runtime_try_run_main (method, argc, argv, &exc);
if (exc) {
mono_unhandled_exception_internal (exc);
mono_invoke_unhandled_exception_hook (exc);
g_assert_not_reached ();
}
return res;
} else {
int res = mono_runtime_run_main_checked (method, argc, argv, error);
if (!is_ok (error)) {
MonoException *ex = mono_error_convert_to_exception (error);
if (ex) {
mono_unhandled_exception_internal (&ex->object);
mono_invoke_unhandled_exception_hook (&ex->object);
g_assert_not_reached ();
}
}
return res;
}
}
typedef struct
{
MonoDomain *domain;
const char *file;
int argc;
char **argv;
guint32 opts;
char *aot_options;
} MainThreadArgs;
static void
main_thread_handler (gpointer user_data)
{
MainThreadArgs *main_args = (MainThreadArgs *)user_data;
MonoAssembly *assembly;
if (mono_compile_aot) {
int i, res;
gpointer *aot_state = NULL;
/* Treat the other arguments as assemblies to compile too */
for (i = 0; i < main_args->argc; ++i) {
assembly = mono_domain_assembly_open_internal (mono_alc_get_default (), main_args->argv [i]);
if (!assembly) {
fprintf (stderr, "Can not open image %s\n", main_args->argv [i]);
exit (1);
}
/* Check that the assembly loaded matches the filename */
{
MonoImageOpenStatus status;
MonoImage *img;
img = mono_image_open (main_args->argv [i], &status);
if (img && strcmp (img->name, assembly->image->name)) {
fprintf (stderr, "Error: Loaded assembly '%s' doesn't match original file name '%s'. Set MONO_PATH to the assembly's location.\n", assembly->image->name, img->name);
exit (1);
}
}
res = mono_compile_assembly (assembly, main_args->opts, main_args->aot_options, &aot_state);
if (res != 0) {
fprintf (stderr, "AOT of image %s failed.\n", main_args->argv [i]);
exit (1);
}
}
if (aot_state) {
res = mono_compile_deferred_assemblies (main_args->opts, main_args->aot_options, &aot_state);
if (res != 0) {
fprintf (stderr, "AOT of mode-specific deferred assemblies failed.\n");
exit (1);
}
}
} else {
assembly = mono_domain_assembly_open_internal (mono_alc_get_default (), main_args->file);
if (!assembly){
fprintf (stderr, "Can not open image %s\n", main_args->file);
exit (1);
}
/*
* This must be done in a thread managed by mono since it can invoke
* managed code.
*/
if (main_args->opts & MONO_OPT_PRECOMP)
mono_precompile_assemblies ();
mono_jit_exec (main_args->domain, assembly, main_args->argc, main_args->argv);
}
}
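/*
 * Load an agent assembly given as ASSEMBLY[:ARG] and invoke its entry point
 * with ARG (if any) before the main assembly runs. Returns 0 on success and
 * a non-zero exit code on failure.
 */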
static int
load_agent (MonoDomain *domain, char *desc)
{
ERROR_DECL (error);
char* col = strchr (desc, ':');
char *agent, *args;
MonoAssembly *agent_assembly;
MonoImage *image;
MonoMethod *method;
guint32 entry;
MonoArray *main_args;
gpointer pa [1];
MonoImageOpenStatus open_status;
if (col) {
agent = (char *)g_memdup (desc, col - desc + 1);
agent [col - desc] = '\0';
args = col + 1;
} else {
agent = g_strdup (desc);
args = NULL;
}
MonoAssemblyOpenRequest req;
mono_assembly_request_prepare_open (&req, mono_alc_get_default ());
agent_assembly = mono_assembly_request_open (agent, &req, &open_status);
if (!agent_assembly) {
fprintf (stderr, "Cannot open agent assembly '%s': %s.\n", agent, mono_image_strerror (open_status));
g_free (agent);
return 2;
}
/*
* Can't use mono_jit_exec (), as it sets things which might confuse the
* real Main method.
*/
image = mono_assembly_get_image_internal (agent_assembly);
entry = mono_image_get_entry_point (image);
if (!entry) {
g_print ("Assembly '%s' doesn't have an entry point.\n", mono_image_get_filename (image));
g_free (agent);
return 1;
}
method = mono_get_method_checked (image, entry, NULL, NULL, error);
if (method == NULL){
g_print ("The entry point method of assembly '%s' could not be loaded due to %s\n", agent, mono_error_get_message (error));
mono_error_cleanup (error);
g_free (agent);
return 1;
}
mono_thread_set_main (mono_thread_current ());
if (args) {
main_args = (MonoArray*)mono_array_new_checked (mono_defaults.string_class, 1, error);
if (main_args) {
MonoString *str = mono_string_new_checked (args, error);
if (str)
mono_array_set_internal (main_args, MonoString*, 0, str);
}
} else {
main_args = (MonoArray*)mono_array_new_checked (mono_defaults.string_class, 0, error);
}
if (!main_args) {
g_print ("Could not allocate array for main args of assembly '%s' due to %s\n", agent, mono_error_get_message (error));
mono_error_cleanup (error);
g_free (agent);
return 1;
}
pa [0] = main_args;
/* Pass NULL as 'exc' so unhandled exceptions abort the runtime */
mono_runtime_invoke_checked (method, NULL, pa, error);
if (!is_ok (error)) {
g_print ("The entry point method of assembly '%s' could not execute due to %s\n", agent, mono_error_get_message (error));
mono_error_cleanup (error);
g_free (agent);
return 1;
}
g_free (agent);
return 0;
}
static void
mini_usage_jitdeveloper (void)
{
int i;
fprintf (stdout,
"Runtime and JIT debugging options:\n"
" --apply-bindings=FILE Apply assembly bindings from FILE (only for AOT)\n"
" --breakonex Inserts a breakpoint on exceptions\n"
" --break METHOD Inserts a breakpoint at METHOD entry\n"
" --break-at-bb METHOD N Inserts a breakpoint in METHOD at BB N\n"
" --compile METHOD Just compile METHOD in assembly\n"
" --compile-all=N Compiles all the methods in the assembly multiple times (default: 1)\n"
" --ncompile N Number of times to compile METHOD (default: 1)\n"
" --print-vtable Print the vtable of all used classes\n"
" --regression Runs the regression test contained in the assembly\n"
" --single-method=OPTS Runs regressions with only one method optimized with OPTS at any time\n"
" --statfile FILE Sets the stat file to FILE\n"
" --stats Print statistics about the JIT operations\n"
" --inject-async-exc METHOD OFFSET Inject an asynchronous exception at METHOD\n"
" --verify-all Run the verifier on all assemblies and methods\n"
" --full-aot Avoid JITting any code\n"
" --llvmonly Use LLVM compiled code only\n"
" --agent=ASSEMBLY[:ARG] Loads the specific agent assembly and executes its Main method with the given argument before loading the main assembly.\n"
" --no-x86-stack-align Don't align stack on x86\n"
"\n"
"The options supported by MONO_DEBUG can also be passed on the command line.\n"
"\n"
"Other options:\n"
" --graph[=TYPE] METHOD Draws a graph of the specified method:\n");
for (i = 0; i < G_N_ELEMENTS (graph_names); ++i) {
fprintf (stdout, " %-10s %s\n", graph_names [i].name, graph_names [i].desc);
}
}
static void
mini_usage_list_opt (void)
{
int i;
for (i = 0; i < G_N_ELEMENTS (opt_names); ++i)
fprintf (stdout, " %-10s %s\n", optflag_get_name (i), optflag_get_desc (i));
}
static void
mini_usage (void)
{
fprintf (stdout,
"Usage is: mono [options] program [program-options]\n"
"\n"
"Development:\n"
" --aot[=<options>] Compiles the assembly to native code\n"
" --debug=ignore Disable debugging support (on by default)\n"
" --debug=[<options>] Disable debugging support or enable debugging extras, use --help-debug for details\n"
" --debugger-agent=options Enable the debugger agent\n"
" --profile[=profiler] Runs in profiling mode with the specified profiler module\n"
" --trace[=EXPR] Enable tracing, use --help-trace for details\n"
#ifdef __linux__
" --jitmap Output a jit method map to /tmp/perf-PID.map\n"
#endif
#ifdef ENABLE_JIT_DUMP
" --jitdump Output a jitdump file to /tmp/jit-PID.dump\n"
#endif
" --help-devel Shows more options available to developers\n"
"\n"
"Runtime:\n"
" --config FILE Loads FILE as the Mono config\n"
" --verbose, -v Increases the verbosity level\n"
" --help, -h Show usage information\n"
" --version, -V Show version information\n"
" --version=number Show version number\n"
" --runtime=VERSION Use the VERSION runtime, instead of autodetecting\n"
" --optimize=OPT Turns on or off a specific optimization\n"
" Use --list-opt to get a list of optimizations\n"
" --attach=OPTIONS Pass OPTIONS to the attach agent in the runtime.\n"
" Currently the only supported option is 'disable'.\n"
" --llvm, --nollvm Controls whenever the runtime uses LLVM to compile code.\n"
" --gc=[sgen,boehm] Select SGen or Boehm GC (runs mono or mono-sgen)\n"
#ifdef TARGET_OSX
" --arch=[32,64] Select architecture (runs mono32 or mono64)\n"
#endif
#ifdef HOST_WIN32
" --mixed-mode Enable mixed-mode image support.\n"
#endif
" --handlers Install custom handlers, use --help-handlers for details.\n"
" --aot-path=PATH List of additional directories to search for AOT images.\n"
);
g_print ("\nOptions:\n");
mono_options_print_usage ();
}
static void
mini_trace_usage (void)
{
fprintf (stdout,
"Tracing options:\n"
" --trace[=EXPR] Trace every call, optional EXPR controls the scope\n"
"\n"
"EXPR is composed of:\n"
" all All assemblies\n"
" none No assemblies\n"
" program Entry point assembly\n"
" assembly Specifies an assembly\n"
" wrapper All wrappers bridging native and managed code\n"
" M:Type:Method Specifies a method\n"
" N:Namespace Specifies a namespace\n"
" T:Type Specifies a type\n"
" E:Type Specifies stack traces for an exception type\n"
" EXPR Includes expression\n"
" -EXPR Excludes expression\n"
" EXPR,EXPR Multiple expressions\n"
" disabled Don't print any output until toggled via SIGUSR2\n");
}
static void
mini_debug_usage (void)
{
fprintf (stdout,
"Debugging options:\n"
" --debug[=OPTIONS] Disable debugging support or enable debugging extras, optional OPTIONS is a comma\n"
" separated list of options\n"
"\n"
"OPTIONS is composed of:\n"
" ignore Disable debugging support (on by default).\n"
" casts Enable more detailed InvalidCastException messages.\n"
" mdb-optimizations Disable some JIT optimizations which are normally\n"
" disabled when running inside the debugger.\n"
" This is useful if you plan to attach to the running\n"
" process with the debugger.\n");
}
#if defined(MONO_ARCH_ARCHITECTURE)
/* Redefine MONO_ARCHITECTURE to include more information */
#undef MONO_ARCHITECTURE
#define MONO_ARCHITECTURE MONO_ARCH_ARCHITECTURE
#endif
static char *
mono_get_version_info (void)
{
GString *output;
output = g_string_new ("");
#ifdef MONO_KEYWORD_THREAD
g_string_append_printf (output, "\tTLS: __thread\n");
#else
g_string_append_printf (output, "\tTLS: \n");
#endif /* MONO_KEYWORD_THREAD */
#ifdef MONO_ARCH_SIGSEGV_ON_ALTSTACK
g_string_append_printf (output, "\tSIGSEGV: altstack\n");
#else
g_string_append_printf (output, "\tSIGSEGV: normal\n");
#endif
#ifdef HAVE_EPOLL
g_string_append_printf (output, "\tNotifications: epoll\n");
#elif defined(HAVE_KQUEUE)
g_string_append_printf (output, "\tNotification: kqueue\n");
#else
g_string_append_printf (output, "\tNotification: Thread + polling\n");
#endif
g_string_append_printf (output, "\tArchitecture: %s\n", MONO_ARCHITECTURE);
g_string_append_printf (output, "\tDisabled: %s\n", DISABLED_FEATURES);
g_string_append_printf (output, "\tMisc: ");
#ifdef MONO_SMALL_CONFIG
g_string_append_printf (output, "smallconfig ");
#endif
#ifdef MONO_BIG_ARRAYS
g_string_append_printf (output, "bigarrays ");
#endif
#if !defined(DISABLE_SDB)
g_string_append_printf (output, "softdebug ");
#endif
g_string_append_printf (output, "\n");
#ifndef DISABLE_INTERPRETER
g_string_append_printf (output, "\tInterpreter: yes\n");
#else
g_string_append_printf (output, "\tInterpreter: no\n");
#endif
#ifdef MONO_ARCH_LLVM_SUPPORTED
#ifdef ENABLE_LLVM
g_string_append_printf (output, "\tLLVM: yes(%d)\n", LLVM_API_VERSION);
#else
g_string_append_printf (output, "\tLLVM: supported, not enabled.\n");
#endif
#endif
mono_threads_suspend_policy_init ();
g_string_append_printf (output, "\tSuspend: %s\n", mono_threads_suspend_policy_name (mono_threads_suspend_policy ()));
return g_string_free (output, FALSE);
}
#ifndef MONO_ARCH_AOT_SUPPORTED
#define error_if_aot_unsupported() do {fprintf (stderr, "AOT compilation is not supported on this platform.\n"); exit (1);} while (0)
#else
#define error_if_aot_unsupported()
#endif
static gboolean enable_debugging;
static void
enable_runtime_stats (void)
{
mono_counters_enable (-1);
mono_atomic_store_bool (&mono_stats.enabled, TRUE);
mono_atomic_store_bool (&mono_jit_stats.enabled, TRUE);
}
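/* Parse METHOD_NAME into a MonoMethodDesc, exiting with an error message if it is empty or malformed. */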
static MonoMethodDesc *
parse_qualified_method_name (char *method_name)
{
if (strlen (method_name) == 0) {
g_printerr ("Couldn't parse empty method name.");
exit (1);
}
MonoMethodDesc *result = mono_method_desc_new (method_name, TRUE);
if (!result) {
g_printerr ("Couldn't parse method name: %s\n", method_name);
exit (1);
}
return result;
}
/**
* mono_jit_parse_options:
*
* Process the command line options in \p argv as done by the runtime executable.
* This should be called before \c mono_jit_init.
*/
void
mono_jit_parse_options (int argc, char * argv[])
{
int i;
char *trace_options = NULL;
int mini_verbose_level = 0;
guint32 opt;
/*
* Some options have no effect here, since they influence the behavior of
* mono_main ().
*/
opt = mono_parse_default_optimizations (NULL);
/* FIXME: Avoid code duplication */
for (i = 0; i < argc; ++i) {
if (argv [i] [0] != '-')
break;
if (strncmp (argv [i], "--debugger-agent=", 17) == 0) {
MonoDebugOptions *opt = mini_get_debug_options ();
mono_debugger_agent_parse_options (g_strdup (argv [i] + 17));
opt->mdb_optimizations = TRUE;
enable_debugging = TRUE;
} else if (!strcmp (argv [i], "--soft-breakpoints")) {
MonoDebugOptions *opt = mini_get_debug_options ();
opt->soft_breakpoints = TRUE;
opt->explicit_null_checks = TRUE;
} else if (strncmp (argv [i], "--optimize=", 11) == 0) {
opt = parse_optimizations (opt, argv [i] + 11, TRUE);
mono_set_optimizations (opt);
} else if (strncmp (argv [i], "-O=", 3) == 0) {
opt = parse_optimizations (opt, argv [i] + 3, TRUE);
mono_set_optimizations (opt);
} else if (strcmp (argv [i], "--trace") == 0) {
trace_options = (char*)"";
} else if (strncmp (argv [i], "--trace=", 8) == 0) {
trace_options = &argv [i][8];
} else if (strcmp (argv [i], "--verbose") == 0 || strcmp (argv [i], "-v") == 0) {
mini_verbose_level++;
} else if (strcmp (argv [i], "--breakonex") == 0) {
MonoDebugOptions *opt = mini_get_debug_options ();
opt->break_on_exc = TRUE;
} else if (strcmp (argv [i], "--stats") == 0) {
enable_runtime_stats ();
} else if (strncmp (argv [i], "--stats=", 8) == 0) {
enable_runtime_stats ();
if (mono_stats_method_desc)
g_free (mono_stats_method_desc);
mono_stats_method_desc = parse_qualified_method_name (argv [i] + 8);
} else if (strcmp (argv [i], "--break") == 0) {
if (i+1 >= argc){
fprintf (stderr, "Missing method name in --break command line option\n");
exit (1);
}
if (!mono_debugger_insert_breakpoint (argv [++i], FALSE))
fprintf (stderr, "Error: invalid method name '%s'\n", argv [i]);
} else if (strncmp (argv[i], "--gc-params=", 12) == 0) {
mono_gc_params_set (argv[i] + 12);
} else if (strncmp (argv[i], "--gc-debug=", 11) == 0) {
mono_gc_debug_set (argv[i] + 11);
} else if (strcmp (argv [i], "--llvm") == 0) {
#ifndef MONO_ARCH_LLVM_SUPPORTED
fprintf (stderr, "Mono Warning: --llvm not supported on this platform.\n");
#elif !defined(ENABLE_LLVM)
fprintf (stderr, "Mono Warning: --llvm not enabled in this runtime.\n");
#else
mono_use_llvm = TRUE;
#endif
} else if (strcmp (argv [i], "--profile") == 0) {
mini_add_profiler_argument (NULL);
} else if (strncmp (argv [i], "--profile=", 10) == 0) {
mini_add_profiler_argument (argv [i] + 10);
} else if (argv [i][0] == '-' && argv [i][1] == '-' && mini_parse_debug_option (argv [i] + 2)) {
} else {
fprintf (stderr, "Unsupported command line option: '%s'\n", argv [i]);
exit (1);
}
}
if (trace_options != NULL) {
/*
* Need to call this before mini_init () so we can trace methods
* compiled there too.
*/
mono_jit_trace_calls = mono_trace_set_options (trace_options);
if (mono_jit_trace_calls == NULL)
exit (1);
}
if (mini_verbose_level)
mono_set_verbose_level (mini_verbose_level);
}
static void
mono_set_use_smp (int use_smp)
{
#if HAVE_SCHED_SETAFFINITY
if (!use_smp) {
unsigned long proc_mask = 1;
#ifdef GLIBC_BEFORE_2_3_4_SCHED_SETAFFINITY
sched_setaffinity (getpid(), (gpointer)&proc_mask);
#else
sched_setaffinity (getpid(), sizeof (unsigned long), (const cpu_set_t *)&proc_mask);
#endif
}
#endif
}
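/*
 * If TARGET_GC differs from the GC compiled into this binary, re-exec the
 * matching mono-sgen/mono-boehm executable with the same arguments.
 */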
static void
switch_gc (char* argv[], const char* target_gc)
{
GString *path;
if (!strcmp (mono_gc_get_gc_name (), target_gc)) {
return;
}
path = g_string_new (argv [0]);
/* Strip an existing -sgen/-boehm suffix from the binary name, if any */
if (strstr (argv [0], "-sgen"))
g_string_truncate (path, path->len - 5);
else if (strstr (argv [0], "-boehm"))
g_string_truncate (path, path->len - 6);
g_string_append_c (path, '-');
g_string_append (path, target_gc);
#ifdef HAVE_EXECVP
execvp (path->str, argv);
fprintf (stderr, "Error: Failed to switch to %s gc. mono-%s is not installed.\n", target_gc, target_gc);
#else
fprintf (stderr, "Error: --gc=<NAME> option not supported on this platform.\n");
#endif
}
#ifdef TARGET_OSX
/*
* Tries to raise the soft limit on open file descriptors to at least 1024.
*/
static void
darwin_change_default_file_handles (void)
{
struct rlimit limit;
if (getrlimit (RLIMIT_NOFILE, &limit) == 0){
if (limit.rlim_cur < 1024){
limit.rlim_cur = MAX (1024, limit.rlim_cur);
setrlimit (RLIMIT_NOFILE, &limit);
}
}
}
static void
switch_arch (char* argv[], const char* target_arch)
{
GString *path;
gsize arch_offset;
if ((strcmp (target_arch, "32") == 0 && strcmp (MONO_ARCHITECTURE, "x86") == 0) ||
(strcmp (target_arch, "64") == 0 && strcmp (MONO_ARCHITECTURE, "amd64") == 0)) {
return; /* matching arch loaded */
}
path = g_string_new (argv [0]);
arch_offset = path->len -2; /* last two characters */
/* Remove arch suffix if present */
if (strstr (&path->str[arch_offset], "32") || strstr (&path->str[arch_offset], "64")) {
g_string_truncate (path, arch_offset);
}
g_string_append (path, target_arch);
if (execvp (path->str, argv) < 0) {
fprintf (stderr, "Error: --arch=%s Failed to switch to '%s'.\n", target_arch, path->str);
exit (1);
}
}
#endif
#define MONO_HANDLERS_ARGUMENT "--handlers="
#define MONO_HANDLERS_ARGUMENT_LEN STRING_LENGTH(MONO_HANDLERS_ARGUMENT)
static void
apply_root_domain_configuration_file_bindings (MonoDomain *domain, char *root_domain_configuration_file)
{
g_assert_not_reached ();
}
static void
mono_check_interp_supported (void)
{
#ifdef MONO_CROSS_COMPILE
g_error ("--interpreter on cross-compile runtimes not supported\n");
#endif
#ifndef MONO_ARCH_INTERPRETER_SUPPORTED
g_error ("--interpreter not supported on this architecture.\n");
#endif
}
static int
mono_exec_regression_internal (int verbose_level, int count, char *images [], gboolean single_method)
{
mono_do_single_method_regression = single_method;
if (mono_use_interpreter) {
if (mono_interp_regression_list (verbose_level, count, images)) {
g_print ("Regression ERRORS!\n");
return 1;
}
return 0;
}
if (mini_regression_list (verbose_level, count, images)) {
g_print ("Regression ERRORS!\n");
return 1;
}
return 0;
}
/**
* Returns TRUE for success, FALSE for failure.
*/
gboolean
mono_regression_test_step (int verbose_level, const char *image, const char *method_name)
{
if (method_name) {
//TODO
} else {
do_regression_retries = TRUE;
}
char *images[] = {
(char*)image,
NULL
};
return mono_exec_regression_internal (verbose_level, 1, images, FALSE) == 0;
}
#ifdef ENABLE_ICALL_SYMBOL_MAP
/* Print the icall table as JSON */
static void
print_icall_table (void)
{
// We emit some dummy values to make the code simpler
printf ("[\n{ \"klass\": \"\", \"icalls\": [");
#define NOHANDLES(inner) inner
#define HANDLES(id, name, func, ...) printf ("\t,{ \"name\": \"%s\", \"func\": \"%s_raw\", \"handles\": true }\n", name, #func);
#define HANDLES_REUSE_WRAPPER HANDLES
#define MONO_HANDLE_REGISTER_ICALL(...) /* nothing */
#define ICALL_TYPE(id,name,first) printf ("]},\n { \"klass\":\"%s\", \"icalls\": [{} ", name);
#define ICALL(id,name,func) printf ("\t,{ \"name\": \"%s\", \"func\": \"%s\", \"handles\": false }\n", name, #func);
#include <mono/metadata/icall-def.h>
printf ("]}\n]\n");
}
#endif
/**
* mono_main:
* \param argc number of arguments in the argv array
* \param argv array of strings containing the startup arguments
* Launches the Mono JIT engine and parses all the command line options
* in the same way that the mono command line VM would.
*/
int
mono_main (int argc, char* argv[])
{
MainThreadArgs main_args;
MonoAssembly *assembly;
MonoMethodDesc *desc;
MonoMethod *method;
MonoDomain *domain;
MonoImageOpenStatus open_status;
const char* aname, *mname = NULL;
int i;
#ifndef DISABLE_JIT
int count = 1;
MonoGraphOptions mono_graph_options = (MonoGraphOptions)0;
#endif
guint32 opt, action = DO_EXEC, recompilation_times = 1;
int mini_verbose_level = 0;
char *trace_options = NULL;
char *aot_options = NULL;
char *forced_version = NULL;
GPtrArray *agents = NULL;
char *extra_bindings_config_file = NULL;
#ifdef MONO_JIT_INFO_TABLE_TEST
int test_jit_info_table = FALSE;
#endif
#ifdef HOST_WIN32
int mixed_mode = FALSE;
#endif
ERROR_DECL (error);
#ifdef MOONLIGHT
#ifndef HOST_WIN32
/* stdout defaults to block buffering if it's not writing to a terminal, which
* happens with our test harness: we redirect stdout to capture it. Force line
* buffering in all cases. */
setlinebuf (stdout);
#endif
#endif
setlocale (LC_ALL, "");
#if TARGET_OSX
darwin_change_default_file_handles ();
#endif
if (g_hasenv ("MONO_NO_SMP"))
mono_set_use_smp (FALSE);
#ifdef MONO_JEMALLOC_ENABLED
gboolean use_jemalloc = FALSE;
#ifdef MONO_JEMALLOC_DEFAULT
use_jemalloc = TRUE;
#endif
if (!use_jemalloc)
use_jemalloc = g_hasenv ("MONO_USE_JEMALLOC");
if (use_jemalloc)
mono_init_jemalloc ();
#endif
g_log_set_always_fatal (G_LOG_LEVEL_ERROR);
g_log_set_fatal_mask (G_LOG_DOMAIN, G_LOG_LEVEL_ERROR);
opt = mono_parse_default_optimizations (NULL);
enable_debugging = TRUE;
mono_options_parse_options ((const char**)argv + 1, argc - 1, &argc, error);
argc ++;
if (!is_ok (error)) {
g_printerr ("%s", mono_error_get_message (error));
mono_error_cleanup (error);
return 1;
}
for (i = 1; i < argc; ++i) {
if (argv [i] [0] != '-')
break;
if (strcmp (argv [i], "--regression") == 0) {
action = DO_REGRESSION;
} else if (strncmp (argv [i], "--single-method=", 16) == 0) {
char *full_opts = g_strdup_printf ("-all,%s", argv [i] + 16);
action = DO_SINGLE_METHOD_REGRESSION;
mono_single_method_regression_opt = parse_optimizations (opt, full_opts, TRUE);
g_free (full_opts);
} else if (strcmp (argv [i], "--verbose") == 0 || strcmp (argv [i], "-v") == 0) {
mini_verbose_level++;
} else if (strcmp (argv [i], "--version=number") == 0) {
g_print ("%s\n", VERSION);
return 0;
} else if (strcmp (argv [i], "--version") == 0 || strcmp (argv [i], "-V") == 0) {
char *build = mono_get_runtime_build_info ();
char *gc_descr;
g_print ("Mono JIT compiler version %s\nCopyright (C) Novell, Inc, Xamarin Inc and Contributors. www.mono-project.com\n", build);
g_free (build);
char *info = mono_get_version_info ();
g_print ("%s", info);
g_free (info);
gc_descr = mono_gc_get_description ();
g_print ("\tGC: %s\n", gc_descr);
g_free (gc_descr);
return 0;
} else if (strcmp (argv [i], "--help") == 0 || strcmp (argv [i], "-h") == 0) {
mini_usage ();
return 0;
} else if (strcmp (argv [i], "--help-trace") == 0){
mini_trace_usage ();
return 0;
} else if (strcmp (argv [i], "--help-devel") == 0){
mini_usage_jitdeveloper ();
return 0;
} else if (strcmp (argv [i], "--help-debug") == 0){
mini_debug_usage ();
return 0;
} else if (strcmp (argv [i], "--list-opt") == 0){
mini_usage_list_opt ();
return 0;
} else if (strncmp (argv [i], "--statfile", 10) == 0) {
if (i + 1 >= argc){
fprintf (stderr, "error: --statfile requires a filename argument\n");
return 1;
}
mini_stats_fd = fopen (argv [++i], "w+");
} else if (strncmp (argv [i], "--optimize=", 11) == 0) {
opt = parse_optimizations (opt, argv [i] + 11, TRUE);
} else if (strncmp (argv [i], "-O=", 3) == 0) {
opt = parse_optimizations (opt, argv [i] + 3, TRUE);
} else if (strncmp (argv [i], "--bisect=", 9) == 0) {
char *param = argv [i] + 9;
char *sep = strchr (param, ':');
if (!sep) {
fprintf (stderr, "Error: --bisect requires OPT:FILENAME\n");
return 1;
}
char *opt_string = g_strndup (param, sep - param);
guint32 opt = parse_optimizations (0, opt_string, FALSE);
g_free (opt_string);
mono_set_bisect_methods (opt, sep + 1);
} else if (strcmp (argv [i], "--gc=sgen") == 0) {
switch_gc (argv, "sgen");
} else if (strcmp (argv [i], "--gc=boehm") == 0) {
switch_gc (argv, "boehm");
} else if (strncmp (argv[i], "--gc-params=", 12) == 0) {
mono_gc_params_set (argv[i] + 12);
} else if (strncmp (argv[i], "--gc-debug=", 11) == 0) {
mono_gc_debug_set (argv[i] + 11);
}
#ifdef TARGET_OSX
else if (strcmp (argv [i], "--arch=32") == 0) {
switch_arch (argv, "32");
} else if (strcmp (argv [i], "--arch=64") == 0) {
switch_arch (argv, "64");
}
#endif
else if (strcmp (argv [i], "--config") == 0) {
if (i +1 >= argc){
fprintf (stderr, "error: --config requires a filename argument\n");
return 1;
}
++i;
#ifdef HOST_WIN32
} else if (strcmp (argv [i], "--mixed-mode") == 0) {
mixed_mode = TRUE;
#endif
#ifndef DISABLE_JIT
} else if (strcmp (argv [i], "--ncompile") == 0) {
if (i + 1 >= argc){
fprintf (stderr, "error: --ncompile requires an argument\n");
return 1;
}
count = atoi (argv [++i]);
action = DO_BENCH;
#endif
} else if (strcmp (argv [i], "--trace") == 0) {
trace_options = (char*)"";
} else if (strncmp (argv [i], "--trace=", 8) == 0) {
trace_options = &argv [i][8];
} else if (strcmp (argv [i], "--breakonex") == 0) {
MonoDebugOptions *opt = mini_get_debug_options ();
opt->break_on_exc = TRUE;
} else if (strcmp (argv [i], "--break") == 0) {
if (i+1 >= argc){
fprintf (stderr, "Missing method name in --break command line option\n");
return 1;
}
if (!mono_debugger_insert_breakpoint (argv [++i], FALSE))
fprintf (stderr, "Error: invalid method name '%s'\n", argv [i]);
} else if (strcmp (argv [i], "--break-at-bb") == 0) {
if (i + 2 >= argc) {
fprintf (stderr, "Missing method name or bb num in --break-at-bb command line option.");
return 1;
}
mono_break_at_bb_method = mono_method_desc_new (argv [++i], TRUE);
if (mono_break_at_bb_method == NULL) {
fprintf (stderr, "Method name is in a bad format in --break-at-bb command line option.");
return 1;
}
mono_break_at_bb_bb_num = atoi (argv [++i]);
} else if (strcmp (argv [i], "--inject-async-exc") == 0) {
if (i + 2 >= argc) {
fprintf (stderr, "Missing method name or position in --inject-async-exc command line option\n");
return 1;
}
mono_inject_async_exc_method = mono_method_desc_new (argv [++i], TRUE);
if (mono_inject_async_exc_method == NULL) {
fprintf (stderr, "Method name is in a bad format in --inject-async-exc command line option\n");
return 1;
}
mono_inject_async_exc_pos = atoi (argv [++i]);
} else if (strcmp (argv [i], "--verify-all") == 0) {
g_warning ("--verify-all is obsolete, ignoring");
} else if (strcmp (argv [i], "--full-aot") == 0) {
mono_jit_set_aot_mode (MONO_AOT_MODE_FULL);
} else if (strcmp (argv [i], "--llvmonly") == 0) {
mono_jit_set_aot_mode (MONO_AOT_MODE_LLVMONLY);
} else if (strcmp (argv [i], "--hybrid-aot") == 0) {
mono_jit_set_aot_mode (MONO_AOT_MODE_HYBRID);
} else if (strcmp (argv [i], "--full-aot-interp") == 0) {
mono_jit_set_aot_mode (MONO_AOT_MODE_INTERP);
} else if (strcmp (argv [i], "--llvmonly-interp") == 0) {
mono_jit_set_aot_mode (MONO_AOT_MODE_LLVMONLY_INTERP);
} else if (strcmp (argv [i], "--print-vtable") == 0) {
mono_print_vtable = TRUE;
} else if (strcmp (argv [i], "--stats") == 0) {
enable_runtime_stats ();
} else if (strncmp (argv [i], "--stats=", 8) == 0) {
enable_runtime_stats ();
if (mono_stats_method_desc)
g_free (mono_stats_method_desc);
mono_stats_method_desc = parse_qualified_method_name (argv [i] + 8);
#ifndef DISABLE_AOT
} else if (strcmp (argv [i], "--aot") == 0) {
error_if_aot_unsupported ();
mono_compile_aot = TRUE;
} else if (strncmp (argv [i], "--aot=", 6) == 0) {
error_if_aot_unsupported ();
mono_compile_aot = TRUE;
if (aot_options) {
char *tmp = g_strdup_printf ("%s,%s", aot_options, &argv [i][6]);
g_free (aot_options);
aot_options = tmp;
} else {
aot_options = g_strdup (&argv [i][6]);
}
#endif
} else if (strncmp (argv [i], "--apply-bindings=", 17) == 0) {
extra_bindings_config_file = &argv[i][17];
} else if (strncmp (argv [i], "--aot-path=", 11) == 0) {
char **splitted;
splitted = g_strsplit (argv [i] + 11, G_SEARCHPATH_SEPARATOR_S, 1000);
while (*splitted) {
char *tmp = *splitted;
mono_aot_paths = g_list_append (mono_aot_paths, g_strdup (tmp));
g_free (tmp);
splitted++;
}
} else if (strncmp (argv [i], "--compile-all=", 14) == 0) {
action = DO_COMPILE;
recompilation_times = atoi (argv [i] + 14);
} else if (strcmp (argv [i], "--compile-all") == 0) {
action = DO_COMPILE;
} else if (strncmp (argv [i], "--runtime=", 10) == 0) {
forced_version = &argv [i][10];
} else if (strcmp (argv [i], "--jitmap") == 0) {
mono_enable_jit_map ();
#ifdef ENABLE_JIT_DUMP
} else if (strcmp (argv [i], "--jitdump") == 0) {
mono_enable_jit_dump ();
#endif
} else if (strcmp (argv [i], "--profile") == 0) {
mini_add_profiler_argument (NULL);
} else if (strncmp (argv [i], "--profile=", 10) == 0) {
mini_add_profiler_argument (argv [i] + 10);
} else if (strncmp (argv [i], "--agent=", 8) == 0) {
if (agents == NULL)
agents = g_ptr_array_new ();
g_ptr_array_add (agents, argv [i] + 8);
} else if (strncmp (argv [i], "--attach=", 9) == 0) {
g_warning ("--attach= option no longer supported.");
} else if (strcmp (argv [i], "--compile") == 0) {
if (i + 1 >= argc){
fprintf (stderr, "error: --compile option requires a method name argument\n");
return 1;
}
mname = argv [++i];
action = DO_BENCH;
#ifndef DISABLE_JIT
} else if (strncmp (argv [i], "--graph=", 8) == 0) {
if (i + 1 >= argc){
fprintf (stderr, "error: --graph option requires a method name argument\n");
return 1;
}
mono_graph_options = mono_parse_graph_options (argv [i] + 8);
mname = argv [++i];
action = DO_DRAW;
} else if (strcmp (argv [i], "--graph") == 0) {
if (i + 1 >= argc){
fprintf (stderr, "error: --graph option requires a method name argument\n");
return 1;
}
mname = argv [++i];
mono_graph_options = MONO_GRAPH_CFG;
action = DO_DRAW;
#endif
} else if (strcmp (argv [i], "--debug") == 0) {
enable_debugging = TRUE;
} else if (strncmp (argv [i], "--debug=", 8) == 0) {
enable_debugging = TRUE;
if (!parse_debug_options (argv [i] + 8))
return 1;
MonoDebugOptions *opt = mini_get_debug_options ();
if (!opt->enabled) {
enable_debugging = FALSE;
}
} else if (strncmp (argv [i], "--debugger-agent=", 17) == 0) {
MonoDebugOptions *opt = mini_get_debug_options ();
mono_debugger_agent_parse_options (g_strdup (argv [i] + 17));
opt->mdb_optimizations = TRUE;
enable_debugging = TRUE;
} else if (strcmp (argv [i], "--security") == 0) {
fprintf (stderr, "error: --security is obsolete.");
return 1;
} else if (strncmp (argv [i], "--security=", 11) == 0) {
if (strcmp (argv [i] + 11, "core-clr") == 0) {
fprintf (stderr, "error: --security=core-clr is obsolete.");
return 1;
} else if (strcmp (argv [i] + 11, "core-clr-test") == 0) {
fprintf (stderr, "error: --security=core-clr-test is obsolete.");
return 1;
} else if (strcmp (argv [i] + 11, "cas") == 0) {
fprintf (stderr, "error: --security=cas is obsolete.");
return 1;
} else if (strcmp (argv [i] + 11, "validil") == 0) {
fprintf (stderr, "error: --security=validil is obsolete.");
return 1;
} else if (strcmp (argv [i] + 11, "verifiable") == 0) {
fprintf (stderr, "error: --securty=verifiable is obsolete.");
return 1;
} else {
fprintf (stderr, "error: --security= option has invalid argument (cas, core-clr, verifiable or validil)\n");
return 1;
}
} else if (strcmp (argv [i], "--desktop") == 0) {
mono_gc_set_desktop_mode ();
/* Put more desktop-specific optimizations here */
} else if (strcmp (argv [i], "--server") == 0){
mono_config_set_server_mode (TRUE);
/* Put more server-specific optimizations here */
} else if (strcmp (argv [i], "--inside-mdb") == 0) {
action = DO_DEBUGGER;
} else if (strncmp (argv [i], "--wapi=", 7) == 0) {
fprintf (stderr, "--wapi= option no longer supported\n.");
return 1;
} else if (strcmp (argv [i], "--no-x86-stack-align") == 0) {
mono_do_x86_stack_align = FALSE;
#ifdef MONO_JIT_INFO_TABLE_TEST
} else if (strcmp (argv [i], "--test-jit-info-table") == 0) {
test_jit_info_table = TRUE;
#endif
} else if (strcmp (argv [i], "--llvm") == 0) {
#ifndef MONO_ARCH_LLVM_SUPPORTED
fprintf (stderr, "Mono Warning: --llvm not supported on this platform.\n");
#elif !defined(ENABLE_LLVM)
fprintf (stderr, "Mono Warning: --llvm not enabled in this runtime.\n");
#else
mono_use_llvm = TRUE;
#endif
} else if (strcmp (argv [i], "--nollvm") == 0){
mono_use_llvm = FALSE;
} else if (strcmp (argv [i], "--ffast-math") == 0){
mono_use_fast_math = TRUE;
} else if ((strcmp (argv [i], "--interpreter") == 0) || !strcmp (argv [i], "--interp")) {
mono_runtime_set_execution_mode (MONO_EE_MODE_INTERP);
} else if (strncmp (argv [i], "--interp=", 9) == 0) {
mono_runtime_set_execution_mode_full (MONO_EE_MODE_INTERP, FALSE);
mono_interp_opts_string = argv [i] + 9;
} else if (strcmp (argv [i], "--print-icall-table") == 0) {
#ifdef ENABLE_ICALL_SYMBOL_MAP
print_icall_table ();
exit (0);
#else
fprintf (stderr, "--print-icall-table requires a runtime configured with the --enable-icall-symbol-map option.\n");
exit (1);
#endif
} else if (strncmp (argv [i], "--assembly-loader=", strlen("--assembly-loader=")) == 0) {
gchar *arg = argv [i] + strlen ("--assembly-loader=");
if (strcmp (arg, "strict") == 0)
mono_loader_set_strict_assembly_name_check (TRUE);
else if (strcmp (arg, "legacy") == 0)
mono_loader_set_strict_assembly_name_check (FALSE);
else
fprintf (stderr, "Warning: unknown argument to --assembly-loader. Should be \"strict\" or \"legacy\"\n");
} else if (strncmp (argv [i], MONO_HANDLERS_ARGUMENT, MONO_HANDLERS_ARGUMENT_LEN) == 0) {
//Install specific custom handlers.
if (!mono_runtime_install_custom_handlers (argv[i] + MONO_HANDLERS_ARGUMENT_LEN)) {
fprintf (stderr, "error: " MONO_HANDLERS_ARGUMENT ", one or more unknown handlers: '%s'\n", argv [i]);
return 1;
}
} else if (strcmp (argv [i], "--help-handlers") == 0) {
mono_runtime_install_custom_handlers_usage ();
return 0;
} else if (strncmp (argv [i], "--response=", 11) == 0){
gchar *response_content;
gchar *response_options;
gsize response_content_len;
if (!g_file_get_contents (&argv[i][11], &response_content, &response_content_len, NULL)){
fprintf (stderr, "The specified response file can not be read\n");
exit (1);
}
response_options = response_content;
// Check for UTF8 BOM in file and remove if found.
if (response_content_len >= 3 && response_content [0] == '\xef' && response_content [1] == '\xbb' && response_content [2] == '\xbf') {
response_content_len -= 3;
response_options += 3;
}
if (response_content_len == 0) {
fprintf (stderr, "The specified response file is empty\n");
exit (1);
}
mono_parse_response_options (response_options, &argc, &argv, FALSE);
g_free (response_content);
} else if (argv [i][0] == '-' && argv [i][1] == '-' && mini_parse_debug_option (argv [i] + 2)) {
} else if (strcmp (argv [i], "--use-map-jit") == 0){
mono_setmmapjit (TRUE);
} else {
fprintf (stderr, "Unknown command line option: '%s'\n", argv [i]);
return 1;
}
}
#if defined(DISABLE_HW_TRAPS) || defined(MONO_ARCH_DISABLE_HW_TRAPS)
// Signal handlers not available
{
MonoDebugOptions *opt = mini_get_debug_options ();
opt->explicit_null_checks = TRUE;
}
#endif
if (!argv [i]) {
mini_usage ();
return 1;
}
if (g_hasenv ("MONO_XDEBUG"))
enable_debugging = TRUE;
#ifdef MONO_CROSS_COMPILE
if (!mono_compile_aot) {
fprintf (stderr, "This mono runtime is compiled for cross-compiling. Only the --aot option is supported.\n");
exit (1);
}
#if TARGET_SIZEOF_VOID_P == 4 && (defined(TARGET_ARM64) || defined(TARGET_AMD64)) && !defined(MONO_ARCH_ILP32)
fprintf (stderr, "Can't cross-compile on 32-bit platforms to 64-bit architecture.\n");
exit (1);
#endif
#endif
if (mono_compile_aot || action == DO_EXEC || action == DO_DEBUGGER) {
g_set_prgname (argv[i]);
}
mono_counters_init ();
#ifndef HOST_WIN32
mono_w32handle_init ();
#endif
/* Set rootdir before loading config */
mono_set_rootdir ();
if (trace_options != NULL){
/*
* Need to call this before mini_init () so we can trace methods
* compiled there too.
*/
mono_jit_trace_calls = mono_trace_set_options (trace_options);
if (mono_jit_trace_calls == NULL)
exit (1);
}
#ifdef DISABLE_JIT
if (!mono_aot_only && !mono_use_interpreter) {
fprintf (stderr, "This runtime has been configured with --enable-minimal=jit, so the --full-aot command line option is required.\n");
exit (1);
}
#endif
if (action == DO_DEBUGGER) {
enable_debugging = TRUE;
g_print ("The Mono Debugger is no longer supported.\n");
return 1;
} else if (enable_debugging)
mono_debug_init (MONO_DEBUG_FORMAT_MONO);
#ifdef HOST_WIN32
if (mixed_mode)
mono_load_coree (argv [i]);
#endif
mono_set_defaults (mini_verbose_level, opt);
mono_set_os_args (argc, argv);
domain = mini_init (argv [i], forced_version);
mono_gc_set_stack_end (&domain);
if (agents) {
int i;
for (i = 0; i < agents->len; ++i) {
int res = load_agent (domain, (char*)g_ptr_array_index (agents, i));
if (res) {
g_ptr_array_free (agents, TRUE);
mini_cleanup (domain);
return 1;
}
}
g_ptr_array_free (agents, TRUE);
}
switch (action) {
case DO_SINGLE_METHOD_REGRESSION:
case DO_REGRESSION:
return mono_exec_regression_internal (mini_verbose_level, argc -i, argv + i, action == DO_SINGLE_METHOD_REGRESSION);
case DO_BENCH:
if (argc - i != 1 || mname == NULL) {
g_print ("Usage: mini --ncompile num --compile method assembly\n");
mini_cleanup (domain);
return 1;
}
aname = argv [i];
break;
case DO_COMPILE:
if (argc - i != 1) {
mini_usage ();
mini_cleanup (domain);
return 1;
}
aname = argv [i];
break;
case DO_DRAW:
if (argc - i != 1 || mname == NULL) {
mini_usage ();
mini_cleanup (domain);
return 1;
}
aname = argv [i];
break;
default:
if (argc - i < 1) {
mini_usage ();
mini_cleanup (domain);
return 1;
}
aname = argv [i];
break;
}
#ifdef MONO_JIT_INFO_TABLE_TEST
if (test_jit_info_table)
jit_info_table_test (domain);
#endif
if (mono_compile_aot && extra_bindings_config_file != NULL) {
apply_root_domain_configuration_file_bindings (domain, extra_bindings_config_file);
}
MonoAssemblyOpenRequest open_req;
mono_assembly_request_prepare_open (&open_req, mono_alc_get_default ());
assembly = mono_assembly_request_open (aname, &open_req, &open_status);
if (!assembly && !mono_compile_aot) {
fprintf (stderr, "Cannot open assembly '%s': %s.\n", aname, mono_image_strerror (open_status));
mini_cleanup (domain);
return 2;
}
mono_callspec_set_assembly (assembly);
if (mono_compile_aot || action == DO_EXEC) {
const char *error;
//mono_set_rootdir ();
error = mono_check_corlib_version ();
if (error) {
fprintf (stderr, "Corlib not in sync with this runtime: %s\n", error);
fprintf (stderr, "Loaded from: %s\n",
mono_defaults.corlib? mono_image_get_filename (mono_defaults.corlib): "unknown");
fprintf (stderr, "Download a newer corlib or a newer runtime at http://www.mono-project.com/download.\n");
exit (1);
}
#if defined(HOST_WIN32) && HAVE_API_SUPPORT_WIN32_CONSOLE
/* Detach console when executing IMAGE_SUBSYSTEM_WINDOWS_GUI on win32 */
if (!enable_debugging && !mono_compile_aot && mono_assembly_get_image_internal (assembly)->image_info->cli_header.nt.pe_subsys_required == IMAGE_SUBSYSTEM_WINDOWS_GUI)
FreeConsole ();
#endif
main_args.domain = domain;
main_args.file = aname;
main_args.argc = argc - i;
main_args.argv = argv + i;
main_args.opts = opt;
main_args.aot_options = aot_options;
main_thread_handler (&main_args);
mono_thread_manage_internal ();
mini_cleanup (domain);
/* Look up return value from System.Environment.ExitCode */
i = mono_environment_exitcode_get ();
return i;
} else if (action == DO_COMPILE) {
compile_all_methods (assembly, mini_verbose_level, opt, recompilation_times);
mini_cleanup (domain);
return 0;
} else if (action == DO_DEBUGGER) {
return 1;
}
desc = mono_method_desc_new (mname, 0);
if (!desc) {
g_print ("Invalid method name %s\n", mname);
mini_cleanup (domain);
return 3;
}
method = mono_method_desc_search_in_image (desc, mono_assembly_get_image_internal (assembly));
if (!method) {
g_print ("Cannot find method %s\n", mname);
mini_cleanup (domain);
return 3;
}
#ifndef DISABLE_JIT
MonoCompile *cfg;
if (action == DO_DRAW) {
int part = 0;
switch (mono_graph_options) {
case MONO_GRAPH_DTREE:
part = 1;
opt |= MONO_OPT_LOOP;
break;
case MONO_GRAPH_CFG_CODE:
part = 1;
break;
case MONO_GRAPH_CFG_SSA:
part = 2;
break;
case MONO_GRAPH_CFG_OPTCODE:
part = 3;
break;
default:
break;
}
if ((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
(method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
MonoMethod *nm;
nm = mono_marshal_get_native_wrapper (method, TRUE, FALSE);
cfg = mini_method_compile (nm, opt, (JitFlags)0, part, -1);
}
else
cfg = mini_method_compile (method, opt, (JitFlags)0, part, -1);
if ((mono_graph_options & MONO_GRAPH_CFG_SSA) && !(cfg->comp_done & MONO_COMP_SSA)) {
g_warning ("no SSA info available (use -O=deadce)");
return 1;
}
mono_draw_graph (cfg, mono_graph_options);
mono_destroy_compile (cfg);
} else if (action == DO_BENCH) {
if (mini_stats_fd) {
const char *n;
double no_opt_time = 0.0;
GTimer *timer = g_timer_new ();
fprintf (mini_stats_fd, "$stattitle = \'Compilations times for %s\';\n",
mono_method_full_name (method, TRUE));
fprintf (mini_stats_fd, "@data = (\n");
fprintf (mini_stats_fd, "[");
for (i = 0; i < G_N_ELEMENTS (opt_sets); i++) {
opt = opt_sets [i];
n = mono_opt_descr (opt);
if (!n [0])
n = "none";
fprintf (mini_stats_fd, "\"%s\",", n);
}
fprintf (mini_stats_fd, "],\n[");
for (i = 0; i < G_N_ELEMENTS (opt_sets); i++) {
int j;
double elapsed;
opt = opt_sets [i];
g_timer_start (timer);
for (j = 0; j < count; ++j) {
cfg = mini_method_compile (method, opt, (JitFlags)0, 0, -1);
mono_destroy_compile (cfg);
}
g_timer_stop (timer);
elapsed = g_timer_elapsed (timer, NULL);
if (!opt)
no_opt_time = elapsed;
fprintf (mini_stats_fd, "%f, ", elapsed);
}
fprintf (mini_stats_fd, "]");
if (no_opt_time > 0.0) {
fprintf (mini_stats_fd, ", \n[");
for (i = 0; i < G_N_ELEMENTS (opt_sets); i++)
fprintf (mini_stats_fd, "%f,", no_opt_time);
fprintf (mini_stats_fd, "]");
}
fprintf (mini_stats_fd, ");\n");
} else {
for (i = 0; i < count; ++i) {
if ((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
(method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL))
method = mono_marshal_get_native_wrapper (method, TRUE, FALSE);
cfg = mini_method_compile (method, opt, (JitFlags)0, 0, -1);
mono_destroy_compile (cfg);
}
}
} else {
cfg = mini_method_compile (method, opt, (JitFlags)0, 0, -1);
mono_destroy_compile (cfg);
}
#endif
mini_cleanup (domain);
return 0;
}
/**
* mono_jit_init:
*/
MonoDomain *
mono_jit_init (const char *file)
{
MonoDomain *ret = mini_init (file, NULL);
	MONO_ENTER_GC_SAFE_UNBALANCED; // since we are not executing any managed code yet, it's safe to run the GC
return ret;
}
/**
* mono_jit_init_version:
* \param domain_name the name of the root domain
* \param runtime_version the version of the runtime to load
*
* Use this version when you want to force a particular runtime
* version to be used. By default Mono will pick the runtime that is
* referenced by the initial assembly (specified in \p file), this
* routine allows programmers to specify the actual runtime to be used
* as the initial runtime is inherited by all future assemblies loaded
* (since Mono does not support having more than one mscorlib runtime
* loaded at once).
*
* The \p runtime_version can be one of these strings: "v4.0.30319" for
* desktop, "mobile" for mobile or "moonlight" for Silverlight compat.
* If an unrecognized string is input, the vm will default to desktop.
*
* \returns the \c MonoDomain representing the domain where the assembly
* was loaded.
*/
MonoDomain *
mono_jit_init_version (const char *domain_name, const char *runtime_version)
{
MonoDomain *ret = mini_init (domain_name, runtime_version);
	MONO_ENTER_GC_SAFE_UNBALANCED; // since we are not executing any managed code yet, it's safe to run the GC
return ret;
}
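/*
 * A minimal embedding sketch (illustrative only, hence guarded by #if 0):
 * it pins the desktop runtime version before any assembly is loaded, runs
 * the entry point of the assembly named in argv [1] (a placeholder chosen
 * for this example) and propagates its exit code.
 */
#if 0
static int
embedding_example (int argc, char *argv [])
{
	MonoDomain *domain = mono_jit_init_version ("embed-root", "v4.0.30319");
	MonoAssembly *assembly = mono_domain_assembly_open (domain, argv [1]);
	if (!assembly) {
		fprintf (stderr, "Cannot open assembly '%s'.\n", argv [1]);
		return 2;
	}
	int res = mono_jit_exec (domain, assembly, argc - 1, argv + 1);
	mono_jit_cleanup (domain);
	return res;
}
#endif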
MonoDomain *
mono_jit_init_version_for_test_only (const char *domain_name, const char *runtime_version)
{
MonoDomain *ret = mini_init (domain_name, runtime_version);
return ret;
}
/**
* mono_jit_cleanup:
*/
void
mono_jit_cleanup (MonoDomain *domain)
{
MONO_STACKDATA (dummy);
(void) mono_threads_enter_gc_unsafe_region_unbalanced_internal (&dummy);
// after mini_cleanup everything is cleaned up so MONO_EXIT_GC_UNSAFE
// can't work and doesn't make sense.
mono_thread_manage_internal ();
mini_cleanup (domain);
}
void
mono_jit_set_aot_only (gboolean val)
{
mono_aot_only = val;
mono_ee_features.use_aot_trampolines = val;
}
static void
mono_runtime_set_execution_mode_full (int mode, gboolean override)
{
static gboolean mode_initialized = FALSE;
if (mode_initialized && !override)
return;
mode_initialized = TRUE;
memset (&mono_ee_features, 0, sizeof (mono_ee_features));
switch (mode) {
case MONO_AOT_MODE_LLVMONLY:
mono_aot_only = TRUE;
mono_llvm_only = TRUE;
mono_ee_features.use_aot_trampolines = TRUE;
break;
case MONO_AOT_MODE_FULL:
mono_aot_only = TRUE;
mono_ee_features.use_aot_trampolines = TRUE;
break;
case MONO_AOT_MODE_HYBRID:
mono_set_generic_sharing_vt_supported (TRUE);
mono_set_partial_sharing_supported (TRUE);
break;
case MONO_AOT_MODE_INTERP:
mono_aot_only = TRUE;
mono_use_interpreter = TRUE;
mono_ee_features.use_aot_trampolines = TRUE;
break;
case MONO_AOT_MODE_INTERP_LLVMONLY:
mono_aot_only = TRUE;
mono_use_interpreter = TRUE;
mono_llvm_only = TRUE;
mono_ee_features.force_use_interpreter = TRUE;
break;
case MONO_AOT_MODE_LLVMONLY_INTERP:
mono_aot_only = TRUE;
mono_use_interpreter = TRUE;
mono_llvm_only = TRUE;
break;
case MONO_AOT_MODE_INTERP_ONLY:
mono_check_interp_supported ();
mono_use_interpreter = TRUE;
mono_ee_features.force_use_interpreter = TRUE;
break;
case MONO_AOT_MODE_NORMAL:
case MONO_AOT_MODE_NONE:
break;
default:
g_error ("Unknown execution-mode %d", mode);
}
}
static void
mono_runtime_set_execution_mode (int mode)
{
mono_runtime_set_execution_mode_full (mode, TRUE);
}
/**
* mono_jit_set_aot_mode:
*/
void
mono_jit_set_aot_mode (MonoAotMode mode)
{
/* we don't want to set mono_aot_mode twice */
static gboolean inited;
g_assert (!inited);
mono_aot_mode = mode;
inited = TRUE;
mono_runtime_set_execution_mode (mode);
}
mono_bool
mono_jit_aot_compiling (void)
{
return mono_compile_aot;
}
/**
* mono_jit_set_trace_options:
* \param options string representing the trace options
* Set the options of the tracing engine. This function can be called before initializing
* the mono runtime. See the --trace mono(1) manpage for the options format.
*
* \returns TRUE if the options were parsed and set correctly, FALSE otherwise.
*/
gboolean
mono_jit_set_trace_options (const char* options)
{
MonoCallSpec *trace_opt = mono_trace_set_options (options);
if (trace_opt == NULL)
return FALSE;
mono_jit_trace_calls = trace_opt;
return TRUE;
}
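/*
 * Illustrative sketch of enabling tracing from an embedding host before
 * the runtime starts; the callspec "N:System.Text" (trace every method in
 * that namespace) is only an example value, see the --trace section of the
 * mono(1) manpage for the full format.
 */
#if 0
if (!mono_jit_set_trace_options ("N:System.Text"))
	fprintf (stderr, "Invalid trace options.\n");
#endif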
/**
* mono_set_signal_chaining:
*
* Enable/disable signal chaining. This should be called before \c mono_jit_init.
* If signal chaining is enabled, the runtime saves the original signal handlers before
* installing its own handlers, and calls the original ones in the following cases:
* - a \c SIGSEGV / \c SIGABRT signal received while executing native (i.e. not JITted) code.
* - \c SIGPROF
* - \c SIGFPE
* - \c SIGQUIT
* - \c SIGUSR2
* Signal chaining only works on POSIX platforms.
*/
void
mono_set_signal_chaining (gboolean chain_signals)
{
mono_do_signal_chaining = chain_signals;
}
/**
* mono_set_crash_chaining:
*
* Enable/disable crash chaining due to signals. When a fatal signal is delivered and
 * Mono doesn't know how to handle it, it will invoke the crash handler. If crash chaining
* is enabled, it will first print its crash information and then try to chain with the native handler.
*/
void
mono_set_crash_chaining (gboolean chain_crashes)
{
mono_do_crash_chaining = chain_crashes;
}
/**
* mono_parse_options_from:
 * \param options string containing the options to parse
* \param ref_argc pointer to the \c argc variable that might be updated
* \param ref_argv pointer to the \c argv string vector variable that might be updated
*
 * This function parses the contents of \p options as if they were
 * parsed by a command shell, splitting the contents by spaces into
 * different elements of the \p argv vector. This method supports
 * quoting with both the " and '
* characters. Inside quoting, spaces and tabs are significant,
* otherwise, they are considered argument separators.
*
* The \ character can be used to escape the next character which will
* be added to the current element verbatim. Typically this is used
 * inside quotes. If the quotes are not balanced, this method returns
 * an error message describing the unmatched quote.
 *
 * If \p options is NULL or empty, no changes are made
 * to the values pointed by \p ref_argc and \p ref_argv.
*
* Otherwise the \p ref_argv is modified to point to a new array that contains
* all the previous elements contained in the vector, plus the values parsed.
* The \p argc is updated to match the new number of parameters.
*
* \returns The value NULL is returned on success, otherwise a \c g_strdup allocated
* string is returned (this is an alias to \c malloc under normal circumstances) that
* contains the error message that happened during parsing.
*/
char *
mono_parse_options_from (const char *options, int *ref_argc, char **ref_argv [])
{
return mono_parse_options (options, ref_argc, ref_argv, TRUE);
}
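/*
 * Illustrative sketch of mono_parse_options_from (the option values are
 * made up for this example): the quoted element survives as a single argv
 * entry, embedded space included.
 */
#if 0
static void
parse_from_example (int *argc, char **argv [])
{
	/* Inserts the elements --llvm and "--aot=outfile=a b.so" after argv [0]. */
	char *err = mono_parse_options_from ("--llvm \"--aot=outfile=a b.so\"", argc, argv);
	if (err) {
		fprintf (stderr, "%s", err);
		g_free (err);
	}
}
#endif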
static void
merge_parsed_options (GPtrArray *parsed_options, int *ref_argc, char **ref_argv [], gboolean prepend)
{
int argc = *ref_argc;
char **argv = *ref_argv;
if (parsed_options->len > 0){
int new_argc = parsed_options->len + argc;
char **new_argv = g_new (char *, new_argc + 1);
guint i;
guint j;
new_argv [0] = argv [0];
i = 1;
if (prepend){
/* First the environment variable settings, to allow the command line options to override */
for (i = 0; i < parsed_options->len; i++)
new_argv [i+1] = (char *)g_ptr_array_index (parsed_options, i);
i++;
}
for (j = 1; j < argc; j++)
new_argv [i++] = argv [j];
if (!prepend){
for (j = 0; j < parsed_options->len; j++)
new_argv [i++] = (char *)g_ptr_array_index (parsed_options, j);
}
new_argv [i] = NULL;
*ref_argc = new_argc;
*ref_argv = new_argv;
}
}
static char *
mono_parse_options (const char *options, int *ref_argc, char **ref_argv [], gboolean prepend)
{
if (options == NULL)
return NULL;
GPtrArray *array = g_ptr_array_new ();
GString *buffer = g_string_new ("");
const char *p;
gboolean in_quotes = FALSE;
char quote_char = '\0';
for (p = options; *p; p++){
switch (*p){
case ' ': case '\t': case '\n':
if (!in_quotes) {
if (buffer->len != 0){
g_ptr_array_add (array, g_strdup (buffer->str));
g_string_truncate (buffer, 0);
}
} else {
g_string_append_c (buffer, *p);
}
break;
case '\\':
if (p [1]){
g_string_append_c (buffer, p [1]);
p++;
}
break;
case '\'':
case '"':
if (in_quotes) {
if (quote_char == *p)
in_quotes = FALSE;
else
g_string_append_c (buffer, *p);
} else {
in_quotes = TRUE;
quote_char = *p;
}
break;
default:
g_string_append_c (buffer, *p);
break;
}
}
if (in_quotes)
return g_strdup_printf ("Unmatched quotes in value: [%s]\n", options);
if (buffer->len != 0)
g_ptr_array_add (array, g_strdup (buffer->str));
g_string_free (buffer, TRUE);
merge_parsed_options (array, ref_argc, ref_argv, prepend);
g_ptr_array_free (array, TRUE);
return NULL;
}
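/*
 * Worked example of the tokenizer above (input chosen for illustration):
 *
 *   --trace='N:System' --config "my file.config"
 *
 * produces the three elements --trace=N:System, --config and
 * my file.config: quote characters are stripped, quoted whitespace is
 * preserved, and merge_parsed_options () then splices the elements into
 * the argv vector.
 */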
#if defined(HOST_WIN32) && HAVE_API_SUPPORT_WIN32_COMMAND_LINE_TO_ARGV
#include <shellapi.h>
static char *
mono_win32_parse_options (const char *options, int *ref_argc, char **ref_argv [], gboolean prepend)
{
int argc;
gunichar2 **argv;
gunichar2 *optionsw;
if (!options)
return NULL;
GPtrArray *array = g_ptr_array_new ();
optionsw = g_utf8_to_utf16 (options, -1, NULL, NULL, NULL);
if (optionsw) {
gunichar2 *p;
gboolean in_quotes = FALSE;
gunichar2 quote_char = L'\0';
for (p = optionsw; *p; p++){
switch (*p){
case L'\n':
if (!in_quotes)
*p = L' ';
break;
case L'\'':
case L'"':
if (in_quotes) {
if (quote_char == *p)
in_quotes = FALSE;
} else {
in_quotes = TRUE;
quote_char = *p;
}
break;
}
}
argv = CommandLineToArgvW (optionsw, &argc);
if (argv) {
for (int i = 0; i < argc; i++)
g_ptr_array_add (array, g_utf16_to_utf8 (argv[i], -1, NULL, NULL, NULL));
LocalFree (argv);
}
g_free (optionsw);
}
merge_parsed_options (array, ref_argc, ref_argv, prepend);
g_ptr_array_free (array, TRUE);
return NULL;
}
static char *
mono_parse_response_options (const char *options, int *ref_argc, char **ref_argv [], gboolean prepend)
{
return mono_win32_parse_options (options, ref_argc, ref_argv, prepend);
}
#else
static char *
mono_parse_response_options (const char *options, int *ref_argc, char **ref_argv [], gboolean prepend)
{
return mono_parse_options (options, ref_argc, ref_argv, prepend);
}
#endif
/**
* mono_parse_env_options:
* \param ref_argc pointer to the \c argc variable that might be updated
* \param ref_argv pointer to the \c argv string vector variable that might be updated
*
* This function parses the contents of the \c MONO_ENV_OPTIONS
* environment variable as if they were parsed by a command shell
* splitting the contents by spaces into different elements of the
* \p argv vector. This method supports quoting with both the " and '
* characters. Inside quoting, spaces and tabs are significant,
* otherwise, they are considered argument separators.
*
* The \ character can be used to escape the next character which will
* be added to the current element verbatim. Typically this is used
 * inside quotes. If the quotes are not balanced, this method prints
 * an error and terminates the process.
*
* If the environment variable is empty, no changes are made
* to the values pointed by \p ref_argc and \p ref_argv.
*
* Otherwise the \p ref_argv is modified to point to a new array that contains
* all the previous elements contained in the vector, plus the values parsed.
* The \p argc is updated to match the new number of parameters.
*
* If there is an error parsing, this method will terminate the process by
* calling exit(1).
*
* An alternative to this method that allows an arbitrary string to be parsed
* and does not exit on error is the `api:mono_parse_options_from`.
*/
void
mono_parse_env_options (int *ref_argc, char **ref_argv [])
{
char *ret;
char *env_options = g_getenv ("MONO_ENV_OPTIONS");
if (env_options == NULL)
return;
ret = mono_parse_options_from (env_options, ref_argc, ref_argv);
g_free (env_options);
if (ret == NULL)
return;
fprintf (stderr, "%s", ret);
exit (1);
}
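/*
 * Illustrative shell usage (the option values are examples only):
 *
 *   MONO_ENV_OPTIONS="--llvm -O=all" mono app.exe
 *
 * behaves like passing --llvm -O=all on the command line; since the parsed
 * options are prepended, explicit command line options still override them.
 */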
MonoDebugOptions *
get_mini_debug_options (void)
{
return &mini_debug_options;
}
| /**
* \file
* The new mono JIT compiler.
*
* Author:
* Paolo Molaro ([email protected])
* Dietmar Maurer ([email protected])
*
* (C) 2002-2003 Ximian, Inc.
* (C) 2003-2006 Novell, Inc.
* Licensed under the MIT license. See LICENSE file in the project root for full license information.
*/
#include <config.h>
#include <signal.h>
#if HAVE_SCHED_SETAFFINITY
#include <sched.h>
#endif
#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif
#include <mono/metadata/assembly-internals.h>
#include <mono/metadata/image-internals.h>
#include <mono/metadata/loader.h>
#include <mono/metadata/tabledefs.h>
#include <mono/metadata/class.h>
#include <mono/metadata/object.h>
#include <mono/metadata/exception.h>
#include <mono/metadata/opcodes.h>
#include <mono/metadata/mono-endian.h>
#include <mono/metadata/tokentype.h>
#include <mono/metadata/reflection-internals.h>
#include <mono/metadata/tabledefs.h>
#include <mono/metadata/threads.h>
#include <mono/metadata/marshal.h>
#include <mono/metadata/appdomain.h>
#include <mono/metadata/debug-helpers.h>
#include <mono/metadata/profiler-private.h>
#include <mono/metadata/mono-config.h>
#include <mono/metadata/environment.h>
#include <mono/metadata/environment-internals.h>
#include <mono/metadata/verify.h>
#include <mono/metadata/mono-debug.h>
#include <mono/metadata/gc-internals.h>
#include <mono/metadata/coree.h>
#include <mono/metadata/w32process.h>
#include "mono/utils/mono-counters.h"
#include "mono/utils/mono-hwcap.h"
#include "mono/utils/mono-logger-internals.h"
#include "mono/utils/options.h"
#include "mono/metadata/w32handle.h"
#include "mono/metadata/callspec.h"
#include "mono/metadata/custom-attrs-internals.h"
#include <mono/utils/w32subset.h>
#include <mono/metadata/components.h>
#include <mono/mini/debugger-agent-external.h>
#include "mini.h"
#include <mono/jit/jit.h>
#include "aot-compiler.h"
#include "aot-runtime.h"
#include "mini-runtime.h"
#include "interp/interp.h"
#include <string.h>
#include <ctype.h>
#include <locale.h>
#if TARGET_OSX
# include <sys/resource.h>
#endif
static FILE *mini_stats_fd;
static void mini_usage (void);
static void mono_runtime_set_execution_mode (int mode);
static void mono_runtime_set_execution_mode_full (int mode, gboolean override);
static int mono_jit_exec_internal (MonoDomain *domain, MonoAssembly *assembly, int argc, char *argv[]);
#ifdef HOST_WIN32
/* Need this to determine whether to detach console */
#include <mono/metadata/cil-coff.h>
/* This turns off command line globbing under win32 */
int _CRT_glob = 0;
#endif
typedef void (*OptFunc) (const char *p);
#undef OPTFLAG
// This is used instead of an array of pointers to optimize away a pointer and a relocation per string.
#define MSGSTRFIELD(line) MSGSTRFIELD1(line)
#define MSGSTRFIELD1(line) str##line
static const struct msgstr_t {
#define OPTFLAG(id,shift,name,desc) char MSGSTRFIELD(__LINE__) [sizeof (name) + sizeof (desc)];
#include "optflags-def.h"
#undef OPTFLAG
} opstr = {
#define OPTFLAG(id,shift,name,desc) name "\0" desc,
#include "optflags-def.h"
#undef OPTFLAG
};
static const gint16 opt_names [] = {
#define OPTFLAG(id,shift,name,desc) offsetof (struct msgstr_t, MSGSTRFIELD(__LINE__)),
#include "optflags-def.h"
#undef OPTFLAG
};
#define optflag_get_name(id) ((const char*)&opstr + opt_names [(id)])
#define optflag_get_desc(id) (optflag_get_name(id) + 1 + strlen (optflag_get_name(id)))
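/*
 * Illustrative expansion of the machinery above for one hypothetical
 * OPTFLAG(peephole, 0, "peephole", "Peephole postpass") entry:
 *
 *   struct msgstr_t { char str123 [sizeof ("peephole") + sizeof ("Peephole postpass")]; ... };
 *   static const struct msgstr_t opstr = { "peephole" "\0" "Peephole postpass", ... };
 *   static const gint16 opt_names [] = { offsetof (struct msgstr_t, str123), ... };
 *
 * optflag_get_name () indexes into the single string blob, so no per-entry
 * string pointers (and hence no relocations) are emitted.
 */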
#define DEFAULT_OPTIMIZATIONS ( \
MONO_OPT_PEEPHOLE | \
MONO_OPT_CFOLD | \
MONO_OPT_INLINE | \
MONO_OPT_CONSPROP | \
MONO_OPT_COPYPROP | \
MONO_OPT_DEADCE | \
MONO_OPT_BRANCH | \
MONO_OPT_LINEARS | \
MONO_OPT_INTRINS | \
MONO_OPT_LOOP | \
MONO_OPT_EXCEPTION | \
MONO_OPT_CMOV | \
MONO_OPT_GSHARED | \
MONO_OPT_SIMD | \
MONO_OPT_ALIAS_ANALYSIS | \
MONO_OPT_AOT | \
MONO_OPT_FLOAT32)
#define EXCLUDED_FROM_ALL (MONO_OPT_PRECOMP | MONO_OPT_UNSAFE | MONO_OPT_GSHAREDVT)
static char *mono_parse_options (const char *options, int *ref_argc, char **ref_argv [], gboolean prepend);
static char *mono_parse_response_options (const char *options, int *ref_argc, char **ref_argv [], gboolean prepend);
static guint32
parse_optimizations (guint32 opt, const char* p, gboolean cpu_opts)
{
guint32 exclude = 0;
const char *n;
int i, invert;
char **parts, **ptr;
/* Initialize the hwcap module if necessary. */
mono_hwcap_init ();
/* call out to cpu detection code here that sets the defaults ... */
if (cpu_opts) {
#ifndef MONO_CROSS_COMPILE
opt |= mono_arch_cpu_optimizations (&exclude);
opt &= ~exclude;
#endif
}
if (!p)
return opt;
parts = g_strsplit (p, ",", -1);
for (ptr = parts; ptr && *ptr; ptr ++) {
char *arg = *ptr;
char *p = arg;
if (*p == '-') {
p++;
invert = TRUE;
} else {
invert = FALSE;
}
for (i = 0; i < G_N_ELEMENTS (opt_names) && optflag_get_name (i); ++i) {
n = optflag_get_name (i);
if (!strcmp (p, n)) {
if (invert)
opt &= ~ (1 << i);
else
opt |= 1 << i;
break;
}
}
if (i == G_N_ELEMENTS (opt_names) || !optflag_get_name (i)) {
if (strncmp (p, "all", 3) == 0) {
if (invert)
opt = 0;
else
opt = ~(EXCLUDED_FROM_ALL | exclude);
} else {
fprintf (stderr, "Invalid optimization name `%s'\n", p);
exit (1);
}
}
g_free (arg);
}
g_free (parts);
return opt;
}
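/*
 * Illustrative sketch of the accepted syntax: flags are comma separated,
 * a leading '-' clears a flag, and "all" expands to every flag outside
 * EXCLUDED_FROM_ALL. So for example:
 */
#if 0
/* Enable everything "all" allows, then clear MONO_OPT_INLINE. */
guint32 opts = parse_optimizations (0, "all,-inline", TRUE);
#endif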
static gboolean
parse_debug_options (const char* p)
{
MonoDebugOptions *opt = mini_get_debug_options ();
opt->enabled = TRUE;
do {
if (!*p) {
fprintf (stderr, "Syntax error; expected debug option name\n");
return FALSE;
}
if (!strncmp (p, "casts", 5)) {
opt->better_cast_details = TRUE;
p += 5;
} else if (!strncmp (p, "mdb-optimizations", 17)) {
opt->mdb_optimizations = TRUE;
p += 17;
} else if (!strncmp (p, "ignore", 6)) {
opt->enabled = FALSE;
p += 6;
} else {
fprintf (stderr, "Invalid debug option `%s', use --help-debug for details\n", p);
return FALSE;
}
if (*p == ',') {
p++;
if (!*p) {
fprintf (stderr, "Syntax error; expected debug option name\n");
return FALSE;
}
}
} while (*p);
return TRUE;
}
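/*
 * The grammar accepted above is a comma-separated list of the handled
 * option names; e.g. "--debug=casts,mdb-optimizations" sets both
 * better_cast_details and mdb_optimizations in a single pass.
 */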
typedef struct {
char name [6];
char desc [18];
MonoGraphOptions value;
} GraphName;
static const GraphName
graph_names [] = {
{"cfg", "Control Flow", MONO_GRAPH_CFG},
{"dtree", "Dominator Tree", MONO_GRAPH_DTREE},
{"code", "CFG showing code", MONO_GRAPH_CFG_CODE},
{"ssa", "CFG after SSA", MONO_GRAPH_CFG_SSA},
{"optc", "CFG after IR opts", MONO_GRAPH_CFG_OPTCODE}
};
static MonoGraphOptions
mono_parse_graph_options (const char* p)
{
const char *n;
int i, len;
for (i = 0; i < G_N_ELEMENTS (graph_names); ++i) {
n = graph_names [i].name;
len = strlen (n);
if (strncmp (p, n, len) == 0)
return graph_names [i].value;
}
fprintf (stderr, "Invalid graph name provided: %s\n", p);
exit (1);
}
/**
* mono_parse_default_optimizations:
*/
int
mono_parse_default_optimizations (const char* p)
{
guint32 opt;
opt = parse_optimizations (DEFAULT_OPTIMIZATIONS, p, TRUE);
return opt;
}
char*
mono_opt_descr (guint32 flags)
{
GString *str = g_string_new ("");
int i;
gboolean need_comma;
need_comma = FALSE;
for (i = 0; i < G_N_ELEMENTS (opt_names); ++i) {
if (flags & (1 << i) && optflag_get_name (i)) {
if (need_comma)
g_string_append_c (str, ',');
g_string_append (str, optflag_get_name (i));
need_comma = TRUE;
}
}
return g_string_free (str, FALSE);
}
static const guint32
opt_sets [] = {
0,
MONO_OPT_PEEPHOLE,
MONO_OPT_BRANCH,
MONO_OPT_CFOLD,
MONO_OPT_FCMOV,
MONO_OPT_ALIAS_ANALYSIS,
#ifdef MONO_ARCH_SIMD_INTRINSICS
MONO_OPT_SIMD | MONO_OPT_INTRINS,
MONO_OPT_SSE2,
MONO_OPT_SIMD | MONO_OPT_SSE2 | MONO_OPT_INTRINS,
#endif
MONO_OPT_BRANCH | MONO_OPT_PEEPHOLE | MONO_OPT_INTRINS,
MONO_OPT_BRANCH | MONO_OPT_PEEPHOLE | MONO_OPT_INTRINS | MONO_OPT_ALIAS_ANALYSIS,
MONO_OPT_BRANCH | MONO_OPT_PEEPHOLE | MONO_OPT_LINEARS,
MONO_OPT_BRANCH | MONO_OPT_PEEPHOLE | MONO_OPT_LINEARS | MONO_OPT_COPYPROP,
MONO_OPT_BRANCH | MONO_OPT_PEEPHOLE | MONO_OPT_LINEARS | MONO_OPT_CFOLD,
MONO_OPT_BRANCH | MONO_OPT_PEEPHOLE | MONO_OPT_LINEARS | MONO_OPT_COPYPROP | MONO_OPT_CONSPROP | MONO_OPT_DEADCE,
MONO_OPT_BRANCH | MONO_OPT_PEEPHOLE | MONO_OPT_LINEARS | MONO_OPT_COPYPROP | MONO_OPT_CONSPROP | MONO_OPT_DEADCE | MONO_OPT_ALIAS_ANALYSIS,
MONO_OPT_BRANCH | MONO_OPT_PEEPHOLE | MONO_OPT_LINEARS | MONO_OPT_COPYPROP | MONO_OPT_CONSPROP | MONO_OPT_DEADCE | MONO_OPT_LOOP | MONO_OPT_INLINE | MONO_OPT_INTRINS,
MONO_OPT_BRANCH | MONO_OPT_PEEPHOLE | MONO_OPT_LINEARS | MONO_OPT_COPYPROP | MONO_OPT_CONSPROP | MONO_OPT_DEADCE | MONO_OPT_LOOP | MONO_OPT_INLINE | MONO_OPT_INTRINS | MONO_OPT_TAILCALL,
MONO_OPT_BRANCH | MONO_OPT_PEEPHOLE | MONO_OPT_LINEARS | MONO_OPT_COPYPROP | MONO_OPT_CONSPROP | MONO_OPT_DEADCE | MONO_OPT_LOOP | MONO_OPT_INLINE | MONO_OPT_INTRINS | MONO_OPT_SSA,
MONO_OPT_BRANCH | MONO_OPT_PEEPHOLE | MONO_OPT_LINEARS | MONO_OPT_COPYPROP | MONO_OPT_CONSPROP | MONO_OPT_DEADCE | MONO_OPT_LOOP | MONO_OPT_INLINE | MONO_OPT_INTRINS | MONO_OPT_EXCEPTION,
MONO_OPT_BRANCH | MONO_OPT_PEEPHOLE | MONO_OPT_LINEARS | MONO_OPT_COPYPROP | MONO_OPT_CONSPROP | MONO_OPT_DEADCE | MONO_OPT_LOOP | MONO_OPT_INLINE | MONO_OPT_INTRINS | MONO_OPT_EXCEPTION | MONO_OPT_CMOV,
MONO_OPT_BRANCH | MONO_OPT_PEEPHOLE | MONO_OPT_LINEARS | MONO_OPT_COPYPROP | MONO_OPT_CONSPROP | MONO_OPT_DEADCE | MONO_OPT_LOOP | MONO_OPT_INLINE | MONO_OPT_INTRINS | MONO_OPT_EXCEPTION | MONO_OPT_ABCREM,
MONO_OPT_BRANCH | MONO_OPT_PEEPHOLE | MONO_OPT_LINEARS | MONO_OPT_COPYPROP | MONO_OPT_CONSPROP | MONO_OPT_DEADCE | MONO_OPT_LOOP | MONO_OPT_INLINE | MONO_OPT_INTRINS | MONO_OPT_ABCREM,
MONO_OPT_BRANCH | MONO_OPT_PEEPHOLE | MONO_OPT_COPYPROP | MONO_OPT_CONSPROP | MONO_OPT_DEADCE | MONO_OPT_LOOP | MONO_OPT_INLINE | MONO_OPT_INTRINS | MONO_OPT_EXCEPTION | MONO_OPT_CMOV,
DEFAULT_OPTIMIZATIONS,
};
static const guint32
interp_opt_sets [] = {
INTERP_OPT_NONE,
INTERP_OPT_INLINE,
INTERP_OPT_CPROP,
INTERP_OPT_SUPER_INSTRUCTIONS,
INTERP_OPT_INLINE | INTERP_OPT_CPROP,
INTERP_OPT_INLINE | INTERP_OPT_SUPER_INSTRUCTIONS,
INTERP_OPT_CPROP | INTERP_OPT_SUPER_INSTRUCTIONS,
INTERP_OPT_INLINE | INTERP_OPT_CPROP | INTERP_OPT_SUPER_INSTRUCTIONS | INTERP_OPT_BBLOCKS,
};
static const char* const
interp_opflags_names [] = {
"inline",
"cprop",
"super-insn",
"bblocks"
};
static const char*
interp_optflag_get_name (guint32 i)
{
g_assert (i < G_N_ELEMENTS (interp_opflags_names));
return interp_opflags_names [i];
}
static char*
interp_opt_descr (guint32 flags)
{
GString *str = g_string_new ("");
int i;
gboolean need_comma;
need_comma = FALSE;
for (i = 0; i < G_N_ELEMENTS (interp_opflags_names); ++i) {
if (flags & (1 << i) && interp_optflag_get_name (i)) {
if (need_comma)
g_string_append_c (str, ',');
g_string_append (str, interp_optflag_get_name (i));
need_comma = TRUE;
}
}
return g_string_free (str, FALSE);
}
typedef int (*TestMethod) (void);
#if 0
static void
domain_dump_native_code (MonoDomain *domain) {
// need to poke into the domain, move to metadata/domain.c
// need to empty jit_info_table and code_mp
}
#endif
static gboolean do_regression_retries;
static int regression_test_skip_index;
static gboolean
method_should_be_regression_tested (MonoMethod *method, gboolean interp)
{
ERROR_DECL (error);
if (strncmp (method->name, "test_", 5) != 0)
return FALSE;
static gboolean filter_method_init = FALSE;
static const char *filter_method = NULL;
if (!filter_method_init) {
filter_method = g_getenv ("REGRESSION_FILTER_METHOD");
filter_method_init = TRUE;
}
if (filter_method) {
const char *name = filter_method;
if ((strchr (name, '.') > name) || strchr (name, ':')) {
MonoMethodDesc *desc = mono_method_desc_new (name, TRUE);
gboolean res = mono_method_desc_full_match (desc, method);
mono_method_desc_free (desc);
return res;
} else {
return strcmp (method->name, name) == 0;
}
}
MonoCustomAttrInfo* ainfo = mono_custom_attrs_from_method_checked (method, error);
mono_error_cleanup (error);
if (!ainfo)
return TRUE;
int j;
for (j = 0; j < ainfo->num_attrs; ++j) {
MonoCustomAttrEntry *centry = &ainfo->attrs [j];
if (centry->ctor == NULL)
continue;
MonoClass *klass = centry->ctor->klass;
if (strcmp (m_class_get_name (klass), "CategoryAttribute") || mono_method_signature_internal (centry->ctor)->param_count != 1)
continue;
gpointer *typed_args, *named_args;
int num_named_args;
CattrNamedArg *arginfo;
mono_reflection_create_custom_attr_data_args_noalloc (
mono_defaults.corlib, centry->ctor, centry->data, centry->data_size,
&typed_args, &named_args, &num_named_args, &arginfo, error);
if (!is_ok (error))
continue;
const char *arg = (const char*)typed_args [0];
mono_metadata_decode_value (arg, &arg);
		char *utf8_str = (char*)arg; // this points into constant image memory
g_free (typed_args);
g_free (named_args);
g_free (arginfo);
if (interp && !strcmp (utf8_str, "!INTERPRETER")) {
g_print ("skip %s...\n", method->name);
return FALSE;
}
#if HOST_WASM
if (!strcmp (utf8_str, "!WASM")) {
g_print ("skip %s...\n", method->name);
return FALSE;
}
#endif
if (mono_aot_mode == MONO_AOT_MODE_FULL && !strcmp (utf8_str, "!FULLAOT")) {
g_print ("skip %s...\n", method->name);
return FALSE;
}
if ((mono_aot_mode == MONO_AOT_MODE_INTERP_LLVMONLY || mono_aot_mode == MONO_AOT_MODE_LLVMONLY) && !strcmp (utf8_str, "!BITCODE")) {
g_print ("skip %s...\n", method->name);
return FALSE;
}
}
return TRUE;
}
static void
mini_regression_step (MonoImage *image, int verbose, int *total_run, int *total,
guint32 opt_flags, GTimer *timer)
{
int result, expected, failed, cfailed, run, code_size;
double elapsed, comp_time, start_time;
char *n;
int i;
mono_set_defaults (verbose, opt_flags);
n = mono_opt_descr (opt_flags);
g_print ("Test run: image=%s, opts=%s\n", mono_image_get_filename (image), n);
g_free (n);
cfailed = failed = run = code_size = 0;
comp_time = elapsed = 0.0;
int local_skip_index = 0;
MonoJitMemoryManager *jit_mm = get_default_jit_mm ();
g_hash_table_destroy (jit_mm->jit_trampoline_hash);
jit_mm->jit_trampoline_hash = g_hash_table_new (mono_aligned_addr_hash, NULL);
mono_internal_hash_table_destroy (&(jit_mm->jit_code_hash));
mono_jit_code_hash_init (&(jit_mm->jit_code_hash));
g_timer_start (timer);
if (mini_stats_fd)
fprintf (mini_stats_fd, "[");
for (i = 0; i < mono_image_get_table_rows (image, MONO_TABLE_METHOD); ++i) {
ERROR_DECL (error);
MonoMethod *method = mono_get_method_checked (image, MONO_TOKEN_METHOD_DEF | (i + 1), NULL, NULL, error);
if (!method) {
mono_error_cleanup (error); /* FIXME don't swallow the error */
continue;
}
if (method_should_be_regression_tested (method, FALSE)) {
MonoCompile *cfg = NULL;
TestMethod func = NULL;
expected = atoi (method->name + 5);
run++;
start_time = g_timer_elapsed (timer, NULL);
#ifdef DISABLE_JIT
#ifdef MONO_USE_AOT_COMPILER
ERROR_DECL (error);
func = (TestMethod)mono_aot_get_method (method, error);
mono_error_cleanup (error);
#else
g_error ("No JIT or AOT available, regression testing not possible!");
#endif
#else
comp_time -= start_time;
cfg = mini_method_compile (method, mono_get_optimizations_for_method (method, opt_flags), JIT_FLAG_RUN_CCTORS, 0, -1);
comp_time += g_timer_elapsed (timer, NULL);
if (cfg->exception_type == MONO_EXCEPTION_NONE) {
#ifdef MONO_USE_AOT_COMPILER
ERROR_DECL (error);
func = (TestMethod)mono_aot_get_method (method, error);
mono_error_cleanup (error);
if (!func) {
func = (TestMethod)MINI_ADDR_TO_FTNPTR (cfg->native_code);
}
#else
func = (TestMethod)(gpointer)cfg->native_code;
func = MINI_ADDR_TO_FTNPTR (func);
#endif
func = (TestMethod)mono_create_ftnptr ((gpointer)func);
}
#endif
if (func) {
if (do_regression_retries) {
++local_skip_index;
					if (local_skip_index <= regression_test_skip_index)
continue;
++regression_test_skip_index;
}
if (verbose >= 2)
g_print ("Running '%s' ...\n", method->name);
#if HOST_WASM
//WASM AOT injects dummy args and we must call with exact signatures
int (*func_2)(int) = (int (*)(int))(void*)func;
result = func_2 (-1);
#else
result = func ();
#endif
if (result != expected) {
failed++;
g_print ("Test '%s' failed result (got %d, expected %d).\n", method->name, result, expected);
}
if (cfg) {
code_size += cfg->code_len;
mono_destroy_compile (cfg);
}
} else {
cfailed++;
g_print ("Test '%s' failed compilation.\n", method->name);
}
if (mini_stats_fd)
fprintf (mini_stats_fd, "%f, ",
g_timer_elapsed (timer, NULL) - start_time);
}
}
if (mini_stats_fd)
fprintf (mini_stats_fd, "],\n");
g_timer_stop (timer);
elapsed = g_timer_elapsed (timer, NULL);
if (failed > 0 || cfailed > 0){
g_print ("Results: total tests: %d, failed: %d, cfailed: %d (pass: %.2f%%)\n",
run, failed, cfailed, 100.0*(run-failed-cfailed)/run);
} else {
g_print ("Results: total tests: %d, all pass \n", run);
}
g_print ("Elapsed time: %f secs (%f, %f), Code size: %d\n\n", elapsed,
elapsed - comp_time, comp_time, code_size);
*total += failed + cfailed;
*total_run += run;
}
static int
mini_regression (MonoImage *image, int verbose, int *total_run)
{
guint32 i, opt;
MonoMethod *method;
char *n;
GTimer *timer = g_timer_new ();
guint32 exclude = 0;
int total;
/* Note: mono_hwcap_init () called in mono_init () before we get here. */
mono_arch_cpu_optimizations (&exclude);
if (mini_stats_fd) {
fprintf (mini_stats_fd, "$stattitle = \'Mono Benchmark Results (various optimizations)\';\n");
fprintf (mini_stats_fd, "$graph->set_legend(qw(");
for (opt = 0; opt < G_N_ELEMENTS (opt_sets); opt++) {
guint32 opt_flags = opt_sets [opt];
n = mono_opt_descr (opt_flags);
if (!n [0])
n = (char *)"none";
if (opt)
fprintf (mini_stats_fd, " ");
fprintf (mini_stats_fd, "%s", n);
}
fprintf (mini_stats_fd, "));\n");
fprintf (mini_stats_fd, "@data = (\n");
fprintf (mini_stats_fd, "[");
}
/* load the metadata */
for (i = 0; i < mono_image_get_table_rows (image, MONO_TABLE_METHOD); ++i) {
ERROR_DECL (error);
method = mono_get_method_checked (image, MONO_TOKEN_METHOD_DEF | (i + 1), NULL, NULL, error);
if (!method) {
mono_error_cleanup (error);
continue;
}
mono_class_init_internal (method->klass);
if (!strncmp (method->name, "test_", 5) && mini_stats_fd) {
fprintf (mini_stats_fd, "\"%s\",", method->name);
}
}
if (mini_stats_fd)
fprintf (mini_stats_fd, "],\n");
total = 0;
*total_run = 0;
if (mono_do_single_method_regression) {
GSList *iter;
mini_regression_step (image, verbose, total_run, &total,
0, timer);
if (total)
return total;
g_print ("Single method regression: %d methods\n", g_slist_length (mono_single_method_list));
for (iter = mono_single_method_list; iter; iter = g_slist_next (iter)) {
char *method_name;
mono_current_single_method = (MonoMethod *)iter->data;
method_name = mono_method_full_name (mono_current_single_method, TRUE);
g_print ("Current single method: %s\n", method_name);
g_free (method_name);
mini_regression_step (image, verbose, total_run, &total,
0, timer);
if (total)
return total;
}
} else {
for (opt = 0; opt < G_N_ELEMENTS (opt_sets); ++opt) {
/* builtin-types.cs & aot-tests.cs need OPT_INTRINS enabled */
if (!strcmp ("builtin-types", image->assembly_name) || !strcmp ("aot-tests", image->assembly_name))
if (!(opt_sets [opt] & MONO_OPT_INTRINS))
continue;
			// when running in AOT-only mode, it makes no sense to try multiple flag combinations
if ((mono_aot_mode == MONO_AOT_MODE_FULL || mono_aot_mode == MONO_AOT_MODE_LLVMONLY) && opt_sets [opt] != DEFAULT_OPTIMIZATIONS) {
continue;
}
mini_regression_step (image, verbose, total_run, &total,
opt_sets [opt] & ~exclude, timer);
}
}
if (mini_stats_fd) {
fprintf (mini_stats_fd, ");\n");
fflush (mini_stats_fd);
}
g_timer_destroy (timer);
return total;
}
static int
mini_regression_list (int verbose, int count, char *images [])
{
int i, total, total_run, run;
MonoAssembly *ass;
total_run = total = 0;
for (i = 0; i < count; ++i) {
MonoAssemblyOpenRequest req;
mono_assembly_request_prepare_open (&req, mono_alc_get_default ());
ass = mono_assembly_request_open (images [i], &req, NULL);
if (!ass) {
g_warning ("failed to load assembly: %s", images [i]);
continue;
}
total += mini_regression (mono_assembly_get_image_internal (ass), verbose, &run);
total_run += run;
}
if (total > 0){
g_print ("Overall results: tests: %d, failed: %d, opt combinations: %d (pass: %.2f%%)\n",
total_run, total, (int)G_N_ELEMENTS (opt_sets), 100.0*(total_run-total)/total_run);
} else {
g_print ("Overall results: tests: %d, 100%% pass, opt combinations: %d\n",
total_run, (int)G_N_ELEMENTS (opt_sets));
}
return total;
}
static void
interp_regression_step (MonoImage *image, int verbose, int *total_run, int *total, const guint32 *opt_flags, GTimer *timer)
{
int result, expected, failed, cfailed, run;
double elapsed, transform_time;
int i;
MonoObject *result_obj;
int local_skip_index = 0;
const char *n = NULL;
if (opt_flags) {
mini_get_interp_callbacks ()->set_optimizations (*opt_flags);
n = interp_opt_descr (*opt_flags);
} else {
n = mono_interp_opts_string;
}
g_print ("Test run: image=%s, opts=%s\n", mono_image_get_filename (image), n);
cfailed = failed = run = 0;
transform_time = elapsed = 0.0;
mini_get_interp_callbacks ()->invalidate_transformed ();
g_timer_start (timer);
for (i = 0; i < mono_image_get_table_rows (image, MONO_TABLE_METHOD); ++i) {
ERROR_DECL (error);
MonoMethod *method = mono_get_method_checked (image, MONO_TOKEN_METHOD_DEF | (i + 1), NULL, NULL, error);
if (!method) {
mono_error_cleanup (error); /* FIXME don't swallow the error */
continue;
}
if (method_should_be_regression_tested (method, TRUE)) {
ERROR_DECL (interp_error);
MonoObject *exc = NULL;
if (do_regression_retries) {
++local_skip_index;
				if (local_skip_index <= regression_test_skip_index)
continue;
++regression_test_skip_index;
}
result_obj = mini_get_interp_callbacks ()->runtime_invoke (method, NULL, NULL, &exc, interp_error);
if (!is_ok (interp_error)) {
cfailed++;
g_print ("Test '%s' execution failed.\n", method->name);
} else if (exc != NULL) {
g_print ("Exception in Test '%s' occurred:\n", method->name);
mono_object_describe (exc);
run++;
failed++;
} else {
result = *(gint32 *) mono_object_unbox_internal (result_obj);
expected = atoi (method->name + 5); // FIXME: oh no.
run++;
if (result != expected) {
failed++;
g_print ("Test '%s' failed result (got %d, expected %d).\n", method->name, result, expected);
}
}
}
}
g_timer_stop (timer);
elapsed = g_timer_elapsed (timer, NULL);
if (failed > 0 || cfailed > 0){
g_print ("Results: total tests: %d, failed: %d, cfailed: %d (pass: %.2f%%)\n",
run, failed, cfailed, 100.0*(run-failed-cfailed)/run);
} else {
g_print ("Results: total tests: %d, all pass \n", run);
}
g_print ("Elapsed time: %f secs (%f, %f)\n\n", elapsed,
elapsed - transform_time, transform_time);
*total += failed + cfailed;
*total_run += run;
}
static int
interp_regression (MonoImage *image, int verbose, int *total_run)
{
MonoMethod *method;
GTimer *timer = g_timer_new ();
guint32 i;
int total;
/* load the metadata */
for (i = 0; i < mono_image_get_table_rows (image, MONO_TABLE_METHOD); ++i) {
ERROR_DECL (error);
method = mono_get_method_checked (image, MONO_TOKEN_METHOD_DEF | (i + 1), NULL, NULL, error);
if (!method) {
mono_error_cleanup (error);
continue;
}
mono_class_init_internal (method->klass);
}
total = 0;
*total_run = 0;
if (mono_interp_opts_string) {
		/* explicit option requested */
interp_regression_step (image, verbose, total_run, &total, NULL, timer);
} else {
for (int opt = 0; opt < G_N_ELEMENTS (interp_opt_sets); ++opt)
interp_regression_step (image, verbose, total_run, &total, &interp_opt_sets [opt], timer);
}
g_timer_destroy (timer);
return total;
}
/* TODO: merge this code with the regression harness of the JIT */
static int
mono_interp_regression_list (int verbose, int count, char *images [])
{
int i, total, total_run, run;
total_run = total = 0;
for (i = 0; i < count; ++i) {
MonoAssemblyOpenRequest req;
mono_assembly_request_prepare_open (&req, mono_alc_get_default ());
MonoAssembly *ass = mono_assembly_request_open (images [i], &req, NULL);
if (!ass) {
g_warning ("failed to load assembly: %s", images [i]);
continue;
}
total += interp_regression (mono_assembly_get_image_internal (ass), verbose, &run);
total_run += run;
}
if (total > 0) {
g_print ("Overall results: tests: %d, failed: %d (pass: %.2f%%)\n", total_run, total, 100.0*(total_run-total)/total_run);
} else {
g_print ("Overall results: tests: %d, 100%% pass\n", total_run);
}
return total;
}
#ifdef MONO_JIT_INFO_TABLE_TEST
typedef struct _JitInfoData
{
guint start;
guint length;
MonoJitInfo *ji;
struct _JitInfoData *next;
} JitInfoData;
typedef struct
{
guint start;
guint length;
int num_datas;
JitInfoData *data;
} Region;
typedef struct
{
int num_datas;
int num_regions;
Region *regions;
int num_frees;
JitInfoData *frees;
} ThreadData;
static int num_threads;
static ThreadData *thread_datas;
static MonoDomain *test_domain;
static JitInfoData*
alloc_random_data (Region *region)
{
JitInfoData **data;
JitInfoData *prev;
guint prev_end;
guint next_start;
guint max_len;
JitInfoData *d;
int num_retries = 0;
int pos, i;
restart:
prev = NULL;
	data = &region->data;
pos = random () % (region->num_datas + 1);
i = 0;
while (*data != NULL) {
if (i++ == pos)
break;
prev = *data;
data = &(*data)->next;
}
if (prev == NULL)
g_assert (*data == region->data);
else
g_assert (prev->next == *data);
if (prev == NULL)
prev_end = region->start;
else
prev_end = prev->start + prev->length;
if (*data == NULL)
next_start = region->start + region->length;
else
next_start = (*data)->start;
g_assert (prev_end <= next_start);
max_len = next_start - prev_end;
if (max_len < 128) {
if (++num_retries >= 10)
return NULL;
goto restart;
}
if (max_len > 1024)
max_len = 1024;
d = g_new0 (JitInfoData, 1);
d->start = prev_end + random () % (max_len / 2);
d->length = random () % MIN (max_len, next_start - d->start) + 1;
g_assert (d->start >= prev_end && d->start + d->length <= next_start);
d->ji = g_new0 (MonoJitInfo, 1);
d->ji->d.method = (MonoMethod*) 0xABadBabe;
d->ji->code_start = (gpointer)(gulong) d->start;
d->ji->code_size = d->length;
d->ji->cas_inited = 1; /* marks an allocated jit info */
d->next = *data;
*data = d;
++region->num_datas;
return d;
}
static JitInfoData**
choose_random_data (Region *region)
{
int n;
int i;
JitInfoData **d;
g_assert (region->num_datas > 0);
n = random () % region->num_datas;
	for (d = &region->data, i = 0;
i < n;
d = &(*d)->next, ++i)
;
return d;
}
static Region*
choose_random_region (ThreadData *td)
{
return &td->regions [random () % td->num_regions];
}
static ThreadData*
choose_random_thread (void)
{
return &thread_datas [random () % num_threads];
}
static void
free_jit_info_data (ThreadData *td, JitInfoData *free)
{
free->next = td->frees;
td->frees = free;
if (++td->num_frees >= 1000) {
int i;
for (i = 0; i < 500; ++i)
free = free->next;
while (free->next != NULL) {
JitInfoData *next = free->next->next;
//g_free (free->next->ji);
g_free (free->next);
free->next = next;
--td->num_frees;
}
}
}
#define NUM_THREADS 8
#define REGIONS_PER_THREAD 10
#define REGION_SIZE 0x10000
#define MAX_ADDR (REGION_SIZE * REGIONS_PER_THREAD * NUM_THREADS)
#define MODE_ALLOC 1
#define MODE_FREE 2
static void
test_thread_func (gpointer void_arg)
{
ThreadData* td = (ThreadData*)void_arg;
int mode = MODE_ALLOC;
int i = 0;
gulong lookup_successes = 0, lookup_failures = 0;
int thread_num = (int)(td - thread_datas);
gboolean modify_thread = thread_num < NUM_THREADS / 2; /* only half of the threads modify the table */
for (;;) {
int alloc;
int lookup = 1;
if (td->num_datas == 0) {
lookup = 0;
alloc = 1;
} else if (modify_thread && random () % 1000 < 5) {
lookup = 0;
if (mode == MODE_ALLOC)
alloc = (random () % 100) < 70;
else if (mode == MODE_FREE)
alloc = (random () % 100) < 30;
}
if (lookup) {
/* modify threads sometimes look up their own jit infos */
if (modify_thread && random () % 10 < 5) {
Region *region = choose_random_region (td);
if (region->num_datas > 0) {
JitInfoData **data = choose_random_data (region);
guint pos = (*data)->start + random () % (*data)->length;
MonoJitInfo *ji;
ji = mono_jit_info_table_find_internal ((char*)(gsize)pos, TRUE, FALSE);
g_assert (ji->cas_inited);
g_assert ((*data)->ji == ji);
}
} else {
int pos = random () % MAX_ADDR;
char *addr = (char*)(uintptr_t)pos;
MonoJitInfo *ji;
ji = mono_jit_info_table_find_internal (addr, TRUE, FALSE);
/*
* FIXME: We are actually not allowed
* to do this. By the time we examine
* the ji another thread might already
* have removed it.
*/
if (ji != NULL) {
g_assert (addr >= (char*)ji->code_start && addr < (char*)ji->code_start + ji->code_size);
++lookup_successes;
} else
++lookup_failures;
}
} else if (alloc) {
JitInfoData *data = alloc_random_data (choose_random_region (td));
if (data != NULL) {
				mono_jit_info_table_add (test_domain, data->ji);
++td->num_datas;
}
} else {
Region *region = choose_random_region (td);
if (region->num_datas > 0) {
JitInfoData **data = choose_random_data (region);
JitInfoData *free;
				mono_jit_info_table_remove (test_domain, (*data)->ji);
//(*data)->ji->cas_inited = 0; /* marks a free jit info */
free = *data;
*data = (*data)->next;
free_jit_info_data (td, free);
--region->num_datas;
--td->num_datas;
}
}
if (++i % 100000 == 0) {
int j;
g_print ("num datas %d (%ld - %ld): %d", (int)(td - thread_datas),
lookup_successes, lookup_failures, td->num_datas);
for (j = 0; j < td->num_regions; ++j)
g_print (" %d", td->regions [j].num_datas);
g_print ("\n");
}
if (td->num_datas < 100)
mode = MODE_ALLOC;
else if (td->num_datas > 2000)
mode = MODE_FREE;
}
}
/*
static void
small_id_thread_func (gpointer arg)
{
MonoThread *thread = mono_thread_current ();
MonoThreadHazardPointers *hp = mono_hazard_pointer_get ();
g_print ("my small id is %d\n", (int)thread->small_id);
mono_hazard_pointer_clear (hp, 1);
sleep (3);
g_print ("done %d\n", (int)thread->small_id);
}
*/
static void
jit_info_table_test (MonoDomain *domain)
{
ERROR_DECL (error);
int i;
g_print ("testing jit_info_table\n");
num_threads = NUM_THREADS;
thread_datas = g_new0 (ThreadData, num_threads);
for (i = 0; i < num_threads; ++i) {
int j;
thread_datas [i].num_regions = REGIONS_PER_THREAD;
thread_datas [i].regions = g_new0 (Region, REGIONS_PER_THREAD);
for (j = 0; j < REGIONS_PER_THREAD; ++j) {
thread_datas [i].regions [j].start = (num_threads * j + i) * REGION_SIZE;
thread_datas [i].regions [j].length = REGION_SIZE;
}
}
test_domain = domain;
/*
for (i = 0; i < 72; ++i)
mono_thread_create (small_id_thread_func, NULL);
sleep (2);
*/
for (i = 0; i < num_threads; ++i) {
mono_thread_create_checked ((MonoThreadStart)test_thread_func, &thread_datas [i], error);
mono_error_assert_ok (error);
}
}
#endif
enum {
DO_BENCH,
DO_REGRESSION,
DO_SINGLE_METHOD_REGRESSION,
DO_COMPILE,
DO_EXEC,
DO_DRAW,
DO_DEBUGGER
};
typedef struct CompileAllThreadArgs {
MonoAssembly *ass;
int verbose;
guint32 opts;
guint32 recompilation_times;
} CompileAllThreadArgs;
static void
compile_all_methods_thread_main_inner (CompileAllThreadArgs *args)
{
MonoAssembly *ass = args->ass;
int verbose = args->verbose;
MonoImage *image = mono_assembly_get_image_internal (ass);
MonoMethod *method;
MonoCompile *cfg;
int i, count = 0, fail_count = 0;
for (i = 0; i < mono_image_get_table_rows (image, MONO_TABLE_METHOD); ++i) {
ERROR_DECL (error);
guint32 token = MONO_TOKEN_METHOD_DEF | (i + 1);
MonoMethodSignature *sig;
if (mono_metadata_has_generic_params (image, token))
continue;
method = mono_get_method_checked (image, token, NULL, NULL, error);
if (!method) {
mono_error_cleanup (error); /* FIXME don't swallow the error */
continue;
}
if ((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
(method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL) ||
(method->iflags & METHOD_IMPL_ATTRIBUTE_RUNTIME) ||
(method->flags & METHOD_ATTRIBUTE_ABSTRACT))
continue;
if (mono_class_is_gtd (method->klass))
continue;
sig = mono_method_signature_internal (method);
if (!sig) {
char * desc = mono_method_full_name (method, TRUE);
g_print ("Could not retrieve method signature for %s\n", desc);
g_free (desc);
fail_count ++;
continue;
}
if (sig->has_type_parameters)
continue;
count++;
if (verbose) {
char * desc = mono_method_full_name (method, TRUE);
g_print ("Compiling %d %s\n", count, desc);
g_free (desc);
}
if (mono_use_interpreter) {
mini_get_interp_callbacks ()->create_method_pointer (method, TRUE, error);
// FIXME There are a few failures due to DllNotFoundException related to System.Native
if (verbose && !is_ok (error))
g_print ("Compilation of %s failed\n", mono_method_full_name (method, TRUE));
} else {
cfg = mini_method_compile (method, mono_get_optimizations_for_method (method, args->opts), (JitFlags)JIT_FLAG_DISCARD_RESULTS, 0, -1);
if (cfg->exception_type != MONO_EXCEPTION_NONE) {
const char *msg = cfg->exception_message;
if (cfg->exception_type == MONO_EXCEPTION_MONO_ERROR)
msg = mono_error_get_message (cfg->error);
g_print ("Compilation of %s failed with exception '%s':\n", mono_method_full_name (cfg->method, TRUE), msg);
fail_count ++;
}
mono_destroy_compile (cfg);
}
}
if (fail_count)
exit (1);
}
static void
compile_all_methods_thread_main (gpointer void_args)
{
CompileAllThreadArgs *args = (CompileAllThreadArgs*)void_args;
guint32 i;
for (i = 0; i < args->recompilation_times; ++i)
compile_all_methods_thread_main_inner (args);
}
static void
compile_all_methods (MonoAssembly *ass, int verbose, guint32 opts, guint32 recompilation_times)
{
ERROR_DECL (error);
CompileAllThreadArgs args;
args.ass = ass;
args.verbose = verbose;
args.opts = opts;
args.recompilation_times = recompilation_times;
/*
* Need to create a mono thread since compilation might trigger
* running of managed code.
*/
mono_thread_create_checked ((MonoThreadStart)compile_all_methods_thread_main, &args, error);
mono_error_assert_ok (error);
mono_thread_manage_internal ();
}
/**
* mono_jit_exec:
* \param assembly reference to an assembly
* \param argc argument count
* \param argv argument vector
* Start execution of a program.
*/
int
mono_jit_exec (MonoDomain *domain, MonoAssembly *assembly, int argc, char *argv[])
{
int rv;
MONO_ENTER_GC_UNSAFE;
rv = mono_jit_exec_internal (domain, assembly, argc, argv);
MONO_EXIT_GC_UNSAFE;
return rv;
}
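/*
 * Illustrative embedding sketch, not part of the runtime proper: the file
 * name "app.exe" is a placeholder and error checking is omitted.
 *
 *   MonoDomain *domain = mono_jit_init ("app.exe");
 *   MonoAssembly *assembly = mono_domain_assembly_open (domain, "app.exe");
 *   int exit_code = mono_jit_exec (domain, assembly, argc - 1, argv + 1);
 *   mono_jit_cleanup (domain);
 */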
int
mono_jit_exec_internal (MonoDomain *domain, MonoAssembly *assembly, int argc, char *argv[])
{
MONO_REQ_GC_UNSAFE_MODE;
ERROR_DECL (error);
MonoImage *image = mono_assembly_get_image_internal (assembly);
// We need to ensure that any module cctor for this image
// is run *before* we invoke the entry point
// For more information, see https://blogs.msdn.microsoft.com/junfeng/2005/11/19/module-initializer-a-k-a-module-constructor/
//
// This is required in order for tools like Costura
// (https://github.com/Fody/Costura) to work properly, as they inject
// a module initializer which sets up event handlers (e.g. AssemblyResolve)
// that allow the main method to run properly
if (!mono_runtime_run_module_cctor(image, error)) {
g_print ("Failed to run module constructor due to %s\n", mono_error_get_message (error));
return 1;
}
MonoMethod *method;
guint32 entry = mono_image_get_entry_point (image);
if (!entry) {
g_print ("Assembly '%s' doesn't have an entry point.\n", mono_image_get_filename (image));
/* FIXME: remove this silly requirement. */
mono_environment_exitcode_set (1);
return 1;
}
method = mono_get_method_checked (image, entry, NULL, NULL, error);
if (method == NULL){
g_print ("The entry point method could not be loaded due to %s\n", mono_error_get_message (error));
mono_error_cleanup (error);
mono_environment_exitcode_set (1);
return 1;
}
if (mono_llvm_only) {
MonoObject *exc = NULL;
int res;
res = mono_runtime_try_run_main (method, argc, argv, &exc);
if (exc) {
mono_unhandled_exception_internal (exc);
mono_invoke_unhandled_exception_hook (exc);
g_assert_not_reached ();
}
return res;
} else {
int res = mono_runtime_run_main_checked (method, argc, argv, error);
if (!is_ok (error)) {
MonoException *ex = mono_error_convert_to_exception (error);
if (ex) {
mono_unhandled_exception_internal (&ex->object);
mono_invoke_unhandled_exception_hook (&ex->object);
g_assert_not_reached ();
}
}
return res;
}
}
typedef struct
{
MonoDomain *domain;
const char *file;
int argc;
char **argv;
guint32 opts;
char *aot_options;
} MainThreadArgs;
static void main_thread_handler (gpointer user_data)
{
MainThreadArgs *main_args = (MainThreadArgs *)user_data;
MonoAssembly *assembly;
if (mono_compile_aot) {
int i, res;
gpointer *aot_state = NULL;
/* Treat the other arguments as assemblies to compile too */
for (i = 0; i < main_args->argc; ++i) {
assembly = mono_domain_assembly_open_internal (mono_alc_get_default (), main_args->argv [i]);
if (!assembly) {
fprintf (stderr, "Can not open image %s\n", main_args->argv [i]);
exit (1);
}
/* Check that the assembly loaded matches the filename */
{
MonoImageOpenStatus status;
MonoImage *img;
img = mono_image_open (main_args->argv [i], &status);
if (img && strcmp (img->name, assembly->image->name)) {
fprintf (stderr, "Error: Loaded assembly '%s' doesn't match original file name '%s'. Set MONO_PATH to the assembly's location.\n", assembly->image->name, img->name);
exit (1);
}
}
res = mono_compile_assembly (assembly, main_args->opts, main_args->aot_options, &aot_state);
if (res != 0) {
fprintf (stderr, "AOT of image %s failed.\n", main_args->argv [i]);
exit (1);
}
}
if (aot_state) {
res = mono_compile_deferred_assemblies (main_args->opts, main_args->aot_options, &aot_state);
if (res != 0) {
fprintf (stderr, "AOT of mode-specific deferred assemblies failed.\n");
exit (1);
}
}
} else {
assembly = mono_domain_assembly_open_internal (mono_alc_get_default (), main_args->file);
if (!assembly){
fprintf (stderr, "Can not open image %s\n", main_args->file);
exit (1);
}
/*
* This must be done in a thread managed by mono since it can invoke
* managed code.
*/
if (main_args->opts & MONO_OPT_PRECOMP)
mono_precompile_assemblies ();
mono_jit_exec (main_args->domain, assembly, main_args->argc, main_args->argv);
}
}
static int
load_agent (MonoDomain *domain, char *desc)
{
ERROR_DECL (error);
char* col = strchr (desc, ':');
char *agent, *args;
MonoAssembly *agent_assembly;
MonoImage *image;
MonoMethod *method;
guint32 entry;
MonoArray *main_args;
gpointer pa [1];
MonoImageOpenStatus open_status;
if (col) {
agent = (char *)g_memdup (desc, col - desc + 1);
agent [col - desc] = '\0';
args = col + 1;
} else {
agent = g_strdup (desc);
args = NULL;
}
MonoAssemblyOpenRequest req;
mono_assembly_request_prepare_open (&req, mono_alc_get_default ());
agent_assembly = mono_assembly_request_open (agent, &req, &open_status);
if (!agent_assembly) {
fprintf (stderr, "Cannot open agent assembly '%s': %s.\n", agent, mono_image_strerror (open_status));
g_free (agent);
return 2;
}
/*
* Can't use mono_jit_exec (), as it sets things which might confuse the
* real Main method.
*/
image = mono_assembly_get_image_internal (agent_assembly);
entry = mono_image_get_entry_point (image);
if (!entry) {
g_print ("Assembly '%s' doesn't have an entry point.\n", mono_image_get_filename (image));
g_free (agent);
return 1;
}
method = mono_get_method_checked (image, entry, NULL, NULL, error);
if (method == NULL){
g_print ("The entry point method of assembly '%s' could not be loaded due to %s\n", agent, mono_error_get_message (error));
mono_error_cleanup (error);
g_free (agent);
return 1;
}
mono_thread_set_main (mono_thread_current ());
if (args) {
main_args = (MonoArray*)mono_array_new_checked (mono_defaults.string_class, 1, error);
if (main_args) {
MonoString *str = mono_string_new_checked (args, error);
if (str)
mono_array_set_internal (main_args, MonoString*, 0, str);
}
} else {
main_args = (MonoArray*)mono_array_new_checked (mono_defaults.string_class, 0, error);
}
if (!main_args) {
g_print ("Could not allocate array for main args of assembly '%s' due to %s\n", agent, mono_error_get_message (error));
mono_error_cleanup (error);
g_free (agent);
return 1;
}
pa [0] = main_args;
/* Pass NULL as 'exc' so unhandled exceptions abort the runtime */
mono_runtime_invoke_checked (method, NULL, pa, error);
if (!is_ok (error)) {
g_print ("The entry point method of assembly '%s' could not execute due to %s\n", agent, mono_error_get_message (error));
mono_error_cleanup (error);
g_free (agent);
return 1;
}
g_free (agent);
return 0;
}
static void
mini_usage_jitdeveloper (void)
{
int i;
fprintf (stdout,
"Runtime and JIT debugging options:\n"
" --apply-bindings=FILE Apply assembly bindings from FILE (only for AOT)\n"
" --breakonex Inserts a breakpoint on exceptions\n"
" --break METHOD Inserts a breakpoint at METHOD entry\n"
" --break-at-bb METHOD N Inserts a breakpoint in METHOD at BB N\n"
" --compile METHOD Just compile METHOD in assembly\n"
" --compile-all=N Compiles all the methods in the assembly multiple times (default: 1)\n"
" --ncompile N Number of times to compile METHOD (default: 1)\n"
" --print-vtable Print the vtable of all used classes\n"
" --regression Runs the regression test contained in the assembly\n"
" --single-method=OPTS Runs regressions with only one method optimized with OPTS at any time\n"
" --statfile FILE Sets the stat file to FILE\n"
" --stats Print statistics about the JIT operations\n"
" --inject-async-exc METHOD OFFSET Inject an asynchronous exception at METHOD\n"
" --verify-all Run the verifier on all assemblies and methods\n"
" --full-aot Avoid JITting any code\n"
" --llvmonly Use LLVM compiled code only\n"
" --agent=ASSEMBLY[:ARG] Loads the specific agent assembly and executes its Main method with the given argument before loading the main assembly.\n"
" --no-x86-stack-align Don't align stack on x86\n"
"\n"
"The options supported by MONO_DEBUG can also be passed on the command line.\n"
"\n"
"Other options:\n"
" --graph[=TYPE] METHOD Draws a graph of the specified method:\n");
for (i = 0; i < G_N_ELEMENTS (graph_names); ++i) {
fprintf (stdout, " %-10s %s\n", graph_names [i].name, graph_names [i].desc);
}
}
static void
mini_usage_list_opt (void)
{
int i;
for (i = 0; i < G_N_ELEMENTS (opt_names); ++i)
fprintf (stdout, " %-10s %s\n", optflag_get_name (i), optflag_get_desc (i));
}
static void
mini_usage (void)
{
fprintf (stdout,
"Usage is: mono [options] program [program-options]\n"
"\n"
"Development:\n"
" --aot[=<options>] Compiles the assembly to native code\n"
" --debug=ignore Disable debugging support (on by default)\n"
" --debug=[<options>] Disable debugging support or enable debugging extras, use --help-debug for details\n"
" --debugger-agent=options Enable the debugger agent\n"
" --profile[=profiler] Runs in profiling mode with the specified profiler module\n"
" --trace[=EXPR] Enable tracing, use --help-trace for details\n"
#ifdef __linux__
" --jitmap Output a jit method map to /tmp/perf-PID.map\n"
#endif
#ifdef ENABLE_JIT_DUMP
" --jitdump Output a jitdump file to /tmp/jit-PID.dump\n"
#endif
" --help-devel Shows more options available to developers\n"
"\n"
"Runtime:\n"
" --config FILE Loads FILE as the Mono config\n"
" --verbose, -v Increases the verbosity level\n"
" --help, -h Show usage information\n"
" --version, -V Show version information\n"
" --version=number Show version number\n"
" --runtime=VERSION Use the VERSION runtime, instead of autodetecting\n"
" --optimize=OPT Turns on or off a specific optimization\n"
" Use --list-opt to get a list of optimizations\n"
" --attach=OPTIONS Pass OPTIONS to the attach agent in the runtime.\n"
" Currently the only supported option is 'disable'.\n"
" --llvm, --nollvm Controls whenever the runtime uses LLVM to compile code.\n"
" --gc=[sgen,boehm] Select SGen or Boehm GC (runs mono or mono-sgen)\n"
#ifdef TARGET_OSX
" --arch=[32,64] Select architecture (runs mono32 or mono64)\n"
#endif
#ifdef HOST_WIN32
" --mixed-mode Enable mixed-mode image support.\n"
#endif
" --handlers Install custom handlers, use --help-handlers for details.\n"
" --aot-path=PATH List of additional directories to search for AOT images.\n"
);
g_print ("\nOptions:\n");
mono_options_print_usage ();
}
static void
mini_trace_usage (void)
{
fprintf (stdout,
"Tracing options:\n"
" --trace[=EXPR] Trace every call, optional EXPR controls the scope\n"
"\n"
"EXPR is composed of:\n"
" all All assemblies\n"
" none No assemblies\n"
" program Entry point assembly\n"
" assembly Specifies an assembly\n"
" wrapper All wrappers bridging native and managed code\n"
" M:Type:Method Specifies a method\n"
" N:Namespace Specifies a namespace\n"
" T:Type Specifies a type\n"
" E:Type Specifies stack traces for an exception type\n"
" EXPR Includes expression\n"
" -EXPR Excludes expression\n"
" EXPR,EXPR Multiple expressions\n"
" disabled Don't print any output until toggled via SIGUSR2\n");
}
static void
mini_debug_usage (void)
{
fprintf (stdout,
"Debugging options:\n"
" --debug[=OPTIONS] Disable debugging support or enable debugging extras, optional OPTIONS is a comma\n"
" separated list of options\n"
"\n"
"OPTIONS is composed of:\n"
" ignore Disable debugging support (on by default).\n"
" casts Enable more detailed InvalidCastException messages.\n"
" mdb-optimizations Disable some JIT optimizations which are normally\n"
" disabled when running inside the debugger.\n"
" This is useful if you plan to attach to the running\n"
" process with the debugger.\n");
}
#if defined(MONO_ARCH_ARCHITECTURE)
/* Redefine MONO_ARCHITECTURE to include more information */
#undef MONO_ARCHITECTURE
#define MONO_ARCHITECTURE MONO_ARCH_ARCHITECTURE
#endif
static char *
mono_get_version_info (void)
{
GString *output;
output = g_string_new ("");
#ifdef MONO_KEYWORD_THREAD
g_string_append_printf (output, "\tTLS: __thread\n");
#else
g_string_append_printf (output, "\tTLS: \n");
#endif /* MONO_KEYWORD_THREAD */
#ifdef MONO_ARCH_SIGSEGV_ON_ALTSTACK
g_string_append_printf (output, "\tSIGSEGV: altstack\n");
#else
g_string_append_printf (output, "\tSIGSEGV: normal\n");
#endif
#ifdef HAVE_EPOLL
g_string_append_printf (output, "\tNotifications: epoll\n");
#elif defined(HAVE_KQUEUE)
g_string_append_printf (output, "\tNotification: kqueue\n");
#else
g_string_append_printf (output, "\tNotification: Thread + polling\n");
#endif
g_string_append_printf (output, "\tArchitecture: %s\n", MONO_ARCHITECTURE);
g_string_append_printf (output, "\tDisabled: %s\n", DISABLED_FEATURES);
g_string_append_printf (output, "\tMisc: ");
#ifdef MONO_SMALL_CONFIG
g_string_append_printf (output, "smallconfig ");
#endif
#ifdef MONO_BIG_ARRAYS
g_string_append_printf (output, "bigarrays ");
#endif
#if !defined(DISABLE_SDB)
g_string_append_printf (output, "softdebug ");
#endif
g_string_append_printf (output, "\n");
#ifndef DISABLE_INTERPRETER
g_string_append_printf (output, "\tInterpreter: yes\n");
#else
g_string_append_printf (output, "\tInterpreter: no\n");
#endif
#ifdef MONO_ARCH_LLVM_SUPPORTED
#ifdef ENABLE_LLVM
g_string_append_printf (output, "\tLLVM: yes(%d)\n", LLVM_API_VERSION);
#else
g_string_append_printf (output, "\tLLVM: supported, not enabled.\n");
#endif
#endif
mono_threads_suspend_policy_init ();
g_string_append_printf (output, "\tSuspend: %s\n", mono_threads_suspend_policy_name (mono_threads_suspend_policy ()));
return g_string_free (output, FALSE);
}
#ifndef MONO_ARCH_AOT_SUPPORTED
#define error_if_aot_unsupported() do {fprintf (stderr, "AOT compilation is not supported on this platform.\n"); exit (1);} while (0)
#else
#define error_if_aot_unsupported()
#endif
static gboolean enable_debugging;
static void
enable_runtime_stats (void)
{
mono_counters_enable (-1);
mono_atomic_store_bool (&mono_stats.enabled, TRUE);
mono_atomic_store_bool (&mono_jit_stats.enabled, TRUE);
}
static MonoMethodDesc *
parse_qualified_method_name (char *method_name)
{
if (strlen (method_name) == 0) {
g_printerr ("Couldn't parse empty method name.");
exit (1);
}
MonoMethodDesc *result = mono_method_desc_new (method_name, TRUE);
if (!result) {
g_printerr ("Couldn't parse method name: %s\n", method_name);
exit (1);
}
return result;
}
/**
* mono_jit_parse_options:
*
* Process the command line options in \p argv as done by the runtime executable.
* This should be called before \c mono_jit_init.
*/
void
mono_jit_parse_options (int argc, char * argv[])
{
int i;
char *trace_options = NULL;
int mini_verbose_level = 0;
guint32 opt;
/*
* Some options have no effect here, since they influence the behavior of
* mono_main ().
*/
opt = mono_parse_default_optimizations (NULL);
/* FIXME: Avoid code duplication */
for (i = 0; i < argc; ++i) {
if (argv [i] [0] != '-')
break;
if (strncmp (argv [i], "--debugger-agent=", 17) == 0) {
MonoDebugOptions *opt = mini_get_debug_options ();
mono_debugger_agent_parse_options (g_strdup (argv [i] + 17));
opt->mdb_optimizations = TRUE;
enable_debugging = TRUE;
} else if (!strcmp (argv [i], "--soft-breakpoints")) {
MonoDebugOptions *opt = mini_get_debug_options ();
opt->soft_breakpoints = TRUE;
opt->explicit_null_checks = TRUE;
} else if (strncmp (argv [i], "--optimize=", 11) == 0) {
opt = parse_optimizations (opt, argv [i] + 11, TRUE);
mono_set_optimizations (opt);
} else if (strncmp (argv [i], "-O=", 3) == 0) {
opt = parse_optimizations (opt, argv [i] + 3, TRUE);
mono_set_optimizations (opt);
} else if (strcmp (argv [i], "--trace") == 0) {
trace_options = (char*)"";
} else if (strncmp (argv [i], "--trace=", 8) == 0) {
trace_options = &argv [i][8];
} else if (strcmp (argv [i], "--verbose") == 0 || strcmp (argv [i], "-v") == 0) {
mini_verbose_level++;
} else if (strcmp (argv [i], "--breakonex") == 0) {
MonoDebugOptions *opt = mini_get_debug_options ();
opt->break_on_exc = TRUE;
} else if (strcmp (argv [i], "--stats") == 0) {
enable_runtime_stats ();
} else if (strncmp (argv [i], "--stats=", 8) == 0) {
enable_runtime_stats ();
if (mono_stats_method_desc)
g_free (mono_stats_method_desc);
mono_stats_method_desc = parse_qualified_method_name (argv [i] + 8);
} else if (strcmp (argv [i], "--break") == 0) {
if (i+1 >= argc){
fprintf (stderr, "Missing method name in --break command line option\n");
exit (1);
}
if (!mono_debugger_insert_breakpoint (argv [++i], FALSE))
fprintf (stderr, "Error: invalid method name '%s'\n", argv [i]);
} else if (strncmp (argv[i], "--gc-params=", 12) == 0) {
mono_gc_params_set (argv[i] + 12);
} else if (strncmp (argv[i], "--gc-debug=", 11) == 0) {
mono_gc_debug_set (argv[i] + 11);
} else if (strcmp (argv [i], "--llvm") == 0) {
#ifndef MONO_ARCH_LLVM_SUPPORTED
fprintf (stderr, "Mono Warning: --llvm not supported on this platform.\n");
#elif !defined(ENABLE_LLVM)
fprintf (stderr, "Mono Warning: --llvm not enabled in this runtime.\n");
#else
mono_use_llvm = TRUE;
#endif
} else if (strcmp (argv [i], "--profile") == 0) {
mini_add_profiler_argument (NULL);
} else if (strncmp (argv [i], "--profile=", 10) == 0) {
mini_add_profiler_argument (argv [i] + 10);
} else if (argv [i][0] == '-' && argv [i][1] == '-' && mini_parse_debug_option (argv [i] + 2)) {
} else {
fprintf (stderr, "Unsupported command line option: '%s'\n", argv [i]);
exit (1);
}
}
if (trace_options != NULL) {
/*
* Need to call this before mini_init () so we can trace methods
* compiled there too.
*/
mono_jit_trace_calls = mono_trace_set_options (trace_options);
if (mono_jit_trace_calls == NULL)
exit (1);
}
if (mini_verbose_level)
mono_set_verbose_level (mini_verbose_level);
}
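/*
 * Illustrative sketch of the intended call order; the option strings and
 * file name below are arbitrary examples, not prescribed values.
 *
 *   char *jit_argv [] = { (char*)"--trace=N:System", (char*)"-O=-inline" };
 *   mono_jit_parse_options (2, jit_argv);
 *   MonoDomain *domain = mono_jit_init ("app.exe");
 */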
static void
mono_set_use_smp (int use_smp)
{
#if HAVE_SCHED_SETAFFINITY
if (!use_smp) {
unsigned long proc_mask = 1;
#ifdef GLIBC_BEFORE_2_3_4_SCHED_SETAFFINITY
sched_setaffinity (getpid(), (gpointer)&proc_mask);
#else
sched_setaffinity (getpid(), sizeof (unsigned long), (const cpu_set_t *)&proc_mask);
#endif
}
#endif
}
static void
switch_gc (char* argv[], const char* target_gc)
{
GString *path;
if (!strcmp (mono_gc_get_gc_name (), target_gc)) {
return;
}
path = g_string_new (argv [0]);
/*Running mono without any argument*/
if (strstr (argv [0], "-sgen"))
g_string_truncate (path, path->len - 5);
else if (strstr (argv [0], "-boehm"))
g_string_truncate (path, path->len - 6);
g_string_append_c (path, '-');
g_string_append (path, target_gc);
#ifdef HAVE_EXECVP
execvp (path->str, argv);
fprintf (stderr, "Error: Failed to switch to %s gc. mono-%s is not installed.\n", target_gc, target_gc);
#else
fprintf (stderr, "Error: --gc=<NAME> option not supported on this platform.\n");
#endif
}
#ifdef TARGET_OSX
/*
* tries to increase the minimum number of files, if the number is below 1024
*/
static void
darwin_change_default_file_handles ()
{
struct rlimit limit;
if (getrlimit (RLIMIT_NOFILE, &limit) == 0){
if (limit.rlim_cur < 1024){
limit.rlim_cur = MAX(1024,limit.rlim_cur);
setrlimit (RLIMIT_NOFILE, &limit);
}
}
}
static void
switch_arch (char* argv[], const char* target_arch)
{
GString *path;
gsize arch_offset;
if ((strcmp (target_arch, "32") == 0 && strcmp (MONO_ARCHITECTURE, "x86") == 0) ||
(strcmp (target_arch, "64") == 0 && strcmp (MONO_ARCHITECTURE, "amd64") == 0)) {
return; /* matching arch loaded */
}
path = g_string_new (argv [0]);
arch_offset = path->len -2; /* last two characters */
/* Remove arch suffix if present */
if (strstr (&path->str[arch_offset], "32") || strstr (&path->str[arch_offset], "64")) {
g_string_truncate (path, arch_offset);
}
g_string_append (path, target_arch);
if (execvp (path->str, argv) < 0) {
fprintf (stderr, "Error: --arch=%s Failed to switch to '%s'.\n", target_arch, path->str);
exit (1);
}
}
#endif
#define MONO_HANDLERS_ARGUMENT "--handlers="
#define MONO_HANDLERS_ARGUMENT_LEN STRING_LENGTH(MONO_HANDLERS_ARGUMENT)
static void
apply_root_domain_configuration_file_bindings (MonoDomain *domain, char *root_domain_configuration_file)
{
g_assert_not_reached ();
}
static void
mono_check_interp_supported (void)
{
#ifdef MONO_CROSS_COMPILE
g_error ("--interpreter on cross-compile runtimes not supported\n");
#endif
#ifndef MONO_ARCH_INTERPRETER_SUPPORTED
g_error ("--interpreter not supported on this architecture.\n");
#endif
}
static int
mono_exec_regression_internal (int verbose_level, int count, char *images [], gboolean single_method)
{
mono_do_single_method_regression = single_method;
if (mono_use_interpreter) {
if (mono_interp_regression_list (verbose_level, count, images)) {
g_print ("Regression ERRORS!\n");
return 1;
}
return 0;
}
if (mini_regression_list (verbose_level, count, images)) {
g_print ("Regression ERRORS!\n");
return 1;
}
return 0;
}
/**
* Returns TRUE for success, FALSE for failure.
*/
gboolean
mono_regression_test_step (int verbose_level, const char *image, const char *method_name)
{
if (method_name) {
//TODO
} else {
do_regression_retries = TRUE;
}
char *images[] = {
(char*)image,
NULL
};
return mono_exec_regression_internal (verbose_level, 1, images, FALSE) == 0;
}
#ifdef ENABLE_ICALL_SYMBOL_MAP
/* Print the icall table as JSON */
static void
print_icall_table (void)
{
// We emit some dummy values to make the code simpler
printf ("[\n{ \"klass\": \"\", \"icalls\": [");
#define NOHANDLES(inner) inner
#define HANDLES(id, name, func, ...) printf ("\t,{ \"name\": \"%s\", \"func\": \"%s_raw\", \"handles\": true }\n", name, #func);
#define HANDLES_REUSE_WRAPPER HANDLES
#define MONO_HANDLE_REGISTER_ICALL(...) /* nothing */
#define ICALL_TYPE(id,name,first) printf ("]},\n { \"klass\":\"%s\", \"icalls\": [{} ", name);
#define ICALL(id,name,func) printf ("\t,{ \"name\": \"%s\", \"func\": \"%s\", \"handles\": false }\n", name, #func);
#include <mono/metadata/icall-def.h>
printf ("]}\n]\n");
}
#endif
/**
* mono_main:
* \param argc number of arguments in the argv array
* \param argv array of strings containing the startup arguments
* Launches the Mono JIT engine and parses all the command line options
* in the same way that the mono command line VM would.
*/
int
mono_main (int argc, char* argv[])
{
MainThreadArgs main_args;
MonoAssembly *assembly;
MonoMethodDesc *desc;
MonoMethod *method;
MonoDomain *domain;
MonoImageOpenStatus open_status;
const char* aname, *mname = NULL;
int i;
#ifndef DISABLE_JIT
int count = 1;
MonoGraphOptions mono_graph_options = (MonoGraphOptions)0;
#endif
guint32 opt, action = DO_EXEC, recompilation_times = 1;
int mini_verbose_level = 0;
char *trace_options = NULL;
char *aot_options = NULL;
char *forced_version = NULL;
GPtrArray *agents = NULL;
char *extra_bindings_config_file = NULL;
#ifdef MONO_JIT_INFO_TABLE_TEST
int test_jit_info_table = FALSE;
#endif
#ifdef HOST_WIN32
int mixed_mode = FALSE;
#endif
ERROR_DECL (error);
#ifdef MOONLIGHT
#ifndef HOST_WIN32
/* stdout defaults to block buffering if it's not writing to a terminal, which
* happens with our test harness: we redirect stdout to capture it. Force line
* buffering in all cases. */
setlinebuf (stdout);
#endif
#endif
setlocale (LC_ALL, "");
#if TARGET_OSX
darwin_change_default_file_handles ();
#endif
if (g_hasenv ("MONO_NO_SMP"))
mono_set_use_smp (FALSE);
#ifdef MONO_JEMALLOC_ENABLED
gboolean use_jemalloc = FALSE;
#ifdef MONO_JEMALLOC_DEFAULT
use_jemalloc = TRUE;
#endif
if (!use_jemalloc)
use_jemalloc = g_hasenv ("MONO_USE_JEMALLOC");
if (use_jemalloc)
mono_init_jemalloc ();
#endif
g_log_set_always_fatal (G_LOG_LEVEL_ERROR);
g_log_set_fatal_mask (G_LOG_DOMAIN, G_LOG_LEVEL_ERROR);
opt = mono_parse_default_optimizations (NULL);
enable_debugging = TRUE;
mono_options_parse_options ((const char**)argv + 1, argc - 1, &argc, error);
argc ++;
if (!is_ok (error)) {
g_printerr ("%s", mono_error_get_message (error));
mono_error_cleanup (error);
return 1;
}
for (i = 1; i < argc; ++i) {
if (argv [i] [0] != '-')
break;
if (strcmp (argv [i], "--regression") == 0) {
action = DO_REGRESSION;
} else if (strncmp (argv [i], "--single-method=", 16) == 0) {
char *full_opts = g_strdup_printf ("-all,%s", argv [i] + 16);
action = DO_SINGLE_METHOD_REGRESSION;
mono_single_method_regression_opt = parse_optimizations (opt, full_opts, TRUE);
g_free (full_opts);
} else if (strcmp (argv [i], "--verbose") == 0 || strcmp (argv [i], "-v") == 0) {
mini_verbose_level++;
} else if (strcmp (argv [i], "--version=number") == 0) {
g_print ("%s\n", VERSION);
return 0;
} else if (strcmp (argv [i], "--version") == 0 || strcmp (argv [i], "-V") == 0) {
char *build = mono_get_runtime_build_info ();
char *gc_descr;
g_print ("Mono JIT compiler version %s\nCopyright (C) Novell, Inc, Xamarin Inc and Contributors. www.mono-project.com\n", build);
g_free (build);
char *info = mono_get_version_info ();
			g_print ("%s", info);
g_free (info);
gc_descr = mono_gc_get_description ();
g_print ("\tGC: %s\n", gc_descr);
g_free (gc_descr);
return 0;
} else if (strcmp (argv [i], "--help") == 0 || strcmp (argv [i], "-h") == 0) {
mini_usage ();
return 0;
} else if (strcmp (argv [i], "--help-trace") == 0){
mini_trace_usage ();
return 0;
} else if (strcmp (argv [i], "--help-devel") == 0){
mini_usage_jitdeveloper ();
return 0;
} else if (strcmp (argv [i], "--help-debug") == 0){
mini_debug_usage ();
return 0;
} else if (strcmp (argv [i], "--list-opt") == 0){
mini_usage_list_opt ();
return 0;
} else if (strncmp (argv [i], "--statfile", 10) == 0) {
if (i + 1 >= argc){
fprintf (stderr, "error: --statfile requires a filename argument\n");
return 1;
}
mini_stats_fd = fopen (argv [++i], "w+");
} else if (strncmp (argv [i], "--optimize=", 11) == 0) {
opt = parse_optimizations (opt, argv [i] + 11, TRUE);
} else if (strncmp (argv [i], "-O=", 3) == 0) {
opt = parse_optimizations (opt, argv [i] + 3, TRUE);
} else if (strncmp (argv [i], "--bisect=", 9) == 0) {
char *param = argv [i] + 9;
char *sep = strchr (param, ':');
if (!sep) {
fprintf (stderr, "Error: --bisect requires OPT:FILENAME\n");
return 1;
}
char *opt_string = g_strndup (param, sep - param);
guint32 opt = parse_optimizations (0, opt_string, FALSE);
g_free (opt_string);
mono_set_bisect_methods (opt, sep + 1);
} else if (strcmp (argv [i], "--gc=sgen") == 0) {
switch_gc (argv, "sgen");
} else if (strcmp (argv [i], "--gc=boehm") == 0) {
switch_gc (argv, "boehm");
} else if (strncmp (argv[i], "--gc-params=", 12) == 0) {
mono_gc_params_set (argv[i] + 12);
} else if (strncmp (argv[i], "--gc-debug=", 11) == 0) {
mono_gc_debug_set (argv[i] + 11);
}
#ifdef TARGET_OSX
else if (strcmp (argv [i], "--arch=32") == 0) {
switch_arch (argv, "32");
} else if (strcmp (argv [i], "--arch=64") == 0) {
switch_arch (argv, "64");
}
#endif
else if (strcmp (argv [i], "--config") == 0) {
if (i +1 >= argc){
fprintf (stderr, "error: --config requires a filename argument\n");
return 1;
}
++i;
#ifdef HOST_WIN32
} else if (strcmp (argv [i], "--mixed-mode") == 0) {
mixed_mode = TRUE;
#endif
#ifndef DISABLE_JIT
} else if (strcmp (argv [i], "--ncompile") == 0) {
if (i + 1 >= argc){
fprintf (stderr, "error: --ncompile requires an argument\n");
return 1;
}
count = atoi (argv [++i]);
action = DO_BENCH;
#endif
} else if (strcmp (argv [i], "--trace") == 0) {
trace_options = (char*)"";
} else if (strncmp (argv [i], "--trace=", 8) == 0) {
trace_options = &argv [i][8];
} else if (strcmp (argv [i], "--breakonex") == 0) {
MonoDebugOptions *opt = mini_get_debug_options ();
opt->break_on_exc = TRUE;
} else if (strcmp (argv [i], "--break") == 0) {
if (i+1 >= argc){
fprintf (stderr, "Missing method name in --break command line option\n");
return 1;
}
if (!mono_debugger_insert_breakpoint (argv [++i], FALSE))
fprintf (stderr, "Error: invalid method name '%s'\n", argv [i]);
} else if (strcmp (argv [i], "--break-at-bb") == 0) {
if (i + 2 >= argc) {
fprintf (stderr, "Missing method name or bb num in --break-at-bb command line option.");
return 1;
}
mono_break_at_bb_method = mono_method_desc_new (argv [++i], TRUE);
if (mono_break_at_bb_method == NULL) {
fprintf (stderr, "Method name is in a bad format in --break-at-bb command line option.");
return 1;
}
mono_break_at_bb_bb_num = atoi (argv [++i]);
} else if (strcmp (argv [i], "--inject-async-exc") == 0) {
if (i + 2 >= argc) {
fprintf (stderr, "Missing method name or position in --inject-async-exc command line option\n");
return 1;
}
mono_inject_async_exc_method = mono_method_desc_new (argv [++i], TRUE);
if (mono_inject_async_exc_method == NULL) {
fprintf (stderr, "Method name is in a bad format in --inject-async-exc command line option\n");
return 1;
}
mono_inject_async_exc_pos = atoi (argv [++i]);
} else if (strcmp (argv [i], "--verify-all") == 0) {
g_warning ("--verify-all is obsolete, ignoring");
} else if (strcmp (argv [i], "--full-aot") == 0) {
mono_jit_set_aot_mode (MONO_AOT_MODE_FULL);
} else if (strcmp (argv [i], "--llvmonly") == 0) {
mono_jit_set_aot_mode (MONO_AOT_MODE_LLVMONLY);
} else if (strcmp (argv [i], "--hybrid-aot") == 0) {
mono_jit_set_aot_mode (MONO_AOT_MODE_HYBRID);
} else if (strcmp (argv [i], "--full-aot-interp") == 0) {
mono_jit_set_aot_mode (MONO_AOT_MODE_INTERP);
} else if (strcmp (argv [i], "--llvmonly-interp") == 0) {
mono_jit_set_aot_mode (MONO_AOT_MODE_LLVMONLY_INTERP);
} else if (strcmp (argv [i], "--print-vtable") == 0) {
mono_print_vtable = TRUE;
} else if (strcmp (argv [i], "--stats") == 0) {
enable_runtime_stats ();
} else if (strncmp (argv [i], "--stats=", 8) == 0) {
enable_runtime_stats ();
if (mono_stats_method_desc)
g_free (mono_stats_method_desc);
mono_stats_method_desc = parse_qualified_method_name (argv [i] + 8);
#ifndef DISABLE_AOT
} else if (strcmp (argv [i], "--aot") == 0) {
error_if_aot_unsupported ();
mono_compile_aot = TRUE;
} else if (strncmp (argv [i], "--aot=", 6) == 0) {
error_if_aot_unsupported ();
mono_compile_aot = TRUE;
if (aot_options) {
char *tmp = g_strdup_printf ("%s,%s", aot_options, &argv [i][6]);
g_free (aot_options);
aot_options = tmp;
} else {
aot_options = g_strdup (&argv [i][6]);
}
#endif
} else if (strncmp (argv [i], "--apply-bindings=", 17) == 0) {
extra_bindings_config_file = &argv[i][17];
} else if (strncmp (argv [i], "--aot-path=", 11) == 0) {
char **splitted;
splitted = g_strsplit (argv [i] + 11, G_SEARCHPATH_SEPARATOR_S, 1000);
while (*splitted) {
char *tmp = *splitted;
mono_aot_paths = g_list_append (mono_aot_paths, g_strdup (tmp));
g_free (tmp);
splitted++;
}
} else if (strncmp (argv [i], "--compile-all=", 14) == 0) {
action = DO_COMPILE;
recompilation_times = atoi (argv [i] + 14);
} else if (strcmp (argv [i], "--compile-all") == 0) {
action = DO_COMPILE;
} else if (strncmp (argv [i], "--runtime=", 10) == 0) {
forced_version = &argv [i][10];
} else if (strcmp (argv [i], "--jitmap") == 0) {
mono_enable_jit_map ();
#ifdef ENABLE_JIT_DUMP
} else if (strcmp (argv [i], "--jitdump") == 0) {
mono_enable_jit_dump ();
#endif
} else if (strcmp (argv [i], "--profile") == 0) {
mini_add_profiler_argument (NULL);
} else if (strncmp (argv [i], "--profile=", 10) == 0) {
mini_add_profiler_argument (argv [i] + 10);
} else if (strncmp (argv [i], "--agent=", 8) == 0) {
if (agents == NULL)
agents = g_ptr_array_new ();
g_ptr_array_add (agents, argv [i] + 8);
} else if (strncmp (argv [i], "--attach=", 9) == 0) {
g_warning ("--attach= option no longer supported.");
} else if (strcmp (argv [i], "--compile") == 0) {
if (i + 1 >= argc){
fprintf (stderr, "error: --compile option requires a method name argument\n");
return 1;
}
mname = argv [++i];
action = DO_BENCH;
#ifndef DISABLE_JIT
} else if (strncmp (argv [i], "--graph=", 8) == 0) {
if (i + 1 >= argc){
fprintf (stderr, "error: --graph option requires a method name argument\n");
return 1;
}
mono_graph_options = mono_parse_graph_options (argv [i] + 8);
mname = argv [++i];
action = DO_DRAW;
} else if (strcmp (argv [i], "--graph") == 0) {
if (i + 1 >= argc){
fprintf (stderr, "error: --graph option requires a method name argument\n");
return 1;
}
mname = argv [++i];
mono_graph_options = MONO_GRAPH_CFG;
action = DO_DRAW;
#endif
} else if (strcmp (argv [i], "--debug") == 0) {
enable_debugging = TRUE;
} else if (strncmp (argv [i], "--debug=", 8) == 0) {
enable_debugging = TRUE;
if (!parse_debug_options (argv [i] + 8))
return 1;
MonoDebugOptions *opt = mini_get_debug_options ();
if (!opt->enabled) {
enable_debugging = FALSE;
}
} else if (strncmp (argv [i], "--debugger-agent=", 17) == 0) {
MonoDebugOptions *opt = mini_get_debug_options ();
mono_debugger_agent_parse_options (g_strdup (argv [i] + 17));
opt->mdb_optimizations = TRUE;
enable_debugging = TRUE;
} else if (strcmp (argv [i], "--security") == 0) {
fprintf (stderr, "error: --security is obsolete.");
return 1;
} else if (strncmp (argv [i], "--security=", 11) == 0) {
if (strcmp (argv [i] + 11, "core-clr") == 0) {
fprintf (stderr, "error: --security=core-clr is obsolete.");
return 1;
} else if (strcmp (argv [i] + 11, "core-clr-test") == 0) {
fprintf (stderr, "error: --security=core-clr-test is obsolete.");
return 1;
} else if (strcmp (argv [i] + 11, "cas") == 0) {
fprintf (stderr, "error: --security=cas is obsolete.");
return 1;
} else if (strcmp (argv [i] + 11, "validil") == 0) {
fprintf (stderr, "error: --security=validil is obsolete.");
return 1;
} else if (strcmp (argv [i] + 11, "verifiable") == 0) {
fprintf (stderr, "error: --securty=verifiable is obsolete.");
return 1;
} else {
fprintf (stderr, "error: --security= option has invalid argument (cas, core-clr, verifiable or validil)\n");
return 1;
}
} else if (strcmp (argv [i], "--desktop") == 0) {
mono_gc_set_desktop_mode ();
/* Put more desktop-specific optimizations here */
} else if (strcmp (argv [i], "--server") == 0){
mono_config_set_server_mode (TRUE);
/* Put more server-specific optimizations here */
} else if (strcmp (argv [i], "--inside-mdb") == 0) {
action = DO_DEBUGGER;
} else if (strncmp (argv [i], "--wapi=", 7) == 0) {
fprintf (stderr, "--wapi= option no longer supported\n.");
return 1;
} else if (strcmp (argv [i], "--no-x86-stack-align") == 0) {
mono_do_x86_stack_align = FALSE;
#ifdef MONO_JIT_INFO_TABLE_TEST
} else if (strcmp (argv [i], "--test-jit-info-table") == 0) {
test_jit_info_table = TRUE;
#endif
} else if (strcmp (argv [i], "--llvm") == 0) {
#ifndef MONO_ARCH_LLVM_SUPPORTED
fprintf (stderr, "Mono Warning: --llvm not supported on this platform.\n");
#elif !defined(ENABLE_LLVM)
fprintf (stderr, "Mono Warning: --llvm not enabled in this runtime.\n");
#else
mono_use_llvm = TRUE;
#endif
} else if (strcmp (argv [i], "--nollvm") == 0){
mono_use_llvm = FALSE;
} else if (strcmp (argv [i], "--ffast-math") == 0){
mono_use_fast_math = TRUE;
} else if ((strcmp (argv [i], "--interpreter") == 0) || !strcmp (argv [i], "--interp")) {
mono_runtime_set_execution_mode (MONO_EE_MODE_INTERP);
} else if (strncmp (argv [i], "--interp=", 9) == 0) {
mono_runtime_set_execution_mode_full (MONO_EE_MODE_INTERP, FALSE);
mono_interp_opts_string = argv [i] + 9;
} else if (strcmp (argv [i], "--print-icall-table") == 0) {
#ifdef ENABLE_ICALL_SYMBOL_MAP
print_icall_table ();
exit (0);
#else
fprintf (stderr, "--print-icall-table requires a runtime configured with the --enable-icall-symbol-map option.\n");
exit (1);
#endif
} else if (strncmp (argv [i], "--assembly-loader=", strlen("--assembly-loader=")) == 0) {
gchar *arg = argv [i] + strlen ("--assembly-loader=");
if (strcmp (arg, "strict") == 0)
mono_loader_set_strict_assembly_name_check (TRUE);
else if (strcmp (arg, "legacy") == 0)
mono_loader_set_strict_assembly_name_check (FALSE);
else
fprintf (stderr, "Warning: unknown argument to --assembly-loader. Should be \"strict\" or \"legacy\"\n");
} else if (strncmp (argv [i], MONO_HANDLERS_ARGUMENT, MONO_HANDLERS_ARGUMENT_LEN) == 0) {
//Install specific custom handlers.
if (!mono_runtime_install_custom_handlers (argv[i] + MONO_HANDLERS_ARGUMENT_LEN)) {
fprintf (stderr, "error: " MONO_HANDLERS_ARGUMENT ", one or more unknown handlers: '%s'\n", argv [i]);
return 1;
}
} else if (strcmp (argv [i], "--help-handlers") == 0) {
mono_runtime_install_custom_handlers_usage ();
return 0;
} else if (strncmp (argv [i], "--response=", 11) == 0){
gchar *response_content;
gchar *response_options;
gsize response_content_len;
if (!g_file_get_contents (&argv[i][11], &response_content, &response_content_len, NULL)){
fprintf (stderr, "The specified response file can not be read\n");
exit (1);
}
response_options = response_content;
// Check for UTF8 BOM in file and remove if found.
if (response_content_len >= 3 && response_content [0] == '\xef' && response_content [1] == '\xbb' && response_content [2] == '\xbf') {
response_content_len -= 3;
response_options += 3;
}
if (response_content_len == 0) {
fprintf (stderr, "The specified response file is empty\n");
exit (1);
}
mono_parse_response_options (response_options, &argc, &argv, FALSE);
g_free (response_content);
} else if (argv [i][0] == '-' && argv [i][1] == '-' && mini_parse_debug_option (argv [i] + 2)) {
} else if (strcmp (argv [i], "--use-map-jit") == 0){
mono_setmmapjit (TRUE);
} else {
fprintf (stderr, "Unknown command line option: '%s'\n", argv [i]);
return 1;
}
}
#if defined(DISABLE_HW_TRAPS) || defined(MONO_ARCH_DISABLE_HW_TRAPS)
// Signal handlers not available
{
MonoDebugOptions *opt = mini_get_debug_options ();
opt->explicit_null_checks = TRUE;
}
#endif
if (!argv [i]) {
mini_usage ();
return 1;
}
if (g_hasenv ("MONO_XDEBUG"))
enable_debugging = TRUE;
#ifdef MONO_CROSS_COMPILE
if (!mono_compile_aot) {
fprintf (stderr, "This mono runtime is compiled for cross-compiling. Only the --aot option is supported.\n");
exit (1);
}
#if TARGET_SIZEOF_VOID_P == 4 && (defined(TARGET_ARM64) || defined(TARGET_AMD64)) && !defined(MONO_ARCH_ILP32)
fprintf (stderr, "Can't cross-compile on 32-bit platforms to 64-bit architecture.\n");
exit (1);
#endif
#endif
if (mono_compile_aot || action == DO_EXEC || action == DO_DEBUGGER) {
g_set_prgname (argv[i]);
}
mono_counters_init ();
#ifndef HOST_WIN32
mono_w32handle_init ();
#endif
/* Set rootdir before loading config */
mono_set_rootdir ();
if (trace_options != NULL){
/*
* Need to call this before mini_init () so we can trace methods
* compiled there too.
*/
mono_jit_trace_calls = mono_trace_set_options (trace_options);
if (mono_jit_trace_calls == NULL)
exit (1);
}
#ifdef DISABLE_JIT
if (!mono_aot_only && !mono_use_interpreter) {
fprintf (stderr, "This runtime has been configured with --enable-minimal=jit, so the --full-aot command line option is required.\n");
exit (1);
}
#endif
if (action == DO_DEBUGGER) {
enable_debugging = TRUE;
g_print ("The Mono Debugger is no longer supported.\n");
return 1;
} else if (enable_debugging)
mono_debug_init (MONO_DEBUG_FORMAT_MONO);
#ifdef HOST_WIN32
if (mixed_mode)
mono_load_coree (argv [i]);
#endif
mono_set_defaults (mini_verbose_level, opt);
mono_set_os_args (argc, argv);
domain = mini_init (argv [i], forced_version);
mono_gc_set_stack_end (&domain);
if (agents) {
int i;
for (i = 0; i < agents->len; ++i) {
int res = load_agent (domain, (char*)g_ptr_array_index (agents, i));
if (res) {
g_ptr_array_free (agents, TRUE);
mini_cleanup (domain);
return 1;
}
}
g_ptr_array_free (agents, TRUE);
}
switch (action) {
case DO_SINGLE_METHOD_REGRESSION:
case DO_REGRESSION:
return mono_exec_regression_internal (mini_verbose_level, argc -i, argv + i, action == DO_SINGLE_METHOD_REGRESSION);
case DO_BENCH:
if (argc - i != 1 || mname == NULL) {
g_print ("Usage: mini --ncompile num --compile method assembly\n");
mini_cleanup (domain);
return 1;
}
aname = argv [i];
break;
case DO_COMPILE:
if (argc - i != 1) {
mini_usage ();
mini_cleanup (domain);
return 1;
}
aname = argv [i];
break;
case DO_DRAW:
if (argc - i != 1 || mname == NULL) {
mini_usage ();
mini_cleanup (domain);
return 1;
}
aname = argv [i];
break;
default:
if (argc - i < 1) {
mini_usage ();
mini_cleanup (domain);
return 1;
}
aname = argv [i];
break;
}
#ifdef MONO_JIT_INFO_TABLE_TEST
if (test_jit_info_table)
jit_info_table_test (domain);
#endif
if (mono_compile_aot && extra_bindings_config_file != NULL) {
apply_root_domain_configuration_file_bindings (domain, extra_bindings_config_file);
}
MonoAssemblyOpenRequest open_req;
mono_assembly_request_prepare_open (&open_req, mono_alc_get_default ());
assembly = mono_assembly_request_open (aname, &open_req, &open_status);
if (!assembly && !mono_compile_aot) {
fprintf (stderr, "Cannot open assembly '%s': %s.\n", aname, mono_image_strerror (open_status));
mini_cleanup (domain);
return 2;
}
mono_callspec_set_assembly (assembly);
if (mono_compile_aot || action == DO_EXEC) {
const char *error;
//mono_set_rootdir ();
error = mono_check_corlib_version ();
if (error) {
fprintf (stderr, "Corlib not in sync with this runtime: %s\n", error);
fprintf (stderr, "Loaded from: %s\n",
mono_defaults.corlib? mono_image_get_filename (mono_defaults.corlib): "unknown");
fprintf (stderr, "Download a newer corlib or a newer runtime at http://www.mono-project.com/download.\n");
exit (1);
}
#if defined(HOST_WIN32) && HAVE_API_SUPPORT_WIN32_CONSOLE
/* Detach console when executing IMAGE_SUBSYSTEM_WINDOWS_GUI on win32 */
if (!enable_debugging && !mono_compile_aot && mono_assembly_get_image_internal (assembly)->image_info->cli_header.nt.pe_subsys_required == IMAGE_SUBSYSTEM_WINDOWS_GUI)
FreeConsole ();
#endif
main_args.domain = domain;
main_args.file = aname;
main_args.argc = argc - i;
main_args.argv = argv + i;
main_args.opts = opt;
main_args.aot_options = aot_options;
main_thread_handler (&main_args);
mono_thread_manage_internal ();
mini_cleanup (domain);
/* Look up return value from System.Environment.ExitCode */
i = mono_environment_exitcode_get ();
return i;
} else if (action == DO_COMPILE) {
compile_all_methods (assembly, mini_verbose_level, opt, recompilation_times);
mini_cleanup (domain);
return 0;
} else if (action == DO_DEBUGGER) {
return 1;
}
desc = mono_method_desc_new (mname, 0);
if (!desc) {
g_print ("Invalid method name %s\n", mname);
mini_cleanup (domain);
return 3;
}
method = mono_method_desc_search_in_image (desc, mono_assembly_get_image_internal (assembly));
if (!method) {
g_print ("Cannot find method %s\n", mname);
mini_cleanup (domain);
return 3;
}
#ifndef DISABLE_JIT
MonoCompile *cfg;
if (action == DO_DRAW) {
int part = 0;
switch (mono_graph_options) {
case MONO_GRAPH_DTREE:
part = 1;
opt |= MONO_OPT_LOOP;
break;
case MONO_GRAPH_CFG_CODE:
part = 1;
break;
case MONO_GRAPH_CFG_SSA:
part = 2;
break;
case MONO_GRAPH_CFG_OPTCODE:
part = 3;
break;
default:
break;
}
if ((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
(method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
MonoMethod *nm;
nm = mono_marshal_get_native_wrapper (method, TRUE, FALSE);
cfg = mini_method_compile (nm, opt, (JitFlags)0, part, -1);
}
else
cfg = mini_method_compile (method, opt, (JitFlags)0, part, -1);
if ((mono_graph_options & MONO_GRAPH_CFG_SSA) && !(cfg->comp_done & MONO_COMP_SSA)) {
g_warning ("no SSA info available (use -O=deadce)");
return 1;
}
mono_draw_graph (cfg, mono_graph_options);
mono_destroy_compile (cfg);
} else if (action == DO_BENCH) {
if (mini_stats_fd) {
const char *n;
double no_opt_time = 0.0;
GTimer *timer = g_timer_new ();
fprintf (mini_stats_fd, "$stattitle = \'Compilations times for %s\';\n",
mono_method_full_name (method, TRUE));
fprintf (mini_stats_fd, "@data = (\n");
fprintf (mini_stats_fd, "[");
for (i = 0; i < G_N_ELEMENTS (opt_sets); i++) {
opt = opt_sets [i];
n = mono_opt_descr (opt);
if (!n [0])
n = "none";
fprintf (mini_stats_fd, "\"%s\",", n);
}
fprintf (mini_stats_fd, "],\n[");
for (i = 0; i < G_N_ELEMENTS (opt_sets); i++) {
int j;
double elapsed;
opt = opt_sets [i];
g_timer_start (timer);
for (j = 0; j < count; ++j) {
cfg = mini_method_compile (method, opt, (JitFlags)0, 0, -1);
mono_destroy_compile (cfg);
}
g_timer_stop (timer);
elapsed = g_timer_elapsed (timer, NULL);
if (!opt)
no_opt_time = elapsed;
fprintf (mini_stats_fd, "%f, ", elapsed);
}
fprintf (mini_stats_fd, "]");
if (no_opt_time > 0.0) {
fprintf (mini_stats_fd, ", \n[");
for (i = 0; i < G_N_ELEMENTS (opt_sets); i++)
fprintf (mini_stats_fd, "%f,", no_opt_time);
fprintf (mini_stats_fd, "]");
}
fprintf (mini_stats_fd, ");\n");
} else {
for (i = 0; i < count; ++i) {
if ((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
(method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL))
method = mono_marshal_get_native_wrapper (method, TRUE, FALSE);
cfg = mini_method_compile (method, opt, (JitFlags)0, 0, -1);
mono_destroy_compile (cfg);
}
}
} else {
cfg = mini_method_compile (method, opt, (JitFlags)0, 0, -1);
mono_destroy_compile (cfg);
}
#endif
mini_cleanup (domain);
return 0;
}
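/*
 * Illustrative sketch: a minimal host that behaves like the stock "mono"
 * launcher can simply forward its arguments (error handling omitted):
 *
 *   int main (int argc, char* argv [])
 *   {
 *       mono_parse_env_options (&argc, &argv);
 *       return mono_main (argc, argv);
 *   }
 */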
/**
* mono_jit_init:
*/
MonoDomain *
mono_jit_init (const char *file)
{
MonoDomain *ret = mini_init (file, NULL);
	MONO_ENTER_GC_SAFE_UNBALANCED; // since we are not executing any managed code yet, it's safe to run the GC
return ret;
}
/**
* mono_jit_init_version:
* \param domain_name the name of the root domain
* \param runtime_version the version of the runtime to load
*
* Use this version when you want to force a particular runtime
* version to be used. By default Mono will pick the runtime that is
* referenced by the initial assembly (specified in \p file), this
* routine allows programmers to specify the actual runtime to be used
* as the initial runtime is inherited by all future assemblies loaded
* (since Mono does not support having more than one mscorlib runtime
* loaded at once).
*
* The \p runtime_version can be one of these strings: "v4.0.30319" for
* desktop, "mobile" for mobile or "moonlight" for Silverlight compat.
* If an unrecognized string is input, the vm will default to desktop.
*
* \returns the \c MonoDomain representing the domain where the assembly
* was loaded.
*/
MonoDomain *
mono_jit_init_version (const char *domain_name, const char *runtime_version)
{
MonoDomain *ret = mini_init (domain_name, runtime_version);
	MONO_ENTER_GC_SAFE_UNBALANCED; // since we are not executing any managed code yet, it's safe to run the GC
return ret;
}
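/*
 * Illustrative usage ("myapp" is a placeholder domain name; the version
 * string is one of the values documented above):
 *
 *   MonoDomain *domain = mono_jit_init_version ("myapp", "v4.0.30319");
 */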
MonoDomain *
mono_jit_init_version_for_test_only (const char *domain_name, const char *runtime_version)
{
MonoDomain *ret = mini_init (domain_name, runtime_version);
return ret;
}
/**
* mono_jit_cleanup:
*/
void
mono_jit_cleanup (MonoDomain *domain)
{
MONO_STACKDATA (dummy);
(void) mono_threads_enter_gc_unsafe_region_unbalanced_internal (&dummy);
// after mini_cleanup everything is cleaned up so MONO_EXIT_GC_UNSAFE
// can't work and doesn't make sense.
mono_thread_manage_internal ();
mini_cleanup (domain);
}
void
mono_jit_set_aot_only (gboolean val)
{
mono_aot_only = val;
mono_ee_features.use_aot_trampolines = val;
}
static void
mono_runtime_set_execution_mode_full (int mode, gboolean override)
{
static gboolean mode_initialized = FALSE;
if (mode_initialized && !override)
return;
mode_initialized = TRUE;
memset (&mono_ee_features, 0, sizeof (mono_ee_features));
switch (mode) {
case MONO_AOT_MODE_LLVMONLY:
mono_aot_only = TRUE;
mono_llvm_only = TRUE;
mono_ee_features.use_aot_trampolines = TRUE;
break;
case MONO_AOT_MODE_FULL:
mono_aot_only = TRUE;
mono_ee_features.use_aot_trampolines = TRUE;
break;
case MONO_AOT_MODE_HYBRID:
mono_set_generic_sharing_vt_supported (TRUE);
mono_set_partial_sharing_supported (TRUE);
break;
case MONO_AOT_MODE_INTERP:
mono_aot_only = TRUE;
mono_use_interpreter = TRUE;
mono_ee_features.use_aot_trampolines = TRUE;
break;
case MONO_AOT_MODE_INTERP_LLVMONLY:
mono_aot_only = TRUE;
mono_use_interpreter = TRUE;
mono_llvm_only = TRUE;
mono_ee_features.force_use_interpreter = TRUE;
break;
case MONO_AOT_MODE_LLVMONLY_INTERP:
mono_aot_only = TRUE;
mono_use_interpreter = TRUE;
mono_llvm_only = TRUE;
break;
case MONO_AOT_MODE_INTERP_ONLY:
mono_check_interp_supported ();
mono_use_interpreter = TRUE;
mono_ee_features.force_use_interpreter = TRUE;
break;
case MONO_AOT_MODE_NORMAL:
case MONO_AOT_MODE_NONE:
break;
default:
g_error ("Unknown execution-mode %d", mode);
}
}
static void
mono_runtime_set_execution_mode (int mode)
{
mono_runtime_set_execution_mode_full (mode, TRUE);
}
/**
* mono_jit_set_aot_mode:
*/
void
mono_jit_set_aot_mode (MonoAotMode mode)
{
/* we don't want to set mono_aot_mode twice */
static gboolean inited;
g_assert (!inited);
mono_aot_mode = mode;
inited = TRUE;
mono_runtime_set_execution_mode (mode);
}
mono_bool
mono_jit_aot_compiling (void)
{
return mono_compile_aot;
}
/**
* mono_jit_set_trace_options:
* \param options string representing the trace options
* Set the options of the tracing engine. This function can be called before initializing
* the mono runtime. See the --trace mono(1) manpage for the options format.
*
* \returns TRUE if the options were parsed and set correctly, FALSE otherwise.
*/
gboolean
mono_jit_set_trace_options (const char* options)
{
MonoCallSpec *trace_opt = mono_trace_set_options (options);
if (trace_opt == NULL)
return FALSE;
mono_jit_trace_calls = trace_opt;
return TRUE;
}
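/*
 * Illustrative usage, following the EXPR syntax shown in mini_trace_usage ()
 * above ("N:System" restricts tracing to the System namespace):
 *
 *   if (!mono_jit_set_trace_options ("N:System"))
 *       g_error ("invalid trace expression");
 */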
/**
* mono_set_signal_chaining:
*
* Enable/disable signal chaining. This should be called before \c mono_jit_init.
* If signal chaining is enabled, the runtime saves the original signal handlers before
* installing its own handlers, and calls the original ones in the following cases:
* - a \c SIGSEGV / \c SIGABRT signal received while executing native (i.e. not JITted) code.
* - \c SIGPROF
* - \c SIGFPE
* - \c SIGQUIT
* - \c SIGUSR2
* Signal chaining only works on POSIX platforms.
*/
void
mono_set_signal_chaining (gboolean chain_signals)
{
mono_do_signal_chaining = chain_signals;
}
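/*
 * Illustrative call order (sketch; "app.exe" is a placeholder): chaining has
 * to be configured before the runtime installs its own handlers in
 * mono_jit_init ():
 *
 *   mono_set_signal_chaining (TRUE);
 *   MonoDomain *domain = mono_jit_init ("app.exe");
 */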
/**
* mono_set_crash_chaining:
*
* Enable/disable crash chaining due to signals. When a fatal signal is delivered and
 * Mono doesn't know how to handle it, it will invoke the crash handler. If crash chaining
* is enabled, it will first print its crash information and then try to chain with the native handler.
*/
void
mono_set_crash_chaining (gboolean chain_crashes)
{
mono_do_crash_chaining = chain_crashes;
}
/**
* mono_parse_options_from:
* \param options string containing strings
* \param ref_argc pointer to the \c argc variable that might be updated
* \param ref_argv pointer to the \c argv string vector variable that might be updated
*
* This function parses the contents of the \c MONO_ENV_OPTIONS
* environment variable as if they were parsed by a command shell
* splitting the contents by spaces into different elements of the
* \p argv vector. This method supports quoting with both the " and '
* characters. Inside quoting, spaces and tabs are significant,
* otherwise, they are considered argument separators.
*
* The \ character can be used to escape the next character which will
* be added to the current element verbatim. Typically this is used
 * inside quotes. If the quotes are not balanced, this method
 * returns an error message describing the problem.
 *
* If the environment variable is empty, no changes are made
* to the values pointed by \p ref_argc and \p ref_argv.
*
* Otherwise the \p ref_argv is modified to point to a new array that contains
* all the previous elements contained in the vector, plus the values parsed.
* The \p argc is updated to match the new number of parameters.
*
* \returns The value NULL is returned on success, otherwise a \c g_strdup allocated
* string is returned (this is an alias to \c malloc under normal circumstances) that
* contains the error message that happened during parsing.
*/
char *
mono_parse_options_from (const char *options, int *ref_argc, char **ref_argv [])
{
return mono_parse_options (options, ref_argc, ref_argv, TRUE);
}
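/*
 * Illustrative sketch (the values are assumptions): splitting a quoted
 * option string into the argument vector.
 *
 *   int argc = 1;
 *   char *initial [] = { (char*)"mono", NULL };
 *   char **argv = initial;
 *   char *err = mono_parse_options_from ("--verbose '--aot=full,outfile=a b'", &argc, &argv);
 *   // on success err == NULL, argc == 3 and argv [1] is "--verbose"
 */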
static void
merge_parsed_options (GPtrArray *parsed_options, int *ref_argc, char **ref_argv [], gboolean prepend)
{
int argc = *ref_argc;
char **argv = *ref_argv;
if (parsed_options->len > 0){
int new_argc = parsed_options->len + argc;
char **new_argv = g_new (char *, new_argc + 1);
guint i;
guint j;
new_argv [0] = argv [0];
i = 1;
if (prepend){
/* First the environment variable settings, to allow the command line options to override */
for (i = 0; i < parsed_options->len; i++)
new_argv [i+1] = (char *)g_ptr_array_index (parsed_options, i);
i++;
}
for (j = 1; j < argc; j++)
new_argv [i++] = argv [j];
if (!prepend){
for (j = 0; j < parsed_options->len; j++)
new_argv [i++] = (char *)g_ptr_array_index (parsed_options, j);
}
new_argv [i] = NULL;
*ref_argc = new_argc;
*ref_argv = new_argv;
}
}
static char *
mono_parse_options (const char *options, int *ref_argc, char **ref_argv [], gboolean prepend)
{
if (options == NULL)
return NULL;
GPtrArray *array = g_ptr_array_new ();
GString *buffer = g_string_new ("");
const char *p;
gboolean in_quotes = FALSE;
char quote_char = '\0';
for (p = options; *p; p++){
switch (*p){
case ' ': case '\t': case '\n':
if (!in_quotes) {
if (buffer->len != 0){
g_ptr_array_add (array, g_strdup (buffer->str));
g_string_truncate (buffer, 0);
}
} else {
g_string_append_c (buffer, *p);
}
break;
case '\\':
if (p [1]){
g_string_append_c (buffer, p [1]);
p++;
}
break;
case '\'':
case '"':
if (in_quotes) {
if (quote_char == *p)
in_quotes = FALSE;
else
g_string_append_c (buffer, *p);
} else {
in_quotes = TRUE;
quote_char = *p;
}
break;
default:
g_string_append_c (buffer, *p);
break;
}
}
if (in_quotes)
return g_strdup_printf ("Unmatched quotes in value: [%s]\n", options);
if (buffer->len != 0)
g_ptr_array_add (array, g_strdup (buffer->str));
g_string_free (buffer, TRUE);
merge_parsed_options (array, ref_argc, ref_argv, prepend);
g_ptr_array_free (array, TRUE);
return NULL;
}
#if defined(HOST_WIN32) && HAVE_API_SUPPORT_WIN32_COMMAND_LINE_TO_ARGV
#include <shellapi.h>
static char *
mono_win32_parse_options (const char *options, int *ref_argc, char **ref_argv [], gboolean prepend)
{
int argc;
gunichar2 **argv;
gunichar2 *optionsw;
if (!options)
return NULL;
GPtrArray *array = g_ptr_array_new ();
optionsw = g_utf8_to_utf16 (options, -1, NULL, NULL, NULL);
if (optionsw) {
gunichar2 *p;
gboolean in_quotes = FALSE;
gunichar2 quote_char = L'\0';
for (p = optionsw; *p; p++){
switch (*p){
case L'\n':
if (!in_quotes)
*p = L' ';
break;
case L'\'':
case L'"':
if (in_quotes) {
if (quote_char == *p)
in_quotes = FALSE;
} else {
in_quotes = TRUE;
quote_char = *p;
}
break;
}
}
argv = CommandLineToArgvW (optionsw, &argc);
if (argv) {
for (int i = 0; i < argc; i++)
g_ptr_array_add (array, g_utf16_to_utf8 (argv[i], -1, NULL, NULL, NULL));
LocalFree (argv);
}
g_free (optionsw);
}
merge_parsed_options (array, ref_argc, ref_argv, prepend);
g_ptr_array_free (array, TRUE);
return NULL;
}
static char *
mono_parse_response_options (const char *options, int *ref_argc, char **ref_argv [], gboolean prepend)
{
return mono_win32_parse_options (options, ref_argc, ref_argv, prepend);
}
#else
static char *
mono_parse_response_options (const char *options, int *ref_argc, char **ref_argv [], gboolean prepend)
{
return mono_parse_options (options, ref_argc, ref_argv, prepend);
}
#endif
/**
* mono_parse_env_options:
* \param ref_argc pointer to the \c argc variable that might be updated
* \param ref_argv pointer to the \c argv string vector variable that might be updated
*
* This function parses the contents of the \c MONO_ENV_OPTIONS
* environment variable as if they were parsed by a command shell
* splitting the contents by spaces into different elements of the
* \p argv vector. This method supports quoting with both the " and '
* characters. Inside quoting, spaces and tabs are significant,
* otherwise, they are considered argument separators.
*
 * The \ character can be used to escape the next character, which will
 * be added to the current element verbatim. Typically this is used
 * inside quotes. If the quotes are not balanced, it is treated as a
 * parsing error.
 *
 * If the environment variable is empty, no changes are made
 * to the values pointed to by \p ref_argc and \p ref_argv.
 *
 * Otherwise \p ref_argv is modified to point to a new array that contains
 * all the previous elements contained in the vector, plus the values parsed.
 * The \p argc is updated to match the new number of parameters.
*
* If there is an error parsing, this method will terminate the process by
* calling exit(1).
*
 * An alternative to this method that allows an arbitrary string to be parsed
 * and does not exit on error is `api:mono_parse_options_from`.
*/
void
mono_parse_env_options (int *ref_argc, char **ref_argv [])
{
char *ret;
char *env_options = g_getenv ("MONO_ENV_OPTIONS");
if (env_options == NULL)
return;
ret = mono_parse_options_from (env_options, ref_argc, ref_argv);
g_free (env_options);
if (ret == NULL)
return;
fprintf (stderr, "%s", ret);
exit (1);
}
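/*
 * For example (illustrative), running with
 *   MONO_ENV_OPTIONS="--trace=N:System" mono app.exe
 * behaves as if --trace=N:System had been passed at the start of the
 * command line, so later command line options can still override it.
 */

/* Returns a pointer to the global JIT debug options. */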
MonoDebugOptions *
get_mini_debug_options (void)
{
return &mini_debug_options;
}
/**
* Intrinsics support
*/
#include <config.h>
#include <glib.h>
#include <mono/utils/mono-compiler.h>
#include <mono/utils/mono-math.h>
#include <math.h>
#ifndef DISABLE_JIT
#include "mini.h"
#include "mini-runtime.h"
#include "ir-emit.h"
#include "jit-icalls.h"
#include <mono/metadata/abi-details.h>
#include <mono/metadata/class-abi-details.h>
#include <mono/metadata/gc-internals.h>
#include <mono/metadata/monitor.h>
#include <mono/utils/mono-memory-model.h>
static GENERATE_GET_CLASS_WITH_CACHE (runtime_helpers, "System.Runtime.CompilerServices", "RuntimeHelpers")
static GENERATE_TRY_GET_CLASS_WITH_CACHE (memory_marshal, "System.Runtime.InteropServices", "MemoryMarshal")
static GENERATE_TRY_GET_CLASS_WITH_CACHE (math, "System", "Math")
/* optimize the simple GetGenericValueImpl/SetGenericValueImpl generic calls */
static MonoInst*
emit_array_generic_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
{
MonoInst *addr, *store, *load;
MonoClass *eklass = mono_class_from_mono_type_internal (fsig->params [1]);
/* the bounds check is already done by the callers */
addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE, FALSE);
MonoType *etype = m_class_get_byval_arg (eklass);
if (is_set) {
EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, etype, args [2]->dreg, 0);
if (!mini_debug_options.weak_memory_model && mini_type_is_reference (etype))
mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, etype, addr->dreg, 0, load->dreg);
if (mini_type_is_reference (etype))
mini_emit_write_barrier (cfg, addr, load);
} else {
EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, etype, addr->dreg, 0);
EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, etype, args [2]->dreg, 0, load->dreg);
}
return store;
}
static gboolean
mono_type_is_native_blittable (MonoType *t)
{
if (MONO_TYPE_IS_REFERENCE (t))
return FALSE;
if (MONO_TYPE_IS_PRIMITIVE_SCALAR (t))
return TRUE;
MonoClass *klass = mono_class_from_mono_type_internal (t);
	//MonoClass::blittable depends on mono_class_setup_fields being done.
mono_class_setup_fields (klass);
if (!m_class_is_blittable (klass))
return FALSE;
// If the native marshal size is different we can't convert PtrToStructure to a type load
if (mono_class_native_size (klass, NULL) != mono_class_value_size (klass, NULL))
return FALSE;
return TRUE;
}
MonoInst*
mini_emit_inst_for_ctor (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
{
const char* cmethod_klass_name_space = m_class_get_name_space (cmethod->klass);
const char* cmethod_klass_name = m_class_get_name (cmethod->klass);
MonoImage *cmethod_klass_image = m_class_get_image (cmethod->klass);
gboolean in_corlib = cmethod_klass_image == mono_defaults.corlib;
MonoInst *ins = NULL;
/* Required intrinsics are always used even with -O=-intrins */
if (in_corlib &&
!strcmp (cmethod_klass_name_space, "System") &&
!strcmp (cmethod_klass_name, "ByReference`1")) {
/* public ByReference(ref T value) */
g_assert (fsig->hasthis && fsig->param_count == 1);
EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [0]->dreg, 0, args [1]->dreg);
return ins;
}
ins = mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
if (ins)
return ins;
if (!(cfg->opt & MONO_OPT_INTRINS))
return NULL;
#ifdef MONO_ARCH_SIMD_INTRINSICS
if (cfg->opt & MONO_OPT_SIMD) {
ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
if (ins)
return ins;
}
#endif
return NULL;
}
static MonoInst*
llvm_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args, gboolean in_corlib)
{
MonoInst *ins = NULL;
int opcode = 0;
// Convert Math and MathF methods into LLVM intrinsics, e.g. MathF.Sin -> @llvm.sin.f32
if (in_corlib && !strcmp (m_class_get_name (cmethod->klass), "MathF")) {
// (float)
if (fsig->param_count == 1 && fsig->params [0]->type == MONO_TYPE_R4) {
if (!strcmp (cmethod->name, "Ceiling")) {
opcode = OP_CEILF;
} else if (!strcmp (cmethod->name, "Cos")) {
opcode = OP_COSF;
} else if (!strcmp (cmethod->name, "Exp")) {
opcode = OP_EXPF;
} else if (!strcmp (cmethod->name, "Floor")) {
opcode = OP_FLOORF;
} else if (!strcmp (cmethod->name, "Log2")) {
opcode = OP_LOG2F;
} else if (!strcmp (cmethod->name, "Log10")) {
opcode = OP_LOG10F;
} else if (!strcmp (cmethod->name, "Sin")) {
opcode = OP_SINF;
} else if (!strcmp (cmethod->name, "Sqrt")) {
opcode = OP_SQRTF;
} else if (!strcmp (cmethod->name, "Truncate")) {
opcode = OP_TRUNCF;
}
#if defined(TARGET_X86) || defined(TARGET_AMD64)
else if (!strcmp (cmethod->name, "Round") && (mini_get_cpu_features (cfg) & MONO_CPU_X86_SSE41) != 0) {
// special case: emit vroundss for MathF.Round directly instead of what llvm.round.f32 emits
// to align with CoreCLR behavior
int xreg = alloc_xreg (cfg);
EMIT_NEW_UNALU (cfg, ins, OP_FCONV_TO_R4_X, xreg, args [0]->dreg);
int xround = alloc_xreg (cfg);
EMIT_NEW_BIALU (cfg, ins, OP_SSE41_ROUNDS, xround, xreg, xreg);
ins->inst_c0 = 0x4; // vroundss xmm0, xmm0, xmm0, 0x4 (mode for rounding)
ins->inst_c1 = MONO_TYPE_R4;
int dreg = alloc_freg (cfg);
EMIT_NEW_UNALU (cfg, ins, OP_EXTRACT_R4, dreg, xround);
ins->inst_c0 = 0;
ins->inst_c1 = MONO_TYPE_R4;
return ins;
}
#endif
}
// (float, float)
if (fsig->param_count == 2 && fsig->params [0]->type == MONO_TYPE_R4 && fsig->params [1]->type == MONO_TYPE_R4) {
if (!strcmp (cmethod->name, "Pow")) {
opcode = OP_RPOW;
} else if (!strcmp (cmethod->name, "CopySign")) {
opcode = OP_RCOPYSIGN;
}
}
// (float, float, float)
if (fsig->param_count == 3 && fsig->params [0]->type == MONO_TYPE_R4 && fsig->params [1]->type == MONO_TYPE_R4 && fsig->params [2]->type == MONO_TYPE_R4) {
if (!strcmp (cmethod->name, "FusedMultiplyAdd")) {
opcode = OP_FMAF;
}
}
if (opcode) {
MONO_INST_NEW (cfg, ins, opcode);
ins->type = STACK_R8;
ins->dreg = mono_alloc_dreg (cfg, (MonoStackType)ins->type);
ins->sreg1 = args [0]->dreg;
if (fsig->param_count > 1) {
ins->sreg2 = args [1]->dreg;
}
if (fsig->param_count > 2) {
ins->sreg3 = args [2]->dreg;
}
g_assert (fsig->param_count <= 3);
MONO_ADD_INS (cfg->cbb, ins);
}
}
if (cmethod->klass == mono_class_try_get_math_class ()) {
// (double)
if (fsig->param_count == 1 && fsig->params [0]->type == MONO_TYPE_R8) {
if (!strcmp (cmethod->name, "Abs")) {
opcode = OP_ABS;
} else if (!strcmp (cmethod->name, "Ceiling")) {
opcode = OP_CEIL;
} else if (!strcmp (cmethod->name, "Cos")) {
opcode = OP_COS;
} else if (!strcmp (cmethod->name, "Exp")) {
opcode = OP_EXP;
} else if (!strcmp (cmethod->name, "Floor")) {
opcode = OP_FLOOR;
} else if (!strcmp (cmethod->name, "Log")) {
opcode = OP_LOG;
} else if (!strcmp (cmethod->name, "Log2")) {
opcode = OP_LOG2;
} else if (!strcmp (cmethod->name, "Log10")) {
opcode = OP_LOG10;
} else if (!strcmp (cmethod->name, "Sin")) {
opcode = OP_SIN;
} else if (!strcmp (cmethod->name, "Sqrt")) {
opcode = OP_SQRT;
} else if (!strcmp (cmethod->name, "Truncate")) {
opcode = OP_TRUNC;
}
}
// (double, double)
if (fsig->param_count == 2 && fsig->params [0]->type == MONO_TYPE_R8 && fsig->params [1]->type == MONO_TYPE_R8) {
// Max and Min can only be optimized in fast math mode
if (!strcmp (cmethod->name, "Max") && mono_use_fast_math) {
opcode = OP_FMAX;
} else if (!strcmp (cmethod->name, "Min") && mono_use_fast_math) {
opcode = OP_FMIN;
} else if (!strcmp (cmethod->name, "Pow")) {
opcode = OP_FPOW;
} else if (!strcmp (cmethod->name, "CopySign")) {
opcode = OP_FCOPYSIGN;
}
}
// (double, double, double)
if (fsig->param_count == 3 && fsig->params [0]->type == MONO_TYPE_R8 && fsig->params [1]->type == MONO_TYPE_R8 && fsig->params [2]->type == MONO_TYPE_R8) {
if (!strcmp (cmethod->name, "FusedMultiplyAdd")) {
opcode = OP_FMA;
}
}
// Math also contains overloads for floats (MathF inlines them)
// (float)
if (fsig->param_count == 1 && fsig->params [0]->type == MONO_TYPE_R4) {
if (!strcmp (cmethod->name, "Abs")) {
opcode = OP_ABSF;
}
}
// (float, float)
if (fsig->param_count == 2 && fsig->params [0]->type == MONO_TYPE_R4 && fsig->params [1]->type == MONO_TYPE_R4) {
if (!strcmp (cmethod->name, "Max") && mono_use_fast_math) {
opcode = OP_RMAX;
} else if (!strcmp (cmethod->name, "Min") && mono_use_fast_math) {
opcode = OP_RMIN;
} else if (!strcmp (cmethod->name, "Pow")) {
opcode = OP_RPOW;
}
}
if (opcode && fsig->param_count > 0) {
MONO_INST_NEW (cfg, ins, opcode);
ins->type = STACK_R8;
ins->dreg = mono_alloc_dreg (cfg, (MonoStackType)ins->type);
ins->sreg1 = args [0]->dreg;
if (fsig->param_count > 1) {
ins->sreg2 = args [1]->dreg;
}
if (fsig->param_count > 2) {
ins->sreg3 = args [2]->dreg;
}
g_assert (fsig->param_count <= 3);
MONO_ADD_INS (cfg->cbb, ins);
}
opcode = 0;
if (cfg->opt & MONO_OPT_CMOV) {
if (strcmp (cmethod->name, "Min") == 0) {
if (fsig->params [0]->type == MONO_TYPE_I4)
opcode = OP_IMIN;
				else if (fsig->params [0]->type == MONO_TYPE_U4)
opcode = OP_IMIN_UN;
else if (fsig->params [0]->type == MONO_TYPE_I8)
opcode = OP_LMIN;
else if (fsig->params [0]->type == MONO_TYPE_U8)
opcode = OP_LMIN_UN;
} else if (strcmp (cmethod->name, "Max") == 0) {
if (fsig->params [0]->type == MONO_TYPE_I4)
opcode = OP_IMAX;
				else if (fsig->params [0]->type == MONO_TYPE_U4)
opcode = OP_IMAX_UN;
else if (fsig->params [0]->type == MONO_TYPE_I8)
opcode = OP_LMAX;
else if (fsig->params [0]->type == MONO_TYPE_U8)
opcode = OP_LMAX_UN;
}
}
if (opcode && fsig->param_count == 2) {
MONO_INST_NEW (cfg, ins, opcode);
ins->type = fsig->params [0]->type == MONO_TYPE_I4 ? STACK_I4 : STACK_I8;
ins->dreg = mono_alloc_dreg (cfg, (MonoStackType)ins->type);
ins->sreg1 = args [0]->dreg;
ins->sreg2 = args [1]->dreg;
MONO_ADD_INS (cfg->cbb, ins);
}
}
if (in_corlib && !strcmp (m_class_get_name (cmethod->klass), "Buffer")) {
if (!strcmp (cmethod->name, "Memmove") && fsig->param_count == 3 && fsig->params [0]->type == MONO_TYPE_PTR && fsig->params [1]->type == MONO_TYPE_PTR) {
MonoBasicBlock *end_bb;
NEW_BBLOCK (cfg, end_bb);
// do nothing if len == 0 (even if src or dst are nulls)
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, args [2]->dreg, 0);
MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, end_bb);
// throw NRE if src or dst are nulls
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, args [0]->dreg, 0);
MONO_EMIT_NEW_COND_EXC (cfg, EQ, "NullReferenceException");
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, args [1]->dreg, 0);
MONO_EMIT_NEW_COND_EXC (cfg, EQ, "NullReferenceException");
MONO_INST_NEW (cfg, ins, OP_MEMMOVE);
ins->sreg1 = args [0]->dreg; // i1* dst
ins->sreg2 = args [1]->dreg; // i1* src
ins->sreg3 = args [2]->dreg; // i32/i64 len
MONO_ADD_INS (cfg->cbb, ins);
MONO_START_BB (cfg, end_bb);
}
}
return ins;
}
static MonoInst*
emit_span_intrinsics (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
{
MonoInst *ins;
MonoClassField *ptr_field = mono_class_get_field_from_name_full (cmethod->klass, "_pointer", NULL);
if (!ptr_field)
/* Portable Span<T> */
return NULL;
if (!strcmp (cmethod->name, "get_Item")) {
MonoClassField *length_field = mono_class_get_field_from_name_full (cmethod->klass, "_length", NULL);
g_assert (length_field);
MonoGenericClass *gclass = mono_class_get_generic_class (cmethod->klass);
MonoClass *param_class = mono_class_from_mono_type_internal (gclass->context.class_inst->type_argv [0]);
if (mini_is_gsharedvt_variable_klass (param_class))
return NULL;
int span_reg = args [0]->dreg;
/* Load _pointer.Value */
int base_reg = alloc_preg (cfg);
EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, base_reg, span_reg, ptr_field->offset - MONO_ABI_SIZEOF (MonoObject));
/* Similar to mini_emit_ldelema_1_ins () */
int size = mono_class_array_element_size (param_class);
int index_reg = mini_emit_sext_index_reg (cfg, args [1]);
mini_emit_bounds_check_offset (cfg, span_reg, length_field->offset - MONO_ABI_SIZEOF (MonoObject), index_reg, NULL);
// FIXME: Sign extend index ?
int mult_reg = alloc_preg (cfg);
int add_reg = alloc_preg (cfg);
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index_reg, size);
EMIT_NEW_BIALU (cfg, ins, OP_PADD, add_reg, base_reg, mult_reg);
ins->klass = param_class;
ins->type = STACK_MP;
return ins;
} else if (!strcmp (cmethod->name, "get_Length")) {
MonoClassField *length_field = mono_class_get_field_from_name_full (cmethod->klass, "_length", NULL);
g_assert (length_field);
/*
* FIXME: This doesn't work with abcrem, since the src is a unique LDADDR not
* the same array object.
*/
MONO_INST_NEW (cfg, ins, OP_LDLEN);
ins->dreg = alloc_preg (cfg);
ins->sreg1 = args [0]->dreg;
ins->inst_imm = length_field->offset - MONO_ABI_SIZEOF (MonoObject);
ins->type = STACK_I4;
MONO_ADD_INS (cfg->cbb, ins);
cfg->flags |= MONO_CFG_NEEDS_DECOMPOSE;
cfg->cbb->needs_decompose = TRUE;
return ins;
}
return NULL;
}
static MonoInst*
emit_unsafe_intrinsics (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
{
MonoInst *ins;
int dreg, align;
MonoGenericContext *ctx = mono_method_get_context (cmethod);
MonoType *t;
if (!strcmp (cmethod->name, "As")) {
g_assert (ctx);
g_assert (ctx->method_inst);
t = ctx->method_inst->type_argv [0];
if (ctx->method_inst->type_argc == 2) {
dreg = alloc_preg (cfg);
EMIT_NEW_UNALU (cfg, ins, OP_MOVE, dreg, args [0]->dreg);
ins->type = STACK_OBJ;
ins->klass = mono_get_object_class ();
return ins;
} else if (ctx->method_inst->type_argc == 1) {
if (mini_is_gsharedvt_variable_type (t))
return NULL;
			// Casts the given object to the specified type, performing no dynamic type checking.
g_assert (fsig->param_count == 1);
g_assert (fsig->params [0]->type == MONO_TYPE_OBJECT);
dreg = alloc_preg (cfg);
EMIT_NEW_UNALU (cfg, ins, OP_MOVE, dreg, args [0]->dreg);
ins->type = STACK_OBJ;
ins->klass = mono_class_from_mono_type_internal (ctx->method_inst->type_argv [0]);
return ins;
}
} else if (!strcmp (cmethod->name, "AsPointer")) {
g_assert (ctx);
g_assert (ctx->method_inst);
g_assert (ctx->method_inst->type_argc == 1);
g_assert (fsig->param_count == 1);
dreg = alloc_preg (cfg);
EMIT_NEW_UNALU (cfg, ins, OP_MOVE, dreg, args [0]->dreg);
ins->type = STACK_PTR;
return ins;
} else if (!strcmp (cmethod->name, "AsRef")) {
g_assert (ctx);
g_assert (ctx->method_inst);
g_assert (ctx->method_inst->type_argc == 1);
g_assert (fsig->param_count == 1);
dreg = alloc_preg (cfg);
EMIT_NEW_UNALU (cfg, ins, OP_MOVE, dreg, args [0]->dreg);
ins->type = STACK_OBJ;
ins->klass = mono_get_object_class ();
return ins;
} else if (!strcmp (cmethod->name, "AreSame")) {
g_assert (ctx);
g_assert (ctx->method_inst);
g_assert (ctx->method_inst->type_argc == 1);
g_assert (fsig->param_count == 2);
dreg = alloc_ireg (cfg);
EMIT_NEW_BIALU (cfg, ins, OP_COMPARE, -1, args [0]->dreg, args [1]->dreg);
EMIT_NEW_UNALU (cfg, ins, OP_PCEQ, dreg, -1);
return ins;
} else if (!strcmp (cmethod->name, "IsAddressLessThan")) {
g_assert (ctx);
g_assert (ctx->method_inst);
g_assert (ctx->method_inst->type_argc == 1);
g_assert (fsig->param_count == 2);
dreg = alloc_ireg (cfg);
EMIT_NEW_BIALU (cfg, ins, OP_COMPARE, -1, args [0]->dreg, args [1]->dreg);
EMIT_NEW_UNALU (cfg, ins, OP_PCLT_UN, dreg, -1);
return ins;
} else if (!strcmp (cmethod->name, "IsAddressGreaterThan")) {
g_assert (ctx);
g_assert (ctx->method_inst);
g_assert (ctx->method_inst->type_argc == 1);
g_assert (fsig->param_count == 2);
dreg = alloc_ireg (cfg);
EMIT_NEW_BIALU (cfg, ins, OP_COMPARE, -1, args [0]->dreg, args [1]->dreg);
EMIT_NEW_UNALU (cfg, ins, OP_PCGT_UN, dreg, -1);
return ins;
} else if (!strcmp (cmethod->name, "Add")) {
g_assert (ctx);
g_assert (ctx->method_inst);
g_assert (ctx->method_inst->type_argc == 1);
g_assert (fsig->param_count == 2);
int mul_reg = alloc_preg (cfg);
t = ctx->method_inst->type_argv [0];
MonoInst *esize_ins;
if (mini_is_gsharedvt_variable_type (t)) {
esize_ins = mini_emit_get_gsharedvt_info_klass (cfg, mono_class_from_mono_type_internal (t), MONO_RGCTX_INFO_CLASS_SIZEOF);
if (SIZEOF_REGISTER == 8)
MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, esize_ins->dreg, esize_ins->dreg);
} else {
t = mini_type_get_underlying_type (t);
int esize = mono_class_array_element_size (mono_class_from_mono_type_internal (t));
EMIT_NEW_ICONST (cfg, esize_ins, esize);
}
esize_ins->type = STACK_I4;
EMIT_NEW_BIALU (cfg, ins, OP_PMUL, mul_reg, args [1]->dreg, esize_ins->dreg);
ins->type = STACK_PTR;
dreg = alloc_preg (cfg);
EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, args [0]->dreg, mul_reg);
ins->type = STACK_PTR;
return ins;
} else if (!strcmp (cmethod->name, "AddByteOffset")) {
g_assert (ctx);
g_assert (ctx->method_inst);
g_assert (ctx->method_inst->type_argc == 1);
g_assert (fsig->param_count == 2);
if (fsig->params [1]->type == MONO_TYPE_I || fsig->params [1]->type == MONO_TYPE_U) {
int dreg = alloc_preg (cfg);
EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, args [0]->dreg, args [1]->dreg);
ins->type = STACK_PTR;
return ins;
} else if (fsig->params [1]->type == MONO_TYPE_U8) {
int sreg = args [1]->dreg;
if (SIZEOF_REGISTER == 4) {
sreg = alloc_ireg (cfg);
EMIT_NEW_UNALU (cfg, ins, OP_LCONV_TO_U4, sreg, args [1]->dreg);
}
int dreg = alloc_preg (cfg);
EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, args [0]->dreg, sreg);
ins->type = STACK_PTR;
return ins;
}
} else if (!strcmp (cmethod->name, "SizeOf")) {
g_assert (ctx);
g_assert (ctx->method_inst);
g_assert (ctx->method_inst->type_argc == 1);
g_assert (fsig->param_count == 0);
t = ctx->method_inst->type_argv [0];
if (mini_is_gsharedvt_variable_type (t)) {
ins = mini_emit_get_gsharedvt_info_klass (cfg, mono_class_from_mono_type_internal (t), MONO_RGCTX_INFO_CLASS_SIZEOF);
} else {
int esize = mono_type_size (t, &align);
EMIT_NEW_ICONST (cfg, ins, esize);
}
ins->type = STACK_I4;
return ins;
} else if (!strcmp (cmethod->name, "ReadUnaligned")) {
g_assert (ctx);
g_assert (ctx->method_inst);
g_assert (ctx->method_inst->type_argc == 1);
g_assert (fsig->param_count == 1);
t = ctx->method_inst->type_argv [0];
t = mini_get_underlying_type (t);
return mini_emit_memory_load (cfg, t, args [0], 0, MONO_INST_UNALIGNED);
} else if (!strcmp (cmethod->name, "WriteUnaligned")) {
g_assert (ctx);
g_assert (ctx->method_inst);
g_assert (ctx->method_inst->type_argc == 1);
g_assert (fsig->param_count == 2);
t = ctx->method_inst->type_argv [0];
t = mini_get_underlying_type (t);
mini_emit_memory_store (cfg, t, args [0], args [1], MONO_INST_UNALIGNED);
MONO_INST_NEW (cfg, ins, OP_NOP);
MONO_ADD_INS (cfg->cbb, ins);
return ins;
} else if (!strcmp (cmethod->name, "ByteOffset")) {
g_assert (ctx);
g_assert (ctx->method_inst);
g_assert (ctx->method_inst->type_argc == 1);
g_assert (fsig->param_count == 2);
int dreg = alloc_preg (cfg);
EMIT_NEW_BIALU (cfg, ins, OP_PSUB, dreg, args [1]->dreg, args [0]->dreg);
ins->type = STACK_PTR;
return ins;
} else if (!strcmp (cmethod->name, "Unbox")) {
g_assert (ctx);
g_assert (ctx->method_inst);
g_assert (ctx->method_inst->type_argc == 1);
t = ctx->method_inst->type_argv [0];
t = mini_get_underlying_type (t);
MonoClass *klass = mono_class_from_mono_type_internal (t);
int context_used = mini_class_check_context_used (cfg, klass);
return mini_handle_unbox (cfg, klass, args [0], context_used);
} else if (!strcmp (cmethod->name, "Copy")) {
g_assert (ctx);
g_assert (ctx->method_inst);
g_assert (ctx->method_inst->type_argc == 1);
t = ctx->method_inst->type_argv [0];
t = mini_get_underlying_type (t);
MonoClass *klass = mono_class_from_mono_type_internal (t);
mini_emit_memory_copy (cfg, args [0], args [1], klass, FALSE, 0);
return cfg->cbb->last_ins;
} else if (!strcmp (cmethod->name, "CopyBlock")) {
g_assert (fsig->param_count == 3);
mini_emit_memory_copy_bytes (cfg, args [0], args [1], args [2], 0);
return cfg->cbb->last_ins;
} else if (!strcmp (cmethod->name, "CopyBlockUnaligned")) {
g_assert (fsig->param_count == 3);
mini_emit_memory_copy_bytes (cfg, args [0], args [1], args [2], MONO_INST_UNALIGNED);
return cfg->cbb->last_ins;
} else if (!strcmp (cmethod->name, "InitBlock")) {
g_assert (fsig->param_count == 3);
mini_emit_memory_init_bytes (cfg, args [0], args [1], args [2], 0);
return cfg->cbb->last_ins;
} else if (!strcmp (cmethod->name, "InitBlockUnaligned")) {
g_assert (fsig->param_count == 3);
mini_emit_memory_init_bytes (cfg, args [0], args [1], args [2], MONO_INST_UNALIGNED);
return cfg->cbb->last_ins;
	} else if (!strcmp (cmethod->name, "SkipInit")) {
MONO_INST_NEW (cfg, ins, OP_NOP);
MONO_ADD_INS (cfg->cbb, ins);
return ins;
} else if (!strcmp (cmethod->name, "SubtractByteOffset")) {
g_assert (ctx);
g_assert (ctx->method_inst);
g_assert (ctx->method_inst->type_argc == 1);
g_assert (fsig->param_count == 2);
int dreg = alloc_preg (cfg);
EMIT_NEW_BIALU (cfg, ins, OP_PSUB, dreg, args [0]->dreg, args [1]->dreg);
ins->type = STACK_PTR;
return ins;
} else if (!strcmp (cmethod->name, "IsNullRef")) {
g_assert (fsig->param_count == 1);
MONO_EMIT_NEW_COMPARE_IMM (cfg, args [0]->dreg, 0);
int dreg = alloc_ireg (cfg);
EMIT_NEW_UNALU (cfg, ins, OP_PCEQ, dreg, -1);
return ins;
} else if (!strcmp (cmethod->name, "NullRef")) {
g_assert (ctx);
g_assert (ctx->method_inst);
g_assert (ctx->method_inst->type_argc == 1);
g_assert (fsig->param_count == 0);
EMIT_NEW_PCONST (cfg, ins, NULL);
ins->type = STACK_MP;
ins->klass = mono_class_from_mono_type_internal (fsig->ret);
return ins;
}
return NULL;
}
static MonoInst*
emit_jit_helpers_intrinsics (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
{
MonoInst *ins;
int dreg;
MonoGenericContext *ctx = mono_method_get_context (cmethod);
MonoType *t;
if (!strcmp (cmethod->name, "EnumEquals") || !strcmp (cmethod->name, "EnumCompareTo")) {
g_assert (ctx);
g_assert (ctx->method_inst);
g_assert (ctx->method_inst->type_argc == 1);
g_assert (fsig->param_count == 2);
t = ctx->method_inst->type_argv [0];
t = mini_get_underlying_type (t);
if (mini_is_gsharedvt_variable_type (t))
return NULL;
gboolean is_i8 = (t->type == MONO_TYPE_I8 || t->type == MONO_TYPE_U8);
gboolean is_unsigned = (t->type == MONO_TYPE_U1 || t->type == MONO_TYPE_U2 || t->type == MONO_TYPE_U4 || t->type == MONO_TYPE_U8 || t->type == MONO_TYPE_U);
int cmp_op, ceq_op, cgt_op, clt_op;
if (is_i8) {
cmp_op = OP_LCOMPARE;
ceq_op = OP_LCEQ;
cgt_op = is_unsigned ? OP_LCGT_UN : OP_LCGT;
clt_op = is_unsigned ? OP_LCLT_UN : OP_LCLT;
} else {
cmp_op = OP_ICOMPARE;
ceq_op = OP_ICEQ;
cgt_op = is_unsigned ? OP_ICGT_UN : OP_ICGT;
clt_op = is_unsigned ? OP_ICLT_UN : OP_ICLT;
}
if (!strcmp (cmethod->name, "EnumEquals")) {
dreg = alloc_ireg (cfg);
EMIT_NEW_BIALU (cfg, ins, cmp_op, -1, args [0]->dreg, args [1]->dreg);
EMIT_NEW_UNALU (cfg, ins, ceq_op, dreg, -1);
} else {
// Use the branchless code (a > b) - (a < b)
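			// e.g. a = 5, b = 3 gives 1 - 0 = 1; equal values give 0 - 0 = 0; a < b gives 0 - 1 = -1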
int reg1, reg2;
reg1 = alloc_ireg (cfg);
reg2 = alloc_ireg (cfg);
dreg = alloc_ireg (cfg);
			if (t->type >= MONO_TYPE_BOOLEAN && t->type <= MONO_TYPE_U2) {
// Use "a - b" for small types (smaller than Int32)
EMIT_NEW_BIALU (cfg, ins, OP_ISUB, dreg, args [0]->dreg, args [1]->dreg);
			} else {
EMIT_NEW_BIALU (cfg, ins, cmp_op, -1, args [0]->dreg, args [1]->dreg);
EMIT_NEW_UNALU (cfg, ins, cgt_op, reg1, -1);
EMIT_NEW_BIALU (cfg, ins, cmp_op, -1, args [0]->dreg, args [1]->dreg);
EMIT_NEW_UNALU (cfg, ins, clt_op, reg2, -1);
EMIT_NEW_BIALU (cfg, ins, OP_ISUB, dreg, reg1, reg2);
}
}
return ins;
}
return NULL;
}
static gboolean
byref_arg_is_reference (MonoType *t)
{
g_assert (m_type_is_byref (t));
return mini_type_is_reference (m_class_get_byval_arg (mono_class_from_mono_type_internal (t)));
}
/*
* If INS represents the result of an ldtoken+Type::GetTypeFromHandle IL sequence,
* return the type.
*/
static MonoClass*
get_class_from_ldtoken_ins (MonoInst *ins)
{
// FIXME: The JIT case uses PCONST
if (ins->opcode == OP_AOTCONST) {
if (ins->inst_p1 != (gpointer)MONO_PATCH_INFO_TYPE_FROM_HANDLE)
return NULL;
MonoJumpInfoToken *token = (MonoJumpInfoToken*)ins->inst_p0;
MonoClass *handle_class;
ERROR_DECL (error);
gpointer handle = mono_ldtoken_checked (token->image, token->token, &handle_class, NULL, error);
mono_error_assert_ok (error);
MonoType *t = (MonoType*)handle;
return mono_class_from_mono_type_internal (t);
} else if (ins->opcode == OP_RTTYPE) {
return (MonoClass*)ins->inst_p0;
} else {
return NULL;
}
}
/*
* Given two instructions representing rttypes, return
* their relation (EQ/NE/NONE).
*/
static CompRelation
get_rttype_ins_relation (MonoInst *ins1, MonoInst *ins2)
{
MonoClass *k1 = get_class_from_ldtoken_ins (ins1);
MonoClass *k2 = get_class_from_ldtoken_ins (ins2);
CompRelation rel = CMP_UNORD;
if (k1 && k2) {
MonoType *t1 = m_class_get_byval_arg (k1);
MonoType *t2 = m_class_get_byval_arg (k2);
MonoType *constraint1 = NULL;
/* Common case in gshared BCL code: t1 is a gshared type like T_INT, and t2 is a concrete type */
if (mono_class_is_gparam (k1)) {
MonoGenericParam *gparam = t1->data.generic_param;
constraint1 = gparam->gshared_constraint;
}
if (constraint1) {
if (constraint1->type == MONO_TYPE_OBJECT) {
if (MONO_TYPE_IS_PRIMITIVE (t2) || MONO_TYPE_ISSTRUCT (t2))
rel = CMP_NE;
} else if (MONO_TYPE_IS_PRIMITIVE (constraint1)) {
if (MONO_TYPE_IS_PRIMITIVE (t2) && constraint1->type != t2->type)
rel = CMP_NE;
else if (MONO_TYPE_IS_REFERENCE (t2))
rel = CMP_NE;
}
}
}
return rel;
}
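/*
 * Example (illustrative): in gshared code where T is constrained to int
 * (gshared_constraint of type MONO_TYPE_I4), comparing typeof (T) against
 * typeof (string) yields CMP_NE, so the comparison can be folded away.
 */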
MonoInst*
mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args, gboolean *ins_type_initialized)
{
MonoInst *ins = NULL;
MonoClass *runtime_helpers_class = mono_class_get_runtime_helpers_class ();
*ins_type_initialized = FALSE;
const char* cmethod_klass_name_space;
if (m_class_get_nested_in (cmethod->klass))
cmethod_klass_name_space = m_class_get_name_space (m_class_get_nested_in (cmethod->klass));
else
cmethod_klass_name_space = m_class_get_name_space (cmethod->klass);
const char* cmethod_klass_name = m_class_get_name (cmethod->klass);
MonoImage *cmethod_klass_image = m_class_get_image (cmethod->klass);
gboolean in_corlib = cmethod_klass_image == mono_defaults.corlib;
/* Required intrinsics are always used even with -O=-intrins */
if (in_corlib &&
!strcmp (cmethod_klass_name_space, "System") &&
!strcmp (cmethod_klass_name, "ByReference`1") &&
!strcmp (cmethod->name, "get_Value")) {
g_assert (fsig->hasthis && fsig->param_count == 0);
int dreg = alloc_preg (cfg);
EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, args [0]->dreg, 0);
return ins;
}
if (!(cfg->opt & MONO_OPT_INTRINS))
return NULL;
if (cmethod->klass == mono_defaults.string_class) {
if (strcmp (cmethod->name, "get_Chars") == 0 && fsig->param_count + fsig->hasthis == 2) {
int dreg = alloc_ireg (cfg);
int index_reg = alloc_preg (cfg);
int add_reg = alloc_preg (cfg);
#if SIZEOF_REGISTER == 8
if (COMPILE_LLVM (cfg)) {
MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, index_reg, args [1]->dreg);
} else {
				/* The string reg is 64 bits but the index reg is only 32 */
MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
}
#else
index_reg = args [1]->dreg;
#endif
MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
#if defined(TARGET_X86) || defined(TARGET_AMD64)
EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, MONO_STRUCT_OFFSET (MonoString, chars));
add_reg = ins->dreg;
EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
add_reg, 0);
#else
int mult_reg = alloc_preg (cfg);
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
add_reg, MONO_STRUCT_OFFSET (MonoString, chars));
#endif
mini_type_from_op (cfg, ins, NULL, NULL);
return ins;
} else if (strcmp (cmethod->name, "get_Length") == 0 && fsig->param_count + fsig->hasthis == 1) {
int dreg = alloc_ireg (cfg);
/* Decompose later to allow more optimizations */
EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
ins->type = STACK_I4;
ins->flags |= MONO_INST_FAULT;
cfg->cbb->needs_decompose = TRUE;
cfg->flags |= MONO_CFG_NEEDS_DECOMPOSE;
return ins;
} else
return NULL;
} else if (cmethod->klass == mono_defaults.object_class) {
if (strcmp (cmethod->name, "GetType") == 0 && fsig->param_count + fsig->hasthis == 1) {
int dreg = alloc_ireg_ref (cfg);
int vt_reg = alloc_preg (cfg);
MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vt_reg, args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, MONO_STRUCT_OFFSET (MonoVTable, type));
mini_type_from_op (cfg, ins, NULL, NULL);
mini_type_to_eval_stack_type (cfg, fsig->ret, ins);
ins->klass = mono_defaults.runtimetype_class;
*ins_type_initialized = TRUE;
return ins;
} else if (!cfg->backend->emulate_mul_div && strcmp (cmethod->name, "InternalGetHashCode") == 0 && fsig->param_count == 1 && !mono_gc_is_moving ()) {
int dreg = alloc_ireg (cfg);
int t1 = alloc_ireg (cfg);
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, t1, args [0]->dreg, 3);
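			/* 2654435761 = 0x9E3779B1, a prime near 2^32 / golden ratio commonly used for multiplicative hashing */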
EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
ins->type = STACK_I4;
return ins;
} else if (strcmp (cmethod->name, ".ctor") == 0 && fsig->param_count == 0) {
MONO_INST_NEW (cfg, ins, OP_NOP);
MONO_ADD_INS (cfg->cbb, ins);
return ins;
} else
return NULL;
} else if (cmethod->klass == mono_defaults.array_class) {
if (fsig->param_count + fsig->hasthis == 3 && !cfg->gsharedvt && strcmp (cmethod->name, "GetGenericValueImpl") == 0)
return emit_array_generic_access (cfg, fsig, args, FALSE);
else if (fsig->param_count + fsig->hasthis == 3 && !cfg->gsharedvt && strcmp (cmethod->name, "SetGenericValueImpl") == 0)
return emit_array_generic_access (cfg, fsig, args, TRUE);
else if (!strcmp (cmethod->name, "GetElementSize")) {
int vt_reg = alloc_preg (cfg);
int class_reg = alloc_preg (cfg);
int sizes_reg = alloc_ireg (cfg);
MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vt_reg, args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, class_reg, vt_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, sizes_reg, class_reg, m_class_offsetof_sizes ());
return ins;
} else if (!strcmp (cmethod->name, "IsPrimitive")) {
int dreg = alloc_ireg (cfg);
MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, dreg, args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, dreg, dreg, MONO_STRUCT_OFFSET (MonoVTable, flags));
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, dreg, dreg, MONO_VT_FLAG_ARRAY_IS_PRIMITIVE);
EMIT_NEW_BIALU_IMM (cfg, ins, OP_COMPARE_IMM, -1, dreg, 0);
EMIT_NEW_UNALU (cfg, ins, OP_ICGT, dreg, -1);
ins->type = STACK_I4;
return ins;
}
#ifndef MONO_BIG_ARRAYS
/*
* This is an inline version of GetLength/GetLowerBound(0) used frequently in
* Array methods.
*/
else if (((strcmp (cmethod->name, "GetLength") == 0 && fsig->param_count + fsig->hasthis == 2) ||
(strcmp (cmethod->name, "GetLowerBound") == 0 && fsig->param_count + fsig->hasthis == 2)) &&
args [1]->opcode == OP_ICONST && args [1]->inst_c0 == 0) {
int dreg = alloc_ireg (cfg);
int bounds_reg = alloc_ireg_mp (cfg);
MonoBasicBlock *end_bb, *szarray_bb;
gboolean get_length = strcmp (cmethod->name, "GetLength") == 0;
NEW_BBLOCK (cfg, end_bb);
NEW_BBLOCK (cfg, szarray_bb);
EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOAD_MEMBASE, bounds_reg,
args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, bounds));
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, szarray_bb);
/* Non-szarray case */
if (get_length)
EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, length));
else
EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
MONO_START_BB (cfg, szarray_bb);
/* Szarray case */
if (get_length)
EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, max_length));
else
MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
MONO_START_BB (cfg, end_bb);
EMIT_NEW_UNALU (cfg, ins, OP_MOVE, dreg, dreg);
ins->type = STACK_I4;
return ins;
}
#endif
if (cmethod->name [0] != 'g')
return NULL;
if (strcmp (cmethod->name, "get_Rank") == 0 && fsig->param_count + fsig->hasthis == 1) {
int dreg = alloc_ireg (cfg);
int vtable_reg = alloc_preg (cfg);
MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT (cfg, OP_LOAD_MEMBASE, vtable_reg,
args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
mini_type_from_op (cfg, ins, NULL, NULL);
return ins;
} else if (strcmp (cmethod->name, "get_Length") == 0 && fsig->param_count + fsig->hasthis == 1) {
int dreg = alloc_ireg (cfg);
EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOADI4_MEMBASE, dreg,
args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, max_length));
mini_type_from_op (cfg, ins, NULL, NULL);
return ins;
} else
return NULL;
} else if (cmethod->klass == runtime_helpers_class) {
if (strcmp (cmethod->name, "get_OffsetToStringData") == 0 && fsig->param_count == 0) {
EMIT_NEW_ICONST (cfg, ins, MONO_STRUCT_OFFSET (MonoString, chars));
return ins;
} else if (!strcmp (cmethod->name, "GetRawData")) {
int dreg = alloc_preg (cfg);
EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, args [0]->dreg, MONO_ABI_SIZEOF (MonoObject));
return ins;
} else if (strcmp (cmethod->name, "IsReferenceOrContainsReferences") == 0 && fsig->param_count == 0) {
MonoGenericContext *ctx = mono_method_get_context (cmethod);
g_assert (ctx);
g_assert (ctx->method_inst);
g_assert (ctx->method_inst->type_argc == 1);
MonoType *arg_type = ctx->method_inst->type_argv [0];
MonoType *t;
MonoClass *klass;
ins = NULL;
			/* Resolve the argument class as far as possible so we can handle common cases fast */
t = mini_get_underlying_type (arg_type);
klass = mono_class_from_mono_type_internal (t);
mono_class_init_internal (klass);
if (MONO_TYPE_IS_REFERENCE (t))
EMIT_NEW_ICONST (cfg, ins, 1);
else if (MONO_TYPE_IS_PRIMITIVE (t))
EMIT_NEW_ICONST (cfg, ins, 0);
else if (cfg->gshared && (t->type == MONO_TYPE_VAR || t->type == MONO_TYPE_MVAR) && !mini_type_var_is_vt (t))
EMIT_NEW_ICONST (cfg, ins, 1);
else if (!cfg->gshared || !mini_class_check_context_used (cfg, klass))
EMIT_NEW_ICONST (cfg, ins, m_class_has_references (klass) ? 1 : 0);
else {
g_assert (cfg->gshared);
/* Have to use the original argument class here */
MonoClass *arg_class = mono_class_from_mono_type_internal (arg_type);
int context_used = mini_class_check_context_used (cfg, arg_class);
/* This returns 1 or 2 */
MonoInst *info = mini_emit_get_rgctx_klass (cfg, context_used, arg_class, MONO_RGCTX_INFO_CLASS_IS_REF_OR_CONTAINS_REFS);
int dreg = alloc_ireg (cfg);
EMIT_NEW_BIALU_IMM (cfg, ins, OP_ISUB_IMM, dreg, info->dreg, 1);
}
return ins;
} else if (strcmp (cmethod->name, "IsBitwiseEquatable") == 0 && fsig->param_count == 0) {
MonoGenericContext *ctx = mono_method_get_context (cmethod);
g_assert (ctx);
g_assert (ctx->method_inst);
g_assert (ctx->method_inst->type_argc == 1);
MonoType *arg_type = ctx->method_inst->type_argv [0];
MonoType *t;
ins = NULL;
			/* Resolve the argument class as far as possible so we can handle common cases fast */
t = mini_get_underlying_type (arg_type);
if (MONO_TYPE_IS_PRIMITIVE (t) && t->type != MONO_TYPE_R4 && t->type != MONO_TYPE_R8)
EMIT_NEW_ICONST (cfg, ins, 1);
else
EMIT_NEW_ICONST (cfg, ins, 0);
return ins;
} else if (!strcmp (cmethod->name, "ObjectHasComponentSize")) {
g_assert (fsig->param_count == 1);
g_assert (fsig->params [0]->type == MONO_TYPE_OBJECT);
			// Return true for arrays and strings
int dreg;
dreg = alloc_ireg (cfg);
MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, dreg, args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, dreg, dreg, MONO_STRUCT_OFFSET (MonoVTable, flags));
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, dreg, dreg, MONO_VT_FLAG_ARRAY_OR_STRING);
EMIT_NEW_BIALU_IMM (cfg, ins, OP_COMPARE_IMM, -1, dreg, 0);
EMIT_NEW_UNALU (cfg, ins, OP_ICGT, dreg, -1);
ins->type = STACK_I4;
return ins;
} else if (!strcmp (cmethod->name, "ObjectHasReferences")) {
int dreg = alloc_ireg (cfg);
MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, dreg, args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, dreg, dreg, MONO_STRUCT_OFFSET (MonoVTable, flags));
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, dreg, dreg, MONO_VT_FLAG_HAS_REFERENCES);
EMIT_NEW_BIALU_IMM (cfg, ins, OP_COMPARE_IMM, -1, dreg, 0);
EMIT_NEW_UNALU (cfg, ins, OP_ICGT, dreg, -1);
ins->type = STACK_I4;
return ins;
} else
return NULL;
} else if (cmethod->klass == mono_class_try_get_memory_marshal_class ()) {
if (!strcmp (cmethod->name, "GetArrayDataReference")) {
// Logic below works for both SZARRAY and MDARRAY
int dreg = alloc_preg (cfg);
MONO_EMIT_NULL_CHECK (cfg, args [0]->dreg, FALSE);
EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, vector));
return ins;
}
} else if (cmethod->klass == mono_defaults.monitor_class) {
gboolean is_enter = FALSE;
gboolean is_v4 = FALSE;
if (!strcmp (cmethod->name, "Enter") && fsig->param_count == 2 && m_type_is_byref (fsig->params [1])) {
is_enter = TRUE;
is_v4 = TRUE;
}
if (!strcmp (cmethod->name, "Enter") && fsig->param_count == 1)
is_enter = TRUE;
if (is_enter) {
/*
* To make async stack traces work, icalls which can block should have a wrapper.
* For Monitor.Enter, emit two calls: a fastpath which doesn't have a wrapper, and a slowpath, which does.
*/
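			/* The *_fast icall returns nonzero when the lock was taken without
			 * blocking; otherwise we fall through to the slowpath icall below. */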
MonoBasicBlock *end_bb;
NEW_BBLOCK (cfg, end_bb);
if (is_v4)
ins = mono_emit_jit_icall (cfg, mono_monitor_enter_v4_fast, args);
else
ins = mono_emit_jit_icall (cfg, mono_monitor_enter_fast, args);
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, ins->dreg, 0);
MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBNE_UN, end_bb);
if (is_v4)
ins = mono_emit_jit_icall (cfg, mono_monitor_enter_v4_internal, args);
else
ins = mono_emit_jit_icall (cfg, mono_monitor_enter_internal, args);
MONO_START_BB (cfg, end_bb);
return ins;
}
} else if (cmethod->klass == mono_defaults.thread_class) {
if (strcmp (cmethod->name, "SpinWait_nop") == 0 && fsig->param_count == 0) {
MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
MONO_ADD_INS (cfg->cbb, ins);
return ins;
} else if (strcmp (cmethod->name, "MemoryBarrier") == 0 && fsig->param_count == 0) {
return mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
} else if (!strcmp (cmethod->name, "VolatileRead") && fsig->param_count == 1 && m_type_is_byref (fsig->params [0])) {
guint32 opcode = 0;
gboolean is_ref = byref_arg_is_reference (fsig->params [0]);
if (fsig->params [0]->type == MONO_TYPE_I1)
opcode = OP_LOADI1_MEMBASE;
else if (fsig->params [0]->type == MONO_TYPE_U1)
opcode = OP_LOADU1_MEMBASE;
else if (fsig->params [0]->type == MONO_TYPE_I2)
opcode = OP_LOADI2_MEMBASE;
else if (fsig->params [0]->type == MONO_TYPE_U2)
opcode = OP_LOADU2_MEMBASE;
else if (fsig->params [0]->type == MONO_TYPE_I4)
opcode = OP_LOADI4_MEMBASE;
else if (fsig->params [0]->type == MONO_TYPE_U4)
opcode = OP_LOADU4_MEMBASE;
else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_U8)
opcode = OP_LOADI8_MEMBASE;
else if (fsig->params [0]->type == MONO_TYPE_R4)
opcode = OP_LOADR4_MEMBASE;
else if (fsig->params [0]->type == MONO_TYPE_R8)
opcode = OP_LOADR8_MEMBASE;
else if (is_ref || fsig->params [0]->type == MONO_TYPE_I || fsig->params [0]->type == MONO_TYPE_U)
opcode = OP_LOAD_MEMBASE;
if (opcode) {
MONO_INST_NEW (cfg, ins, opcode);
ins->inst_basereg = args [0]->dreg;
ins->inst_offset = 0;
MONO_ADD_INS (cfg->cbb, ins);
switch (fsig->params [0]->type) {
case MONO_TYPE_I1:
case MONO_TYPE_U1:
case MONO_TYPE_I2:
case MONO_TYPE_U2:
case MONO_TYPE_I4:
case MONO_TYPE_U4:
ins->dreg = mono_alloc_ireg (cfg);
ins->type = STACK_I4;
break;
case MONO_TYPE_I8:
case MONO_TYPE_U8:
ins->dreg = mono_alloc_lreg (cfg);
ins->type = STACK_I8;
break;
case MONO_TYPE_I:
case MONO_TYPE_U:
ins->dreg = mono_alloc_ireg (cfg);
#if SIZEOF_REGISTER == 8
ins->type = STACK_I8;
#else
ins->type = STACK_I4;
#endif
break;
case MONO_TYPE_R4:
case MONO_TYPE_R8:
ins->dreg = mono_alloc_freg (cfg);
ins->type = STACK_R8;
break;
default:
g_assert (is_ref);
ins->dreg = mono_alloc_ireg_ref (cfg);
ins->type = STACK_OBJ;
break;
}
if (opcode == OP_LOADI8_MEMBASE)
ins = mono_decompose_opcode (cfg, ins);
mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
return ins;
}
} else if (!strcmp (cmethod->name, "VolatileWrite") && fsig->param_count == 2 && m_type_is_byref (fsig->params [0])) {
guint32 opcode = 0;
gboolean is_ref = byref_arg_is_reference (fsig->params [0]);
if (fsig->params [0]->type == MONO_TYPE_I1 || fsig->params [0]->type == MONO_TYPE_U1)
opcode = OP_STOREI1_MEMBASE_REG;
else if (fsig->params [0]->type == MONO_TYPE_I2 || fsig->params [0]->type == MONO_TYPE_U2)
opcode = OP_STOREI2_MEMBASE_REG;
else if (fsig->params [0]->type == MONO_TYPE_I4 || fsig->params [0]->type == MONO_TYPE_U4)
opcode = OP_STOREI4_MEMBASE_REG;
else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_U8)
opcode = OP_STOREI8_MEMBASE_REG;
else if (fsig->params [0]->type == MONO_TYPE_R4)
opcode = OP_STORER4_MEMBASE_REG;
else if (fsig->params [0]->type == MONO_TYPE_R8)
opcode = OP_STORER8_MEMBASE_REG;
else if (is_ref || fsig->params [0]->type == MONO_TYPE_I || fsig->params [0]->type == MONO_TYPE_U)
opcode = OP_STORE_MEMBASE_REG;
if (opcode) {
mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
MONO_INST_NEW (cfg, ins, opcode);
ins->sreg1 = args [1]->dreg;
ins->inst_destbasereg = args [0]->dreg;
ins->inst_offset = 0;
MONO_ADD_INS (cfg->cbb, ins);
if (opcode == OP_STOREI8_MEMBASE_REG)
ins = mono_decompose_opcode (cfg, ins);
return ins;
}
}
} else if (in_corlib &&
(strcmp (cmethod_klass_name_space, "System.Threading") == 0) &&
(strcmp (cmethod_klass_name, "Interlocked") == 0)) {
ins = NULL;
#if SIZEOF_REGISTER == 8
if (!cfg->llvm_only && strcmp (cmethod->name, "Read") == 0 && fsig->param_count == 1 && (fsig->params [0]->type == MONO_TYPE_I8)) {
			if (mono_arch_opcode_supported (OP_ATOMIC_LOAD_I8)) {
MONO_INST_NEW (cfg, ins, OP_ATOMIC_LOAD_I8);
ins->dreg = mono_alloc_preg (cfg);
ins->sreg1 = args [0]->dreg;
ins->type = STACK_I8;
ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_SEQ;
MONO_ADD_INS (cfg->cbb, ins);
} else {
MonoInst *load_ins;
mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
/* 64 bit reads are already atomic */
MONO_INST_NEW (cfg, load_ins, OP_LOADI8_MEMBASE);
load_ins->dreg = mono_alloc_preg (cfg);
load_ins->inst_basereg = args [0]->dreg;
load_ins->inst_offset = 0;
load_ins->type = STACK_I8;
MONO_ADD_INS (cfg->cbb, load_ins);
mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
ins = load_ins;
}
}
#endif
if (strcmp (cmethod->name, "Increment") == 0 && fsig->param_count == 1) {
MonoInst *ins_iconst;
guint32 opcode = 0;
if (fsig->params [0]->type == MONO_TYPE_I4) {
opcode = OP_ATOMIC_ADD_I4;
cfg->has_atomic_add_i4 = TRUE;
}
#if SIZEOF_REGISTER == 8
else if (fsig->params [0]->type == MONO_TYPE_I8)
opcode = OP_ATOMIC_ADD_I8;
#endif
if (opcode) {
if (!mono_arch_opcode_supported (opcode))
return NULL;
MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
ins_iconst->inst_c0 = 1;
ins_iconst->dreg = mono_alloc_ireg (cfg);
MONO_ADD_INS (cfg->cbb, ins_iconst);
MONO_INST_NEW (cfg, ins, opcode);
ins->dreg = mono_alloc_ireg (cfg);
ins->inst_basereg = args [0]->dreg;
ins->inst_offset = 0;
ins->sreg2 = ins_iconst->dreg;
ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
MONO_ADD_INS (cfg->cbb, ins);
}
} else if (strcmp (cmethod->name, "Decrement") == 0 && fsig->param_count == 1) {
MonoInst *ins_iconst;
guint32 opcode = 0;
if (fsig->params [0]->type == MONO_TYPE_I4) {
opcode = OP_ATOMIC_ADD_I4;
cfg->has_atomic_add_i4 = TRUE;
}
#if SIZEOF_REGISTER == 8
else if (fsig->params [0]->type == MONO_TYPE_I8)
opcode = OP_ATOMIC_ADD_I8;
#endif
if (opcode) {
if (!mono_arch_opcode_supported (opcode))
return NULL;
MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
ins_iconst->inst_c0 = -1;
ins_iconst->dreg = mono_alloc_ireg (cfg);
MONO_ADD_INS (cfg->cbb, ins_iconst);
MONO_INST_NEW (cfg, ins, opcode);
ins->dreg = mono_alloc_ireg (cfg);
ins->inst_basereg = args [0]->dreg;
ins->inst_offset = 0;
ins->sreg2 = ins_iconst->dreg;
ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
MONO_ADD_INS (cfg->cbb, ins);
}
} else if (fsig->param_count == 2 &&
((strcmp (cmethod->name, "Add") == 0) ||
(strcmp (cmethod->name, "And") == 0) ||
(strcmp (cmethod->name, "Or") == 0))) {
guint32 opcode = 0;
guint32 opcode_i4 = 0;
guint32 opcode_i8 = 0;
if (strcmp (cmethod->name, "Add") == 0) {
opcode_i4 = OP_ATOMIC_ADD_I4;
opcode_i8 = OP_ATOMIC_ADD_I8;
} else if (strcmp (cmethod->name, "And") == 0) {
opcode_i4 = OP_ATOMIC_AND_I4;
opcode_i8 = OP_ATOMIC_AND_I8;
} else if (strcmp (cmethod->name, "Or") == 0) {
opcode_i4 = OP_ATOMIC_OR_I4;
opcode_i8 = OP_ATOMIC_OR_I8;
} else {
g_assert_not_reached ();
}
if (fsig->params [0]->type == MONO_TYPE_I4) {
opcode = opcode_i4;
cfg->has_atomic_add_i4 = TRUE;
} else if (fsig->params [0]->type == MONO_TYPE_I8 && SIZEOF_REGISTER == 8) {
opcode = opcode_i8;
}
// For now, only Add is supported in non-LLVM back-ends
if (opcode && (COMPILE_LLVM (cfg) || mono_arch_opcode_supported (opcode))) {
MONO_INST_NEW (cfg, ins, opcode);
ins->dreg = mono_alloc_ireg (cfg);
ins->inst_basereg = args [0]->dreg;
ins->inst_offset = 0;
ins->sreg2 = args [1]->dreg;
ins->type = (opcode == opcode_i4) ? STACK_I4 : STACK_I8;
MONO_ADD_INS (cfg->cbb, ins);
}
		} else if (strcmp (cmethod->name, "Exchange") == 0 && fsig->param_count == 2 && m_type_is_byref (fsig->params [0])) {
MonoInst *f2i = NULL, *i2f;
guint32 opcode, f2i_opcode, i2f_opcode;
gboolean is_ref = byref_arg_is_reference (fsig->params [0]);
gboolean is_float = fsig->params [0]->type == MONO_TYPE_R4 || fsig->params [0]->type == MONO_TYPE_R8;
if (fsig->params [0]->type == MONO_TYPE_I4 ||
fsig->params [0]->type == MONO_TYPE_R4) {
opcode = OP_ATOMIC_EXCHANGE_I4;
f2i_opcode = OP_MOVE_F_TO_I4;
i2f_opcode = OP_MOVE_I4_TO_F;
cfg->has_atomic_exchange_i4 = TRUE;
}
#if SIZEOF_REGISTER == 8
else if (is_ref ||
fsig->params [0]->type == MONO_TYPE_I8 ||
fsig->params [0]->type == MONO_TYPE_R8 ||
fsig->params [0]->type == MONO_TYPE_I) {
opcode = OP_ATOMIC_EXCHANGE_I8;
f2i_opcode = OP_MOVE_F_TO_I8;
i2f_opcode = OP_MOVE_I8_TO_F;
}
#else
else if (is_ref || fsig->params [0]->type == MONO_TYPE_I) {
opcode = OP_ATOMIC_EXCHANGE_I4;
cfg->has_atomic_exchange_i4 = TRUE;
}
#endif
else
return NULL;
if (!mono_arch_opcode_supported (opcode))
return NULL;
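			/* Atomic exchange only operates on integer registers, so float arguments
			 * are bit-moved into an int reg first and the result moved back below. */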
if (is_float) {
/* TODO: Decompose these opcodes instead of bailing here. */
if (COMPILE_SOFT_FLOAT (cfg))
return NULL;
MONO_INST_NEW (cfg, f2i, f2i_opcode);
f2i->dreg = mono_alloc_ireg (cfg);
f2i->sreg1 = args [1]->dreg;
if (f2i_opcode == OP_MOVE_F_TO_I4)
f2i->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
MONO_ADD_INS (cfg->cbb, f2i);
}
if (is_ref && !mini_debug_options.weak_memory_model)
mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
MONO_INST_NEW (cfg, ins, opcode);
ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : mono_alloc_ireg (cfg);
ins->inst_basereg = args [0]->dreg;
ins->inst_offset = 0;
ins->sreg2 = is_float ? f2i->dreg : args [1]->dreg;
MONO_ADD_INS (cfg->cbb, ins);
switch (fsig->params [0]->type) {
case MONO_TYPE_I4:
ins->type = STACK_I4;
break;
case MONO_TYPE_I8:
ins->type = STACK_I8;
break;
case MONO_TYPE_I:
#if SIZEOF_REGISTER == 8
ins->type = STACK_I8;
#else
ins->type = STACK_I4;
#endif
break;
case MONO_TYPE_R4:
case MONO_TYPE_R8:
ins->type = STACK_R8;
break;
default:
g_assert (is_ref);
ins->type = STACK_OBJ;
break;
}
if (is_float) {
MONO_INST_NEW (cfg, i2f, i2f_opcode);
i2f->dreg = mono_alloc_freg (cfg);
i2f->sreg1 = ins->dreg;
i2f->type = STACK_R8;
if (i2f_opcode == OP_MOVE_I4_TO_F)
i2f->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
MONO_ADD_INS (cfg->cbb, i2f);
ins = i2f;
}
if (cfg->gen_write_barriers && is_ref)
mini_emit_write_barrier (cfg, args [0], args [1]);
		} else if ((strcmp (cmethod->name, "CompareExchange") == 0) && fsig->param_count == 3) {
MonoInst *f2i_new = NULL, *f2i_cmp = NULL, *i2f;
guint32 opcode, f2i_opcode, i2f_opcode;
gboolean is_ref = mini_type_is_reference (fsig->params [1]);
gboolean is_float = fsig->params [1]->type == MONO_TYPE_R4 || fsig->params [1]->type == MONO_TYPE_R8;
if (fsig->params [1]->type == MONO_TYPE_I4 ||
fsig->params [1]->type == MONO_TYPE_R4) {
opcode = OP_ATOMIC_CAS_I4;
f2i_opcode = OP_MOVE_F_TO_I4;
i2f_opcode = OP_MOVE_I4_TO_F;
cfg->has_atomic_cas_i4 = TRUE;
}
#if SIZEOF_REGISTER == 8
else if (is_ref ||
fsig->params [1]->type == MONO_TYPE_I8 ||
fsig->params [1]->type == MONO_TYPE_R8 ||
fsig->params [1]->type == MONO_TYPE_I) {
opcode = OP_ATOMIC_CAS_I8;
f2i_opcode = OP_MOVE_F_TO_I8;
i2f_opcode = OP_MOVE_I8_TO_F;
}
#else
else if (is_ref || fsig->params [1]->type == MONO_TYPE_I) {
opcode = OP_ATOMIC_CAS_I4;
cfg->has_atomic_cas_i4 = TRUE;
}
#endif
else
return NULL;
if (!mono_arch_opcode_supported (opcode))
return NULL;
if (is_float) {
/* TODO: Decompose these opcodes instead of bailing here. */
if (COMPILE_SOFT_FLOAT (cfg))
return NULL;
MONO_INST_NEW (cfg, f2i_new, f2i_opcode);
f2i_new->dreg = mono_alloc_ireg (cfg);
f2i_new->sreg1 = args [1]->dreg;
if (f2i_opcode == OP_MOVE_F_TO_I4)
f2i_new->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
MONO_ADD_INS (cfg->cbb, f2i_new);
MONO_INST_NEW (cfg, f2i_cmp, f2i_opcode);
f2i_cmp->dreg = mono_alloc_ireg (cfg);
f2i_cmp->sreg1 = args [2]->dreg;
if (f2i_opcode == OP_MOVE_F_TO_I4)
f2i_cmp->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
MONO_ADD_INS (cfg->cbb, f2i_cmp);
}
if (is_ref && !mini_debug_options.weak_memory_model)
mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
MONO_INST_NEW (cfg, ins, opcode);
ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
ins->sreg1 = args [0]->dreg;
ins->sreg2 = is_float ? f2i_new->dreg : args [1]->dreg;
ins->sreg3 = is_float ? f2i_cmp->dreg : args [2]->dreg;
MONO_ADD_INS (cfg->cbb, ins);
switch (fsig->params [1]->type) {
case MONO_TYPE_I4:
ins->type = STACK_I4;
break;
case MONO_TYPE_I8:
ins->type = STACK_I8;
break;
case MONO_TYPE_I:
#if SIZEOF_REGISTER == 8
ins->type = STACK_I8;
#else
ins->type = STACK_I4;
#endif
break;
case MONO_TYPE_R4:
ins->type = cfg->r4_stack_type;
break;
case MONO_TYPE_R8:
ins->type = STACK_R8;
break;
default:
g_assert (mini_type_is_reference (fsig->params [1]));
ins->type = STACK_OBJ;
break;
}
if (is_float) {
MONO_INST_NEW (cfg, i2f, i2f_opcode);
i2f->dreg = mono_alloc_freg (cfg);
i2f->sreg1 = ins->dreg;
i2f->type = STACK_R8;
if (i2f_opcode == OP_MOVE_I4_TO_F)
i2f->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
MONO_ADD_INS (cfg->cbb, i2f);
ins = i2f;
}
if (cfg->gen_write_barriers && is_ref)
mini_emit_write_barrier (cfg, args [0], args [1]);
		} else if ((strcmp (cmethod->name, "CompareExchange") == 0) && fsig->param_count == 4 &&
fsig->params [1]->type == MONO_TYPE_I4) {
MonoInst *cmp, *ceq;
if (!mono_arch_opcode_supported (OP_ATOMIC_CAS_I4))
return NULL;
/* int32 r = CAS (location, value, comparand); */
MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
ins->dreg = alloc_ireg (cfg);
ins->sreg1 = args [0]->dreg;
ins->sreg2 = args [1]->dreg;
ins->sreg3 = args [2]->dreg;
ins->type = STACK_I4;
MONO_ADD_INS (cfg->cbb, ins);
/* bool result = r == comparand; */
MONO_INST_NEW (cfg, cmp, OP_ICOMPARE);
cmp->sreg1 = ins->dreg;
cmp->sreg2 = args [2]->dreg;
cmp->type = STACK_I4;
MONO_ADD_INS (cfg->cbb, cmp);
MONO_INST_NEW (cfg, ceq, OP_ICEQ);
ceq->dreg = alloc_ireg (cfg);
ceq->type = STACK_I4;
MONO_ADD_INS (cfg->cbb, ceq);
/* *success = result; */
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, args [3]->dreg, 0, ceq->dreg);
cfg->has_atomic_cas_i4 = TRUE;
		} else if (strcmp (cmethod->name, "MemoryBarrier") == 0 && fsig->param_count == 0)
ins = mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
if (ins)
return ins;
} else if (in_corlib &&
(strcmp (cmethod_klass_name_space, "System.Threading") == 0) &&
(strcmp (cmethod_klass_name, "Volatile") == 0)) {
ins = NULL;
if (!cfg->llvm_only && !strcmp (cmethod->name, "Read") && fsig->param_count == 1) {
guint32 opcode = 0;
MonoType *t = fsig->params [0];
gboolean is_ref;
gboolean is_float = t->type == MONO_TYPE_R4 || t->type == MONO_TYPE_R8;
g_assert (m_type_is_byref (t));
is_ref = byref_arg_is_reference (t);
if (t->type == MONO_TYPE_I1)
opcode = OP_ATOMIC_LOAD_I1;
else if (t->type == MONO_TYPE_U1 || t->type == MONO_TYPE_BOOLEAN)
opcode = OP_ATOMIC_LOAD_U1;
else if (t->type == MONO_TYPE_I2)
opcode = OP_ATOMIC_LOAD_I2;
else if (t->type == MONO_TYPE_U2)
opcode = OP_ATOMIC_LOAD_U2;
else if (t->type == MONO_TYPE_I4)
opcode = OP_ATOMIC_LOAD_I4;
else if (t->type == MONO_TYPE_U4)
opcode = OP_ATOMIC_LOAD_U4;
else if (t->type == MONO_TYPE_R4)
opcode = OP_ATOMIC_LOAD_R4;
else if (t->type == MONO_TYPE_R8)
opcode = OP_ATOMIC_LOAD_R8;
#if SIZEOF_REGISTER == 8
else if (t->type == MONO_TYPE_I8 || t->type == MONO_TYPE_I)
opcode = OP_ATOMIC_LOAD_I8;
else if (is_ref || t->type == MONO_TYPE_U8 || t->type == MONO_TYPE_U)
opcode = OP_ATOMIC_LOAD_U8;
#else
else if (t->type == MONO_TYPE_I)
opcode = OP_ATOMIC_LOAD_I4;
else if (is_ref || t->type == MONO_TYPE_U)
opcode = OP_ATOMIC_LOAD_U4;
#endif
if (opcode) {
if (!mono_arch_opcode_supported (opcode))
return NULL;
MONO_INST_NEW (cfg, ins, opcode);
ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : (is_float ? mono_alloc_freg (cfg) : mono_alloc_ireg (cfg));
ins->sreg1 = args [0]->dreg;
ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_ACQ;
MONO_ADD_INS (cfg->cbb, ins);
switch (t->type) {
case MONO_TYPE_BOOLEAN:
case MONO_TYPE_I1:
case MONO_TYPE_U1:
case MONO_TYPE_I2:
case MONO_TYPE_U2:
case MONO_TYPE_I4:
case MONO_TYPE_U4:
ins->type = STACK_I4;
break;
case MONO_TYPE_I8:
case MONO_TYPE_U8:
ins->type = STACK_I8;
break;
case MONO_TYPE_I:
case MONO_TYPE_U:
#if SIZEOF_REGISTER == 8
ins->type = STACK_I8;
#else
ins->type = STACK_I4;
#endif
break;
case MONO_TYPE_R4:
ins->type = cfg->r4_stack_type;
break;
case MONO_TYPE_R8:
ins->type = STACK_R8;
break;
default:
g_assert (is_ref);
ins->type = STACK_OBJ;
break;
}
}
}
if (!cfg->llvm_only && !strcmp (cmethod->name, "Write") && fsig->param_count == 2) {
guint32 opcode = 0;
MonoType *t = fsig->params [0];
gboolean is_ref;
g_assert (m_type_is_byref (t));
is_ref = byref_arg_is_reference (t);
if (t->type == MONO_TYPE_I1)
opcode = OP_ATOMIC_STORE_I1;
else if (t->type == MONO_TYPE_U1 || t->type == MONO_TYPE_BOOLEAN)
opcode = OP_ATOMIC_STORE_U1;
else if (t->type == MONO_TYPE_I2)
opcode = OP_ATOMIC_STORE_I2;
else if (t->type == MONO_TYPE_U2)
opcode = OP_ATOMIC_STORE_U2;
else if (t->type == MONO_TYPE_I4)
opcode = OP_ATOMIC_STORE_I4;
else if (t->type == MONO_TYPE_U4)
opcode = OP_ATOMIC_STORE_U4;
else if (t->type == MONO_TYPE_R4)
opcode = OP_ATOMIC_STORE_R4;
else if (t->type == MONO_TYPE_R8)
opcode = OP_ATOMIC_STORE_R8;
#if SIZEOF_REGISTER == 8
else if (t->type == MONO_TYPE_I8 || t->type == MONO_TYPE_I)
opcode = OP_ATOMIC_STORE_I8;
else if (is_ref || t->type == MONO_TYPE_U8 || t->type == MONO_TYPE_U)
opcode = OP_ATOMIC_STORE_U8;
#else
else if (t->type == MONO_TYPE_I)
opcode = OP_ATOMIC_STORE_I4;
else if (is_ref || t->type == MONO_TYPE_U)
opcode = OP_ATOMIC_STORE_U4;
#endif
if (opcode) {
if (!mono_arch_opcode_supported (opcode))
return NULL;
MONO_INST_NEW (cfg, ins, opcode);
ins->dreg = args [0]->dreg;
ins->sreg1 = args [1]->dreg;
ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_REL;
MONO_ADD_INS (cfg->cbb, ins);
if (cfg->gen_write_barriers && is_ref)
mini_emit_write_barrier (cfg, args [0], args [1]);
}
}
if (ins)
return ins;
} else if (in_corlib &&
(strcmp (cmethod_klass_name_space, "System.Diagnostics") == 0) &&
(strcmp (cmethod_klass_name, "Debugger") == 0)) {
if (!strcmp (cmethod->name, "Break") && fsig->param_count == 0) {
if (mini_should_insert_breakpoint (cfg->method)) {
ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
} else {
MONO_INST_NEW (cfg, ins, OP_NOP);
MONO_ADD_INS (cfg->cbb, ins);
}
return ins;
}
} else if (in_corlib &&
(strcmp (cmethod_klass_name_space, "System.Reflection") == 0) &&
(strcmp (cmethod_klass_name, "Assembly") == 0)) {
if (cfg->llvm_only && !strcmp (cmethod->name, "GetExecutingAssembly")) {
/* No stack walks are currently available, so implement this as an intrinsic */
MonoInst *assembly_ins;
EMIT_NEW_AOTCONST (cfg, assembly_ins, MONO_PATCH_INFO_IMAGE, m_class_get_image (cfg->method->klass));
ins = mono_emit_jit_icall (cfg, mono_get_assembly_object, &assembly_ins);
return ins;
}
		// While it is not required per
		// https://msdn.microsoft.com/en-us/library/system.reflection.assembly.getcallingassembly(v=vs.110).aspx,
		// make GetCallingAssembly behave consistently regardless of the optimization level.
		// This fixes mono/tests/test-inline-call-stack.cs under FullAOT+LLVM.
		cfg->no_inline |= COMPILE_LLVM (cfg) && strcmp (cmethod->name, "GetCallingAssembly") == 0;
} else if (in_corlib &&
(strcmp (cmethod_klass_name_space, "System.Reflection") == 0) &&
(strcmp (cmethod_klass_name, "MethodBase") == 0)) {
if (cfg->llvm_only && !strcmp (cmethod->name, "GetCurrentMethod")) {
/* No stack walks are currently available, so implement this as an intrinsic */
MonoInst *method_ins;
MonoMethod *declaring = cfg->method;
/* This returns the declaring generic method */
if (declaring->is_inflated)
declaring = ((MonoMethodInflated*)cfg->method)->declaring;
EMIT_NEW_AOTCONST (cfg, method_ins, MONO_PATCH_INFO_METHODCONST, declaring);
ins = mono_emit_jit_icall (cfg, mono_get_method_object, &method_ins);
cfg->no_inline = TRUE;
if (cfg->method != cfg->current_method)
mini_set_inline_failure (cfg, "MethodBase:GetCurrentMethod ()");
return ins;
}
} else if (cmethod->klass == mono_class_try_get_math_class ()) {
/*
* There is general branchless code for Min/Max, but it does not work for
* all inputs:
* http://everything2.com/?node_id=1051618
*/
		/*
		 * Constant folding for various Math methods.
		 * We avoid folding constants whose computation would raise an error,
		 * in case the user code expects that error to be raised.
		 */
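		/* e.g. a call to Math.Sqrt (4.0) with a constant argument is replaced
		 * below by an OP_R8CONST holding 2.0, and the original constant
		 * instruction is nullified. */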
if (fsig->param_count == 1 && args [0]->opcode == OP_R8CONST){
double source = *(double *)args [0]->inst_p0;
int opcode = 0;
const char *mname = cmethod->name;
char c = mname [0];
if (c == 'A'){
if (strcmp (mname, "Abs") == 0 && fsig->params [0]->type == MONO_TYPE_R8) {
opcode = OP_ABS;
} else if (strcmp (mname, "Asin") == 0){
if (fabs (source) <= 1)
opcode = OP_ASIN;
} else if (strcmp (mname, "Asinh") == 0){
opcode = OP_ASINH;
} else if (strcmp (mname, "Acos") == 0){
if (fabs (source) <= 1)
opcode = OP_ACOS;
} else if (strcmp (mname, "Acosh") == 0){
if (source >= 1)
opcode = OP_ACOSH;
} else if (strcmp (mname, "Atan") == 0){
opcode = OP_ATAN;
} else if (strcmp (mname, "Atanh") == 0){
if (fabs (source) < 1)
opcode = OP_ATANH;
}
} else if (c == 'C'){
if (strcmp (mname, "Cos") == 0) {
if (!isinf (source))
opcode = OP_COS;
} else if (strcmp (mname, "Cbrt") == 0){
opcode = OP_CBRT;
} else if (strcmp (mname, "Cosh") == 0){
opcode = OP_COSH;
}
} else if (c == 'R'){
if (strcmp (mname, "Round") == 0)
opcode = OP_ROUND;
} else if (c == 'S'){
if (strcmp (mname, "Sin") == 0) {
if (!isinf (source))
opcode = OP_SIN;
} else if (strcmp (mname, "Sqrt") == 0) {
if (source >= 0)
opcode = OP_SQRT;
} else if (strcmp (mname, "Sinh") == 0){
opcode = OP_SINH;
}
} else if (c == 'T'){
if (strcmp (mname, "Tan") == 0){
if (!isinf (source))
opcode = OP_TAN;
} else if (strcmp (mname, "Tanh") == 0){
opcode = OP_TANH;
}
}
if (opcode) {
double *dest = (double *)mono_mem_manager_alloc (cfg->mem_manager, sizeof (double));
double result = 0;
MONO_INST_NEW (cfg, ins, OP_R8CONST);
ins->type = STACK_R8;
ins->dreg = mono_alloc_dreg (cfg, (MonoStackType) ins->type);
ins->inst_p0 = dest;
switch (opcode){
case OP_ABS:
result = fabs (source);
break;
case OP_ACOS:
result = acos (source);
break;
case OP_ACOSH:
result = acosh (source);
break;
case OP_ASIN:
result = asin (source);
break;
case OP_ASINH:
					result = asinh (source);
break;
case OP_ATAN:
result = atan (source);
break;
case OP_ATANH:
result = atanh (source);
break;
case OP_CBRT:
result = cbrt (source);
break;
case OP_COS:
result = cos (source);
break;
case OP_COSH:
result = cosh (source);
break;
case OP_ROUND:
result = mono_round_to_even (source);
break;
case OP_SIN:
result = sin (source);
break;
case OP_SINH:
result = sinh (source);
break;
case OP_SQRT:
result = sqrt (source);
break;
case OP_TAN:
result = tan (source);
break;
case OP_TANH:
result = tanh (source);
break;
default:
g_error ("invalid opcode %d", (int)opcode);
}
*dest = result;
MONO_ADD_INS (cfg->cbb, ins);
NULLIFY_INS (args [0]);
return ins;
}
}
} else if (cmethod->klass == mono_defaults.systemtype_class && !strcmp (cmethod->name, "op_Equality") &&
args [0]->klass == mono_defaults.runtimetype_class && args [1]->klass == mono_defaults.runtimetype_class) {
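		/* Fold typeof (X) == typeof (Y) into a constant when both operands come
		 * from ldtoken sequences and their relation is statically known. */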
CompRelation rel = get_rttype_ins_relation (args [0], args [1]);
if (rel == CMP_EQ) {
if (cfg->verbose_level > 2)
printf ("-> true\n");
EMIT_NEW_ICONST (cfg, ins, 1);
} else if (rel == CMP_NE) {
if (cfg->verbose_level > 2)
printf ("-> false\n");
EMIT_NEW_ICONST (cfg, ins, 0);
} else {
EMIT_NEW_BIALU (cfg, ins, OP_COMPARE, -1, args [0]->dreg, args [1]->dreg);
MONO_INST_NEW (cfg, ins, OP_PCEQ);
ins->dreg = alloc_preg (cfg);
ins->type = STACK_I4;
MONO_ADD_INS (cfg->cbb, ins);
}
return ins;
} else if (cmethod->klass == mono_defaults.systemtype_class && !strcmp (cmethod->name, "op_Inequality") &&
args [0]->klass == mono_defaults.runtimetype_class && args [1]->klass == mono_defaults.runtimetype_class) {
CompRelation rel = get_rttype_ins_relation (args [0], args [1]);
if (rel == CMP_NE) {
if (cfg->verbose_level > 2)
printf ("-> true\n");
EMIT_NEW_ICONST (cfg, ins, 1);
} else if (rel == CMP_EQ) {
if (cfg->verbose_level > 2)
printf ("-> false\n");
EMIT_NEW_ICONST (cfg, ins, 0);
} else {
EMIT_NEW_BIALU (cfg, ins, OP_COMPARE, -1, args [0]->dreg, args [1]->dreg);
MONO_INST_NEW (cfg, ins, OP_ICNEQ);
ins->dreg = alloc_preg (cfg);
ins->type = STACK_I4;
MONO_ADD_INS (cfg->cbb, ins);
}
return ins;
} else if (cmethod->klass == mono_defaults.systemtype_class && !strcmp (cmethod->name, "get_IsValueType") &&
args [0]->klass == mono_defaults.runtimetype_class) {
MonoClass *k1 = get_class_from_ldtoken_ins (args [0]);
if (k1) {
MonoType *t1 = m_class_get_byval_arg (k1);
MonoType *constraint1 = NULL;
/* Common case in gshared BCL code: t1 is a gshared type like T_INT */
if (mono_class_is_gparam (k1)) {
MonoGenericParam *gparam = t1->data.generic_param;
constraint1 = gparam->gshared_constraint;
if (constraint1) {
if (constraint1->type == MONO_TYPE_OBJECT) {
if (cfg->verbose_level > 2)
printf ("-> false\n");
EMIT_NEW_ICONST (cfg, ins, 0);
return ins;
} else if (MONO_TYPE_IS_PRIMITIVE (constraint1)) {
if (cfg->verbose_level > 2)
printf ("-> true\n");
EMIT_NEW_ICONST (cfg, ins, 1);
return ins;
}
}
}
}
return NULL;
} else if (((!strcmp (cmethod_klass_image->assembly->aname.name, "Xamarin.iOS") ||
!strcmp (cmethod_klass_image->assembly->aname.name, "Xamarin.TVOS") ||
!strcmp (cmethod_klass_image->assembly->aname.name, "Xamarin.MacCatalyst") ||
!strcmp (cmethod_klass_image->assembly->aname.name, "Xamarin.Mac") ||
!strcmp (cmethod_klass_image->assembly->aname.name, "Microsoft.iOS") ||
!strcmp (cmethod_klass_image->assembly->aname.name, "Microsoft.tvOS") ||
!strcmp (cmethod_klass_image->assembly->aname.name, "Microsoft.MacCatalyst") ||
!strcmp (cmethod_klass_image->assembly->aname.name, "Microsoft.macOS")) &&
!strcmp (cmethod_klass_name_space, "ObjCRuntime") &&
!strcmp (cmethod_klass_name, "Selector"))
) {
if ((cfg->backend->have_objc_get_selector || cfg->compile_llvm) &&
!strcmp (cmethod->name, "GetHandle") && fsig->param_count == 1 &&
(args [0]->opcode == OP_GOT_ENTRY || args [0]->opcode == OP_AOTCONST) &&
cfg->compile_aot) {
MonoInst *pi;
MonoJumpInfoToken *ji;
char *s;
if (args [0]->opcode == OP_GOT_ENTRY) {
pi = (MonoInst *)args [0]->inst_p1;
g_assert (pi->opcode == OP_PATCH_INFO);
g_assert (GPOINTER_TO_INT (pi->inst_p1) == MONO_PATCH_INFO_LDSTR);
ji = (MonoJumpInfoToken *)pi->inst_p0;
} else {
g_assert (GPOINTER_TO_INT (args [0]->inst_p1) == MONO_PATCH_INFO_LDSTR);
ji = (MonoJumpInfoToken *)args [0]->inst_p0;
}
NULLIFY_INS (args [0]);
s = mono_ldstr_utf8 (ji->image, mono_metadata_token_index (ji->token), cfg->error);
return_val_if_nok (cfg->error, NULL);
MONO_INST_NEW (cfg, ins, OP_OBJC_GET_SELECTOR);
ins->dreg = mono_alloc_ireg (cfg);
// FIXME: Leaks
ins->inst_p0 = s;
MONO_ADD_INS (cfg->cbb, ins);
return ins;
}
} else if (in_corlib &&
(strcmp (cmethod_klass_name_space, "System.Runtime.InteropServices") == 0) &&
(strcmp (cmethod_klass_name, "Marshal") == 0)) {
//Convert Marshal.PtrToStructure<T> of blittable T to direct loads
if (strcmp (cmethod->name, "PtrToStructure") == 0 &&
cmethod->is_inflated &&
fsig->param_count == 1 &&
!mini_method_check_context_used (cfg, cmethod)) {
MonoGenericContext *method_context = mono_method_get_context (cmethod);
MonoType *arg0 = method_context->method_inst->type_argv [0];
if (mono_type_is_native_blittable (arg0))
return mini_emit_memory_load (cfg, arg0, args [0], 0, 0);
}
} else if (cmethod->klass == mono_defaults.enum_class && !strcmp (cmethod->name, "HasFlag") &&
args [0]->opcode == OP_BOX && args [1]->opcode == OP_BOX_ICONST && args [0]->klass == args [1]->klass) {
args [1]->opcode = OP_ICONST;
ins = mini_handle_enum_has_flag (cfg, args [0]->klass, NULL, args [0]->sreg1, args [1]);
NULLIFY_INS (args [0]);
return ins;
} else if (in_corlib &&
!strcmp (cmethod_klass_name_space, "System") &&
(!strcmp (cmethod_klass_name, "Span`1") || !strcmp (cmethod_klass_name, "ReadOnlySpan`1"))) {
return emit_span_intrinsics (cfg, cmethod, fsig, args);
} else if (in_corlib &&
!strcmp (cmethod_klass_name_space, "System.Runtime.CompilerServices") &&
!strcmp (cmethod_klass_name, "Unsafe")) {
return emit_unsafe_intrinsics (cfg, cmethod, fsig, args);
} else if (in_corlib &&
!strcmp (cmethod_klass_name_space, "System.Runtime.CompilerServices") &&
!strcmp (cmethod_klass_name, "JitHelpers")) {
return emit_jit_helpers_intrinsics (cfg, cmethod, fsig, args);
} else if (in_corlib &&
(strcmp (cmethod_klass_name_space, "System") == 0) &&
(strcmp (cmethod_klass_name, "Activator") == 0)) {
MonoGenericContext *method_context = mono_method_get_context (cmethod);
if (!strcmp (cmethod->name, "CreateInstance") &&
fsig->param_count == 0 &&
method_context != NULL &&
method_context->method_inst->type_argc == 1 &&
cmethod->is_inflated &&
!mini_method_check_context_used (cfg, cmethod)) {
MonoType *t = method_context->method_inst->type_argv [0];
MonoClass *arg0 = mono_class_from_mono_type_internal (t);
if (m_class_is_valuetype (arg0) && !mono_class_has_default_constructor (arg0, FALSE)) {
if (m_class_is_primitive (arg0)) {
int dreg = alloc_dreg (cfg, mini_type_to_stack_type (cfg, t));
mini_emit_init_rvar (cfg, dreg, t);
ins = cfg->cbb->last_ins;
} else {
MONO_INST_NEW (cfg, ins, MONO_CLASS_IS_SIMD (cfg, arg0) ? OP_XZERO : OP_VZERO);
ins->dreg = mono_alloc_dreg (cfg, STACK_VTYPE);
ins->type = STACK_VTYPE;
ins->klass = arg0;
MONO_ADD_INS (cfg->cbb, ins);
}
return ins;
}
}
}
#ifdef MONO_ARCH_SIMD_INTRINSICS
if (cfg->opt & MONO_OPT_SIMD) {
ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
if (ins)
return ins;
}
#endif
/* Fallback if SIMD is disabled */
if (in_corlib && !strcmp ("System.Numerics", cmethod_klass_name_space) && !strcmp ("Vector", cmethod_klass_name)) {
if (!strcmp (cmethod->name, "get_IsHardwareAccelerated")) {
EMIT_NEW_ICONST (cfg, ins, 0);
ins->type = STACK_I4;
return ins;
}
}
// Return false for IsSupported for all types in System.Runtime.Intrinsics.*
// if it's not handled in mono_emit_simd_intrinsics
if (in_corlib &&
!strncmp ("System.Runtime.Intrinsics", cmethod_klass_name_space, 25) &&
!strcmp (cmethod->name, "get_IsSupported")) {
EMIT_NEW_ICONST (cfg, ins, 0);
ins->type = STACK_I4;
return ins;
}
	// Return false for RuntimeFeature.IsDynamicCodeCompiled on FullAOT, and for
	// IsDynamicCodeSupported on FullAOT without the interpreter; otherwise return true
if (in_corlib &&
!strcmp ("System.Runtime.CompilerServices", cmethod_klass_name_space) &&
!strcmp ("RuntimeFeature", cmethod_klass_name)) {
if (!strcmp (cmethod->name, "get_IsDynamicCodeCompiled")) {
EMIT_NEW_ICONST (cfg, ins, cfg->full_aot ? 0 : 1);
ins->type = STACK_I4;
return ins;
} else if (!strcmp (cmethod->name, "get_IsDynamicCodeSupported")) {
EMIT_NEW_ICONST (cfg, ins, cfg->full_aot ? (cfg->interp ? 1 : 0) : 1);
ins->type = STACK_I4;
return ins;
}
}
if (in_corlib &&
!strcmp ("System", cmethod_klass_name_space) &&
!strcmp ("ThrowHelper", cmethod_klass_name)) {
if (!strcmp ("ThrowForUnsupportedNumericsVectorBaseType", cmethod->name)) {
/* The mono JIT can't optimize the body of this method away */
MonoGenericContext *ctx = mono_method_get_context (cmethod);
g_assert (ctx);
g_assert (ctx->method_inst);
MonoType *t = ctx->method_inst->type_argv [0];
switch (t->type) {
case MONO_TYPE_I1:
case MONO_TYPE_U1:
case MONO_TYPE_I2:
case MONO_TYPE_U2:
case MONO_TYPE_I4:
case MONO_TYPE_U4:
case MONO_TYPE_I8:
case MONO_TYPE_U8:
case MONO_TYPE_R4:
case MONO_TYPE_R8:
case MONO_TYPE_I:
case MONO_TYPE_U:
MONO_INST_NEW (cfg, ins, OP_NOP);
MONO_ADD_INS (cfg->cbb, ins);
return ins;
default:
break;
}
		} else if (!strcmp ("ThrowForUnsupportedIntrinsicsVector64BaseType", cmethod->name) ||
!strcmp ("ThrowForUnsupportedIntrinsicsVector128BaseType", cmethod->name) ||
!strcmp ("ThrowForUnsupportedIntrinsicsVector256BaseType", cmethod->name)) {
/* The mono JIT can't optimize the body of this method away */
MonoGenericContext *ctx = mono_method_get_context (cmethod);
g_assert (ctx);
g_assert (ctx->method_inst);
MonoType *t = ctx->method_inst->type_argv [0];
switch (t->type) {
case MONO_TYPE_I1:
case MONO_TYPE_U1:
case MONO_TYPE_I2:
case MONO_TYPE_U2:
case MONO_TYPE_I4:
case MONO_TYPE_U4:
case MONO_TYPE_I8:
case MONO_TYPE_U8:
case MONO_TYPE_R4:
case MONO_TYPE_R8:
MONO_INST_NEW (cfg, ins, OP_NOP);
MONO_ADD_INS (cfg->cbb, ins);
return ins;
default:
break;
}
}
}
ins = mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
if (ins)
return ins;
if (COMPILE_LLVM (cfg)) {
ins = llvm_emit_inst_for_method (cfg, cmethod, fsig, args, in_corlib);
if (ins)
return ins;
}
return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
}
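/*
 * emit_array_unsafe_access:
 *
 *   Shared helper for the Array unsafe access intrinsics: for a store the
 * element class comes from the value parameter and an array store is emitted,
 * for a load it comes from the return type and we emit an address computation
 * followed by a load.
 */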
static MonoInst*
emit_array_unsafe_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
{
MonoClass *eklass;
	if (is_set) {
		eklass = mono_class_from_mono_type_internal (fsig->params [2]);
		return mini_emit_array_store (cfg, eklass, args, FALSE);
	} else {
		eklass = mono_class_from_mono_type_internal (fsig->ret);
		MonoInst *ins, *addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE, FALSE);
		EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, m_class_get_byval_arg (eklass), addr->dreg, 0);
		return ins;
	}
}
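/*
 * is_unsafe_mov_compatible:
 *
 *   Whether "R Array.UnsafeMov<S,R> (S s)" can be compiled into returning its
 * argument unchanged: both types must agree on valuetype-ness, contain no GC
 * references, avoid struct/scalar and floating point mixes, and either match
 * in size or both fit into a 32 bit register.
 */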
static gboolean
is_unsafe_mov_compatible (MonoCompile *cfg, MonoClass *param_klass, MonoClass *return_klass)
{
uint32_t align;
int param_size, return_size;
param_klass = mono_class_from_mono_type_internal (mini_get_underlying_type (m_class_get_byval_arg (param_klass)));
return_klass = mono_class_from_mono_type_internal (mini_get_underlying_type (m_class_get_byval_arg (return_klass)));
if (cfg->verbose_level > 3)
printf ("[UNSAFE-MOV-INTRISIC] %s <- %s\n", m_class_get_name (return_klass), m_class_get_name (param_klass));
//Don't allow mixing reference types with value types
if (m_class_is_valuetype (param_klass) != m_class_is_valuetype (return_klass)) {
if (cfg->verbose_level > 3)
printf ("[UNSAFE-MOV-INTRISIC]\tone of the args is a valuetype and the other is not\n");
return FALSE;
}
if (!m_class_is_valuetype (param_klass)) {
if (cfg->verbose_level > 3)
printf ("[UNSAFE-MOV-INTRISIC]\targs are reference types\n");
return TRUE;
}
	//That are blittable
if (m_class_has_references (param_klass) || m_class_has_references (return_klass))
return FALSE;
MonoType *param_type = m_class_get_byval_arg (param_klass);
MonoType *return_type = m_class_get_byval_arg (return_klass);
/* Avoid mixing structs and primitive types/enums, they need to be handled differently in the JIT */
if ((MONO_TYPE_ISSTRUCT (param_type) && !MONO_TYPE_ISSTRUCT (return_type)) ||
(!MONO_TYPE_ISSTRUCT (param_type) && MONO_TYPE_ISSTRUCT (return_type))) {
if (cfg->verbose_level > 3)
printf ("[UNSAFE-MOV-INTRISIC]\tmixing structs and scalars\n");
return FALSE;
}
if (param_type->type == MONO_TYPE_R4 || param_type->type == MONO_TYPE_R8 ||
return_type->type == MONO_TYPE_R4 || return_type->type == MONO_TYPE_R8) {
if (cfg->verbose_level > 3)
printf ("[UNSAFE-MOV-INTRISIC]\tfloat or double are not supported\n");
return FALSE;
}
param_size = mono_class_value_size (param_klass, &align);
return_size = mono_class_value_size (return_klass, &align);
//We can do it if sizes match
if (param_size == return_size) {
if (cfg->verbose_level > 3)
printf ("[UNSAFE-MOV-INTRISIC]\tsame size\n");
return TRUE;
}
//No simple way to handle struct if sizes don't match
if (MONO_TYPE_ISSTRUCT (param_type)) {
if (cfg->verbose_level > 3)
printf ("[UNSAFE-MOV-INTRISIC]\tsize mismatch and type is a struct\n");
return FALSE;
}
/*
* Same reg size category.
* A quick note on why we don't require widening here.
* The intrinsic is "R Array.UnsafeMov<S,R> (S s)".
*
* Since the source value comes from a function argument, the JIT will already have
	 * the value in a VREG and will have performed any widening needed earlier (say, when loading from a field).
*/
if (param_size <= 4 && return_size <= 4) {
if (cfg->verbose_level > 3)
printf ("[UNSAFE-MOV-INTRISIC]\tsize mismatch but both are of the same reg class\n");
return TRUE;
}
return FALSE;
}
static MonoInst*
emit_array_unsafe_mov (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args)
{
MonoClass *param_klass = mono_class_from_mono_type_internal (fsig->params [0]);
MonoClass *return_klass = mono_class_from_mono_type_internal (fsig->ret);
if (mini_is_gsharedvt_variable_type (fsig->ret))
return NULL;
	//Valuetypes that are semantically equivalent, or numbers that can be widened
if (is_unsafe_mov_compatible (cfg, param_klass, return_klass))
return args [0];
//Arrays of valuetypes that are semantically equivalent
if (m_class_get_rank (param_klass) == 1 && m_class_get_rank (return_klass) == 1 && is_unsafe_mov_compatible (cfg, m_class_get_element_class (param_klass), m_class_get_element_class (return_klass)))
return args [0];
return NULL;
}
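/*
 * mini_emit_inst_for_field_load:
 *
 *   Fold well known readonly field loads into constants:
 * BitConverter.IsLittleEndian becomes an ICONST matching the target byte
 * order, and IntPtr/UIntPtr.Zero becomes a PCONST 0.
 */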
MonoInst*
mini_emit_inst_for_field_load (MonoCompile *cfg, MonoClassField *field)
{
MonoClass *klass = m_field_get_parent (field);
const char *klass_name_space = m_class_get_name_space (klass);
const char *klass_name = m_class_get_name (klass);
MonoImage *klass_image = m_class_get_image (klass);
gboolean in_corlib = klass_image == mono_defaults.corlib;
gboolean is_le;
MonoInst *ins;
if (in_corlib && !strcmp (klass_name_space, "System") && !strcmp (klass_name, "BitConverter") && !strcmp (field->name, "IsLittleEndian")) {
is_le = (TARGET_BYTE_ORDER == G_LITTLE_ENDIAN);
EMIT_NEW_ICONST (cfg, ins, is_le);
return ins;
} else if ((klass == mono_defaults.int_class || klass == mono_defaults.uint_class) && strcmp (field->name, "Zero") == 0) {
EMIT_NEW_PCONST (cfg, ins, 0);
return ins;
}
return NULL;
}
#else
MONO_EMPTY_SOURCE_FILE (intrinsics);
#endif
| /**
* Intrinsics support
*/
#include <config.h>
#include <glib.h>
#include <mono/utils/mono-compiler.h>
#include <mono/utils/mono-math.h>
#include <math.h>
#ifndef DISABLE_JIT
#include "mini.h"
#include "mini-runtime.h"
#include "ir-emit.h"
#include "jit-icalls.h"
#include <mono/metadata/abi-details.h>
#include <mono/metadata/class-abi-details.h>
#include <mono/metadata/gc-internals.h>
#include <mono/metadata/monitor.h>
#include <mono/utils/mono-memory-model.h>
static GENERATE_GET_CLASS_WITH_CACHE (runtime_helpers, "System.Runtime.CompilerServices", "RuntimeHelpers")
static GENERATE_TRY_GET_CLASS_WITH_CACHE (memory_marshal, "System.Runtime.InteropServices", "MemoryMarshal")
static GENERATE_TRY_GET_CLASS_WITH_CACHE (math, "System", "Math")
/* optimize the simple GetGenericValueImpl/SetGenericValueImpl generic calls */
static MonoInst*
emit_array_generic_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
{
MonoInst *addr, *store, *load;
MonoClass *eklass = mono_class_from_mono_type_internal (fsig->params [1]);
/* the bounds check is already done by the callers */
addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE, FALSE);
MonoType *etype = m_class_get_byval_arg (eklass);
if (is_set) {
EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, etype, args [2]->dreg, 0);
if (!mini_debug_options.weak_memory_model && mini_type_is_reference (etype))
mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, etype, addr->dreg, 0, load->dreg);
if (mini_type_is_reference (etype))
mini_emit_write_barrier (cfg, addr, load);
} else {
EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, etype, addr->dreg, 0);
EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, etype, args [2]->dreg, 0, load->dreg);
}
return store;
}
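/*
 * mono_type_is_native_blittable:
 *
 *   True when a value of type T has identical managed and native layout, i.e.
 * a primitive scalar or a blittable struct whose marshalled size equals its
 * managed size. Used below to turn Marshal.PtrToStructure<T> into a direct load.
 */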
static gboolean
mono_type_is_native_blittable (MonoType *t)
{
if (MONO_TYPE_IS_REFERENCE (t))
return FALSE;
if (MONO_TYPE_IS_PRIMITIVE_SCALAR (t))
return TRUE;
MonoClass *klass = mono_class_from_mono_type_internal (t);
	//MonoClass::blittable depends on mono_class_setup_fields being done.
mono_class_setup_fields (klass);
if (!m_class_is_blittable (klass))
return FALSE;
// If the native marshal size is different we can't convert PtrToStructure to a type load
if (mono_class_native_size (klass, NULL) != mono_class_value_size (klass, NULL))
return FALSE;
return TRUE;
}
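/*
 * mini_emit_inst_for_ctor:
 *
 *   Emit intrinsic code for a small set of constructors: ByReference`1 is
 * lowered to a raw store through the this pointer, then the native types and
 * (optionally) SIMD paths get a chance; everything else returns NULL so the
 * caller emits a normal call.
 */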
MonoInst*
mini_emit_inst_for_ctor (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
{
const char* cmethod_klass_name_space = m_class_get_name_space (cmethod->klass);
const char* cmethod_klass_name = m_class_get_name (cmethod->klass);
MonoImage *cmethod_klass_image = m_class_get_image (cmethod->klass);
gboolean in_corlib = cmethod_klass_image == mono_defaults.corlib;
MonoInst *ins = NULL;
/* Required intrinsics are always used even with -O=-intrins */
if (in_corlib &&
!strcmp (cmethod_klass_name_space, "System") &&
!strcmp (cmethod_klass_name, "ByReference`1")) {
/* public ByReference(ref T value) */
g_assert (fsig->hasthis && fsig->param_count == 1);
EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [0]->dreg, 0, args [1]->dreg);
return ins;
}
ins = mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
if (ins)
return ins;
if (!(cfg->opt & MONO_OPT_INTRINS))
return NULL;
#ifdef MONO_ARCH_SIMD_INTRINSICS
if (cfg->opt & MONO_OPT_SIMD) {
ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
if (ins)
return ins;
}
#endif
return NULL;
}
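/*
 * llvm_emit_inst_for_method:
 *
 *   LLVM-only lowering of Math/MathF and Buffer methods to dedicated opcodes
 * that the LLVM backend turns into intrinsics, following the pattern noted
 * below (e.g. MathF.Sin -> OP_SINF -> @llvm.sin.f32). Returns NULL when no
 * mapping applies.
 */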
static MonoInst*
llvm_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args, gboolean in_corlib)
{
MonoInst *ins = NULL;
int opcode = 0;
// Convert Math and MathF methods into LLVM intrinsics, e.g. MathF.Sin -> @llvm.sin.f32
if (in_corlib && !strcmp (m_class_get_name (cmethod->klass), "MathF") && cfg->r4fp) {
// (float)
if (fsig->param_count == 1 && fsig->params [0]->type == MONO_TYPE_R4) {
if (!strcmp (cmethod->name, "Ceiling")) {
opcode = OP_CEILF;
} else if (!strcmp (cmethod->name, "Cos")) {
opcode = OP_COSF;
} else if (!strcmp (cmethod->name, "Exp")) {
opcode = OP_EXPF;
} else if (!strcmp (cmethod->name, "Floor")) {
opcode = OP_FLOORF;
} else if (!strcmp (cmethod->name, "Log2")) {
opcode = OP_LOG2F;
} else if (!strcmp (cmethod->name, "Log10")) {
opcode = OP_LOG10F;
} else if (!strcmp (cmethod->name, "Sin")) {
opcode = OP_SINF;
} else if (!strcmp (cmethod->name, "Sqrt")) {
opcode = OP_SQRTF;
} else if (!strcmp (cmethod->name, "Truncate")) {
opcode = OP_TRUNCF;
}
#if defined(TARGET_X86) || defined(TARGET_AMD64)
else if (!strcmp (cmethod->name, "Round") && (mini_get_cpu_features (cfg) & MONO_CPU_X86_SSE41) != 0) {
// special case: emit vroundss for MathF.Round directly instead of what llvm.round.f32 emits
// to align with CoreCLR behavior
int xreg = alloc_xreg (cfg);
EMIT_NEW_UNALU (cfg, ins, OP_FCONV_TO_R4_X, xreg, args [0]->dreg);
int xround = alloc_xreg (cfg);
EMIT_NEW_BIALU (cfg, ins, OP_SSE41_ROUNDS, xround, xreg, xreg);
ins->inst_c0 = 0x4; // vroundss xmm0, xmm0, xmm0, 0x4 (mode for rounding)
ins->inst_c1 = MONO_TYPE_R4;
int dreg = alloc_freg (cfg);
EMIT_NEW_UNALU (cfg, ins, OP_EXTRACT_R4, dreg, xround);
ins->inst_c0 = 0;
ins->inst_c1 = MONO_TYPE_R4;
return ins;
}
#endif
}
// (float, float)
if (fsig->param_count == 2 && fsig->params [0]->type == MONO_TYPE_R4 && fsig->params [1]->type == MONO_TYPE_R4) {
if (!strcmp (cmethod->name, "Pow")) {
opcode = OP_RPOW;
} else if (!strcmp (cmethod->name, "CopySign")) {
opcode = OP_RCOPYSIGN;
}
}
// (float, float, float)
if (fsig->param_count == 3 && fsig->params [0]->type == MONO_TYPE_R4 && fsig->params [1]->type == MONO_TYPE_R4 && fsig->params [2]->type == MONO_TYPE_R4) {
if (!strcmp (cmethod->name, "FusedMultiplyAdd")) {
opcode = OP_FMAF;
}
}
if (opcode) {
MONO_INST_NEW (cfg, ins, opcode);
ins->type = STACK_R8;
ins->dreg = mono_alloc_dreg (cfg, (MonoStackType)ins->type);
ins->sreg1 = args [0]->dreg;
if (fsig->param_count > 1) {
ins->sreg2 = args [1]->dreg;
}
if (fsig->param_count > 2) {
ins->sreg3 = args [2]->dreg;
}
g_assert (fsig->param_count <= 3);
MONO_ADD_INS (cfg->cbb, ins);
}
}
if (cmethod->klass == mono_class_try_get_math_class ()) {
// (double)
if (fsig->param_count == 1 && fsig->params [0]->type == MONO_TYPE_R8) {
if (!strcmp (cmethod->name, "Abs")) {
opcode = OP_ABS;
} else if (!strcmp (cmethod->name, "Ceiling")) {
opcode = OP_CEIL;
} else if (!strcmp (cmethod->name, "Cos")) {
opcode = OP_COS;
} else if (!strcmp (cmethod->name, "Exp")) {
opcode = OP_EXP;
} else if (!strcmp (cmethod->name, "Floor")) {
opcode = OP_FLOOR;
} else if (!strcmp (cmethod->name, "Log")) {
opcode = OP_LOG;
} else if (!strcmp (cmethod->name, "Log2")) {
opcode = OP_LOG2;
} else if (!strcmp (cmethod->name, "Log10")) {
opcode = OP_LOG10;
} else if (!strcmp (cmethod->name, "Sin")) {
opcode = OP_SIN;
} else if (!strcmp (cmethod->name, "Sqrt")) {
opcode = OP_SQRT;
} else if (!strcmp (cmethod->name, "Truncate")) {
opcode = OP_TRUNC;
}
}
// (double, double)
if (fsig->param_count == 2 && fsig->params [0]->type == MONO_TYPE_R8 && fsig->params [1]->type == MONO_TYPE_R8) {
// Max and Min can only be optimized in fast math mode
if (!strcmp (cmethod->name, "Max") && mono_use_fast_math) {
opcode = OP_FMAX;
} else if (!strcmp (cmethod->name, "Min") && mono_use_fast_math) {
opcode = OP_FMIN;
} else if (!strcmp (cmethod->name, "Pow")) {
opcode = OP_FPOW;
} else if (!strcmp (cmethod->name, "CopySign")) {
opcode = OP_FCOPYSIGN;
}
}
// (double, double, double)
if (fsig->param_count == 3 && fsig->params [0]->type == MONO_TYPE_R8 && fsig->params [1]->type == MONO_TYPE_R8 && fsig->params [2]->type == MONO_TYPE_R8) {
if (!strcmp (cmethod->name, "FusedMultiplyAdd")) {
opcode = OP_FMA;
}
}
// Math also contains overloads for floats (MathF inlines them)
// (float)
if (fsig->param_count == 1 && fsig->params [0]->type == MONO_TYPE_R4) {
if (!strcmp (cmethod->name, "Abs")) {
opcode = OP_ABSF;
}
}
// (float, float)
if (fsig->param_count == 2 && fsig->params [0]->type == MONO_TYPE_R4 && fsig->params [1]->type == MONO_TYPE_R4) {
if (!strcmp (cmethod->name, "Max") && mono_use_fast_math) {
opcode = OP_RMAX;
} else if (!strcmp (cmethod->name, "Min") && mono_use_fast_math) {
opcode = OP_RMIN;
} else if (!strcmp (cmethod->name, "Pow")) {
opcode = OP_RPOW;
}
}
if (opcode && fsig->param_count > 0) {
MONO_INST_NEW (cfg, ins, opcode);
ins->type = STACK_R8;
ins->dreg = mono_alloc_dreg (cfg, (MonoStackType)ins->type);
ins->sreg1 = args [0]->dreg;
if (fsig->param_count > 1) {
ins->sreg2 = args [1]->dreg;
}
if (fsig->param_count > 2) {
ins->sreg3 = args [2]->dreg;
}
g_assert (fsig->param_count <= 3);
MONO_ADD_INS (cfg->cbb, ins);
}
opcode = 0;
if (cfg->opt & MONO_OPT_CMOV) {
if (strcmp (cmethod->name, "Min") == 0) {
if (fsig->params [0]->type == MONO_TYPE_I4)
opcode = OP_IMIN;
			else if (fsig->params [0]->type == MONO_TYPE_U4)
				opcode = OP_IMIN_UN;
else if (fsig->params [0]->type == MONO_TYPE_I8)
opcode = OP_LMIN;
else if (fsig->params [0]->type == MONO_TYPE_U8)
opcode = OP_LMIN_UN;
} else if (strcmp (cmethod->name, "Max") == 0) {
if (fsig->params [0]->type == MONO_TYPE_I4)
opcode = OP_IMAX;
			else if (fsig->params [0]->type == MONO_TYPE_U4)
				opcode = OP_IMAX_UN;
else if (fsig->params [0]->type == MONO_TYPE_I8)
opcode = OP_LMAX;
else if (fsig->params [0]->type == MONO_TYPE_U8)
opcode = OP_LMAX_UN;
}
}
if (opcode && fsig->param_count == 2) {
MONO_INST_NEW (cfg, ins, opcode);
			ins->type = (fsig->params [0]->type == MONO_TYPE_I4 || fsig->params [0]->type == MONO_TYPE_U4) ? STACK_I4 : STACK_I8;
ins->dreg = mono_alloc_dreg (cfg, (MonoStackType)ins->type);
ins->sreg1 = args [0]->dreg;
ins->sreg2 = args [1]->dreg;
MONO_ADD_INS (cfg->cbb, ins);
}
}
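	/* Inline Buffer.Memmove (ptr, ptr, len): skip all work when len == 0,
	 * null-check both pointers, then emit a single OP_MEMMOVE. */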
if (in_corlib && !strcmp (m_class_get_name (cmethod->klass), "Buffer")) {
if (!strcmp (cmethod->name, "Memmove") && fsig->param_count == 3 && fsig->params [0]->type == MONO_TYPE_PTR && fsig->params [1]->type == MONO_TYPE_PTR) {
MonoBasicBlock *end_bb;
NEW_BBLOCK (cfg, end_bb);
// do nothing if len == 0 (even if src or dst are nulls)
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, args [2]->dreg, 0);
MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, end_bb);
// throw NRE if src or dst are nulls
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, args [0]->dreg, 0);
MONO_EMIT_NEW_COND_EXC (cfg, EQ, "NullReferenceException");
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, args [1]->dreg, 0);
MONO_EMIT_NEW_COND_EXC (cfg, EQ, "NullReferenceException");
MONO_INST_NEW (cfg, ins, OP_MEMMOVE);
ins->sreg1 = args [0]->dreg; // i1* dst
ins->sreg2 = args [1]->dreg; // i1* src
ins->sreg3 = args [2]->dreg; // i32/i64 len
MONO_ADD_INS (cfg->cbb, ins);
MONO_START_BB (cfg, end_bb);
}
}
return ins;
}
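/*
 * emit_span_intrinsics:
 *
 *   Inline Span<T>/ReadOnlySpan<T> get_Item and get_Length on the
 * non-portable layout: load _pointer.Value, bounds-check the index against
 * _length, then compute base + index * element_size. Returns NULL for the
 * portable Span<T> layout and for gsharedvt element types.
 */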
static MonoInst*
emit_span_intrinsics (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
{
MonoInst *ins;
MonoClassField *ptr_field = mono_class_get_field_from_name_full (cmethod->klass, "_pointer", NULL);
if (!ptr_field)
/* Portable Span<T> */
return NULL;
if (!strcmp (cmethod->name, "get_Item")) {
MonoClassField *length_field = mono_class_get_field_from_name_full (cmethod->klass, "_length", NULL);
g_assert (length_field);
MonoGenericClass *gclass = mono_class_get_generic_class (cmethod->klass);
MonoClass *param_class = mono_class_from_mono_type_internal (gclass->context.class_inst->type_argv [0]);
if (mini_is_gsharedvt_variable_klass (param_class))
return NULL;
int span_reg = args [0]->dreg;
/* Load _pointer.Value */
int base_reg = alloc_preg (cfg);
EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, base_reg, span_reg, ptr_field->offset - MONO_ABI_SIZEOF (MonoObject));
/* Similar to mini_emit_ldelema_1_ins () */
int size = mono_class_array_element_size (param_class);
int index_reg = mini_emit_sext_index_reg (cfg, args [1]);
mini_emit_bounds_check_offset (cfg, span_reg, length_field->offset - MONO_ABI_SIZEOF (MonoObject), index_reg, NULL);
// FIXME: Sign extend index ?
int mult_reg = alloc_preg (cfg);
int add_reg = alloc_preg (cfg);
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index_reg, size);
EMIT_NEW_BIALU (cfg, ins, OP_PADD, add_reg, base_reg, mult_reg);
ins->klass = param_class;
ins->type = STACK_MP;
return ins;
} else if (!strcmp (cmethod->name, "get_Length")) {
MonoClassField *length_field = mono_class_get_field_from_name_full (cmethod->klass, "_length", NULL);
g_assert (length_field);
/*
* FIXME: This doesn't work with abcrem, since the src is a unique LDADDR not
* the same array object.
*/
MONO_INST_NEW (cfg, ins, OP_LDLEN);
ins->dreg = alloc_preg (cfg);
ins->sreg1 = args [0]->dreg;
ins->inst_imm = length_field->offset - MONO_ABI_SIZEOF (MonoObject);
ins->type = STACK_I4;
MONO_ADD_INS (cfg->cbb, ins);
cfg->flags |= MONO_CFG_NEEDS_DECOMPOSE;
cfg->cbb->needs_decompose = TRUE;
return ins;
}
return NULL;
}
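/*
 * emit_unsafe_intrinsics:
 *
 *   Lower System.Runtime.CompilerServices.Unsafe methods to raw pointer
 * arithmetic, e.g. Unsafe.Add<T> (ref r, i) becomes r + i * sizeof (T)
 * (OP_PMUL + OP_PADD), with the element size fetched from the rgctx for
 * gsharedvt types.
 */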
static MonoInst*
emit_unsafe_intrinsics (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
{
MonoInst *ins;
int dreg, align;
MonoGenericContext *ctx = mono_method_get_context (cmethod);
MonoType *t;
if (!strcmp (cmethod->name, "As")) {
g_assert (ctx);
g_assert (ctx->method_inst);
t = ctx->method_inst->type_argv [0];
if (ctx->method_inst->type_argc == 2) {
dreg = alloc_preg (cfg);
EMIT_NEW_UNALU (cfg, ins, OP_MOVE, dreg, args [0]->dreg);
ins->type = STACK_OBJ;
ins->klass = mono_get_object_class ();
return ins;
} else if (ctx->method_inst->type_argc == 1) {
if (mini_is_gsharedvt_variable_type (t))
return NULL;
			// Casts the given object to the specified type, performing no dynamic type checking.
g_assert (fsig->param_count == 1);
g_assert (fsig->params [0]->type == MONO_TYPE_OBJECT);
dreg = alloc_preg (cfg);
EMIT_NEW_UNALU (cfg, ins, OP_MOVE, dreg, args [0]->dreg);
ins->type = STACK_OBJ;
ins->klass = mono_class_from_mono_type_internal (ctx->method_inst->type_argv [0]);
return ins;
}
} else if (!strcmp (cmethod->name, "AsPointer")) {
g_assert (ctx);
g_assert (ctx->method_inst);
g_assert (ctx->method_inst->type_argc == 1);
g_assert (fsig->param_count == 1);
dreg = alloc_preg (cfg);
EMIT_NEW_UNALU (cfg, ins, OP_MOVE, dreg, args [0]->dreg);
ins->type = STACK_PTR;
return ins;
} else if (!strcmp (cmethod->name, "AsRef")) {
g_assert (ctx);
g_assert (ctx->method_inst);
g_assert (ctx->method_inst->type_argc == 1);
g_assert (fsig->param_count == 1);
dreg = alloc_preg (cfg);
EMIT_NEW_UNALU (cfg, ins, OP_MOVE, dreg, args [0]->dreg);
ins->type = STACK_OBJ;
ins->klass = mono_get_object_class ();
return ins;
} else if (!strcmp (cmethod->name, "AreSame")) {
g_assert (ctx);
g_assert (ctx->method_inst);
g_assert (ctx->method_inst->type_argc == 1);
g_assert (fsig->param_count == 2);
dreg = alloc_ireg (cfg);
EMIT_NEW_BIALU (cfg, ins, OP_COMPARE, -1, args [0]->dreg, args [1]->dreg);
EMIT_NEW_UNALU (cfg, ins, OP_PCEQ, dreg, -1);
return ins;
} else if (!strcmp (cmethod->name, "IsAddressLessThan")) {
g_assert (ctx);
g_assert (ctx->method_inst);
g_assert (ctx->method_inst->type_argc == 1);
g_assert (fsig->param_count == 2);
dreg = alloc_ireg (cfg);
EMIT_NEW_BIALU (cfg, ins, OP_COMPARE, -1, args [0]->dreg, args [1]->dreg);
EMIT_NEW_UNALU (cfg, ins, OP_PCLT_UN, dreg, -1);
return ins;
} else if (!strcmp (cmethod->name, "IsAddressGreaterThan")) {
g_assert (ctx);
g_assert (ctx->method_inst);
g_assert (ctx->method_inst->type_argc == 1);
g_assert (fsig->param_count == 2);
dreg = alloc_ireg (cfg);
EMIT_NEW_BIALU (cfg, ins, OP_COMPARE, -1, args [0]->dreg, args [1]->dreg);
EMIT_NEW_UNALU (cfg, ins, OP_PCGT_UN, dreg, -1);
return ins;
} else if (!strcmp (cmethod->name, "Add")) {
g_assert (ctx);
g_assert (ctx->method_inst);
g_assert (ctx->method_inst->type_argc == 1);
g_assert (fsig->param_count == 2);
int mul_reg = alloc_preg (cfg);
t = ctx->method_inst->type_argv [0];
MonoInst *esize_ins;
if (mini_is_gsharedvt_variable_type (t)) {
esize_ins = mini_emit_get_gsharedvt_info_klass (cfg, mono_class_from_mono_type_internal (t), MONO_RGCTX_INFO_CLASS_SIZEOF);
if (SIZEOF_REGISTER == 8)
MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, esize_ins->dreg, esize_ins->dreg);
} else {
t = mini_type_get_underlying_type (t);
int esize = mono_class_array_element_size (mono_class_from_mono_type_internal (t));
EMIT_NEW_ICONST (cfg, esize_ins, esize);
}
esize_ins->type = STACK_I4;
EMIT_NEW_BIALU (cfg, ins, OP_PMUL, mul_reg, args [1]->dreg, esize_ins->dreg);
ins->type = STACK_PTR;
dreg = alloc_preg (cfg);
EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, args [0]->dreg, mul_reg);
ins->type = STACK_PTR;
return ins;
} else if (!strcmp (cmethod->name, "AddByteOffset")) {
g_assert (ctx);
g_assert (ctx->method_inst);
g_assert (ctx->method_inst->type_argc == 1);
g_assert (fsig->param_count == 2);
if (fsig->params [1]->type == MONO_TYPE_I || fsig->params [1]->type == MONO_TYPE_U) {
int dreg = alloc_preg (cfg);
EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, args [0]->dreg, args [1]->dreg);
ins->type = STACK_PTR;
return ins;
} else if (fsig->params [1]->type == MONO_TYPE_U8) {
int sreg = args [1]->dreg;
if (SIZEOF_REGISTER == 4) {
sreg = alloc_ireg (cfg);
EMIT_NEW_UNALU (cfg, ins, OP_LCONV_TO_U4, sreg, args [1]->dreg);
}
int dreg = alloc_preg (cfg);
EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, args [0]->dreg, sreg);
ins->type = STACK_PTR;
return ins;
}
} else if (!strcmp (cmethod->name, "SizeOf")) {
g_assert (ctx);
g_assert (ctx->method_inst);
g_assert (ctx->method_inst->type_argc == 1);
g_assert (fsig->param_count == 0);
t = ctx->method_inst->type_argv [0];
if (mini_is_gsharedvt_variable_type (t)) {
ins = mini_emit_get_gsharedvt_info_klass (cfg, mono_class_from_mono_type_internal (t), MONO_RGCTX_INFO_CLASS_SIZEOF);
} else {
int esize = mono_type_size (t, &align);
EMIT_NEW_ICONST (cfg, ins, esize);
}
ins->type = STACK_I4;
return ins;
} else if (!strcmp (cmethod->name, "ReadUnaligned")) {
g_assert (ctx);
g_assert (ctx->method_inst);
g_assert (ctx->method_inst->type_argc == 1);
g_assert (fsig->param_count == 1);
t = ctx->method_inst->type_argv [0];
t = mini_get_underlying_type (t);
return mini_emit_memory_load (cfg, t, args [0], 0, MONO_INST_UNALIGNED);
} else if (!strcmp (cmethod->name, "WriteUnaligned")) {
g_assert (ctx);
g_assert (ctx->method_inst);
g_assert (ctx->method_inst->type_argc == 1);
g_assert (fsig->param_count == 2);
t = ctx->method_inst->type_argv [0];
t = mini_get_underlying_type (t);
mini_emit_memory_store (cfg, t, args [0], args [1], MONO_INST_UNALIGNED);
MONO_INST_NEW (cfg, ins, OP_NOP);
MONO_ADD_INS (cfg->cbb, ins);
return ins;
} else if (!strcmp (cmethod->name, "ByteOffset")) {
g_assert (ctx);
g_assert (ctx->method_inst);
g_assert (ctx->method_inst->type_argc == 1);
g_assert (fsig->param_count == 2);
int dreg = alloc_preg (cfg);
EMIT_NEW_BIALU (cfg, ins, OP_PSUB, dreg, args [1]->dreg, args [0]->dreg);
ins->type = STACK_PTR;
return ins;
} else if (!strcmp (cmethod->name, "Unbox")) {
g_assert (ctx);
g_assert (ctx->method_inst);
g_assert (ctx->method_inst->type_argc == 1);
t = ctx->method_inst->type_argv [0];
t = mini_get_underlying_type (t);
MonoClass *klass = mono_class_from_mono_type_internal (t);
int context_used = mini_class_check_context_used (cfg, klass);
return mini_handle_unbox (cfg, klass, args [0], context_used);
} else if (!strcmp (cmethod->name, "Copy")) {
g_assert (ctx);
g_assert (ctx->method_inst);
g_assert (ctx->method_inst->type_argc == 1);
t = ctx->method_inst->type_argv [0];
t = mini_get_underlying_type (t);
MonoClass *klass = mono_class_from_mono_type_internal (t);
mini_emit_memory_copy (cfg, args [0], args [1], klass, FALSE, 0);
return cfg->cbb->last_ins;
} else if (!strcmp (cmethod->name, "CopyBlock")) {
g_assert (fsig->param_count == 3);
mini_emit_memory_copy_bytes (cfg, args [0], args [1], args [2], 0);
return cfg->cbb->last_ins;
} else if (!strcmp (cmethod->name, "CopyBlockUnaligned")) {
g_assert (fsig->param_count == 3);
mini_emit_memory_copy_bytes (cfg, args [0], args [1], args [2], MONO_INST_UNALIGNED);
return cfg->cbb->last_ins;
} else if (!strcmp (cmethod->name, "InitBlock")) {
g_assert (fsig->param_count == 3);
mini_emit_memory_init_bytes (cfg, args [0], args [1], args [2], 0);
return cfg->cbb->last_ins;
} else if (!strcmp (cmethod->name, "InitBlockUnaligned")) {
g_assert (fsig->param_count == 3);
mini_emit_memory_init_bytes (cfg, args [0], args [1], args [2], MONO_INST_UNALIGNED);
return cfg->cbb->last_ins;
	} else if (!strcmp (cmethod->name, "SkipInit")) {
MONO_INST_NEW (cfg, ins, OP_NOP);
MONO_ADD_INS (cfg->cbb, ins);
return ins;
} else if (!strcmp (cmethod->name, "SubtractByteOffset")) {
g_assert (ctx);
g_assert (ctx->method_inst);
g_assert (ctx->method_inst->type_argc == 1);
g_assert (fsig->param_count == 2);
int dreg = alloc_preg (cfg);
EMIT_NEW_BIALU (cfg, ins, OP_PSUB, dreg, args [0]->dreg, args [1]->dreg);
ins->type = STACK_PTR;
return ins;
} else if (!strcmp (cmethod->name, "IsNullRef")) {
g_assert (fsig->param_count == 1);
MONO_EMIT_NEW_COMPARE_IMM (cfg, args [0]->dreg, 0);
int dreg = alloc_ireg (cfg);
EMIT_NEW_UNALU (cfg, ins, OP_PCEQ, dreg, -1);
return ins;
} else if (!strcmp (cmethod->name, "NullRef")) {
g_assert (ctx);
g_assert (ctx->method_inst);
g_assert (ctx->method_inst->type_argc == 1);
g_assert (fsig->param_count == 0);
EMIT_NEW_PCONST (cfg, ins, NULL);
ins->type = STACK_MP;
ins->klass = mono_class_from_mono_type_internal (fsig->ret);
return ins;
}
return NULL;
}
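/*
 * emit_jit_helpers_intrinsics:
 *
 *   Inline JitHelpers.EnumEquals/EnumCompareTo for enums whose underlying
 * type is statically known, picking signed/unsigned 32/64 bit compare opcodes
 * and the branchless (a > b) - (a < b) sequence for CompareTo.
 */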
static MonoInst*
emit_jit_helpers_intrinsics (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
{
MonoInst *ins;
int dreg;
MonoGenericContext *ctx = mono_method_get_context (cmethod);
MonoType *t;
if (!strcmp (cmethod->name, "EnumEquals") || !strcmp (cmethod->name, "EnumCompareTo")) {
g_assert (ctx);
g_assert (ctx->method_inst);
g_assert (ctx->method_inst->type_argc == 1);
g_assert (fsig->param_count == 2);
t = ctx->method_inst->type_argv [0];
t = mini_get_underlying_type (t);
if (mini_is_gsharedvt_variable_type (t))
return NULL;
gboolean is_i8 = (t->type == MONO_TYPE_I8 || t->type == MONO_TYPE_U8);
gboolean is_unsigned = (t->type == MONO_TYPE_U1 || t->type == MONO_TYPE_U2 || t->type == MONO_TYPE_U4 || t->type == MONO_TYPE_U8 || t->type == MONO_TYPE_U);
int cmp_op, ceq_op, cgt_op, clt_op;
if (is_i8) {
cmp_op = OP_LCOMPARE;
ceq_op = OP_LCEQ;
cgt_op = is_unsigned ? OP_LCGT_UN : OP_LCGT;
clt_op = is_unsigned ? OP_LCLT_UN : OP_LCLT;
} else {
cmp_op = OP_ICOMPARE;
ceq_op = OP_ICEQ;
cgt_op = is_unsigned ? OP_ICGT_UN : OP_ICGT;
clt_op = is_unsigned ? OP_ICLT_UN : OP_ICLT;
}
if (!strcmp (cmethod->name, "EnumEquals")) {
dreg = alloc_ireg (cfg);
EMIT_NEW_BIALU (cfg, ins, cmp_op, -1, args [0]->dreg, args [1]->dreg);
EMIT_NEW_UNALU (cfg, ins, ceq_op, dreg, -1);
} else {
// Use the branchless code (a > b) - (a < b)
int reg1, reg2;
reg1 = alloc_ireg (cfg);
reg2 = alloc_ireg (cfg);
dreg = alloc_ireg (cfg);
			if (t->type >= MONO_TYPE_BOOLEAN && t->type <= MONO_TYPE_U2) {
				// Use "a - b" for small types (smaller than Int32)
				EMIT_NEW_BIALU (cfg, ins, OP_ISUB, dreg, args [0]->dreg, args [1]->dreg);
			} else {
EMIT_NEW_BIALU (cfg, ins, cmp_op, -1, args [0]->dreg, args [1]->dreg);
EMIT_NEW_UNALU (cfg, ins, cgt_op, reg1, -1);
EMIT_NEW_BIALU (cfg, ins, cmp_op, -1, args [0]->dreg, args [1]->dreg);
EMIT_NEW_UNALU (cfg, ins, clt_op, reg2, -1);
EMIT_NEW_BIALU (cfg, ins, OP_ISUB, dreg, reg1, reg2);
}
}
return ins;
}
return NULL;
}
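/* True when the pointed-to type of a byref parameter is a reference type. */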
static gboolean
byref_arg_is_reference (MonoType *t)
{
g_assert (m_type_is_byref (t));
return mini_type_is_reference (m_class_get_byval_arg (mono_class_from_mono_type_internal (t)));
}
/*
* If INS represents the result of an ldtoken+Type::GetTypeFromHandle IL sequence,
* return the type.
*/
static MonoClass*
get_class_from_ldtoken_ins (MonoInst *ins)
{
// FIXME: The JIT case uses PCONST
if (ins->opcode == OP_AOTCONST) {
if (ins->inst_p1 != (gpointer)MONO_PATCH_INFO_TYPE_FROM_HANDLE)
return NULL;
MonoJumpInfoToken *token = (MonoJumpInfoToken*)ins->inst_p0;
MonoClass *handle_class;
ERROR_DECL (error);
gpointer handle = mono_ldtoken_checked (token->image, token->token, &handle_class, NULL, error);
mono_error_assert_ok (error);
MonoType *t = (MonoType*)handle;
return mono_class_from_mono_type_internal (t);
} else if (ins->opcode == OP_RTTYPE) {
return (MonoClass*)ins->inst_p0;
} else {
return NULL;
}
}
/*
* Given two instructions representing rttypes, return
* their relation (EQ/NE/NONE).
*/
static CompRelation
get_rttype_ins_relation (MonoInst *ins1, MonoInst *ins2)
{
MonoClass *k1 = get_class_from_ldtoken_ins (ins1);
MonoClass *k2 = get_class_from_ldtoken_ins (ins2);
CompRelation rel = CMP_UNORD;
if (k1 && k2) {
MonoType *t1 = m_class_get_byval_arg (k1);
MonoType *t2 = m_class_get_byval_arg (k2);
MonoType *constraint1 = NULL;
/* Common case in gshared BCL code: t1 is a gshared type like T_INT, and t2 is a concrete type */
if (mono_class_is_gparam (k1)) {
MonoGenericParam *gparam = t1->data.generic_param;
constraint1 = gparam->gshared_constraint;
}
if (constraint1) {
if (constraint1->type == MONO_TYPE_OBJECT) {
if (MONO_TYPE_IS_PRIMITIVE (t2) || MONO_TYPE_ISSTRUCT (t2))
rel = CMP_NE;
} else if (MONO_TYPE_IS_PRIMITIVE (constraint1)) {
if (MONO_TYPE_IS_PRIMITIVE (t2) && constraint1->type != t2->type)
rel = CMP_NE;
else if (MONO_TYPE_IS_REFERENCE (t2))
rel = CMP_NE;
}
}
}
return rel;
}
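/*
 * mini_emit_inst_for_method:
 *
 *   Main intrinsics dispatcher: try to replace a call to CMETHOD with inline
 * IR. Returns the instruction holding the result, or NULL so the caller emits
 * a normal call. *ins_type_initialized tells the caller whether ins->type and
 * ins->klass have already been set.
 */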
MonoInst*
mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args, gboolean *ins_type_initialized)
{
MonoInst *ins = NULL;
MonoClass *runtime_helpers_class = mono_class_get_runtime_helpers_class ();
*ins_type_initialized = FALSE;
const char* cmethod_klass_name_space;
if (m_class_get_nested_in (cmethod->klass))
cmethod_klass_name_space = m_class_get_name_space (m_class_get_nested_in (cmethod->klass));
else
cmethod_klass_name_space = m_class_get_name_space (cmethod->klass);
const char* cmethod_klass_name = m_class_get_name (cmethod->klass);
MonoImage *cmethod_klass_image = m_class_get_image (cmethod->klass);
gboolean in_corlib = cmethod_klass_image == mono_defaults.corlib;
/* Required intrinsics are always used even with -O=-intrins */
if (in_corlib &&
!strcmp (cmethod_klass_name_space, "System") &&
!strcmp (cmethod_klass_name, "ByReference`1") &&
!strcmp (cmethod->name, "get_Value")) {
g_assert (fsig->hasthis && fsig->param_count == 0);
int dreg = alloc_preg (cfg);
EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, args [0]->dreg, 0);
return ins;
}
if (!(cfg->opt & MONO_OPT_INTRINS))
return NULL;
if (cmethod->klass == mono_defaults.string_class) {
if (strcmp (cmethod->name, "get_Chars") == 0 && fsig->param_count + fsig->hasthis == 2) {
int dreg = alloc_ireg (cfg);
int index_reg = alloc_preg (cfg);
int add_reg = alloc_preg (cfg);
#if SIZEOF_REGISTER == 8
if (COMPILE_LLVM (cfg)) {
MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, index_reg, args [1]->dreg);
} else {
/* The array reg is 64 bits but the index reg is only 32 */
MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
}
#else
index_reg = args [1]->dreg;
#endif
MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
#if defined(TARGET_X86) || defined(TARGET_AMD64)
EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, MONO_STRUCT_OFFSET (MonoString, chars));
add_reg = ins->dreg;
EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
add_reg, 0);
#else
int mult_reg = alloc_preg (cfg);
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
add_reg, MONO_STRUCT_OFFSET (MonoString, chars));
#endif
mini_type_from_op (cfg, ins, NULL, NULL);
return ins;
} else if (strcmp (cmethod->name, "get_Length") == 0 && fsig->param_count + fsig->hasthis == 1) {
int dreg = alloc_ireg (cfg);
/* Decompose later to allow more optimizations */
EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
ins->type = STACK_I4;
ins->flags |= MONO_INST_FAULT;
cfg->cbb->needs_decompose = TRUE;
cfg->flags |= MONO_CFG_NEEDS_DECOMPOSE;
return ins;
} else
return NULL;
} else if (cmethod->klass == mono_defaults.object_class) {
if (strcmp (cmethod->name, "GetType") == 0 && fsig->param_count + fsig->hasthis == 1) {
int dreg = alloc_ireg_ref (cfg);
int vt_reg = alloc_preg (cfg);
MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vt_reg, args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, MONO_STRUCT_OFFSET (MonoVTable, type));
mini_type_from_op (cfg, ins, NULL, NULL);
mini_type_to_eval_stack_type (cfg, fsig->ret, ins);
ins->klass = mono_defaults.runtimetype_class;
*ins_type_initialized = TRUE;
return ins;
} else if (!cfg->backend->emulate_mul_div && strcmp (cmethod->name, "InternalGetHashCode") == 0 && fsig->param_count == 1 && !mono_gc_is_moving ()) {
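			/* Hash the object address directly: shift out the low alignment
			 * bits and multiply by the multiplicative-hash constant 2654435761.
			 * Only valid because !mono_gc_is_moving () keeps addresses stable. */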
int dreg = alloc_ireg (cfg);
int t1 = alloc_ireg (cfg);
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, t1, args [0]->dreg, 3);
EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
ins->type = STACK_I4;
return ins;
} else if (strcmp (cmethod->name, ".ctor") == 0 && fsig->param_count == 0) {
MONO_INST_NEW (cfg, ins, OP_NOP);
MONO_ADD_INS (cfg->cbb, ins);
return ins;
} else
return NULL;
} else if (cmethod->klass == mono_defaults.array_class) {
if (fsig->param_count + fsig->hasthis == 3 && !cfg->gsharedvt && strcmp (cmethod->name, "GetGenericValueImpl") == 0)
return emit_array_generic_access (cfg, fsig, args, FALSE);
else if (fsig->param_count + fsig->hasthis == 3 && !cfg->gsharedvt && strcmp (cmethod->name, "SetGenericValueImpl") == 0)
return emit_array_generic_access (cfg, fsig, args, TRUE);
else if (!strcmp (cmethod->name, "GetElementSize")) {
int vt_reg = alloc_preg (cfg);
int class_reg = alloc_preg (cfg);
int sizes_reg = alloc_ireg (cfg);
MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vt_reg, args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, class_reg, vt_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, sizes_reg, class_reg, m_class_offsetof_sizes ());
return ins;
} else if (!strcmp (cmethod->name, "IsPrimitive")) {
int dreg = alloc_ireg (cfg);
MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, dreg, args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, dreg, dreg, MONO_STRUCT_OFFSET (MonoVTable, flags));
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, dreg, dreg, MONO_VT_FLAG_ARRAY_IS_PRIMITIVE);
EMIT_NEW_BIALU_IMM (cfg, ins, OP_COMPARE_IMM, -1, dreg, 0);
EMIT_NEW_UNALU (cfg, ins, OP_ICGT, dreg, -1);
ins->type = STACK_I4;
return ins;
}
#ifndef MONO_BIG_ARRAYS
/*
* This is an inline version of GetLength/GetLowerBound(0) used frequently in
* Array methods.
*/
else if (((strcmp (cmethod->name, "GetLength") == 0 && fsig->param_count + fsig->hasthis == 2) ||
(strcmp (cmethod->name, "GetLowerBound") == 0 && fsig->param_count + fsig->hasthis == 2)) &&
args [1]->opcode == OP_ICONST && args [1]->inst_c0 == 0) {
int dreg = alloc_ireg (cfg);
int bounds_reg = alloc_ireg_mp (cfg);
MonoBasicBlock *end_bb, *szarray_bb;
gboolean get_length = strcmp (cmethod->name, "GetLength") == 0;
NEW_BBLOCK (cfg, end_bb);
NEW_BBLOCK (cfg, szarray_bb);
EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOAD_MEMBASE, bounds_reg,
args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, bounds));
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, szarray_bb);
/* Non-szarray case */
if (get_length)
EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, length));
else
EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
MONO_START_BB (cfg, szarray_bb);
/* Szarray case */
if (get_length)
EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, max_length));
else
MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
MONO_START_BB (cfg, end_bb);
EMIT_NEW_UNALU (cfg, ins, OP_MOVE, dreg, dreg);
ins->type = STACK_I4;
return ins;
}
#endif
if (cmethod->name [0] != 'g')
return NULL;
if (strcmp (cmethod->name, "get_Rank") == 0 && fsig->param_count + fsig->hasthis == 1) {
int dreg = alloc_ireg (cfg);
int vtable_reg = alloc_preg (cfg);
MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT (cfg, OP_LOAD_MEMBASE, vtable_reg,
args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
mini_type_from_op (cfg, ins, NULL, NULL);
return ins;
} else if (strcmp (cmethod->name, "get_Length") == 0 && fsig->param_count + fsig->hasthis == 1) {
int dreg = alloc_ireg (cfg);
EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOADI4_MEMBASE, dreg,
args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, max_length));
mini_type_from_op (cfg, ins, NULL, NULL);
return ins;
} else
return NULL;
} else if (cmethod->klass == runtime_helpers_class) {
if (strcmp (cmethod->name, "get_OffsetToStringData") == 0 && fsig->param_count == 0) {
EMIT_NEW_ICONST (cfg, ins, MONO_STRUCT_OFFSET (MonoString, chars));
return ins;
} else if (!strcmp (cmethod->name, "GetRawData")) {
int dreg = alloc_preg (cfg);
EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, args [0]->dreg, MONO_ABI_SIZEOF (MonoObject));
return ins;
} else if (strcmp (cmethod->name, "IsReferenceOrContainsReferences") == 0 && fsig->param_count == 0) {
MonoGenericContext *ctx = mono_method_get_context (cmethod);
g_assert (ctx);
g_assert (ctx->method_inst);
g_assert (ctx->method_inst->type_argc == 1);
MonoType *arg_type = ctx->method_inst->type_argv [0];
MonoType *t;
MonoClass *klass;
ins = NULL;
			/* Resolve the argument class if possible so we can handle common cases fast */
t = mini_get_underlying_type (arg_type);
klass = mono_class_from_mono_type_internal (t);
mono_class_init_internal (klass);
if (MONO_TYPE_IS_REFERENCE (t))
EMIT_NEW_ICONST (cfg, ins, 1);
else if (MONO_TYPE_IS_PRIMITIVE (t))
EMIT_NEW_ICONST (cfg, ins, 0);
else if (cfg->gshared && (t->type == MONO_TYPE_VAR || t->type == MONO_TYPE_MVAR) && !mini_type_var_is_vt (t))
EMIT_NEW_ICONST (cfg, ins, 1);
else if (!cfg->gshared || !mini_class_check_context_used (cfg, klass))
EMIT_NEW_ICONST (cfg, ins, m_class_has_references (klass) ? 1 : 0);
else {
g_assert (cfg->gshared);
/* Have to use the original argument class here */
MonoClass *arg_class = mono_class_from_mono_type_internal (arg_type);
int context_used = mini_class_check_context_used (cfg, arg_class);
/* This returns 1 or 2 */
MonoInst *info = mini_emit_get_rgctx_klass (cfg, context_used, arg_class, MONO_RGCTX_INFO_CLASS_IS_REF_OR_CONTAINS_REFS);
int dreg = alloc_ireg (cfg);
EMIT_NEW_BIALU_IMM (cfg, ins, OP_ISUB_IMM, dreg, info->dreg, 1);
}
return ins;
} else if (strcmp (cmethod->name, "IsBitwiseEquatable") == 0 && fsig->param_count == 0) {
MonoGenericContext *ctx = mono_method_get_context (cmethod);
g_assert (ctx);
g_assert (ctx->method_inst);
g_assert (ctx->method_inst->type_argc == 1);
MonoType *arg_type = ctx->method_inst->type_argv [0];
MonoType *t;
ins = NULL;
			/* Resolve the argument class if possible so we can handle common cases fast */
t = mini_get_underlying_type (arg_type);
if (MONO_TYPE_IS_PRIMITIVE (t) && t->type != MONO_TYPE_R4 && t->type != MONO_TYPE_R8)
EMIT_NEW_ICONST (cfg, ins, 1);
else
EMIT_NEW_ICONST (cfg, ins, 0);
return ins;
} else if (!strcmp (cmethod->name, "ObjectHasComponentSize")) {
g_assert (fsig->param_count == 1);
g_assert (fsig->params [0]->type == MONO_TYPE_OBJECT);
			// Return true for arrays and strings
int dreg;
dreg = alloc_ireg (cfg);
MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, dreg, args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, dreg, dreg, MONO_STRUCT_OFFSET (MonoVTable, flags));
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, dreg, dreg, MONO_VT_FLAG_ARRAY_OR_STRING);
EMIT_NEW_BIALU_IMM (cfg, ins, OP_COMPARE_IMM, -1, dreg, 0);
EMIT_NEW_UNALU (cfg, ins, OP_ICGT, dreg, -1);
ins->type = STACK_I4;
return ins;
} else if (!strcmp (cmethod->name, "ObjectHasReferences")) {
int dreg = alloc_ireg (cfg);
MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, dreg, args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, dreg, dreg, MONO_STRUCT_OFFSET (MonoVTable, flags));
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, dreg, dreg, MONO_VT_FLAG_HAS_REFERENCES);
EMIT_NEW_BIALU_IMM (cfg, ins, OP_COMPARE_IMM, -1, dreg, 0);
EMIT_NEW_UNALU (cfg, ins, OP_ICGT, dreg, -1);
ins->type = STACK_I4;
return ins;
} else
return NULL;
} else if (cmethod->klass == mono_class_try_get_memory_marshal_class ()) {
if (!strcmp (cmethod->name, "GetArrayDataReference")) {
// Logic below works for both SZARRAY and MDARRAY
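// (An MDARRAY keeps its bounds behind a pointer field, so the inline data
// still starts at MONO_STRUCT_OFFSET (MonoArray, vector) for both layouts.)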
int dreg = alloc_preg (cfg);
MONO_EMIT_NULL_CHECK (cfg, args [0]->dreg, FALSE);
EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, vector));
return ins;
}
} else if (cmethod->klass == mono_defaults.monitor_class) {
gboolean is_enter = FALSE;
gboolean is_v4 = FALSE;
if (!strcmp (cmethod->name, "Enter") && fsig->param_count == 2 && m_type_is_byref (fsig->params [1])) {
is_enter = TRUE;
is_v4 = TRUE;
}
if (!strcmp (cmethod->name, "Enter") && fsig->param_count == 1)
is_enter = TRUE;
if (is_enter) {
/*
* To make async stack traces work, icalls which can block should have a wrapper.
* For Monitor.Enter, emit two calls: a fastpath which doesn't have a wrapper, and a slowpath, which does.
*/
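/*
 * Rough shape of the emitted code (sketch):
 *
 *   if (mono_monitor_enter_fast (obj))  // no wrapper, never blocks
 *       goto done;
 *   mono_monitor_enter_internal (obj);  // wrapped icall, can block
 * done:
 */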
MonoBasicBlock *end_bb;
NEW_BBLOCK (cfg, end_bb);
if (is_v4)
ins = mono_emit_jit_icall (cfg, mono_monitor_enter_v4_fast, args);
else
ins = mono_emit_jit_icall (cfg, mono_monitor_enter_fast, args);
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, ins->dreg, 0);
MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBNE_UN, end_bb);
if (is_v4)
ins = mono_emit_jit_icall (cfg, mono_monitor_enter_v4_internal, args);
else
ins = mono_emit_jit_icall (cfg, mono_monitor_enter_internal, args);
MONO_START_BB (cfg, end_bb);
return ins;
}
} else if (cmethod->klass == mono_defaults.thread_class) {
if (strcmp (cmethod->name, "SpinWait_nop") == 0 && fsig->param_count == 0) {
MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
MONO_ADD_INS (cfg->cbb, ins);
return ins;
} else if (strcmp (cmethod->name, "MemoryBarrier") == 0 && fsig->param_count == 0) {
return mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
} else if (!strcmp (cmethod->name, "VolatileRead") && fsig->param_count == 1 && m_type_is_byref (fsig->params [0])) {
guint32 opcode = 0;
gboolean is_ref = byref_arg_is_reference (fsig->params [0]);
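/* Pick a load opcode matching the pointee type; the ordering guarantee of
   VolatileRead comes from the full barrier emitted after the load below. */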
if (fsig->params [0]->type == MONO_TYPE_I1)
opcode = OP_LOADI1_MEMBASE;
else if (fsig->params [0]->type == MONO_TYPE_U1)
opcode = OP_LOADU1_MEMBASE;
else if (fsig->params [0]->type == MONO_TYPE_I2)
opcode = OP_LOADI2_MEMBASE;
else if (fsig->params [0]->type == MONO_TYPE_U2)
opcode = OP_LOADU2_MEMBASE;
else if (fsig->params [0]->type == MONO_TYPE_I4)
opcode = OP_LOADI4_MEMBASE;
else if (fsig->params [0]->type == MONO_TYPE_U4)
opcode = OP_LOADU4_MEMBASE;
else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_U8)
opcode = OP_LOADI8_MEMBASE;
else if (fsig->params [0]->type == MONO_TYPE_R4)
opcode = OP_LOADR4_MEMBASE;
else if (fsig->params [0]->type == MONO_TYPE_R8)
opcode = OP_LOADR8_MEMBASE;
else if (is_ref || fsig->params [0]->type == MONO_TYPE_I || fsig->params [0]->type == MONO_TYPE_U)
opcode = OP_LOAD_MEMBASE;
if (opcode) {
MONO_INST_NEW (cfg, ins, opcode);
ins->inst_basereg = args [0]->dreg;
ins->inst_offset = 0;
MONO_ADD_INS (cfg->cbb, ins);
switch (fsig->params [0]->type) {
case MONO_TYPE_I1:
case MONO_TYPE_U1:
case MONO_TYPE_I2:
case MONO_TYPE_U2:
case MONO_TYPE_I4:
case MONO_TYPE_U4:
ins->dreg = mono_alloc_ireg (cfg);
ins->type = STACK_I4;
break;
case MONO_TYPE_I8:
case MONO_TYPE_U8:
ins->dreg = mono_alloc_lreg (cfg);
ins->type = STACK_I8;
break;
case MONO_TYPE_I:
case MONO_TYPE_U:
ins->dreg = mono_alloc_ireg (cfg);
#if SIZEOF_REGISTER == 8
ins->type = STACK_I8;
#else
ins->type = STACK_I4;
#endif
break;
case MONO_TYPE_R4:
case MONO_TYPE_R8:
ins->dreg = mono_alloc_freg (cfg);
ins->type = STACK_R8;
break;
default:
g_assert (is_ref);
ins->dreg = mono_alloc_ireg_ref (cfg);
ins->type = STACK_OBJ;
break;
}
if (opcode == OP_LOADI8_MEMBASE)
ins = mono_decompose_opcode (cfg, ins);
mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
return ins;
}
} else if (!strcmp (cmethod->name, "VolatileWrite") && fsig->param_count == 2 && m_type_is_byref (fsig->params [0])) {
guint32 opcode = 0;
gboolean is_ref = byref_arg_is_reference (fsig->params [0]);
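/* Mirror image of VolatileRead above: here the full barrier is emitted
   before the store. */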
if (fsig->params [0]->type == MONO_TYPE_I1 || fsig->params [0]->type == MONO_TYPE_U1)
opcode = OP_STOREI1_MEMBASE_REG;
else if (fsig->params [0]->type == MONO_TYPE_I2 || fsig->params [0]->type == MONO_TYPE_U2)
opcode = OP_STOREI2_MEMBASE_REG;
else if (fsig->params [0]->type == MONO_TYPE_I4 || fsig->params [0]->type == MONO_TYPE_U4)
opcode = OP_STOREI4_MEMBASE_REG;
else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_U8)
opcode = OP_STOREI8_MEMBASE_REG;
else if (fsig->params [0]->type == MONO_TYPE_R4)
opcode = OP_STORER4_MEMBASE_REG;
else if (fsig->params [0]->type == MONO_TYPE_R8)
opcode = OP_STORER8_MEMBASE_REG;
else if (is_ref || fsig->params [0]->type == MONO_TYPE_I || fsig->params [0]->type == MONO_TYPE_U)
opcode = OP_STORE_MEMBASE_REG;
if (opcode) {
mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
MONO_INST_NEW (cfg, ins, opcode);
ins->sreg1 = args [1]->dreg;
ins->inst_destbasereg = args [0]->dreg;
ins->inst_offset = 0;
MONO_ADD_INS (cfg->cbb, ins);
if (opcode == OP_STOREI8_MEMBASE_REG)
ins = mono_decompose_opcode (cfg, ins);
return ins;
}
}
} else if (in_corlib &&
(strcmp (cmethod_klass_name_space, "System.Threading") == 0) &&
(strcmp (cmethod_klass_name, "Interlocked") == 0)) {
ins = NULL;
#if SIZEOF_REGISTER == 8
if (!cfg->llvm_only && strcmp (cmethod->name, "Read") == 0 && fsig->param_count == 1 && (fsig->params [0]->type == MONO_TYPE_I8)) {
if (!cfg->llvm_only && mono_arch_opcode_supported (OP_ATOMIC_LOAD_I8)) {
MONO_INST_NEW (cfg, ins, OP_ATOMIC_LOAD_I8);
ins->dreg = mono_alloc_preg (cfg);
ins->sreg1 = args [0]->dreg;
ins->type = STACK_I8;
ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_SEQ;
MONO_ADD_INS (cfg->cbb, ins);
} else {
MonoInst *load_ins;
mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
/* 64 bit reads are already atomic */
MONO_INST_NEW (cfg, load_ins, OP_LOADI8_MEMBASE);
load_ins->dreg = mono_alloc_preg (cfg);
load_ins->inst_basereg = args [0]->dreg;
load_ins->inst_offset = 0;
load_ins->type = STACK_I8;
MONO_ADD_INS (cfg->cbb, load_ins);
mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
ins = load_ins;
}
}
#endif
if (strcmp (cmethod->name, "Increment") == 0 && fsig->param_count == 1) {
MonoInst *ins_iconst;
guint32 opcode = 0;
if (fsig->params [0]->type == MONO_TYPE_I4) {
opcode = OP_ATOMIC_ADD_I4;
cfg->has_atomic_add_i4 = TRUE;
}
#if SIZEOF_REGISTER == 8
else if (fsig->params [0]->type == MONO_TYPE_I8)
opcode = OP_ATOMIC_ADD_I8;
#endif
if (opcode) {
if (!mono_arch_opcode_supported (opcode))
return NULL;
MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
ins_iconst->inst_c0 = 1;
ins_iconst->dreg = mono_alloc_ireg (cfg);
MONO_ADD_INS (cfg->cbb, ins_iconst);
MONO_INST_NEW (cfg, ins, opcode);
ins->dreg = mono_alloc_ireg (cfg);
ins->inst_basereg = args [0]->dreg;
ins->inst_offset = 0;
ins->sreg2 = ins_iconst->dreg;
ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
MONO_ADD_INS (cfg->cbb, ins);
}
} else if (strcmp (cmethod->name, "Decrement") == 0 && fsig->param_count == 1) {
MonoInst *ins_iconst;
guint32 opcode = 0;
if (fsig->params [0]->type == MONO_TYPE_I4) {
opcode = OP_ATOMIC_ADD_I4;
cfg->has_atomic_add_i4 = TRUE;
}
#if SIZEOF_REGISTER == 8
else if (fsig->params [0]->type == MONO_TYPE_I8)
opcode = OP_ATOMIC_ADD_I8;
#endif
if (opcode) {
if (!mono_arch_opcode_supported (opcode))
return NULL;
MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
ins_iconst->inst_c0 = -1;
ins_iconst->dreg = mono_alloc_ireg (cfg);
MONO_ADD_INS (cfg->cbb, ins_iconst);
MONO_INST_NEW (cfg, ins, opcode);
ins->dreg = mono_alloc_ireg (cfg);
ins->inst_basereg = args [0]->dreg;
ins->inst_offset = 0;
ins->sreg2 = ins_iconst->dreg;
ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
MONO_ADD_INS (cfg->cbb, ins);
}
} else if (fsig->param_count == 2 &&
((strcmp (cmethod->name, "Add") == 0) ||
(strcmp (cmethod->name, "And") == 0) ||
(strcmp (cmethod->name, "Or") == 0))) {
guint32 opcode = 0;
guint32 opcode_i4 = 0;
guint32 opcode_i8 = 0;
if (strcmp (cmethod->name, "Add") == 0) {
opcode_i4 = OP_ATOMIC_ADD_I4;
opcode_i8 = OP_ATOMIC_ADD_I8;
} else if (strcmp (cmethod->name, "And") == 0) {
opcode_i4 = OP_ATOMIC_AND_I4;
opcode_i8 = OP_ATOMIC_AND_I8;
} else if (strcmp (cmethod->name, "Or") == 0) {
opcode_i4 = OP_ATOMIC_OR_I4;
opcode_i8 = OP_ATOMIC_OR_I8;
} else {
g_assert_not_reached ();
}
if (fsig->params [0]->type == MONO_TYPE_I4) {
opcode = opcode_i4;
cfg->has_atomic_add_i4 = TRUE;
} else if (fsig->params [0]->type == MONO_TYPE_I8 && SIZEOF_REGISTER == 8) {
opcode = opcode_i8;
}
// For now, only Add is supported in non-LLVM back-ends
if (opcode && (COMPILE_LLVM (cfg) || mono_arch_opcode_supported (opcode))) {
MONO_INST_NEW (cfg, ins, opcode);
ins->dreg = mono_alloc_ireg (cfg);
ins->inst_basereg = args [0]->dreg;
ins->inst_offset = 0;
ins->sreg2 = args [1]->dreg;
ins->type = (opcode == opcode_i4) ? STACK_I4 : STACK_I8;
MONO_ADD_INS (cfg->cbb, ins);
}
}
else if (strcmp (cmethod->name, "Exchange") == 0 && fsig->param_count == 2 && m_type_is_byref (fsig->params [0])) {
MonoInst *f2i = NULL, *i2f;
guint32 opcode, f2i_opcode, i2f_opcode;
gboolean is_ref = byref_arg_is_reference (fsig->params [0]);
gboolean is_float = fsig->params [0]->type == MONO_TYPE_R4 || fsig->params [0]->type == MONO_TYPE_R8;
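/*
 * Floats are exchanged through their integer bit patterns: the new value
 * is moved into an int vreg (OP_MOVE_F_TO_I*), exchanged atomically, and
 * the old value is moved back into a float vreg (OP_MOVE_I*_TO_F).
 */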
if (fsig->params [0]->type == MONO_TYPE_I4 ||
fsig->params [0]->type == MONO_TYPE_R4) {
opcode = OP_ATOMIC_EXCHANGE_I4;
f2i_opcode = OP_MOVE_F_TO_I4;
i2f_opcode = OP_MOVE_I4_TO_F;
cfg->has_atomic_exchange_i4 = TRUE;
}
#if SIZEOF_REGISTER == 8
else if (is_ref ||
fsig->params [0]->type == MONO_TYPE_I8 ||
fsig->params [0]->type == MONO_TYPE_R8 ||
fsig->params [0]->type == MONO_TYPE_I) {
opcode = OP_ATOMIC_EXCHANGE_I8;
f2i_opcode = OP_MOVE_F_TO_I8;
i2f_opcode = OP_MOVE_I8_TO_F;
}
#else
else if (is_ref || fsig->params [0]->type == MONO_TYPE_I) {
opcode = OP_ATOMIC_EXCHANGE_I4;
cfg->has_atomic_exchange_i4 = TRUE;
}
#endif
else
return NULL;
if (!mono_arch_opcode_supported (opcode))
return NULL;
if (is_float) {
/* TODO: Decompose these opcodes instead of bailing here. */
if (COMPILE_SOFT_FLOAT (cfg))
return NULL;
MONO_INST_NEW (cfg, f2i, f2i_opcode);
f2i->dreg = mono_alloc_ireg (cfg);
f2i->sreg1 = args [1]->dreg;
if (f2i_opcode == OP_MOVE_F_TO_I4)
f2i->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
MONO_ADD_INS (cfg->cbb, f2i);
}
if (is_ref && !mini_debug_options.weak_memory_model)
mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
MONO_INST_NEW (cfg, ins, opcode);
ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : mono_alloc_ireg (cfg);
ins->inst_basereg = args [0]->dreg;
ins->inst_offset = 0;
ins->sreg2 = is_float ? f2i->dreg : args [1]->dreg;
MONO_ADD_INS (cfg->cbb, ins);
switch (fsig->params [0]->type) {
case MONO_TYPE_I4:
ins->type = STACK_I4;
break;
case MONO_TYPE_I8:
ins->type = STACK_I8;
break;
case MONO_TYPE_I:
#if SIZEOF_REGISTER == 8
ins->type = STACK_I8;
#else
ins->type = STACK_I4;
#endif
break;
case MONO_TYPE_R4:
case MONO_TYPE_R8:
ins->type = STACK_R8;
break;
default:
g_assert (is_ref);
ins->type = STACK_OBJ;
break;
}
if (is_float) {
MONO_INST_NEW (cfg, i2f, i2f_opcode);
i2f->dreg = mono_alloc_freg (cfg);
i2f->sreg1 = ins->dreg;
i2f->type = STACK_R8;
if (i2f_opcode == OP_MOVE_I4_TO_F)
i2f->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
MONO_ADD_INS (cfg->cbb, i2f);
ins = i2f;
}
if (cfg->gen_write_barriers && is_ref)
mini_emit_write_barrier (cfg, args [0], args [1]);
}
else if ((strcmp (cmethod->name, "CompareExchange") == 0) && fsig->param_count == 3) {
MonoInst *f2i_new = NULL, *f2i_cmp = NULL, *i2f;
guint32 opcode, f2i_opcode, i2f_opcode;
gboolean is_ref = mini_type_is_reference (fsig->params [1]);
gboolean is_float = fsig->params [1]->type == MONO_TYPE_R4 || fsig->params [1]->type == MONO_TYPE_R8;
if (fsig->params [1]->type == MONO_TYPE_I4 ||
fsig->params [1]->type == MONO_TYPE_R4) {
opcode = OP_ATOMIC_CAS_I4;
f2i_opcode = OP_MOVE_F_TO_I4;
i2f_opcode = OP_MOVE_I4_TO_F;
cfg->has_atomic_cas_i4 = TRUE;
}
#if SIZEOF_REGISTER == 8
else if (is_ref ||
fsig->params [1]->type == MONO_TYPE_I8 ||
fsig->params [1]->type == MONO_TYPE_R8 ||
fsig->params [1]->type == MONO_TYPE_I) {
opcode = OP_ATOMIC_CAS_I8;
f2i_opcode = OP_MOVE_F_TO_I8;
i2f_opcode = OP_MOVE_I8_TO_F;
}
#else
else if (is_ref || fsig->params [1]->type == MONO_TYPE_I) {
opcode = OP_ATOMIC_CAS_I4;
cfg->has_atomic_cas_i4 = TRUE;
}
#endif
else
return NULL;
if (!mono_arch_opcode_supported (opcode))
return NULL;
if (is_float) {
/* TODO: Decompose these opcodes instead of bailing here. */
if (COMPILE_SOFT_FLOAT (cfg))
return NULL;
MONO_INST_NEW (cfg, f2i_new, f2i_opcode);
f2i_new->dreg = mono_alloc_ireg (cfg);
f2i_new->sreg1 = args [1]->dreg;
if (f2i_opcode == OP_MOVE_F_TO_I4)
f2i_new->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
MONO_ADD_INS (cfg->cbb, f2i_new);
MONO_INST_NEW (cfg, f2i_cmp, f2i_opcode);
f2i_cmp->dreg = mono_alloc_ireg (cfg);
f2i_cmp->sreg1 = args [2]->dreg;
if (f2i_opcode == OP_MOVE_F_TO_I4)
f2i_cmp->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
MONO_ADD_INS (cfg->cbb, f2i_cmp);
}
if (is_ref && !mini_debug_options.weak_memory_model)
mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
MONO_INST_NEW (cfg, ins, opcode);
ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
ins->sreg1 = args [0]->dreg;
ins->sreg2 = is_float ? f2i_new->dreg : args [1]->dreg;
ins->sreg3 = is_float ? f2i_cmp->dreg : args [2]->dreg;
MONO_ADD_INS (cfg->cbb, ins);
switch (fsig->params [1]->type) {
case MONO_TYPE_I4:
ins->type = STACK_I4;
break;
case MONO_TYPE_I8:
ins->type = STACK_I8;
break;
case MONO_TYPE_I:
#if SIZEOF_REGISTER == 8
ins->type = STACK_I8;
#else
ins->type = STACK_I4;
#endif
break;
case MONO_TYPE_R4:
ins->type = cfg->r4_stack_type;
break;
case MONO_TYPE_R8:
ins->type = STACK_R8;
break;
default:
g_assert (mini_type_is_reference (fsig->params [1]));
ins->type = STACK_OBJ;
break;
}
if (is_float) {
MONO_INST_NEW (cfg, i2f, i2f_opcode);
i2f->dreg = mono_alloc_freg (cfg);
i2f->sreg1 = ins->dreg;
i2f->type = STACK_R8;
if (i2f_opcode == OP_MOVE_I4_TO_F)
i2f->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
MONO_ADD_INS (cfg->cbb, i2f);
ins = i2f;
}
if (cfg->gen_write_barriers && is_ref)
mini_emit_write_barrier (cfg, args [0], args [1]);
}
else if ((strcmp (cmethod->name, "CompareExchange") == 0) && fsig->param_count == 4 &&
fsig->params [1]->type == MONO_TYPE_I4) {
MonoInst *cmp, *ceq;
if (!mono_arch_opcode_supported (OP_ATOMIC_CAS_I4))
return NULL;
/* int32 r = CAS (location, value, comparand); */
MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
ins->dreg = alloc_ireg (cfg);
ins->sreg1 = args [0]->dreg;
ins->sreg2 = args [1]->dreg;
ins->sreg3 = args [2]->dreg;
ins->type = STACK_I4;
MONO_ADD_INS (cfg->cbb, ins);
/* bool result = r == comparand; */
MONO_INST_NEW (cfg, cmp, OP_ICOMPARE);
cmp->sreg1 = ins->dreg;
cmp->sreg2 = args [2]->dreg;
cmp->type = STACK_I4;
MONO_ADD_INS (cfg->cbb, cmp);
MONO_INST_NEW (cfg, ceq, OP_ICEQ);
ceq->dreg = alloc_ireg (cfg);
ceq->type = STACK_I4;
MONO_ADD_INS (cfg->cbb, ceq);
/* *success = result; */
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, args [3]->dreg, 0, ceq->dreg);
cfg->has_atomic_cas_i4 = TRUE;
}
else if (strcmp (cmethod->name, "MemoryBarrier") == 0 && fsig->param_count == 0)
ins = mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
if (ins)
return ins;
} else if (in_corlib &&
(strcmp (cmethod_klass_name_space, "System.Threading") == 0) &&
(strcmp (cmethod_klass_name, "Volatile") == 0)) {
ins = NULL;
if (!cfg->llvm_only && !strcmp (cmethod->name, "Read") && fsig->param_count == 1) {
guint32 opcode = 0;
MonoType *t = fsig->params [0];
gboolean is_ref;
gboolean is_float = t->type == MONO_TYPE_R4 || t->type == MONO_TYPE_R8;
g_assert (m_type_is_byref (t));
is_ref = byref_arg_is_reference (t);
if (t->type == MONO_TYPE_I1)
opcode = OP_ATOMIC_LOAD_I1;
else if (t->type == MONO_TYPE_U1 || t->type == MONO_TYPE_BOOLEAN)
opcode = OP_ATOMIC_LOAD_U1;
else if (t->type == MONO_TYPE_I2)
opcode = OP_ATOMIC_LOAD_I2;
else if (t->type == MONO_TYPE_U2)
opcode = OP_ATOMIC_LOAD_U2;
else if (t->type == MONO_TYPE_I4)
opcode = OP_ATOMIC_LOAD_I4;
else if (t->type == MONO_TYPE_U4)
opcode = OP_ATOMIC_LOAD_U4;
else if (t->type == MONO_TYPE_R4)
opcode = OP_ATOMIC_LOAD_R4;
else if (t->type == MONO_TYPE_R8)
opcode = OP_ATOMIC_LOAD_R8;
#if SIZEOF_REGISTER == 8
else if (t->type == MONO_TYPE_I8 || t->type == MONO_TYPE_I)
opcode = OP_ATOMIC_LOAD_I8;
else if (is_ref || t->type == MONO_TYPE_U8 || t->type == MONO_TYPE_U)
opcode = OP_ATOMIC_LOAD_U8;
#else
else if (t->type == MONO_TYPE_I)
opcode = OP_ATOMIC_LOAD_I4;
else if (is_ref || t->type == MONO_TYPE_U)
opcode = OP_ATOMIC_LOAD_U4;
#endif
if (opcode) {
if (!mono_arch_opcode_supported (opcode))
return NULL;
MONO_INST_NEW (cfg, ins, opcode);
ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : (is_float ? mono_alloc_freg (cfg) : mono_alloc_ireg (cfg));
ins->sreg1 = args [0]->dreg;
ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_ACQ;
MONO_ADD_INS (cfg->cbb, ins);
switch (t->type) {
case MONO_TYPE_BOOLEAN:
case MONO_TYPE_I1:
case MONO_TYPE_U1:
case MONO_TYPE_I2:
case MONO_TYPE_U2:
case MONO_TYPE_I4:
case MONO_TYPE_U4:
ins->type = STACK_I4;
break;
case MONO_TYPE_I8:
case MONO_TYPE_U8:
ins->type = STACK_I8;
break;
case MONO_TYPE_I:
case MONO_TYPE_U:
#if SIZEOF_REGISTER == 8
ins->type = STACK_I8;
#else
ins->type = STACK_I4;
#endif
break;
case MONO_TYPE_R4:
ins->type = cfg->r4_stack_type;
break;
case MONO_TYPE_R8:
ins->type = STACK_R8;
break;
default:
g_assert (is_ref);
ins->type = STACK_OBJ;
break;
}
}
}
if (!cfg->llvm_only && !strcmp (cmethod->name, "Write") && fsig->param_count == 2) {
guint32 opcode = 0;
MonoType *t = fsig->params [0];
gboolean is_ref;
g_assert (m_type_is_byref (t));
is_ref = byref_arg_is_reference (t);
if (t->type == MONO_TYPE_I1)
opcode = OP_ATOMIC_STORE_I1;
else if (t->type == MONO_TYPE_U1 || t->type == MONO_TYPE_BOOLEAN)
opcode = OP_ATOMIC_STORE_U1;
else if (t->type == MONO_TYPE_I2)
opcode = OP_ATOMIC_STORE_I2;
else if (t->type == MONO_TYPE_U2)
opcode = OP_ATOMIC_STORE_U2;
else if (t->type == MONO_TYPE_I4)
opcode = OP_ATOMIC_STORE_I4;
else if (t->type == MONO_TYPE_U4)
opcode = OP_ATOMIC_STORE_U4;
else if (t->type == MONO_TYPE_R4)
opcode = OP_ATOMIC_STORE_R4;
else if (t->type == MONO_TYPE_R8)
opcode = OP_ATOMIC_STORE_R8;
#if SIZEOF_REGISTER == 8
else if (t->type == MONO_TYPE_I8 || t->type == MONO_TYPE_I)
opcode = OP_ATOMIC_STORE_I8;
else if (is_ref || t->type == MONO_TYPE_U8 || t->type == MONO_TYPE_U)
opcode = OP_ATOMIC_STORE_U8;
#else
else if (t->type == MONO_TYPE_I)
opcode = OP_ATOMIC_STORE_I4;
else if (is_ref || t->type == MONO_TYPE_U)
opcode = OP_ATOMIC_STORE_U4;
#endif
if (opcode) {
if (!mono_arch_opcode_supported (opcode))
return NULL;
MONO_INST_NEW (cfg, ins, opcode);
ins->dreg = args [0]->dreg;
ins->sreg1 = args [1]->dreg;
ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_REL;
MONO_ADD_INS (cfg->cbb, ins);
if (cfg->gen_write_barriers && is_ref)
mini_emit_write_barrier (cfg, args [0], args [1]);
}
}
if (ins)
return ins;
} else if (in_corlib &&
(strcmp (cmethod_klass_name_space, "System.Diagnostics") == 0) &&
(strcmp (cmethod_klass_name, "Debugger") == 0)) {
if (!strcmp (cmethod->name, "Break") && fsig->param_count == 0) {
if (mini_should_insert_breakpoint (cfg->method)) {
ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
} else {
MONO_INST_NEW (cfg, ins, OP_NOP);
MONO_ADD_INS (cfg->cbb, ins);
}
return ins;
}
} else if (in_corlib &&
(strcmp (cmethod_klass_name_space, "System.Reflection") == 0) &&
(strcmp (cmethod_klass_name, "Assembly") == 0)) {
if (cfg->llvm_only && !strcmp (cmethod->name, "GetExecutingAssembly")) {
/* No stack walks are currently available, so implement this as an intrinsic */
MonoInst *assembly_ins;
EMIT_NEW_AOTCONST (cfg, assembly_ins, MONO_PATCH_INFO_IMAGE, m_class_get_image (cfg->method->klass));
ins = mono_emit_jit_icall (cfg, mono_get_assembly_object, &assembly_ins);
return ins;
}
// While it is not required per
// https://msdn.microsoft.com/en-us/library/system.reflection.assembly.getcallingassembly(v=vs.110).aspx,
// have GetCallingAssembly behave consistently regardless of the optimization level.
// This fixes mono/tests/test-inline-call-stack.cs under FullAOT+LLVM.
cfg->no_inline |= COMPILE_LLVM (cfg) && strcmp (cmethod->name, "GetCallingAssembly") == 0;
} else if (in_corlib &&
(strcmp (cmethod_klass_name_space, "System.Reflection") == 0) &&
(strcmp (cmethod_klass_name, "MethodBase") == 0)) {
if (cfg->llvm_only && !strcmp (cmethod->name, "GetCurrentMethod")) {
/* No stack walks are currently available, so implement this as an intrinsic */
MonoInst *method_ins;
MonoMethod *declaring = cfg->method;
/* This returns the declaring generic method */
if (declaring->is_inflated)
declaring = ((MonoMethodInflated*)cfg->method)->declaring;
EMIT_NEW_AOTCONST (cfg, method_ins, MONO_PATCH_INFO_METHODCONST, declaring);
ins = mono_emit_jit_icall (cfg, mono_get_method_object, &method_ins);
cfg->no_inline = TRUE;
if (cfg->method != cfg->current_method)
mini_set_inline_failure (cfg, "MethodBase:GetCurrentMethod ()");
return ins;
}
} else if (cmethod->klass == mono_class_try_get_math_class ()) {
/*
* There is general branchless code for Min/Max, but it does not work for
* all inputs:
* http://everything2.com/?node_id=1051618
*/
/*
 * Constant folding for various Math methods.
 * We avoid folding inputs that fall outside a function's domain, in case
 * the user code was expecting to observe the error or NaN result that such
 * an input produces at runtime.
*/
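/*
 * Illustration: a constant "Math.Asin (0.5)" folds into an OP_R8CONST
 * below, while "Math.Asin (2.0)" stays a real call because it falls
 * outside the fabs (source) <= 1 guard.
 */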
if (fsig->param_count == 1 && args [0]->opcode == OP_R8CONST){
double source = *(double *)args [0]->inst_p0;
int opcode = 0;
const char *mname = cmethod->name;
char c = mname [0];
if (c == 'A'){
if (strcmp (mname, "Abs") == 0 && fsig->params [0]->type == MONO_TYPE_R8) {
opcode = OP_ABS;
} else if (strcmp (mname, "Asin") == 0){
if (fabs (source) <= 1)
opcode = OP_ASIN;
} else if (strcmp (mname, "Asinh") == 0){
opcode = OP_ASINH;
} else if (strcmp (mname, "Acos") == 0){
if (fabs (source) <= 1)
opcode = OP_ACOS;
} else if (strcmp (mname, "Acosh") == 0){
if (source >= 1)
opcode = OP_ACOSH;
} else if (strcmp (mname, "Atan") == 0){
opcode = OP_ATAN;
} else if (strcmp (mname, "Atanh") == 0){
if (fabs (source) < 1)
opcode = OP_ATANH;
}
} else if (c == 'C'){
if (strcmp (mname, "Cos") == 0) {
if (!isinf (source))
opcode = OP_COS;
} else if (strcmp (mname, "Cbrt") == 0){
opcode = OP_CBRT;
} else if (strcmp (mname, "Cosh") == 0){
opcode = OP_COSH;
}
} else if (c == 'R'){
if (strcmp (mname, "Round") == 0)
opcode = OP_ROUND;
} else if (c == 'S'){
if (strcmp (mname, "Sin") == 0) {
if (!isinf (source))
opcode = OP_SIN;
} else if (strcmp (mname, "Sqrt") == 0) {
if (source >= 0)
opcode = OP_SQRT;
} else if (strcmp (mname, "Sinh") == 0){
opcode = OP_SINH;
}
} else if (c == 'T'){
if (strcmp (mname, "Tan") == 0){
if (!isinf (source))
opcode = OP_TAN;
} else if (strcmp (mname, "Tanh") == 0){
opcode = OP_TANH;
}
}
if (opcode) {
double *dest = (double *)mono_mem_manager_alloc (cfg->mem_manager, sizeof (double));
double result = 0;
MONO_INST_NEW (cfg, ins, OP_R8CONST);
ins->type = STACK_R8;
ins->dreg = mono_alloc_dreg (cfg, (MonoStackType) ins->type);
ins->inst_p0 = dest;
switch (opcode){
case OP_ABS:
result = fabs (source);
break;
case OP_ACOS:
result = acos (source);
break;
case OP_ACOSH:
result = acosh (source);
break;
case OP_ASIN:
result = asin (source);
break;
case OP_ASINH:
result = asinh (source);
break;
case OP_ATAN:
result = atan (source);
break;
case OP_ATANH:
result = atanh (source);
break;
case OP_CBRT:
result = cbrt (source);
break;
case OP_COS:
result = cos (source);
break;
case OP_COSH:
result = cosh (source);
break;
case OP_ROUND:
result = mono_round_to_even (source);
break;
case OP_SIN:
result = sin (source);
break;
case OP_SINH:
result = sinh (source);
break;
case OP_SQRT:
result = sqrt (source);
break;
case OP_TAN:
result = tan (source);
break;
case OP_TANH:
result = tanh (source);
break;
default:
g_error ("invalid opcode %d", (int)opcode);
}
*dest = result;
MONO_ADD_INS (cfg->cbb, ins);
NULLIFY_INS (args [0]);
return ins;
}
}
} else if (cmethod->klass == mono_defaults.systemtype_class && !strcmp (cmethod->name, "op_Equality") &&
args [0]->klass == mono_defaults.runtimetype_class && args [1]->klass == mono_defaults.runtimetype_class) {
CompRelation rel = get_rttype_ins_relation (args [0], args [1]);
if (rel == CMP_EQ) {
if (cfg->verbose_level > 2)
printf ("-> true\n");
EMIT_NEW_ICONST (cfg, ins, 1);
} else if (rel == CMP_NE) {
if (cfg->verbose_level > 2)
printf ("-> false\n");
EMIT_NEW_ICONST (cfg, ins, 0);
} else {
EMIT_NEW_BIALU (cfg, ins, OP_COMPARE, -1, args [0]->dreg, args [1]->dreg);
MONO_INST_NEW (cfg, ins, OP_PCEQ);
ins->dreg = alloc_preg (cfg);
ins->type = STACK_I4;
MONO_ADD_INS (cfg->cbb, ins);
}
return ins;
} else if (cmethod->klass == mono_defaults.systemtype_class && !strcmp (cmethod->name, "op_Inequality") &&
args [0]->klass == mono_defaults.runtimetype_class && args [1]->klass == mono_defaults.runtimetype_class) {
CompRelation rel = get_rttype_ins_relation (args [0], args [1]);
if (rel == CMP_NE) {
if (cfg->verbose_level > 2)
printf ("-> true\n");
EMIT_NEW_ICONST (cfg, ins, 1);
} else if (rel == CMP_EQ) {
if (cfg->verbose_level > 2)
printf ("-> false\n");
EMIT_NEW_ICONST (cfg, ins, 0);
} else {
EMIT_NEW_BIALU (cfg, ins, OP_COMPARE, -1, args [0]->dreg, args [1]->dreg);
MONO_INST_NEW (cfg, ins, OP_ICNEQ);
ins->dreg = alloc_preg (cfg);
ins->type = STACK_I4;
MONO_ADD_INS (cfg->cbb, ins);
}
return ins;
} else if (cmethod->klass == mono_defaults.systemtype_class && !strcmp (cmethod->name, "get_IsValueType") &&
args [0]->klass == mono_defaults.runtimetype_class) {
MonoClass *k1 = get_class_from_ldtoken_ins (args [0]);
if (k1) {
MonoType *t1 = m_class_get_byval_arg (k1);
MonoType *constraint1 = NULL;
/* Common case in gshared BCL code: t1 is a gshared type like T_INT */
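/* e.g. the expansion of "typeof (T).IsValueType" in shared code: an object
   constraint means T is a reference type, a primitive constraint means it
   is a value type */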
if (mono_class_is_gparam (k1)) {
MonoGenericParam *gparam = t1->data.generic_param;
constraint1 = gparam->gshared_constraint;
if (constraint1) {
if (constraint1->type == MONO_TYPE_OBJECT) {
if (cfg->verbose_level > 2)
printf ("-> false\n");
EMIT_NEW_ICONST (cfg, ins, 0);
return ins;
} else if (MONO_TYPE_IS_PRIMITIVE (constraint1)) {
if (cfg->verbose_level > 2)
printf ("-> true\n");
EMIT_NEW_ICONST (cfg, ins, 1);
return ins;
}
}
}
}
return NULL;
} else if (((!strcmp (cmethod_klass_image->assembly->aname.name, "Xamarin.iOS") ||
!strcmp (cmethod_klass_image->assembly->aname.name, "Xamarin.TVOS") ||
!strcmp (cmethod_klass_image->assembly->aname.name, "Xamarin.MacCatalyst") ||
!strcmp (cmethod_klass_image->assembly->aname.name, "Xamarin.Mac") ||
!strcmp (cmethod_klass_image->assembly->aname.name, "Microsoft.iOS") ||
!strcmp (cmethod_klass_image->assembly->aname.name, "Microsoft.tvOS") ||
!strcmp (cmethod_klass_image->assembly->aname.name, "Microsoft.MacCatalyst") ||
!strcmp (cmethod_klass_image->assembly->aname.name, "Microsoft.macOS")) &&
!strcmp (cmethod_klass_name_space, "ObjCRuntime") &&
!strcmp (cmethod_klass_name, "Selector"))
) {
if ((cfg->backend->have_objc_get_selector || cfg->compile_llvm) &&
!strcmp (cmethod->name, "GetHandle") && fsig->param_count == 1 &&
(args [0]->opcode == OP_GOT_ENTRY || args [0]->opcode == OP_AOTCONST) &&
cfg->compile_aot) {
MonoInst *pi;
MonoJumpInfoToken *ji;
char *s;
if (args [0]->opcode == OP_GOT_ENTRY) {
pi = (MonoInst *)args [0]->inst_p1;
g_assert (pi->opcode == OP_PATCH_INFO);
g_assert (GPOINTER_TO_INT (pi->inst_p1) == MONO_PATCH_INFO_LDSTR);
ji = (MonoJumpInfoToken *)pi->inst_p0;
} else {
g_assert (GPOINTER_TO_INT (args [0]->inst_p1) == MONO_PATCH_INFO_LDSTR);
ji = (MonoJumpInfoToken *)args [0]->inst_p0;
}
NULLIFY_INS (args [0]);
s = mono_ldstr_utf8 (ji->image, mono_metadata_token_index (ji->token), cfg->error);
return_val_if_nok (cfg->error, NULL);
MONO_INST_NEW (cfg, ins, OP_OBJC_GET_SELECTOR);
ins->dreg = mono_alloc_ireg (cfg);
// FIXME: Leaks
ins->inst_p0 = s;
MONO_ADD_INS (cfg->cbb, ins);
return ins;
}
} else if (in_corlib &&
(strcmp (cmethod_klass_name_space, "System.Runtime.InteropServices") == 0) &&
(strcmp (cmethod_klass_name, "Marshal") == 0)) {
//Convert Marshal.PtrToStructure<T> of blittable T to direct loads
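//e.g. Marshal.PtrToStructure<int> (ptr) on a concrete, unshared
//instantiation becomes a plain memory load instead of a marshalling call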
if (strcmp (cmethod->name, "PtrToStructure") == 0 &&
cmethod->is_inflated &&
fsig->param_count == 1 &&
!mini_method_check_context_used (cfg, cmethod)) {
MonoGenericContext *method_context = mono_method_get_context (cmethod);
MonoType *arg0 = method_context->method_inst->type_argv [0];
if (mono_type_is_native_blittable (arg0))
return mini_emit_memory_load (cfg, arg0, args [0], 0, 0);
}
} else if (cmethod->klass == mono_defaults.enum_class && !strcmp (cmethod->name, "HasFlag") &&
args [0]->opcode == OP_BOX && args [1]->opcode == OP_BOX_ICONST && args [0]->klass == args [1]->klass) {
args [1]->opcode = OP_ICONST;
ins = mini_handle_enum_has_flag (cfg, args [0]->klass, NULL, args [0]->sreg1, args [1]);
NULLIFY_INS (args [0]);
return ins;
} else if (in_corlib &&
!strcmp (cmethod_klass_name_space, "System") &&
(!strcmp (cmethod_klass_name, "Span`1") || !strcmp (cmethod_klass_name, "ReadOnlySpan`1"))) {
return emit_span_intrinsics (cfg, cmethod, fsig, args);
} else if (in_corlib &&
!strcmp (cmethod_klass_name_space, "System.Runtime.CompilerServices") &&
!strcmp (cmethod_klass_name, "Unsafe")) {
return emit_unsafe_intrinsics (cfg, cmethod, fsig, args);
} else if (in_corlib &&
!strcmp (cmethod_klass_name_space, "System.Runtime.CompilerServices") &&
!strcmp (cmethod_klass_name, "JitHelpers")) {
return emit_jit_helpers_intrinsics (cfg, cmethod, fsig, args);
} else if (in_corlib &&
(strcmp (cmethod_klass_name_space, "System") == 0) &&
(strcmp (cmethod_klass_name, "Activator") == 0)) {
MonoGenericContext *method_context = mono_method_get_context (cmethod);
if (!strcmp (cmethod->name, "CreateInstance") &&
fsig->param_count == 0 &&
method_context != NULL &&
method_context->method_inst->type_argc == 1 &&
cmethod->is_inflated &&
!mini_method_check_context_used (cfg, cmethod)) {
MonoType *t = method_context->method_inst->type_argv [0];
MonoClass *arg0 = mono_class_from_mono_type_internal (t);
if (m_class_is_valuetype (arg0) && !mono_class_has_default_constructor (arg0, FALSE)) {
if (m_class_is_primitive (arg0)) {
int dreg = alloc_dreg (cfg, mini_type_to_stack_type (cfg, t));
mini_emit_init_rvar (cfg, dreg, t);
ins = cfg->cbb->last_ins;
} else {
MONO_INST_NEW (cfg, ins, MONO_CLASS_IS_SIMD (cfg, arg0) ? OP_XZERO : OP_VZERO);
ins->dreg = mono_alloc_dreg (cfg, STACK_VTYPE);
ins->type = STACK_VTYPE;
ins->klass = arg0;
MONO_ADD_INS (cfg->cbb, ins);
}
return ins;
}
}
}
#ifdef MONO_ARCH_SIMD_INTRINSICS
if (cfg->opt & MONO_OPT_SIMD) {
ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
if (ins)
return ins;
}
#endif
/* Fallback if SIMD is disabled */
if (in_corlib && !strcmp ("System.Numerics", cmethod_klass_name_space) && !strcmp ("Vector", cmethod_klass_name)) {
if (!strcmp (cmethod->name, "get_IsHardwareAccelerated")) {
EMIT_NEW_ICONST (cfg, ins, 0);
ins->type = STACK_I4;
return ins;
}
}
// Return false for IsSupported for all types in System.Runtime.Intrinsics.*
// if it's not handled in mono_emit_simd_intrinsics
if (in_corlib &&
!strncmp ("System.Runtime.Intrinsics", cmethod_klass_name_space, 25) &&
!strcmp (cmethod->name, "get_IsSupported")) {
EMIT_NEW_ICONST (cfg, ins, 0);
ins->type = STACK_I4;
return ins;
}
// On FullAOT, return false for RuntimeFeature.IsDynamicCodeCompiled, and for IsDynamicCodeSupported unless the interpreter is available; otherwise return true
if (in_corlib &&
!strcmp ("System.Runtime.CompilerServices", cmethod_klass_name_space) &&
!strcmp ("RuntimeFeature", cmethod_klass_name)) {
if (!strcmp (cmethod->name, "get_IsDynamicCodeCompiled")) {
EMIT_NEW_ICONST (cfg, ins, cfg->full_aot ? 0 : 1);
ins->type = STACK_I4;
return ins;
} else if (!strcmp (cmethod->name, "get_IsDynamicCodeSupported")) {
EMIT_NEW_ICONST (cfg, ins, cfg->full_aot ? (cfg->interp ? 1 : 0) : 1);
ins->type = STACK_I4;
return ins;
}
}
if (in_corlib &&
!strcmp ("System", cmethod_klass_name_space) &&
!strcmp ("ThrowHelper", cmethod_klass_name)) {
if (!strcmp ("ThrowForUnsupportedNumericsVectorBaseType", cmethod->name)) {
/* The mono JIT can't optimize the body of this method away */
MonoGenericContext *ctx = mono_method_get_context (cmethod);
g_assert (ctx);
g_assert (ctx->method_inst);
MonoType *t = ctx->method_inst->type_argv [0];
switch (t->type) {
case MONO_TYPE_I1:
case MONO_TYPE_U1:
case MONO_TYPE_I2:
case MONO_TYPE_U2:
case MONO_TYPE_I4:
case MONO_TYPE_U4:
case MONO_TYPE_I8:
case MONO_TYPE_U8:
case MONO_TYPE_R4:
case MONO_TYPE_R8:
case MONO_TYPE_I:
case MONO_TYPE_U:
MONO_INST_NEW (cfg, ins, OP_NOP);
MONO_ADD_INS (cfg->cbb, ins);
return ins;
default:
break;
}
}
else if (!strcmp ("ThrowForUnsupportedIntrinsicsVector64BaseType", cmethod->name) ||
!strcmp ("ThrowForUnsupportedIntrinsicsVector128BaseType", cmethod->name) ||
!strcmp ("ThrowForUnsupportedIntrinsicsVector256BaseType", cmethod->name)) {
/* The mono JIT can't optimize the body of this method away */
MonoGenericContext *ctx = mono_method_get_context (cmethod);
g_assert (ctx);
g_assert (ctx->method_inst);
MonoType *t = ctx->method_inst->type_argv [0];
switch (t->type) {
case MONO_TYPE_I1:
case MONO_TYPE_U1:
case MONO_TYPE_I2:
case MONO_TYPE_U2:
case MONO_TYPE_I4:
case MONO_TYPE_U4:
case MONO_TYPE_I8:
case MONO_TYPE_U8:
case MONO_TYPE_R4:
case MONO_TYPE_R8:
MONO_INST_NEW (cfg, ins, OP_NOP);
MONO_ADD_INS (cfg->cbb, ins);
return ins;
default:
break;
}
}
}
ins = mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
if (ins)
return ins;
if (COMPILE_LLVM (cfg)) {
ins = llvm_emit_inst_for_method (cfg, cmethod, fsig, args, in_corlib);
if (ins)
return ins;
}
return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
}
static MonoInst*
emit_array_unsafe_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
{
MonoClass *eklass;
if (is_set)
eklass = mono_class_from_mono_type_internal (fsig->params [2]);
else
eklass = mono_class_from_mono_type_internal (fsig->ret);
if (is_set) {
return mini_emit_array_store (cfg, eklass, args, FALSE);
} else {
MonoInst *ins, *addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE, FALSE);
EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, m_class_get_byval_arg (eklass), addr->dreg, 0);
return ins;
}
}
static gboolean
is_unsafe_mov_compatible (MonoCompile *cfg, MonoClass *param_klass, MonoClass *return_klass)
{
uint32_t align;
int param_size, return_size;
param_klass = mono_class_from_mono_type_internal (mini_get_underlying_type (m_class_get_byval_arg (param_klass)));
return_klass = mono_class_from_mono_type_internal (mini_get_underlying_type (m_class_get_byval_arg (return_klass)));
if (cfg->verbose_level > 3)
printf ("[UNSAFE-MOV-INTRISIC] %s <- %s\n", m_class_get_name (return_klass), m_class_get_name (param_klass));
//Don't allow mixing reference types with value types
if (m_class_is_valuetype (param_klass) != m_class_is_valuetype (return_klass)) {
if (cfg->verbose_level > 3)
printf ("[UNSAFE-MOV-INTRISIC]\tone of the args is a valuetype and the other is not\n");
return FALSE;
}
if (!m_class_is_valuetype (param_klass)) {
if (cfg->verbose_level > 3)
printf ("[UNSAFE-MOV-INTRISIC]\targs are reference types\n");
return TRUE;
}
//Value types must also be blittable (no GC references)
if (m_class_has_references (param_klass) || m_class_has_references (return_klass))
return FALSE;
MonoType *param_type = m_class_get_byval_arg (param_klass);
MonoType *return_type = m_class_get_byval_arg (return_klass);
/* Avoid mixing structs and primitive types/enums, they need to be handled differently in the JIT */
if ((MONO_TYPE_ISSTRUCT (param_type) && !MONO_TYPE_ISSTRUCT (return_type)) ||
(!MONO_TYPE_ISSTRUCT (param_type) && MONO_TYPE_ISSTRUCT (return_type))) {
if (cfg->verbose_level > 3)
printf ("[UNSAFE-MOV-INTRISIC]\tmixing structs and scalars\n");
return FALSE;
}
if (param_type->type == MONO_TYPE_R4 || param_type->type == MONO_TYPE_R8 ||
return_type->type == MONO_TYPE_R4 || return_type->type == MONO_TYPE_R8) {
if (cfg->verbose_level > 3)
printf ("[UNSAFE-MOV-INTRISIC]\tfloat or double are not supported\n");
return FALSE;
}
param_size = mono_class_value_size (param_klass, &align);
return_size = mono_class_value_size (return_klass, &align);
//We can do it if sizes match
if (param_size == return_size) {
if (cfg->verbose_level > 3)
printf ("[UNSAFE-MOV-INTRISIC]\tsame size\n");
return TRUE;
}
//No simple way to handle struct if sizes don't match
if (MONO_TYPE_ISSTRUCT (param_type)) {
if (cfg->verbose_level > 3)
printf ("[UNSAFE-MOV-INTRISIC]\tsize mismatch and type is a struct\n");
return FALSE;
}
/*
* Same reg size category.
* A quick note on why we don't require widening here.
* The intrinsic is "R Array.UnsafeMov<S,R> (S s)".
*
* Since the source value comes from a function argument, the JIT will already have
* the value in a VREG and performed any widening needed before (say, when loading from a field).
*/
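/*
 * Illustration: for "int Array.UnsafeMov<short, int> (short s)", the short
 * argument already sits widened in a 32-bit vreg, so reinterpreting it as
 * an int needs no extra conversion.
 */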
if (param_size <= 4 && return_size <= 4) {
if (cfg->verbose_level > 3)
printf ("[UNSAFE-MOV-INTRISIC]\tsize mismatch but both are of the same reg class\n");
return TRUE;
}
return FALSE;
}
static MonoInst*
emit_array_unsafe_mov (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args)
{
MonoClass *param_klass = mono_class_from_mono_type_internal (fsig->params [0]);
MonoClass *return_klass = mono_class_from_mono_type_internal (fsig->ret);
if (mini_is_gsharedvt_variable_type (fsig->ret))
return NULL;
//Value types that are semantically equivalent, or numbers that can be widened
if (is_unsafe_mov_compatible (cfg, param_klass, return_klass))
return args [0];
//Arrays of valuetypes that are semantically equivalent
if (m_class_get_rank (param_klass) == 1 && m_class_get_rank (return_klass) == 1 && is_unsafe_mov_compatible (cfg, m_class_get_element_class (param_klass), m_class_get_element_class (return_klass)))
return args [0];
return NULL;
}
MonoInst*
mini_emit_inst_for_field_load (MonoCompile *cfg, MonoClassField *field)
{
MonoClass *klass = m_field_get_parent (field);
const char *klass_name_space = m_class_get_name_space (klass);
const char *klass_name = m_class_get_name (klass);
MonoImage *klass_image = m_class_get_image (klass);
gboolean in_corlib = klass_image == mono_defaults.corlib;
gboolean is_le;
MonoInst *ins;
if (in_corlib && !strcmp (klass_name_space, "System") && !strcmp (klass_name, "BitConverter") && !strcmp (field->name, "IsLittleEndian")) {
is_le = (TARGET_BYTE_ORDER == G_LITTLE_ENDIAN);
EMIT_NEW_ICONST (cfg, ins, is_le);
return ins;
} else if ((klass == mono_defaults.int_class || klass == mono_defaults.uint_class) && strcmp (field->name, "Zero") == 0) {
EMIT_NEW_PCONST (cfg, ins, 0);
return ins;
}
return NULL;
}
#else
MONO_EMPTY_SOURCE_FILE (intrinsics);
#endif
| 1 |
dotnet/runtime | 66,006 | Revert "[mono][jit] Remove support for -O=-float32, i.e. treating r4 values a…" | Reverts this as x86 still uses it.
| vargaz | 2022-03-01T15:15:53Z | 2022-03-01T20:09:47Z | af71404d9a3b38e63408af47cc847ac1a1fcf4e1 | 2dd232a53c38ac874b15fe504df275b660988294 | Revert "[mono][jit] Remove support for -O=-float32, i.e. treating r4 values a…". Reverts this as x86 still uses it.
| ./src/mono/mono/mini/method-to-ir.c | /**
* \file
* Convert CIL to the JIT internal representation
*
* Author:
* Paolo Molaro ([email protected])
* Dietmar Maurer ([email protected])
*
* (C) 2002 Ximian, Inc.
* Copyright 2003-2010 Novell, Inc (http://www.novell.com)
* Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
* Licensed under the MIT license. See LICENSE file in the project root for full license information.
*/
#include <config.h>
#include <glib.h>
#include <mono/utils/mono-compiler.h>
#include "mini.h"
#ifndef DISABLE_JIT
#include <signal.h>
#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif
#include <math.h>
#include <string.h>
#include <ctype.h>
#ifdef HAVE_SYS_TIME_H
#include <sys/time.h>
#endif
#ifdef HAVE_ALLOCA_H
#include <alloca.h>
#endif
#include <mono/utils/memcheck.h>
#include <mono/metadata/abi-details.h>
#include <mono/metadata/assembly.h>
#include <mono/metadata/assembly-internals.h>
#include <mono/metadata/attrdefs.h>
#include <mono/metadata/loader.h>
#include <mono/metadata/tabledefs.h>
#include <mono/metadata/class.h>
#include <mono/metadata/class-abi-details.h>
#include <mono/metadata/object.h>
#include <mono/metadata/exception.h>
#include <mono/metadata/exception-internals.h>
#include <mono/metadata/opcodes.h>
#include <mono/metadata/mono-endian.h>
#include <mono/metadata/tokentype.h>
#include <mono/metadata/tabledefs.h>
#include <mono/metadata/marshal.h>
#include <mono/metadata/debug-helpers.h>
#include <mono/metadata/debug-internals.h>
#include <mono/metadata/gc-internals.h>
#include <mono/metadata/threads-types.h>
#include <mono/metadata/profiler-private.h>
#include <mono/metadata/profiler.h>
#include <mono/metadata/monitor.h>
#include <mono/utils/mono-memory-model.h>
#include <mono/utils/mono-error-internals.h>
#include <mono/metadata/mono-basic-block.h>
#include <mono/metadata/reflection-internals.h>
#include <mono/utils/mono-threads-coop.h>
#include <mono/utils/mono-utils-debug.h>
#include <mono/utils/mono-logger-internals.h>
#include <mono/metadata/verify-internals.h>
#include <mono/metadata/icall-decl.h>
#include "mono/metadata/icall-signatures.h"
#include "trace.h"
#include "ir-emit.h"
#include "jit-icalls.h"
#include <mono/jit/jit.h>
#include "seq-points.h"
#include "aot-compiler.h"
#include "mini-llvm.h"
#include "mini-runtime.h"
#include "llvmonly-runtime.h"
#include "mono/utils/mono-tls-inline.h"
#define BRANCH_COST 10
#define CALL_COST 10
/* Used for the JIT */
#define INLINE_LENGTH_LIMIT 20
/*
 * The AOT and JIT inline limits should be different:
 * AOT sees the whole program, so we can let opt inline methods for us,
 * while the JIT only sees one method and has to inline things itself.
*/
/* Used by LLVM AOT */
#define LLVM_AOT_INLINE_LENGTH_LIMIT 30
/* Used by the LLVM JIT */
#define LLVM_JIT_INLINE_LENGTH_LIMIT 100
static const gboolean debug_tailcall = FALSE; // logging
static const gboolean debug_tailcall_try_all = FALSE; // consider any call followed by ret
gboolean
mono_tailcall_print_enabled (void)
{
return debug_tailcall || MONO_TRACE_IS_TRACED (G_LOG_LEVEL_DEBUG, MONO_TRACE_TAILCALL);
}
void
mono_tailcall_print (const char *format, ...)
{
if (!mono_tailcall_print_enabled ())
return;
va_list args;
va_start (args, format);
g_printv (format, args);
va_end (args);
}
/* These have 'cfg' as an implicit argument */
#define INLINE_FAILURE(msg) do { \
if ((cfg->method != cfg->current_method) && (cfg->current_method->wrapper_type == MONO_WRAPPER_NONE)) { \
inline_failure (cfg, msg); \
goto exception_exit; \
} \
} while (0)
#define CHECK_CFG_EXCEPTION do {\
if (cfg->exception_type != MONO_EXCEPTION_NONE) \
goto exception_exit; \
} while (0)
#define FIELD_ACCESS_FAILURE(method, field) do { \
field_access_failure ((cfg), (method), (field)); \
goto exception_exit; \
} while (0)
#define GENERIC_SHARING_FAILURE(opcode) do { \
if (cfg->gshared) { \
gshared_failure (cfg, opcode, __FILE__, __LINE__); \
goto exception_exit; \
} \
} while (0)
#define GSHAREDVT_FAILURE(opcode) do { \
if (cfg->gsharedvt) { \
gsharedvt_failure (cfg, opcode, __FILE__, __LINE__); \
goto exception_exit; \
} \
} while (0)
#define OUT_OF_MEMORY_FAILURE do { \
mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR); \
mono_error_set_out_of_memory (cfg->error, ""); \
goto exception_exit; \
} while (0)
#define DISABLE_AOT(cfg) do { \
if ((cfg)->verbose_level >= 2) \
printf ("AOT disabled: %s:%d\n", __FILE__, __LINE__); \
(cfg)->disable_aot = TRUE; \
} while (0)
#define LOAD_ERROR do { \
break_on_unverified (); \
mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD); \
goto exception_exit; \
} while (0)
#define TYPE_LOAD_ERROR(klass) do { \
cfg->exception_ptr = klass; \
LOAD_ERROR; \
} while (0)
#define CHECK_CFG_ERROR do {\
if (!is_ok (cfg->error)) { \
mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR); \
goto mono_error_exit; \
} \
} while (0)
int mono_op_to_op_imm (int opcode);
int mono_op_to_op_imm_noemul (int opcode);
static int inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
guchar *ip, guint real_offset, gboolean inline_always, gboolean *is_empty);
static MonoInst*
convert_value (MonoCompile *cfg, MonoType *type, MonoInst *ins);
/* helper methods signatures */
/* type loading helpers */
static GENERATE_GET_CLASS_WITH_CACHE (iequatable, "System", "IEquatable`1")
static GENERATE_GET_CLASS_WITH_CACHE (geqcomparer, "System.Collections.Generic", "GenericEqualityComparer`1");
/*
* Instruction metadata
*/
#ifdef MINI_OP
#undef MINI_OP
#endif
#ifdef MINI_OP3
#undef MINI_OP3
#endif
#define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
#define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
#define NONE ' '
#define IREG 'i'
#define FREG 'f'
#define VREG 'v'
#define XREG 'x'
#if SIZEOF_REGISTER == 8 && SIZEOF_REGISTER == TARGET_SIZEOF_VOID_P
#define LREG IREG
#else
#define LREG 'l'
#endif
/* keep in sync with the enum in mini.h */
const char
mini_ins_info[] = {
#include "mini-ops.h"
};
#undef MINI_OP
#undef MINI_OP3
#define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
#define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
/*
* This should contain the index of the last sreg + 1. This is not the same
* as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
*/
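/* e.g. an opcode whose only source register is src2 gets a count of 2 here,
   even though it has a single sreg. */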
const gint8 mini_ins_sreg_counts[] = {
#include "mini-ops.h"
};
#undef MINI_OP
#undef MINI_OP3
guint32
mono_alloc_ireg (MonoCompile *cfg)
{
return alloc_ireg (cfg);
}
guint32
mono_alloc_lreg (MonoCompile *cfg)
{
return alloc_lreg (cfg);
}
guint32
mono_alloc_freg (MonoCompile *cfg)
{
return alloc_freg (cfg);
}
guint32
mono_alloc_preg (MonoCompile *cfg)
{
return alloc_preg (cfg);
}
guint32
mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
{
return alloc_dreg (cfg, stack_type);
}
/*
* mono_alloc_ireg_ref:
*
* Allocate an IREG, and mark it as holding a GC ref.
*/
guint32
mono_alloc_ireg_ref (MonoCompile *cfg)
{
return alloc_ireg_ref (cfg);
}
/*
* mono_alloc_ireg_mp:
*
* Allocate an IREG, and mark it as holding a managed pointer.
*/
guint32
mono_alloc_ireg_mp (MonoCompile *cfg)
{
return alloc_ireg_mp (cfg);
}
/*
* mono_alloc_ireg_copy:
*
* Allocate an IREG with the same GC type as VREG.
*/
guint32
mono_alloc_ireg_copy (MonoCompile *cfg, guint32 vreg)
{
if (vreg_is_ref (cfg, vreg))
return alloc_ireg_ref (cfg);
else if (vreg_is_mp (cfg, vreg))
return alloc_ireg_mp (cfg);
else
return alloc_ireg (cfg);
}
guint
mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
{
if (m_type_is_byref (type))
return OP_MOVE;
type = mini_get_underlying_type (type);
handle_enum:
switch (type->type) {
case MONO_TYPE_I1:
case MONO_TYPE_U1:
return OP_MOVE;
case MONO_TYPE_I2:
case MONO_TYPE_U2:
return OP_MOVE;
case MONO_TYPE_I4:
case MONO_TYPE_U4:
return OP_MOVE;
case MONO_TYPE_I:
case MONO_TYPE_U:
case MONO_TYPE_PTR:
case MONO_TYPE_FNPTR:
return OP_MOVE;
case MONO_TYPE_CLASS:
case MONO_TYPE_STRING:
case MONO_TYPE_OBJECT:
case MONO_TYPE_SZARRAY:
case MONO_TYPE_ARRAY:
return OP_MOVE;
case MONO_TYPE_I8:
case MONO_TYPE_U8:
#if SIZEOF_REGISTER == 8
return OP_MOVE;
#else
return OP_LMOVE;
#endif
case MONO_TYPE_R4:
return OP_RMOVE;
case MONO_TYPE_R8:
return OP_FMOVE;
case MONO_TYPE_VALUETYPE:
if (m_class_is_enumtype (type->data.klass)) {
type = mono_class_enum_basetype_internal (type->data.klass);
goto handle_enum;
}
if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type_internal (type)))
return OP_XMOVE;
return OP_VMOVE;
case MONO_TYPE_TYPEDBYREF:
return OP_VMOVE;
case MONO_TYPE_GENERICINST:
if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type_internal (type)))
return OP_XMOVE;
type = m_class_get_byval_arg (type->data.generic_class->container_class);
goto handle_enum;
case MONO_TYPE_VAR:
case MONO_TYPE_MVAR:
g_assert (cfg->gshared);
if (mini_type_var_is_vt (type))
return OP_VMOVE;
else
return mono_type_to_regmove (cfg, mini_get_underlying_type (type));
default:
g_error ("unknown type 0x%02x in type_to_regstore", type->type);
}
return -1;
}
void
mono_print_bb (MonoBasicBlock *bb, const char *msg)
{
int i;
MonoInst *tree;
GString *str = g_string_new ("");
g_string_append_printf (str, "%s %d: [IN: ", msg, bb->block_num);
for (i = 0; i < bb->in_count; ++i)
g_string_append_printf (str, " BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
g_string_append_printf (str, ", OUT: ");
for (i = 0; i < bb->out_count; ++i)
g_string_append_printf (str, " BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
g_string_append_printf (str, " ]\n");
g_print ("%s", str->str);
g_string_free (str, TRUE);
for (tree = bb->code; tree; tree = tree->next)
mono_print_ins_index (-1, tree);
}
static MONO_NEVER_INLINE gboolean
break_on_unverified (void)
{
if (mini_debug_options.break_on_unverified) {
G_BREAKPOINT ();
return TRUE;
}
return FALSE;
}
static void
clear_cfg_error (MonoCompile *cfg)
{
mono_error_cleanup (cfg->error);
error_init (cfg->error);
}
static MONO_NEVER_INLINE void
field_access_failure (MonoCompile *cfg, MonoMethod *method, MonoClassField *field)
{
char *method_fname = mono_method_full_name (method, TRUE);
char *field_fname = mono_field_full_name (field);
mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
mono_error_set_generic_error (cfg->error, "System", "FieldAccessException", "Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname);
g_free (method_fname);
g_free (field_fname);
}
static MONO_NEVER_INLINE void
inline_failure (MonoCompile *cfg, const char *msg)
{
if (cfg->verbose_level >= 2)
printf ("inline failed: %s\n", msg);
mono_cfg_set_exception (cfg, MONO_EXCEPTION_INLINE_FAILED);
}
static MONO_NEVER_INLINE void
gshared_failure (MonoCompile *cfg, int opcode, const char *file, int line)
{
if (cfg->verbose_level > 2)
printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", m_class_get_name_space (cfg->current_method->klass), m_class_get_name (cfg->current_method->klass), cfg->current_method->name, cfg->current_method->signature->param_count, mono_opcode_name (opcode), line);
mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED);
}
static MONO_NEVER_INLINE void
gsharedvt_failure (MonoCompile *cfg, int opcode, const char *file, int line)
{
cfg->exception_message = g_strdup_printf ("gsharedvt failed for method %s.%s.%s/%d opcode %s %s:%d", m_class_get_name_space (cfg->current_method->klass), m_class_get_name (cfg->current_method->klass), cfg->current_method->name, cfg->current_method->signature->param_count, mono_opcode_name ((opcode)), file, line);
if (cfg->verbose_level >= 2)
printf ("%s\n", cfg->exception_message);
mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED);
}
void
mini_set_inline_failure (MonoCompile *cfg, const char *msg)
{
if (cfg->verbose_level >= 2)
printf ("inline failed: %s\n", msg);
mono_cfg_set_exception (cfg, MONO_EXCEPTION_INLINE_FAILED);
}
/*
 * When using gsharedvt, some instantiations might be verifiable and some might not be, e.g.
* foo<T> (int i) { ldarg.0; box T; }
*/
#define UNVERIFIED do { \
if (cfg->gsharedvt) { \
if (cfg->verbose_level > 2) \
printf ("gsharedvt method failed to verify, falling back to instantiation.\n"); \
mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
goto exception_exit; \
} \
break_on_unverified (); \
goto unverified; \
} while (0)
#define GET_BBLOCK(cfg,tblock,ip) do { \
(tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
if (!(tblock)) { \
if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
NEW_BBLOCK (cfg, (tblock)); \
(tblock)->cil_code = (ip); \
ADD_BBLOCK (cfg, (tblock)); \
} \
} while (0)
/* Emit conversions so both operands of a binary opcode are of the same type */
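/* e.g. for "r4 + r8" the r4 operand gets an OP_RCONV_TO_R8, and on 64-bit a
   32-bit int mixed with a native-sized int is sign-extended via OP_SEXT_I4 */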
static void
add_widen_op (MonoCompile *cfg, MonoInst *ins, MonoInst **arg1_ref, MonoInst **arg2_ref)
{
MonoInst *arg1 = *arg1_ref;
MonoInst *arg2 = *arg2_ref;
if (((arg1->type == STACK_R4 && arg2->type == STACK_R8) ||
(arg1->type == STACK_R8 && arg2->type == STACK_R4))) {
MonoInst *conv;
/* Mixing r4/r8 is allowed by the spec */
if (arg1->type == STACK_R4) {
int dreg = alloc_freg (cfg);
EMIT_NEW_UNALU (cfg, conv, OP_RCONV_TO_R8, dreg, arg1->dreg);
conv->type = STACK_R8;
ins->sreg1 = dreg;
*arg1_ref = conv;
}
if (arg2->type == STACK_R4) {
int dreg = alloc_freg (cfg);
EMIT_NEW_UNALU (cfg, conv, OP_RCONV_TO_R8, dreg, arg2->dreg);
conv->type = STACK_R8;
ins->sreg2 = dreg;
*arg2_ref = conv;
}
}
#if SIZEOF_REGISTER == 8
/* FIXME: Need to add many more cases */
if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) {
MonoInst *widen;
int dr = alloc_preg (cfg);
EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg);
(ins)->sreg2 = widen->dreg;
}
#endif
}
#define ADD_UNOP(op) do { \
MONO_INST_NEW (cfg, ins, (op)); \
sp--; \
ins->sreg1 = sp [0]->dreg; \
type_from_op (cfg, ins, sp [0], NULL); \
CHECK_TYPE (ins); \
(ins)->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type); \
MONO_ADD_INS ((cfg)->cbb, (ins)); \
*sp++ = mono_decompose_opcode (cfg, ins); \
} while (0)
#define ADD_BINCOND(next_block) do { \
MonoInst *cmp; \
sp -= 2; \
MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
cmp->sreg1 = sp [0]->dreg; \
cmp->sreg2 = sp [1]->dreg; \
add_widen_op (cfg, cmp, &sp [0], &sp [1]); \
type_from_op (cfg, cmp, sp [0], sp [1]); \
CHECK_TYPE (cmp); \
type_from_op (cfg, ins, sp [0], sp [1]); \
ins->inst_many_bb = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
GET_BBLOCK (cfg, tblock, target); \
link_bblock (cfg, cfg->cbb, tblock); \
ins->inst_true_bb = tblock; \
if ((next_block)) { \
link_bblock (cfg, cfg->cbb, (next_block)); \
ins->inst_false_bb = (next_block); \
start_new_bblock = 1; \
} else { \
GET_BBLOCK (cfg, tblock, next_ip); \
link_bblock (cfg, cfg->cbb, tblock); \
ins->inst_false_bb = tblock; \
start_new_bblock = 2; \
} \
if (sp != stack_start) { \
handle_stack_args (cfg, stack_start, sp - stack_start); \
CHECK_UNVERIFIABLE (cfg); \
} \
MONO_ADD_INS (cfg->cbb, cmp); \
MONO_ADD_INS (cfg->cbb, ins); \
} while (0)
/* *
* link_bblock: Links two basic blocks
*
 * Links two basic blocks in the control flow graph: the 'from'
 * argument is the starting block and the 'to' argument is the block
 * that control flow proceeds to after 'from'.
*/
static void
link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
{
MonoBasicBlock **newa;
int i, found;
#if 0
if (from->cil_code) {
if (to->cil_code)
printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
else
printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
} else {
if (to->cil_code)
printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
else
printf ("edge from entry to exit\n");
}
#endif
found = FALSE;
for (i = 0; i < from->out_count; ++i) {
if (to == from->out_bb [i]) {
found = TRUE;
break;
}
}
if (!found) {
newa = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
for (i = 0; i < from->out_count; ++i) {
newa [i] = from->out_bb [i];
}
newa [i] = to;
from->out_count++;
from->out_bb = newa;
}
found = FALSE;
for (i = 0; i < to->in_count; ++i) {
if (from == to->in_bb [i]) {
found = TRUE;
break;
}
}
if (!found) {
newa = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
for (i = 0; i < to->in_count; ++i) {
newa [i] = to->in_bb [i];
}
newa [i] = from;
to->in_count++;
to->in_bb = newa;
}
}
void
mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
{
link_bblock (cfg, from, to);
}
static void
mono_create_spvar_for_region (MonoCompile *cfg, int region);
static void
mark_bb_in_region (MonoCompile *cfg, guint region, uint32_t start, uint32_t end)
{
MonoBasicBlock *bb = cfg->cil_offset_to_bb [start];
//start must exist in cil_offset_to_bb, since these are IL offsets used by EH, which should have had GET_BBLOCK called for them early on.
g_assert (bb);
if (cfg->verbose_level > 1)
g_print ("FIRST BB for %d is BB_%d\n", start, bb->block_num);
for (; bb && bb->real_offset < end; bb = bb->next_bb) {
//no one claimed this bb, take it.
if (bb->region == -1) {
bb->region = region;
continue;
}
//current region is an early handler, bail
if ((bb->region & (0xf << 4)) != MONO_REGION_TRY) {
continue;
}
//current region is a try, only overwrite if new region is a handler
if ((region & (0xf << 4)) != MONO_REGION_TRY) {
bb->region = region;
}
}
if (cfg->spvars)
mono_create_spvar_for_region (cfg, region);
}
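/*
 * compute_bb_regions: Compute bb->region for every bblock from the method's EH clauses.
 * Handler regions are marked before the corresponding try regions; mark_bb_in_region ()
 * resolves the overlaps.
 */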
static void
compute_bb_regions (MonoCompile *cfg)
{
MonoBasicBlock *bb;
MonoMethodHeader *header = cfg->header;
int i;
for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
bb->region = -1;
for (i = 0; i < header->num_clauses; ++i) {
MonoExceptionClause *clause = &header->clauses [i];
if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER)
mark_bb_in_region (cfg, ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags, clause->data.filter_offset, clause->handler_offset);
guint handler_region;
if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
handler_region = ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
handler_region = ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
else
handler_region = ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
mark_bb_in_region (cfg, handler_region, clause->handler_offset, clause->handler_offset + clause->handler_len);
mark_bb_in_region (cfg, ((i + 1) << 8) | clause->flags, clause->try_offset, clause->try_offset + clause->try_len);
}
if (cfg->verbose_level > 2) {
MonoBasicBlock *bb;
for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
g_print ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
}
}
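/*
 * ip_in_finally_clause: Return TRUE if OFFSET is inside the handler block of a
 * finally or fault clause.
 */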
static gboolean
ip_in_finally_clause (MonoCompile *cfg, int offset)
{
MonoMethodHeader *header = cfg->header;
MonoExceptionClause *clause;
int i;
for (i = 0; i < header->num_clauses; ++i) {
clause = &header->clauses [i];
if (clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY && clause->flags != MONO_EXCEPTION_CLAUSE_FAULT)
continue;
if (MONO_OFFSET_IN_HANDLER (clause, offset))
return TRUE;
}
return FALSE;
}
/* Find clauses between ip and target, from inner to outer */
static GList*
mono_find_leave_clauses (MonoCompile *cfg, guchar *ip, guchar *target)
{
MonoMethodHeader *header = cfg->header;
MonoExceptionClause *clause;
int i;
GList *res = NULL;
for (i = 0; i < header->num_clauses; ++i) {
clause = &header->clauses [i];
if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
(!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
MonoLeaveClause *leave = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoLeaveClause));
leave->index = i;
leave->clause = clause;
res = g_list_append_mempool (cfg->mempool, res, leave);
}
}
return res;
}
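/*
 * mono_create_spvar_for_region: Create, once per EH region, the variable used to save
 * the stack pointer around the handler; it is marked volatile so it stays on the stack.
 */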
static void
mono_create_spvar_for_region (MonoCompile *cfg, int region)
{
MonoInst *var;
var = (MonoInst *)g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
if (var)
return;
var = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL);
/* prevent it from being register allocated */
var->flags |= MONO_INST_VOLATILE;
g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
}
MonoInst *
mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
{
return (MonoInst *)g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
}
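/*
 * mono_create_exvar_for_offset: Create, once per handler offset, the variable holding
 * the exception object; it is marked volatile so it stays addressable on the stack.
 */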
static MonoInst*
mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
{
MonoInst *var;
var = (MonoInst *)g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
if (var)
return var;
var = mono_compile_create_var (cfg, mono_get_object_type (), OP_LOCAL);
/* prevent it from being register allocated */
var->flags |= MONO_INST_VOLATILE;
g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
return var;
}
/*
* Returns the type used in the eval stack when @type is loaded.
* FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
*/
void
mini_type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
{
MonoClass *klass;
type = mini_get_underlying_type (type);
inst->klass = klass = mono_class_from_mono_type_internal (type);
if (m_type_is_byref (type)) {
inst->type = STACK_MP;
return;
}
handle_enum:
switch (type->type) {
case MONO_TYPE_VOID:
inst->type = STACK_INV;
return;
case MONO_TYPE_I1:
case MONO_TYPE_U1:
case MONO_TYPE_I2:
case MONO_TYPE_U2:
case MONO_TYPE_I4:
case MONO_TYPE_U4:
inst->type = STACK_I4;
return;
case MONO_TYPE_I:
case MONO_TYPE_U:
case MONO_TYPE_PTR:
case MONO_TYPE_FNPTR:
inst->type = STACK_PTR;
return;
case MONO_TYPE_CLASS:
case MONO_TYPE_STRING:
case MONO_TYPE_OBJECT:
case MONO_TYPE_SZARRAY:
case MONO_TYPE_ARRAY:
inst->type = STACK_OBJ;
return;
case MONO_TYPE_I8:
case MONO_TYPE_U8:
inst->type = STACK_I8;
return;
case MONO_TYPE_R4:
inst->type = cfg->r4_stack_type;
break;
case MONO_TYPE_R8:
inst->type = STACK_R8;
return;
case MONO_TYPE_VALUETYPE:
if (m_class_is_enumtype (type->data.klass)) {
type = mono_class_enum_basetype_internal (type->data.klass);
goto handle_enum;
} else {
inst->klass = klass;
inst->type = STACK_VTYPE;
return;
}
case MONO_TYPE_TYPEDBYREF:
inst->klass = mono_defaults.typed_reference_class;
inst->type = STACK_VTYPE;
return;
case MONO_TYPE_GENERICINST:
type = m_class_get_byval_arg (type->data.generic_class->container_class);
goto handle_enum;
case MONO_TYPE_VAR:
case MONO_TYPE_MVAR:
g_assert (cfg->gshared);
if (mini_is_gsharedvt_type (type)) {
g_assert (cfg->gsharedvt);
inst->type = STACK_VTYPE;
} else {
mini_type_to_eval_stack_type (cfg, mini_get_underlying_type (type), inst);
}
return;
default:
g_error ("unknown type 0x%02x in eval stack type", type->type);
}
}
/*
* The following tables are used to quickly validate the IL code in type_from_op ().
*/
#define IF_P8(v) (SIZEOF_VOID_P == 8 ? v : STACK_INV)
#define IF_P8_I8 IF_P8(STACK_I8)
#define IF_P8_PTR IF_P8(STACK_PTR)
static const char
bin_num_table [STACK_MAX] [STACK_MAX] = {
{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
{STACK_INV, STACK_I4, IF_P8_I8, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
{STACK_INV, IF_P8_I8, STACK_I8, IF_P8_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
{STACK_INV, STACK_PTR, IF_P8_PTR, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R8},
{STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R4}
};
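/*
 * Read as bin_num_table [type of op1][type of op2]: e.g. STACK_I4 x STACK_PTR yields
 * STACK_PTR (int32 + native int), and the STACK_MP row/column make pointer arithmetic
 * on managed pointers yield STACK_MP again. The 9-entry rows cover STACK_R4 operands.
 */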
static const char
neg_table [] = {
STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R4
};
/* reduce the size of this table */
static const char
bin_int_table [STACK_MAX] [STACK_MAX] = {
{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
{STACK_INV, STACK_I4, IF_P8_I8, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
{STACK_INV, IF_P8_I8, STACK_I8, IF_P8_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
{STACK_INV, STACK_PTR, IF_P8_PTR, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
};
#define P1 (SIZEOF_VOID_P == 8)
static const char
bin_comp_table [STACK_MAX] [STACK_MAX] = {
/* Inv i L p F & O vt r4 */
{0},
{0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
{0, 0, 1,P1, 0, 0, 0, 0}, /* L, int64 */
{0, 1,P1, 1, 0, 2, 4, 0}, /* p, ptr */
{0, 0, 0, 0, 1, 0, 0, 0, 1}, /* F, R8 */
{0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
{0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
{0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
{0, 0, 0, 0, 1, 0, 0, 0, 1}, /* r, r4 */
};
#undef P1
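/*
 * A zero entry marks an invalid comparison. In type_from_op (), OP_CEQ and OP_COMPARE
 * accept any nonzero entry, while the ordered compares (cgt/clt and friends) require
 * bit 0 to be set, so the 2 and 4 entries only permit equality-style checks.
 */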
/* reduce the size of this table */
static const char
shift_table [STACK_MAX] [STACK_MAX] = {
{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
{STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
{STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
{STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
};
/*
* Tables to map from the non-specific opcode to the matching
* type-specific opcode.
*/
/* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
static const guint16
binops_op_map [STACK_MAX] = {
0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD, 0, 0, OP_RADD-CEE_ADD
};
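/*
 * Example of the remapping scheme: with ins->type == STACK_I4, CEE_ADD becomes
 * CEE_ADD + (OP_IADD - CEE_ADD) == OP_IADD; the opcode ranges are laid out so the
 * same per-type offset works for every opcode in the range the table handles.
 */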
/* handles from CEE_NEG to CEE_CONV_U8 */
static const guint16
unops_op_map [STACK_MAX] = {
0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG, 0, 0, OP_RNEG-CEE_NEG
};
/* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
static const guint16
ovfops_op_map [STACK_MAX] = {
0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, 0, OP_RCONV_TO_U2-CEE_CONV_U2
};
/* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
static const guint16
ovf2ops_op_map [STACK_MAX] = {
0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, 0, 0, OP_RCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
};
/* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
static const guint16
ovf3ops_op_map [STACK_MAX] = {
0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, 0, 0, OP_RCONV_TO_OVF_I1-CEE_CONV_OVF_I1
};
/* handles from CEE_BEQ to CEE_BLT_UN */
static const guint16
beqops_op_map [STACK_MAX] = {
0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, 0, OP_FBEQ-CEE_BEQ
};
/* handles from CEE_CEQ to CEE_CLT_UN */
static const guint16
ceqops_op_map [STACK_MAX] = {
0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, 0, OP_RCEQ-OP_CEQ
};
/*
* Sets ins->type (the type on the eval stack) according to the
* type of the opcode and the arguments to it.
* Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
*
* FIXME: this function sets ins->type unconditionally in some cases, but
* it should set it to invalid for some types (a conv.x on an object)
*/
static void
type_from_op (MonoCompile *cfg, MonoInst *ins, MonoInst *src1, MonoInst *src2)
{
switch (ins->opcode) {
/* binops */
case MONO_CEE_ADD:
case MONO_CEE_SUB:
case MONO_CEE_MUL:
case MONO_CEE_DIV:
case MONO_CEE_REM:
/* FIXME: check unverifiable args for STACK_MP */
ins->type = bin_num_table [src1->type] [src2->type];
ins->opcode += binops_op_map [ins->type];
break;
case MONO_CEE_DIV_UN:
case MONO_CEE_REM_UN:
case MONO_CEE_AND:
case MONO_CEE_OR:
case MONO_CEE_XOR:
ins->type = bin_int_table [src1->type] [src2->type];
ins->opcode += binops_op_map [ins->type];
break;
case MONO_CEE_SHL:
case MONO_CEE_SHR:
case MONO_CEE_SHR_UN:
ins->type = shift_table [src1->type] [src2->type];
ins->opcode += binops_op_map [ins->type];
break;
case OP_COMPARE:
case OP_LCOMPARE:
case OP_ICOMPARE:
ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
if ((src1->type == STACK_I8) || ((TARGET_SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
ins->opcode = OP_LCOMPARE;
else if (src1->type == STACK_R4)
ins->opcode = OP_RCOMPARE;
else if (src1->type == STACK_R8)
ins->opcode = OP_FCOMPARE;
else
ins->opcode = OP_ICOMPARE;
break;
case OP_ICOMPARE_IMM:
ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
if ((src1->type == STACK_I8) || ((TARGET_SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
ins->opcode = OP_LCOMPARE_IMM;
break;
case MONO_CEE_BEQ:
case MONO_CEE_BGE:
case MONO_CEE_BGT:
case MONO_CEE_BLE:
case MONO_CEE_BLT:
case MONO_CEE_BNE_UN:
case MONO_CEE_BGE_UN:
case MONO_CEE_BGT_UN:
case MONO_CEE_BLE_UN:
case MONO_CEE_BLT_UN:
ins->opcode += beqops_op_map [src1->type];
break;
case OP_CEQ:
ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
ins->opcode += ceqops_op_map [src1->type];
break;
case OP_CGT:
case OP_CGT_UN:
case OP_CLT:
case OP_CLT_UN:
ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
ins->opcode += ceqops_op_map [src1->type];
break;
/* unops */
case MONO_CEE_NEG:
ins->type = neg_table [src1->type];
ins->opcode += unops_op_map [ins->type];
break;
case MONO_CEE_NOT:
if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
ins->type = src1->type;
else
ins->type = STACK_INV;
ins->opcode += unops_op_map [ins->type];
break;
case MONO_CEE_CONV_I1:
case MONO_CEE_CONV_I2:
case MONO_CEE_CONV_I4:
case MONO_CEE_CONV_U4:
ins->type = STACK_I4;
ins->opcode += unops_op_map [src1->type];
break;
case MONO_CEE_CONV_R_UN:
ins->type = STACK_R8;
switch (src1->type) {
case STACK_I4:
case STACK_PTR:
ins->opcode = OP_ICONV_TO_R_UN;
break;
case STACK_I8:
ins->opcode = OP_LCONV_TO_R_UN;
break;
case STACK_R4:
ins->opcode = OP_RCONV_TO_R8;
break;
case STACK_R8:
ins->opcode = OP_FMOVE;
break;
}
break;
case MONO_CEE_CONV_OVF_I1:
case MONO_CEE_CONV_OVF_U1:
case MONO_CEE_CONV_OVF_I2:
case MONO_CEE_CONV_OVF_U2:
case MONO_CEE_CONV_OVF_I4:
case MONO_CEE_CONV_OVF_U4:
ins->type = STACK_I4;
ins->opcode += ovf3ops_op_map [src1->type];
break;
case MONO_CEE_CONV_OVF_I_UN:
case MONO_CEE_CONV_OVF_U_UN:
ins->type = STACK_PTR;
ins->opcode += ovf2ops_op_map [src1->type];
break;
case MONO_CEE_CONV_OVF_I1_UN:
case MONO_CEE_CONV_OVF_I2_UN:
case MONO_CEE_CONV_OVF_I4_UN:
case MONO_CEE_CONV_OVF_U1_UN:
case MONO_CEE_CONV_OVF_U2_UN:
case MONO_CEE_CONV_OVF_U4_UN:
ins->type = STACK_I4;
ins->opcode += ovf2ops_op_map [src1->type];
break;
case MONO_CEE_CONV_U:
ins->type = STACK_PTR;
switch (src1->type) {
case STACK_I4:
ins->opcode = OP_ICONV_TO_U;
break;
case STACK_PTR:
case STACK_MP:
case STACK_OBJ:
#if TARGET_SIZEOF_VOID_P == 8
ins->opcode = OP_LCONV_TO_U;
#else
ins->opcode = OP_MOVE;
#endif
break;
case STACK_I8:
ins->opcode = OP_LCONV_TO_U;
break;
case STACK_R8:
if (TARGET_SIZEOF_VOID_P == 8)
ins->opcode = OP_FCONV_TO_U8;
else
ins->opcode = OP_FCONV_TO_U4;
break;
case STACK_R4:
if (TARGET_SIZEOF_VOID_P == 8)
ins->opcode = OP_RCONV_TO_U8;
else
ins->opcode = OP_RCONV_TO_U4;
break;
}
break;
case MONO_CEE_CONV_I8:
case MONO_CEE_CONV_U8:
ins->type = STACK_I8;
ins->opcode += unops_op_map [src1->type];
break;
case MONO_CEE_CONV_OVF_I8:
case MONO_CEE_CONV_OVF_U8:
ins->type = STACK_I8;
ins->opcode += ovf3ops_op_map [src1->type];
break;
case MONO_CEE_CONV_OVF_U8_UN:
case MONO_CEE_CONV_OVF_I8_UN:
ins->type = STACK_I8;
ins->opcode += ovf2ops_op_map [src1->type];
break;
case MONO_CEE_CONV_R4:
ins->type = cfg->r4_stack_type;
ins->opcode += unops_op_map [src1->type];
break;
case MONO_CEE_CONV_R8:
ins->type = STACK_R8;
ins->opcode += unops_op_map [src1->type];
break;
case OP_CKFINITE:
ins->type = STACK_R8;
break;
case MONO_CEE_CONV_U2:
case MONO_CEE_CONV_U1:
ins->type = STACK_I4;
ins->opcode += ovfops_op_map [src1->type];
break;
case MONO_CEE_CONV_I:
case MONO_CEE_CONV_OVF_I:
case MONO_CEE_CONV_OVF_U:
ins->type = STACK_PTR;
ins->opcode += ovfops_op_map [src1->type];
break;
case MONO_CEE_ADD_OVF:
case MONO_CEE_ADD_OVF_UN:
case MONO_CEE_MUL_OVF:
case MONO_CEE_MUL_OVF_UN:
case MONO_CEE_SUB_OVF:
case MONO_CEE_SUB_OVF_UN:
ins->type = bin_num_table [src1->type] [src2->type];
ins->opcode += ovfops_op_map [src1->type];
if (ins->type == STACK_R8)
ins->type = STACK_INV;
break;
case OP_LOAD_MEMBASE:
ins->type = STACK_PTR;
break;
case OP_LOADI1_MEMBASE:
case OP_LOADU1_MEMBASE:
case OP_LOADI2_MEMBASE:
case OP_LOADU2_MEMBASE:
case OP_LOADI4_MEMBASE:
case OP_LOADU4_MEMBASE:
ins->type = STACK_PTR;
break;
case OP_LOADI8_MEMBASE:
ins->type = STACK_I8;
break;
case OP_LOADR4_MEMBASE:
ins->type = cfg->r4_stack_type;
break;
case OP_LOADR8_MEMBASE:
ins->type = STACK_R8;
break;
default:
g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
break;
}
if (ins->type == STACK_MP) {
if (src1->type == STACK_MP)
ins->klass = src1->klass;
else
ins->klass = mono_defaults.object_class;
}
}
void
mini_type_from_op (MonoCompile *cfg, MonoInst *ins, MonoInst *src1, MonoInst *src2)
{
type_from_op (cfg, ins, src1, src2);
}
static MonoClass*
ldind_to_type (int op)
{
switch (op) {
case MONO_CEE_LDIND_I1: return mono_defaults.sbyte_class;
case MONO_CEE_LDIND_U1: return mono_defaults.byte_class;
case MONO_CEE_LDIND_I2: return mono_defaults.int16_class;
case MONO_CEE_LDIND_U2: return mono_defaults.uint16_class;
case MONO_CEE_LDIND_I4: return mono_defaults.int32_class;
case MONO_CEE_LDIND_U4: return mono_defaults.uint32_class;
case MONO_CEE_LDIND_I8: return mono_defaults.int64_class;
case MONO_CEE_LDIND_I: return mono_defaults.int_class;
case MONO_CEE_LDIND_R4: return mono_defaults.single_class;
case MONO_CEE_LDIND_R8: return mono_defaults.double_class;
case MONO_CEE_LDIND_REF:return mono_defaults.object_class; //FIXME we should try to return a more specific type
default: g_error ("Unknown ldind type %d", op);
}
}
static MonoClass*
stind_to_type (int op)
{
switch (op) {
case MONO_CEE_STIND_I1: return mono_defaults.sbyte_class;
case MONO_CEE_STIND_I2: return mono_defaults.int16_class;
case MONO_CEE_STIND_I4: return mono_defaults.int32_class;
case MONO_CEE_STIND_I8: return mono_defaults.int64_class;
case MONO_CEE_STIND_I: return mono_defaults.int_class;
case MONO_CEE_STIND_R4: return mono_defaults.single_class;
case MONO_CEE_STIND_R8: return mono_defaults.double_class;
case MONO_CEE_STIND_REF: return mono_defaults.object_class;
default: g_error ("Unknown stind type %d", op);
}
}
#if 0
static const char
param_table [STACK_MAX] [STACK_MAX] = {
{0},
};
static int
check_values_to_signature (MonoInst *args, MonoType *this_ins, MonoMethodSignature *sig)
{
int i;
if (sig->hasthis) {
switch (args->type) {
case STACK_I4:
case STACK_I8:
case STACK_R8:
case STACK_VTYPE:
case STACK_INV:
return 0;
}
args++;
}
for (i = 0; i < sig->param_count; ++i) {
switch (args [i].type) {
case STACK_INV:
return 0;
case STACK_MP:
if (!m_type_is_byref (sig->params [i]))
return 0;
continue;
case STACK_OBJ:
if (m_type_is_byref (sig->params [i]))
return 0;
switch (sig->params [i]->type) {
case MONO_TYPE_CLASS:
case MONO_TYPE_STRING:
case MONO_TYPE_OBJECT:
case MONO_TYPE_SZARRAY:
case MONO_TYPE_ARRAY:
break;
default:
return 0;
}
continue;
case STACK_R8:
if (m_type_is_byref (sig->params [i]))
return 0;
if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
return 0;
continue;
case STACK_PTR:
case STACK_I4:
case STACK_I8:
case STACK_VTYPE:
break;
}
/*if (!param_table [args [i].type] [sig->params [i]->type])
return 0;*/
}
return 1;
}
#endif
/*
* The got_var contains the address of the Global Offset Table when AOT
* compiling.
*/
MonoInst *
mono_get_got_var (MonoCompile *cfg)
{
if (!cfg->compile_aot || !cfg->backend->need_got_var || cfg->llvm_only)
return NULL;
if (!cfg->got_var) {
cfg->got_var = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL);
}
return cfg->got_var;
}
static void
mono_create_rgctx_var (MonoCompile *cfg)
{
if (!cfg->rgctx_var) {
cfg->rgctx_var = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL);
/* force the var to be stack allocated */
if (!cfg->llvm_only)
cfg->rgctx_var->flags |= MONO_INST_VOLATILE;
}
}
static MonoInst *
mono_get_mrgctx_var (MonoCompile *cfg)
{
g_assert (cfg->gshared);
mono_create_rgctx_var (cfg);
return cfg->rgctx_var;
}
static MonoInst *
mono_get_vtable_var (MonoCompile *cfg)
{
g_assert (cfg->gshared);
/* The mrgctx and the vtable are stored in the same var */
mono_create_rgctx_var (cfg);
return cfg->rgctx_var;
}
static MonoType*
type_from_stack_type (MonoInst *ins) {
switch (ins->type) {
case STACK_I4: return mono_get_int32_type ();
case STACK_I8: return m_class_get_byval_arg (mono_defaults.int64_class);
case STACK_PTR: return mono_get_int_type ();
case STACK_R4: return m_class_get_byval_arg (mono_defaults.single_class);
case STACK_R8: return m_class_get_byval_arg (mono_defaults.double_class);
case STACK_MP:
return m_class_get_this_arg (ins->klass);
case STACK_OBJ: return mono_get_object_type ();
case STACK_VTYPE: return m_class_get_byval_arg (ins->klass);
default:
g_error ("stack type %d to monotype not handled\n", ins->type);
}
return NULL;
}
MonoStackType
mini_type_to_stack_type (MonoCompile *cfg, MonoType *t)
{
t = mini_type_get_underlying_type (t);
switch (t->type) {
case MONO_TYPE_I1:
case MONO_TYPE_U1:
case MONO_TYPE_I2:
case MONO_TYPE_U2:
case MONO_TYPE_I4:
case MONO_TYPE_U4:
return STACK_I4;
case MONO_TYPE_I:
case MONO_TYPE_U:
case MONO_TYPE_PTR:
case MONO_TYPE_FNPTR:
return STACK_PTR;
case MONO_TYPE_CLASS:
case MONO_TYPE_STRING:
case MONO_TYPE_OBJECT:
case MONO_TYPE_SZARRAY:
case MONO_TYPE_ARRAY:
return STACK_OBJ;
case MONO_TYPE_I8:
case MONO_TYPE_U8:
return STACK_I8;
case MONO_TYPE_R4:
return (MonoStackType)cfg->r4_stack_type;
case MONO_TYPE_R8:
return STACK_R8;
case MONO_TYPE_VALUETYPE:
case MONO_TYPE_TYPEDBYREF:
return STACK_VTYPE;
case MONO_TYPE_GENERICINST:
if (mono_type_generic_inst_is_valuetype (t))
return STACK_VTYPE;
else
return STACK_OBJ;
break;
default:
g_assert_not_reached ();
}
return (MonoStackType)-1;
}
static MonoClass*
array_access_to_klass (int opcode)
{
switch (opcode) {
case MONO_CEE_LDELEM_U1:
return mono_defaults.byte_class;
case MONO_CEE_LDELEM_U2:
return mono_defaults.uint16_class;
case MONO_CEE_LDELEM_I:
case MONO_CEE_STELEM_I:
return mono_defaults.int_class;
case MONO_CEE_LDELEM_I1:
case MONO_CEE_STELEM_I1:
return mono_defaults.sbyte_class;
case MONO_CEE_LDELEM_I2:
case MONO_CEE_STELEM_I2:
return mono_defaults.int16_class;
case MONO_CEE_LDELEM_I4:
case MONO_CEE_STELEM_I4:
return mono_defaults.int32_class;
case MONO_CEE_LDELEM_U4:
return mono_defaults.uint32_class;
case MONO_CEE_LDELEM_I8:
case MONO_CEE_STELEM_I8:
return mono_defaults.int64_class;
case MONO_CEE_LDELEM_R4:
case MONO_CEE_STELEM_R4:
return mono_defaults.single_class;
case MONO_CEE_LDELEM_R8:
case MONO_CEE_STELEM_R8:
return mono_defaults.double_class;
case MONO_CEE_LDELEM_REF:
case MONO_CEE_STELEM_REF:
return mono_defaults.object_class;
default:
g_assert_not_reached ();
}
return NULL;
}
/*
* We try to share variables when possible
*/
static MonoInst *
mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
{
MonoInst *res;
int pos, vnum;
MonoType *type;
type = type_from_stack_type (ins);
/* inlining can result in deeper stacks */
if (cfg->inline_depth || slot >= cfg->header->max_stack)
return mono_compile_create_var (cfg, type, OP_LOCAL);
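/* One shared variable is cached in cfg->intvars per (stack slot, stack type) pair. */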
pos = ins->type - 1 + slot * STACK_MAX;
switch (ins->type) {
case STACK_I4:
case STACK_I8:
case STACK_R8:
case STACK_PTR:
case STACK_MP:
case STACK_OBJ:
if ((vnum = cfg->intvars [pos]))
return cfg->varinfo [vnum];
res = mono_compile_create_var (cfg, type, OP_LOCAL);
cfg->intvars [pos] = res->inst_c0;
break;
default:
res = mono_compile_create_var (cfg, type, OP_LOCAL);
}
return res;
}
static void
mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
{
/*
* Don't use this if a generic_context is set, since that means AOT can't
* look up the method using just the image+token.
* table == 0 means this is a reference made from a wrapper.
*/
if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
MonoJumpInfoToken *jump_info_token = (MonoJumpInfoToken *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
jump_info_token->image = image;
jump_info_token->token = token;
g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
}
}
/*
* This function is called to handle items that are left on the evaluation stack
* at basic block boundaries. What happens is that we save the values to local variables
* and we reload them later when first entering the target basic block (with the
* handle_loaded_temps () function).
 * A single join point will use the same variables (stored in the array bb->out_stack or
 * bb->in_stack, if the basic block is before or after the join point).
*
* This function needs to be called _before_ emitting the last instruction of
* the bb (i.e. before emitting a branch).
* If the stack merge fails at a join point, cfg->unverifiable is set.
*/
static void
handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
{
int i, bindex;
MonoBasicBlock *bb = cfg->cbb;
MonoBasicBlock *outb;
MonoInst *inst, **locals;
gboolean found;
if (!count)
return;
if (cfg->verbose_level > 3)
printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
if (!bb->out_scount) {
bb->out_scount = count;
//printf ("bblock %d has out:", bb->block_num);
found = FALSE;
for (i = 0; i < bb->out_count; ++i) {
outb = bb->out_bb [i];
/* exception handlers are linked, but they should not be considered for stack args */
if (outb->flags & BB_EXCEPTION_HANDLER)
continue;
//printf (" %d", outb->block_num);
if (outb->in_stack) {
found = TRUE;
bb->out_stack = outb->in_stack;
break;
}
}
//printf ("\n");
if (!found) {
bb->out_stack = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
for (i = 0; i < count; ++i) {
/*
 * try to reuse temps already allocated for this purpose, if they occupy the same
* stack slot and if they are of the same type.
* This won't cause conflicts since if 'local' is used to
* store one of the values in the in_stack of a bblock, then
* the same variable will be used for the same outgoing stack
* slot as well.
* This doesn't work when inlining methods, since the bblocks
* in the inlined methods do not inherit their in_stack from
* the bblock they are inlined to. See bug #58863 for an
* example.
*/
bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
}
}
}
for (i = 0; i < bb->out_count; ++i) {
outb = bb->out_bb [i];
/* exception handlers are linked, but they should not be considered for stack args */
if (outb->flags & BB_EXCEPTION_HANDLER)
continue;
if (outb->in_scount) {
if (outb->in_scount != bb->out_scount) {
cfg->unverifiable = TRUE;
return;
}
continue; /* check they are the same locals */
}
outb->in_scount = count;
outb->in_stack = bb->out_stack;
}
locals = bb->out_stack;
cfg->cbb = bb;
for (i = 0; i < count; ++i) {
sp [i] = convert_value (cfg, locals [i]->inst_vtype, sp [i]);
EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
inst->cil_code = sp [i]->cil_code;
sp [i] = locals [i];
if (cfg->verbose_level > 3)
printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
}
/*
* It is possible that the out bblocks already have in_stack assigned, and
* the in_stacks differ. In this case, we will store to all the different
* in_stacks.
*/
found = TRUE;
bindex = 0;
while (found) {
/* Find a bblock which has a different in_stack */
found = FALSE;
while (bindex < bb->out_count) {
outb = bb->out_bb [bindex];
/* exception handlers are linked, but they should not be considered for stack args */
if (outb->flags & BB_EXCEPTION_HANDLER) {
bindex++;
continue;
}
if (outb->in_stack != locals) {
for (i = 0; i < count; ++i) {
sp [i] = convert_value (cfg, outb->in_stack [i]->inst_vtype, sp [i]);
EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
inst->cil_code = sp [i]->cil_code;
sp [i] = locals [i];
if (cfg->verbose_level > 3)
printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
}
locals = outb->in_stack;
found = TRUE;
break;
}
bindex ++;
}
}
}
MonoInst*
mini_emit_runtime_constant (MonoCompile *cfg, MonoJumpInfoType patch_type, gpointer data)
{
MonoInst *ins;
if (cfg->compile_aot) {
MONO_DISABLE_WARNING (4306) // 'type cast': conversion from 'MonoJumpInfoType' to 'MonoInst *' of greater size
EMIT_NEW_AOTCONST (cfg, ins, patch_type, data);
MONO_RESTORE_WARNING
} else {
MonoJumpInfo ji;
gpointer target;
ERROR_DECL (error);
ji.type = patch_type;
ji.data.target = data;
target = mono_resolve_patch_target_ext (cfg->mem_manager, NULL, NULL, &ji, FALSE, error);
mono_error_assert_ok (error);
EMIT_NEW_PCONST (cfg, ins, target);
}
return ins;
}
static MonoInst*
mono_create_fast_tls_getter (MonoCompile *cfg, MonoTlsKey key)
{
int tls_offset = mono_tls_get_tls_offset (key);
if (cfg->compile_aot)
return NULL;
if (tls_offset != -1 && mono_arch_have_fast_tls ()) {
MonoInst *ins;
MONO_INST_NEW (cfg, ins, OP_TLS_GET);
ins->dreg = mono_alloc_preg (cfg);
ins->inst_offset = tls_offset;
return ins;
}
return NULL;
}
static MonoInst*
mono_create_tls_get (MonoCompile *cfg, MonoTlsKey key)
{
MonoInst *fast_tls = NULL;
if (!mini_debug_options.use_fallback_tls)
fast_tls = mono_create_fast_tls_getter (cfg, key);
if (fast_tls) {
MONO_ADD_INS (cfg->cbb, fast_tls);
return fast_tls;
}
const MonoJitICallId jit_icall_id = mono_get_tls_key_to_jit_icall_id (key);
if (cfg->compile_aot && !cfg->llvm_only) {
MonoInst *addr;
/*
* tls getters are critical pieces of code and we don't want to resolve them
* through the standard plt/tramp mechanism since we might expose ourselves
* to crashes and infinite recursions.
 * Hence the NOCALL part of MONO_PATCH_INFO_JIT_ICALL_ADDR_NOCALL, and FALSE for is_plt_patch.
*/
EMIT_NEW_AOTCONST (cfg, addr, MONO_PATCH_INFO_JIT_ICALL_ADDR_NOCALL, GUINT_TO_POINTER (jit_icall_id));
return mini_emit_calli (cfg, mono_icall_sig_ptr, NULL, addr, NULL, NULL);
} else {
return mono_emit_jit_icall_id (cfg, jit_icall_id, NULL);
}
}
/*
* emit_push_lmf:
*
* Emit IR to push the current LMF onto the LMF stack.
*/
static void
emit_push_lmf (MonoCompile *cfg)
{
/*
* Emit IR to push the LMF:
* lmf_addr = <lmf_addr from tls>
* lmf->lmf_addr = lmf_addr
* lmf->prev_lmf = *lmf_addr
* *lmf_addr = lmf
*/
MonoInst *ins, *lmf_ins;
if (!cfg->lmf_ir)
return;
int lmf_reg, prev_lmf_reg;
/*
* Store lmf_addr in a variable, so it can be allocated to a global register.
*/
if (!cfg->lmf_addr_var)
cfg->lmf_addr_var = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL);
if (!cfg->lmf_var) {
MonoInst *lmf_var = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL);
lmf_var->flags |= MONO_INST_VOLATILE;
lmf_var->flags |= MONO_INST_LMF;
cfg->lmf_var = lmf_var;
}
lmf_ins = mono_create_tls_get (cfg, TLS_KEY_LMF_ADDR);
g_assert (lmf_ins);
lmf_ins->dreg = cfg->lmf_addr_var->dreg;
EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
lmf_reg = ins->dreg;
prev_lmf_reg = alloc_preg (cfg);
/* Save previous_lmf */
EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, cfg->lmf_addr_var->dreg, 0);
if (cfg->deopt)
/* Mark this as an LMFExt */
EMIT_NEW_BIALU_IMM (cfg, ins, OP_POR_IMM, prev_lmf_reg, prev_lmf_reg, 2);
EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), prev_lmf_reg);
/* Set new lmf */
EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, cfg->lmf_addr_var->dreg, 0, lmf_reg);
}
/*
* emit_pop_lmf:
*
* Emit IR to pop the current LMF from the LMF stack.
*/
static void
emit_pop_lmf (MonoCompile *cfg)
{
int lmf_reg, lmf_addr_reg;
MonoInst *ins;
if (!cfg->lmf_ir)
return;
EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
lmf_reg = ins->dreg;
int prev_lmf_reg;
/*
* Emit IR to pop the LMF:
* *(lmf->lmf_addr) = lmf->prev_lmf
*/
/* This could be called before emit_push_lmf () */
if (!cfg->lmf_addr_var)
cfg->lmf_addr_var = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL);
lmf_addr_reg = cfg->lmf_addr_var->dreg;
prev_lmf_reg = alloc_preg (cfg);
EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
if (cfg->deopt)
/* Clear out the bit set by push_lmf () to mark this as LMFExt */
EMIT_NEW_BIALU_IMM (cfg, ins, OP_PXOR_IMM, prev_lmf_reg, prev_lmf_reg, 2);
EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_addr_reg, 0, prev_lmf_reg);
}
/*
* target_type_is_incompatible:
* @cfg: MonoCompile context
*
* Check that the item @arg on the evaluation stack can be stored
* in the target type (can be a local, or field, etc).
* The cfg arg can be used to check if we need verification or just
* validity checks.
*
* Returns: non-0 value if arg can't be stored on a target.
*/
static int
target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
{
MonoType *simple_type;
MonoClass *klass;
if (m_type_is_byref (target)) {
/* FIXME: check that the pointed to types match */
if (arg->type == STACK_MP) {
/* This is needed to handle gshared types + ldaddr. We lower the types so we can handle enums and other typedef-like types. */
MonoClass *target_class_lowered = mono_class_from_mono_type_internal (mini_get_underlying_type (m_class_get_byval_arg (mono_class_from_mono_type_internal (target))));
MonoClass *source_class_lowered = mono_class_from_mono_type_internal (mini_get_underlying_type (m_class_get_byval_arg (arg->klass)));
/* if the target is native int& or X* or same type */
if (target->type == MONO_TYPE_I || target->type == MONO_TYPE_PTR || target_class_lowered == source_class_lowered)
return 0;
/* Both are primitive type byrefs and the source points to a larger type than the destination */
if (MONO_TYPE_IS_PRIMITIVE_SCALAR (m_class_get_byval_arg (target_class_lowered)) && MONO_TYPE_IS_PRIMITIVE_SCALAR (m_class_get_byval_arg (source_class_lowered)) &&
mono_class_instance_size (target_class_lowered) <= mono_class_instance_size (source_class_lowered))
return 0;
return 1;
}
if (arg->type == STACK_PTR)
return 0;
return 1;
}
simple_type = mini_get_underlying_type (target);
switch (simple_type->type) {
case MONO_TYPE_VOID:
return 1;
case MONO_TYPE_I1:
case MONO_TYPE_U1:
case MONO_TYPE_I2:
case MONO_TYPE_U2:
case MONO_TYPE_I4:
case MONO_TYPE_U4:
if (arg->type != STACK_I4 && arg->type != STACK_PTR)
return 1;
return 0;
case MONO_TYPE_PTR:
/* STACK_MP is needed when setting pinned locals */
if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
#if SIZEOF_VOID_P == 8
if (arg->type != STACK_I8)
#endif
return 1;
return 0;
case MONO_TYPE_I:
case MONO_TYPE_U:
case MONO_TYPE_FNPTR:
/*
 * Some opcodes like ldloca return 'transient pointers' which can be stored
 * in native int. (#688008).
*/
if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
return 1;
return 0;
case MONO_TYPE_CLASS:
case MONO_TYPE_STRING:
case MONO_TYPE_OBJECT:
case MONO_TYPE_SZARRAY:
case MONO_TYPE_ARRAY:
if (arg->type != STACK_OBJ)
return 1;
/* FIXME: check type compatibility */
return 0;
case MONO_TYPE_I8:
case MONO_TYPE_U8:
if (arg->type != STACK_I8)
#if SIZEOF_VOID_P == 8
if (arg->type != STACK_PTR)
#endif
return 1;
return 0;
case MONO_TYPE_R4:
if (arg->type != cfg->r4_stack_type)
return 1;
return 0;
case MONO_TYPE_R8:
if (arg->type != STACK_R8)
return 1;
return 0;
case MONO_TYPE_VALUETYPE:
if (arg->type != STACK_VTYPE)
return 1;
klass = mono_class_from_mono_type_internal (simple_type);
if (klass != arg->klass)
return 1;
return 0;
case MONO_TYPE_TYPEDBYREF:
if (arg->type != STACK_VTYPE)
return 1;
klass = mono_class_from_mono_type_internal (simple_type);
if (klass != arg->klass)
return 1;
return 0;
case MONO_TYPE_GENERICINST:
if (mono_type_generic_inst_is_valuetype (simple_type)) {
MonoClass *target_class;
if (arg->type != STACK_VTYPE)
return 1;
klass = mono_class_from_mono_type_internal (simple_type);
target_class = mono_class_from_mono_type_internal (target);
/* The second and third checks are needed when doing partial sharing */
if (klass != arg->klass && target_class != arg->klass && target_class != mono_class_from_mono_type_internal (mini_get_underlying_type (m_class_get_byval_arg (arg->klass))))
return 1;
return 0;
} else {
if (arg->type != STACK_OBJ)
return 1;
/* FIXME: check type compatibility */
return 0;
}
case MONO_TYPE_VAR:
case MONO_TYPE_MVAR:
g_assert (cfg->gshared);
if (mini_type_var_is_vt (simple_type)) {
if (arg->type != STACK_VTYPE)
return 1;
} else {
if (arg->type != STACK_OBJ)
return 1;
}
return 0;
default:
g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
}
return 1;
}
/*
* convert_value:
*
 * Emit some implicit conversions which are not part of the .NET spec, but are allowed by MS.NET.
*/
static MonoInst*
convert_value (MonoCompile *cfg, MonoType *type, MonoInst *ins)
{
type = mini_get_underlying_type (type);
switch (type->type) {
case MONO_TYPE_R4:
if (ins->type == STACK_R8) {
int dreg = alloc_freg (cfg);
MonoInst *conv;
EMIT_NEW_UNALU (cfg, conv, OP_FCONV_TO_R4, dreg, ins->dreg);
conv->type = STACK_R4;
return conv;
}
break;
case MONO_TYPE_R8:
if (ins->type == STACK_R4) {
int dreg = alloc_freg (cfg);
MonoInst *conv;
EMIT_NEW_UNALU (cfg, conv, OP_RCONV_TO_R8, dreg, ins->dreg);
conv->type = STACK_R8;
return conv;
}
break;
default:
break;
}
return ins;
}
/*
* Prepare arguments for passing to a function call.
* Return a non-zero value if the arguments can't be passed to the given
* signature.
* The type checks are not yet complete and some conversions may need
* casts on 32 or 64 bit architectures.
*
* FIXME: implement this using target_type_is_incompatible ()
*/
static gboolean
check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
{
MonoType *simple_type;
int i;
if (sig->hasthis) {
if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
return TRUE;
args++;
}
for (i = 0; i < sig->param_count; ++i) {
if (m_type_is_byref (sig->params [i])) {
if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
return TRUE;
continue;
}
simple_type = mini_get_underlying_type (sig->params [i]);
handle_enum:
switch (simple_type->type) {
case MONO_TYPE_VOID:
return TRUE;
case MONO_TYPE_I1:
case MONO_TYPE_U1:
case MONO_TYPE_I2:
case MONO_TYPE_U2:
case MONO_TYPE_I4:
case MONO_TYPE_U4:
if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
return TRUE;
continue;
case MONO_TYPE_I:
case MONO_TYPE_U:
if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
return TRUE;
continue;
case MONO_TYPE_PTR:
case MONO_TYPE_FNPTR:
if (args [i]->type != STACK_I4 && !(SIZEOF_VOID_P == 8 && args [i]->type == STACK_I8) &&
args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
return TRUE;
continue;
case MONO_TYPE_CLASS:
case MONO_TYPE_STRING:
case MONO_TYPE_OBJECT:
case MONO_TYPE_SZARRAY:
case MONO_TYPE_ARRAY:
if (args [i]->type != STACK_OBJ)
return TRUE;
continue;
case MONO_TYPE_I8:
case MONO_TYPE_U8:
if (args [i]->type != STACK_I8 &&
!(SIZEOF_VOID_P == 8 && (args [i]->type == STACK_I4 || args [i]->type == STACK_PTR)))
return TRUE;
continue;
case MONO_TYPE_R4:
if (args [i]->type != cfg->r4_stack_type)
return TRUE;
continue;
case MONO_TYPE_R8:
if (args [i]->type != STACK_R8)
return TRUE;
continue;
case MONO_TYPE_VALUETYPE:
if (m_class_is_enumtype (simple_type->data.klass)) {
simple_type = mono_class_enum_basetype_internal (simple_type->data.klass);
goto handle_enum;
}
if (args [i]->type != STACK_VTYPE)
return TRUE;
continue;
case MONO_TYPE_TYPEDBYREF:
if (args [i]->type != STACK_VTYPE)
return TRUE;
continue;
case MONO_TYPE_GENERICINST:
simple_type = m_class_get_byval_arg (simple_type->data.generic_class->container_class);
goto handle_enum;
case MONO_TYPE_VAR:
case MONO_TYPE_MVAR:
/* gsharedvt */
if (args [i]->type != STACK_VTYPE)
return TRUE;
continue;
default:
g_error ("unknown type 0x%02x in check_call_signature",
simple_type->type);
}
}
return FALSE;
}
MonoJumpInfo *
mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
{
MonoJumpInfo *ji = (MonoJumpInfo *)mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
ji->ip.i = ip;
ji->type = type;
ji->data.target = target;
return ji;
}
int
mini_class_check_context_used (MonoCompile *cfg, MonoClass *klass)
{
if (cfg->gshared)
return mono_class_check_context_used (klass);
else
return 0;
}
int
mini_method_check_context_used (MonoCompile *cfg, MonoMethod *method)
{
if (cfg->gshared)
return mono_method_check_context_used (method);
else
return 0;
}
/*
* check_method_sharing:
*
 * Check whether the vtable or an mrgctx needs to be passed when calling CMETHOD.
*/
static void
check_method_sharing (MonoCompile *cfg, MonoMethod *cmethod, gboolean *out_pass_vtable, gboolean *out_pass_mrgctx)
{
gboolean pass_vtable = FALSE;
gboolean pass_mrgctx = FALSE;
if (((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || m_class_is_valuetype (cmethod->klass)) &&
(mono_class_is_ginst (cmethod->klass) || mono_class_is_gtd (cmethod->klass))) {
gboolean sharable = FALSE;
if (mono_method_is_generic_sharable_full (cmethod, TRUE, TRUE, TRUE))
sharable = TRUE;
/*
* Pass vtable iff target method might
* be shared, which means that sharing
* is enabled for its class and its
* context is sharable (and it's not a
* generic method).
*/
if (sharable && !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
pass_vtable = TRUE;
}
if (mini_method_needs_mrgctx (cmethod)) {
if (mini_method_is_default_method (cmethod))
pass_vtable = FALSE;
else
g_assert (!pass_vtable);
if (mono_method_is_generic_sharable_full (cmethod, TRUE, TRUE, TRUE)) {
pass_mrgctx = TRUE;
} else {
if (cfg->gsharedvt && mini_is_gsharedvt_signature (mono_method_signature_internal (cmethod)))
pass_mrgctx = TRUE;
}
}
if (out_pass_vtable)
*out_pass_vtable = pass_vtable;
if (out_pass_mrgctx)
*out_pass_mrgctx = pass_mrgctx;
}
static gboolean
direct_icalls_enabled (MonoCompile *cfg, MonoMethod *method)
{
if (cfg->gen_sdb_seq_points || cfg->disable_direct_icalls)
return FALSE;
if (method && cfg->compile_aot && mono_aot_direct_icalls_enabled_for_method (cfg, method))
return TRUE;
/* LLVM on amd64 can't handle calls to non-32 bit addresses */
#ifdef TARGET_AMD64
if (cfg->compile_llvm && !cfg->llvm_only)
return FALSE;
#endif
return FALSE;
}
MonoInst*
mono_emit_jit_icall_by_info (MonoCompile *cfg, int il_offset, MonoJitICallInfo *info, MonoInst **args)
{
/*
* Call the jit icall without a wrapper if possible.
* The wrapper is needed to be able to do stack walks for asynchronously suspended
* threads when debugging.
*/
if (direct_icalls_enabled (cfg, NULL)) {
int costs;
if (!info->wrapper_method) {
info->wrapper_method = mono_marshal_get_icall_wrapper (info, TRUE);
mono_memory_barrier ();
}
/*
* Inline the wrapper method, which is basically a call to the C icall, and
* an exception check.
*/
costs = inline_method (cfg, info->wrapper_method, NULL,
args, NULL, il_offset, TRUE, NULL);
g_assert (costs > 0);
g_assert (!MONO_TYPE_IS_VOID (info->sig->ret));
return args [0];
}
return mono_emit_jit_icall_id (cfg, mono_jit_icall_info_id (info), args);
}
static MonoInst*
mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
{
if (!MONO_TYPE_IS_VOID (fsig->ret)) {
if ((fsig->pinvoke || LLVM_ENABLED) && !m_type_is_byref (fsig->ret)) {
int widen_op = -1;
/*
 * Native code might return non-register-sized integers
* without initializing the upper bits.
*/
switch (mono_type_to_load_membase (cfg, fsig->ret)) {
case OP_LOADI1_MEMBASE:
widen_op = OP_ICONV_TO_I1;
break;
case OP_LOADU1_MEMBASE:
widen_op = OP_ICONV_TO_U1;
break;
case OP_LOADI2_MEMBASE:
widen_op = OP_ICONV_TO_I2;
break;
case OP_LOADU2_MEMBASE:
widen_op = OP_ICONV_TO_U2;
break;
default:
break;
}
if (widen_op != -1) {
int dreg = alloc_preg (cfg);
MonoInst *widen;
EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
widen->type = ins->type;
ins = widen;
}
}
}
return ins;
}
static MonoInst*
emit_get_rgctx_method (MonoCompile *cfg, int context_used,
MonoMethod *cmethod, MonoRgctxInfoType rgctx_type);
static void
emit_method_access_failure (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
{
MonoInst *args [2];
args [0] = emit_get_rgctx_method (cfg, mono_method_check_context_used (caller), caller, MONO_RGCTX_INFO_METHOD);
args [1] = emit_get_rgctx_method (cfg, mono_method_check_context_used (callee), callee, MONO_RGCTX_INFO_METHOD);
mono_emit_jit_icall (cfg, mono_throw_method_access, args);
}
static void
emit_bad_image_failure (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
{
mono_emit_jit_icall (cfg, mono_throw_bad_image, NULL);
}
static void
emit_not_supported_failure (MonoCompile *cfg)
{
mono_emit_jit_icall (cfg, mono_throw_not_supported, NULL);
}
static void
emit_invalid_program_with_msg (MonoCompile *cfg, MonoError *error_msg, MonoMethod *caller, MonoMethod *callee)
{
g_assert (!is_ok (error_msg));
char *str = mono_mem_manager_strdup (cfg->mem_manager, mono_error_get_message (error_msg));
MonoInst *iargs[1];
if (cfg->compile_aot)
EMIT_NEW_LDSTRLITCONST (cfg, iargs [0], str);
else
EMIT_NEW_PCONST (cfg, iargs [0], str);
mono_emit_jit_icall (cfg, mono_throw_invalid_program, iargs);
}
// FIXME Consolidate the multiple functions named get_method_nofail.
static MonoMethod*
get_method_nofail (MonoClass *klass, const char *method_name, int num_params, int flags)
{
MonoMethod *method;
ERROR_DECL (error);
method = mono_class_get_method_from_name_checked (klass, method_name, num_params, flags, error);
mono_error_assert_ok (error);
g_assertf (method, "Could not lookup method %s in %s", method_name, m_class_get_name (klass));
return method;
}
MonoMethod*
mini_get_memcpy_method (void)
{
static MonoMethod *memcpy_method = NULL;
if (!memcpy_method) {
memcpy_method = get_method_nofail (mono_defaults.string_class, "memcpy", 3, 0);
if (!memcpy_method)
g_error ("Old corlib found. Install a new one");
}
return memcpy_method;
}
MonoInst*
mini_emit_storing_write_barrier (MonoCompile *cfg, MonoInst *ptr, MonoInst *value)
{
MonoInst *store;
/*
* Add a release memory barrier so the object contents are flushed
* to memory before storing the reference into another object.
*/
if (!mini_debug_options.weak_memory_model)
mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
EMIT_NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, ptr->dreg, 0, value->dreg);
mini_emit_write_barrier (cfg, ptr, value);
return store;
}
void
mini_emit_write_barrier (MonoCompile *cfg, MonoInst *ptr, MonoInst *value)
{
int card_table_shift_bits;
target_mgreg_t card_table_mask;
guint8 *card_table;
MonoInst *dummy_use;
int nursery_shift_bits;
size_t nursery_size;
if (!cfg->gen_write_barriers)
return;
//method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !MONO_INS_IS_PCONST_NULL (sp [1])
card_table = mono_gc_get_target_card_table (&card_table_shift_bits, &card_table_mask);
mono_gc_get_nursery (&nursery_shift_bits, &nursery_size);
if (cfg->backend->have_card_table_wb && !cfg->compile_aot && card_table && nursery_shift_bits > 0 && !COMPILE_LLVM (cfg)) {
MonoInst *wbarrier;
MONO_INST_NEW (cfg, wbarrier, OP_CARD_TABLE_WBARRIER);
wbarrier->sreg1 = ptr->dreg;
wbarrier->sreg2 = value->dreg;
MONO_ADD_INS (cfg->cbb, wbarrier);
} else if (card_table) {
int offset_reg = alloc_preg (cfg);
int card_reg;
MonoInst *ins;
/*
 * We emit a fast, lightweight write barrier. It always marks cards, as the concurrent
 * collector requires, so for the serial collector it might slightly slow down nursery
* collections. We also expect that the host system and the target system have the same card
* table configuration, which is the case if they have the same pointer size.
*/
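/* Conceptually: card_table [(ptr >> card_table_shift_bits) & card_table_mask] = 1, with the mask applied only when nonzero. */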
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_UN_IMM, offset_reg, ptr->dreg, card_table_shift_bits);
if (card_table_mask)
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, offset_reg, offset_reg, card_table_mask);
/* We can't use PADD_IMM since the card table might end up at a high address and amd64
 * doesn't support IMMs larger than 32 bits.
*/
ins = mini_emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR, NULL);
card_reg = ins->dreg;
MONO_EMIT_NEW_BIALU (cfg, OP_PADD, offset_reg, offset_reg, card_reg);
MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, offset_reg, 0, 1);
} else {
MonoMethod *write_barrier = mono_gc_get_write_barrier ();
mono_emit_method_call (cfg, write_barrier, &ptr, NULL);
}
EMIT_NEW_DUMMY_USE (cfg, dummy_use, value);
}
MonoMethod*
mini_get_memset_method (void)
{
static MonoMethod *memset_method = NULL;
if (!memset_method) {
memset_method = get_method_nofail (mono_defaults.string_class, "memset", 3, 0);
if (!memset_method)
g_error ("Old corlib found. Install a new one");
}
return memset_method;
}
void
mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
{
MonoInst *iargs [3];
int n;
guint32 align;
MonoMethod *memset_method;
MonoInst *size_ins = NULL;
MonoInst *bzero_ins = NULL;
static MonoMethod *bzero_method;
/* FIXME: Optimize this for the case when dest is an LDADDR */
mono_class_init_internal (klass);
if (mini_is_gsharedvt_klass (klass)) {
size_ins = mini_emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
bzero_ins = mini_emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_BZERO);
if (!bzero_method)
bzero_method = get_method_nofail (mono_defaults.string_class, "bzero_aligned_1", 2, 0);
g_assert (bzero_method);
iargs [0] = dest;
iargs [1] = size_ins;
mini_emit_calli (cfg, mono_method_signature_internal (bzero_method), iargs, bzero_ins, NULL, NULL);
return;
}
klass = mono_class_from_mono_type_internal (mini_get_underlying_type (m_class_get_byval_arg (klass)));
n = mono_class_value_size (klass, &align);
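/* Zero small value types inline; fall back to a memset call for larger ones. */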
if (n <= TARGET_SIZEOF_VOID_P * 8) {
mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
} else {
memset_method = mini_get_memset_method ();
iargs [0] = dest;
EMIT_NEW_ICONST (cfg, iargs [1], 0);
EMIT_NEW_ICONST (cfg, iargs [2], n);
mono_emit_method_call (cfg, memset_method, iargs, NULL);
}
}
static gboolean
context_used_is_mrgctx (MonoCompile *cfg, int context_used)
{
/* gshared dim methods use an mrgctx */
if (mini_method_is_default_method (cfg->method))
return context_used != 0;
return context_used & MONO_GENERIC_CONTEXT_USED_METHOD;
}
/*
* emit_get_rgctx:
*
* Emit IR to return either the vtable or the mrgctx.
*/
static MonoInst*
emit_get_rgctx (MonoCompile *cfg, int context_used)
{
MonoMethod *method = cfg->method;
g_assert (cfg->gshared);
/* Data whose context contains method type vars is stored in the mrgctx */
if (context_used_is_mrgctx (cfg, context_used)) {
MonoInst *mrgctx_loc, *mrgctx_var;
g_assert (cfg->rgctx_access == MONO_RGCTX_ACCESS_MRGCTX);
if (!mini_method_is_default_method (method))
g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
if (cfg->llvm_only) {
mrgctx_var = mono_get_mrgctx_var (cfg);
} else {
/* Volatile */
mrgctx_loc = mono_get_mrgctx_var (cfg);
g_assert (mrgctx_loc->flags & MONO_INST_VOLATILE);
EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
}
return mrgctx_var;
}
/*
* The rest of the entries are stored in vtable->runtime_generic_context so
* have to return a vtable.
*/
if (cfg->rgctx_access == MONO_RGCTX_ACCESS_MRGCTX) {
MonoInst *mrgctx_loc, *mrgctx_var, *vtable_var;
int vtable_reg;
/* We are passed an mrgctx, return mrgctx->class_vtable */
if (cfg->llvm_only) {
mrgctx_var = mono_get_mrgctx_var (cfg);
} else {
mrgctx_loc = mono_get_mrgctx_var (cfg);
g_assert (mrgctx_loc->flags & MONO_INST_VOLATILE);
EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
}
vtable_reg = alloc_preg (cfg);
EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, MONO_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
vtable_var->type = STACK_PTR;
return vtable_var;
} else if (cfg->rgctx_access == MONO_RGCTX_ACCESS_VTABLE) {
MonoInst *vtable_loc, *vtable_var;
/* We are passed a vtable, return it */
if (cfg->llvm_only) {
vtable_var = mono_get_vtable_var (cfg);
} else {
vtable_loc = mono_get_vtable_var (cfg);
g_assert (vtable_loc->flags & MONO_INST_VOLATILE);
EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
}
vtable_var->type = STACK_PTR;
return vtable_var;
} else {
MonoInst *ins, *this_ins;
int vtable_reg;
/* We are passed a this pointer, return this->vtable */
EMIT_NEW_VARLOAD (cfg, this_ins, cfg->this_arg, mono_get_object_type ());
vtable_reg = alloc_preg (cfg);
EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this_ins->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
return ins;
}
}
static MonoJumpInfoRgctxEntry *
mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, MonoRgctxInfoType info_type)
{
MonoJumpInfoRgctxEntry *res = (MonoJumpInfoRgctxEntry *)mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
if (in_mrgctx)
res->d.method = method;
else
res->d.klass = method->klass;
res->in_mrgctx = in_mrgctx;
res->data = (MonoJumpInfo *)mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
res->data->type = patch_type;
res->data->data.target = patch_data;
res->info_type = info_type;
return res;
}
static MonoInst*
emit_get_gsharedvt_info (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type);
static MonoInst*
emit_rgctx_fetch_inline (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
{
MonoInst *call;
MonoInst *slot_ins;
EMIT_NEW_AOTCONST (cfg, slot_ins, MONO_PATCH_INFO_RGCTX_SLOT_INDEX, entry);
// Can't add basic blocks during interp entry mode
if (cfg->disable_inline_rgctx_fetch || cfg->interp_entry_only) {
MonoInst *args [2] = { rgctx, slot_ins };
if (entry->in_mrgctx)
call = mono_emit_jit_icall (cfg, mono_fill_method_rgctx, args);
else
call = mono_emit_jit_icall (cfg, mono_fill_class_rgctx, args);
return call;
}
MonoBasicBlock *slowpath_bb, *end_bb;
MonoInst *ins, *res;
int rgctx_reg, res_reg;
/*
* rgctx = vtable->runtime_generic_context;
* if (rgctx) {
* val = rgctx [slot + 1];
* if (val)
* return val;
* }
* <slowpath>
*/
NEW_BBLOCK (cfg, end_bb);
NEW_BBLOCK (cfg, slowpath_bb);
if (entry->in_mrgctx) {
rgctx_reg = rgctx->dreg;
} else {
rgctx_reg = alloc_preg (cfg);
MONO_EMIT_NEW_LOAD_MEMBASE (cfg, rgctx_reg, rgctx->dreg, MONO_STRUCT_OFFSET (MonoVTable, runtime_generic_context));
// FIXME: Avoid this check by allocating the table when the vtable is created etc.
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rgctx_reg, 0);
MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, slowpath_bb);
}
int table_size = mono_class_rgctx_get_array_size (0, entry->in_mrgctx);
if (entry->in_mrgctx)
table_size -= MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT / TARGET_SIZEOF_VOID_P;
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, slot_ins->dreg, table_size - 1);
MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBGE, slowpath_bb);
int shifted_slot_reg = alloc_ireg (cfg);
EMIT_NEW_BIALU_IMM (cfg, ins, OP_ISHL_IMM, shifted_slot_reg, slot_ins->dreg, TARGET_SIZEOF_VOID_P == 8 ? 3 : 2);
int addr_reg = alloc_preg (cfg);
EMIT_NEW_UNALU (cfg, ins, OP_MOVE, addr_reg, rgctx_reg);
EMIT_NEW_BIALU (cfg, ins, OP_PADD, addr_reg, addr_reg, shifted_slot_reg);
int val_reg = alloc_preg (cfg);
MONO_EMIT_NEW_LOAD_MEMBASE (cfg, val_reg, addr_reg, TARGET_SIZEOF_VOID_P + (entry->in_mrgctx ? MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT : 0));
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, val_reg, 0);
MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, slowpath_bb);
res_reg = alloc_preg (cfg);
EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, val_reg);
res = ins;
MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
MONO_START_BB (cfg, slowpath_bb);
slowpath_bb->out_of_line = TRUE;
MonoInst *args[2] = { rgctx, slot_ins };
if (entry->in_mrgctx)
call = mono_emit_jit_icall (cfg, mono_fill_method_rgctx, args);
else
call = mono_emit_jit_icall (cfg, mono_fill_class_rgctx, args);
EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, call->dreg);
MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
MONO_START_BB (cfg, end_bb);
return res;
}
/*
* emit_rgctx_fetch:
*
* Emit IR to load the value of the rgctx entry ENTRY from the rgctx.
*/
static MonoInst*
emit_rgctx_fetch (MonoCompile *cfg, int context_used, MonoJumpInfoRgctxEntry *entry)
{
MonoInst *rgctx = emit_get_rgctx (cfg, context_used);
if (cfg->llvm_only)
return emit_rgctx_fetch_inline (cfg, rgctx, entry);
else
return mini_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, mono_icall_sig_ptr_ptr, &rgctx);
}
/*
* mini_emit_get_rgctx_klass:
*
* Emit IR to load the property RGCTX_TYPE of KLASS. If context_used is 0, emit
* normal constants, else emit a load from the rgctx.
*/
MonoInst*
mini_emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
MonoClass *klass, MonoRgctxInfoType rgctx_type)
{
if (!context_used) {
MonoInst *ins;
switch (rgctx_type) {
case MONO_RGCTX_INFO_KLASS:
EMIT_NEW_CLASSCONST (cfg, ins, klass);
return ins;
case MONO_RGCTX_INFO_VTABLE: {
MonoVTable *vtable = mono_class_vtable_checked (klass, cfg->error);
CHECK_CFG_ERROR;
EMIT_NEW_VTABLECONST (cfg, ins, vtable);
return ins;
}
default:
g_assert_not_reached ();
}
}
// It's cheaper to load these from the gsharedvt info struct
if (cfg->llvm_only && cfg->gsharedvt)
return mini_emit_get_gsharedvt_info_klass (cfg, klass, rgctx_type);
MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used_is_mrgctx (cfg, context_used), MONO_PATCH_INFO_CLASS, klass, rgctx_type);
return emit_rgctx_fetch (cfg, context_used, entry);
mono_error_exit:
return NULL;
}
static MonoInst*
emit_get_rgctx_sig (MonoCompile *cfg, int context_used,
MonoMethodSignature *sig, MonoRgctxInfoType rgctx_type)
{
MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used_is_mrgctx (cfg, context_used), MONO_PATCH_INFO_SIGNATURE, sig, rgctx_type);
return emit_rgctx_fetch (cfg, context_used, entry);
}
static MonoInst*
emit_get_rgctx_gsharedvt_call (MonoCompile *cfg, int context_used,
MonoMethodSignature *sig, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
{
MonoJumpInfoGSharedVtCall *call_info;
MonoJumpInfoRgctxEntry *entry;
call_info = (MonoJumpInfoGSharedVtCall *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoGSharedVtCall));
call_info->sig = sig;
call_info->method = cmethod;
entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used_is_mrgctx (cfg, context_used), MONO_PATCH_INFO_GSHAREDVT_CALL, call_info, rgctx_type);
return emit_rgctx_fetch (cfg, context_used, entry);
}
/*
* emit_get_rgctx_virt_method:
*
* Return data for method VIRT_METHOD for a receiver of type KLASS.
*/
static MonoInst*
emit_get_rgctx_virt_method (MonoCompile *cfg, int context_used,
MonoClass *klass, MonoMethod *virt_method, MonoRgctxInfoType rgctx_type)
{
MonoJumpInfoVirtMethod *info;
MonoJumpInfoRgctxEntry *entry;
if (context_used == -1)
context_used = mono_class_check_context_used (klass) | mono_method_check_context_used (virt_method);
info = (MonoJumpInfoVirtMethod *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoVirtMethod));
info->klass = klass;
info->method = virt_method;
entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used_is_mrgctx (cfg, context_used), MONO_PATCH_INFO_VIRT_METHOD, info, rgctx_type);
return emit_rgctx_fetch (cfg, context_used, entry);
}
static MonoInst*
emit_get_rgctx_gsharedvt_method (MonoCompile *cfg, int context_used,
MonoMethod *cmethod, MonoGSharedVtMethodInfo *info)
{
MonoJumpInfoRgctxEntry *entry;
entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used_is_mrgctx (cfg, context_used), MONO_PATCH_INFO_GSHAREDVT_METHOD, info, MONO_RGCTX_INFO_METHOD_GSHAREDVT_INFO);
return emit_rgctx_fetch (cfg, context_used, entry);
}
/*
* emit_get_rgctx_method:
*
* Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit
* normal constants, else emit a load from the rgctx.
*/
static MonoInst*
emit_get_rgctx_method (MonoCompile *cfg, int context_used,
MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
{
if (context_used == -1)
context_used = mono_method_check_context_used (cmethod);
if (!context_used) {
MonoInst *ins;
switch (rgctx_type) {
case MONO_RGCTX_INFO_METHOD:
EMIT_NEW_METHODCONST (cfg, ins, cmethod);
return ins;
case MONO_RGCTX_INFO_METHOD_RGCTX:
EMIT_NEW_METHOD_RGCTX_CONST (cfg, ins, cmethod);
return ins;
case MONO_RGCTX_INFO_METHOD_FTNDESC:
EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_METHOD_FTNDESC, cmethod);
return ins;
case MONO_RGCTX_INFO_LLVMONLY_INTERP_ENTRY:
EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_LLVMONLY_INTERP_ENTRY, cmethod);
return ins;
default:
g_assert_not_reached ();
}
} else {
// It's cheaper to load these from the gsharedvt info struct
if (cfg->llvm_only && cfg->gsharedvt)
return emit_get_gsharedvt_info (cfg, cmethod, rgctx_type);
MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used_is_mrgctx (cfg, context_used), MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
return emit_rgctx_fetch (cfg, context_used, entry);
}
}
static MonoInst*
emit_get_rgctx_field (MonoCompile *cfg, int context_used,
MonoClassField *field, MonoRgctxInfoType rgctx_type)
{
// It's cheaper to load these from the gsharedvt info struct
if (cfg->llvm_only && cfg->gsharedvt)
return emit_get_gsharedvt_info (cfg, field, rgctx_type);
MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used_is_mrgctx (cfg, context_used), MONO_PATCH_INFO_FIELD, field, rgctx_type);
return emit_rgctx_fetch (cfg, context_used, entry);
}
MonoInst*
mini_emit_get_rgctx_method (MonoCompile *cfg, int context_used,
MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
{
return emit_get_rgctx_method (cfg, context_used, cmethod, rgctx_type);
}
static int
get_gsharedvt_info_slot (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
{
MonoGSharedVtMethodInfo *info = cfg->gsharedvt_info;
MonoRuntimeGenericContextInfoTemplate *template_;
int i, idx;
g_assert (info);
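/* Reuse an existing slot when possible; MONO_RGCTX_INFO_LOCAL_OFFSET
 * entries are never shared, so they always get a fresh slot. */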
for (i = 0; i < info->num_entries; ++i) {
MonoRuntimeGenericContextInfoTemplate *otemplate = &info->entries [i];
if (otemplate->info_type == rgctx_type && otemplate->data == data && rgctx_type != MONO_RGCTX_INFO_LOCAL_OFFSET)
return i;
}
if (info->num_entries == info->count_entries) {
MonoRuntimeGenericContextInfoTemplate *new_entries;
int new_count_entries = info->count_entries ? info->count_entries * 2 : 16;
new_entries = (MonoRuntimeGenericContextInfoTemplate *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * new_count_entries);
memcpy (new_entries, info->entries, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
info->entries = new_entries;
info->count_entries = new_count_entries;
}
idx = info->num_entries;
template_ = &info->entries [idx];
template_->info_type = rgctx_type;
template_->data = data;
info->num_entries ++;
return idx;
}
/*
* emit_get_gsharedvt_info:
*
* This is similar to emit_get_rgctx_.., but loads the data from the gsharedvt info var instead of calling an rgctx fetch trampoline.
*/
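/*
 * Conceptually (a sketch): at run time the info var points at a
 * MonoGSharedVtMethodRuntimeInfo whose entries [] array holds one
 * pointer-sized value per (data, info_type) pair registered through
 * get_gsharedvt_info_slot (), so the emitted IR boils down to:
 *
 *   result = info->entries [idx];
 */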
static MonoInst*
emit_get_gsharedvt_info (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
{
MonoInst *ins;
int idx, dreg;
idx = get_gsharedvt_info_slot (cfg, data, rgctx_type);
/* Load info->entries [idx] */
dreg = alloc_preg (cfg);
EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, cfg->gsharedvt_info_var->dreg, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * TARGET_SIZEOF_VOID_P));
return ins;
}
MonoInst*
mini_emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type)
{
return emit_get_gsharedvt_info (cfg, m_class_get_byval_arg (klass), rgctx_type);
}
/*
* On return the caller must check @klass for load errors.
*/
static void
emit_class_init (MonoCompile *cfg, MonoClass *klass)
{
MonoInst *vtable_arg;
int context_used;
context_used = mini_class_check_context_used (cfg, klass);
if (context_used) {
vtable_arg = mini_emit_get_rgctx_klass (cfg, context_used,
klass, MONO_RGCTX_INFO_VTABLE);
} else {
MonoVTable *vtable = mono_class_vtable_checked (klass, cfg->error);
if (!is_ok (cfg->error)) {
mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
return;
}
EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
}
if (!COMPILE_LLVM (cfg) && cfg->backend->have_op_generic_class_init) {
MonoInst *ins;
/*
* Using an opcode instead of emitting IR here allows the hiding of the call inside the opcode,
* so this doesn't have to clobber any regs and it doesn't break basic blocks.
*/
MONO_INST_NEW (cfg, ins, OP_GENERIC_CLASS_INIT);
ins->sreg1 = vtable_arg->dreg;
MONO_ADD_INS (cfg->cbb, ins);
} else {
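/*
 * No specialized opcode available: emit the check inline. Fast path:
 * load vtable->initialized and skip the call when already set; slow
 * path: an out-of-line call to mono_generic_class_init ().
 */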
int inited_reg;
MonoBasicBlock *inited_bb;
inited_reg = alloc_ireg (cfg);
MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, inited_reg, vtable_arg->dreg, MONO_STRUCT_OFFSET (MonoVTable, initialized));
NEW_BBLOCK (cfg, inited_bb);
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, inited_reg, 0);
MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBNE_UN, inited_bb);
cfg->cbb->out_of_line = TRUE;
mono_emit_jit_icall (cfg, mono_generic_class_init, &vtable_arg);
MONO_START_BB (cfg, inited_bb);
}
}
static void
emit_seq_point (MonoCompile *cfg, MonoMethod *method, guint8* ip, gboolean intr_loc, gboolean nonempty_stack)
{
MonoInst *ins;
if (cfg->gen_seq_points && cfg->method == method) {
NEW_SEQ_POINT (cfg, ins, ip - cfg->header->code, intr_loc);
if (nonempty_stack)
ins->flags |= MONO_INST_NONEMPTY_STACK;
MONO_ADD_INS (cfg->cbb, ins);
cfg->last_seq_point = ins;
}
}
void
mini_save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg, gboolean null_check)
{
if (mini_debug_options.better_cast_details) {
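/*
 * With --debug=casts the source/target classes of the cast are stashed
 * in MonoJitTlsData so a failing cast can produce a detailed message;
 * mini_reset_cast_details () clears them again on the success path.
 */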
int vtable_reg = alloc_preg (cfg);
int klass_reg = alloc_preg (cfg);
MonoBasicBlock *is_null_bb = NULL;
MonoInst *tls_get;
if (null_check) {
NEW_BBLOCK (cfg, is_null_bb);
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
}
tls_get = mono_create_tls_get (cfg, TLS_KEY_JIT_TLS);
if (!tls_get) {
fprintf (stderr, "error: --debug=casts not supported on this platform.\n");
exit (1);
}
MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
MonoInst *class_ins = mini_emit_get_rgctx_klass (cfg, mini_class_check_context_used (cfg, klass), klass, MONO_RGCTX_INFO_KLASS);
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), class_ins->dreg);
if (null_check)
MONO_START_BB (cfg, is_null_bb);
}
}
void
mini_reset_cast_details (MonoCompile *cfg)
{
/* Reset the variables holding the cast details */
if (mini_debug_options.better_cast_details) {
MonoInst *tls_get = mono_create_tls_get (cfg, TLS_KEY_JIT_TLS);
/* It is enough to reset the from field */
MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
}
}
/*
* On return the caller must check @array_class for load errors
*/
static void
mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
{
int vtable_reg = alloc_preg (cfg);
int context_used;
context_used = mini_class_check_context_used (cfg, array_class);
mini_save_cast_details (cfg, array_class, obj->dreg, FALSE);
MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
if (context_used) {
MonoInst *vtable_ins;
vtable_ins = mini_emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
} else {
if (cfg->compile_aot) {
int vt_reg;
MonoVTable *vtable;
if (!(vtable = mono_class_vtable_checked (array_class, cfg->error))) {
mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
return;
}
vt_reg = alloc_preg (cfg);
MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, vtable);
MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
} else {
MonoVTable *vtable;
if (!(vtable = mono_class_vtable_checked (array_class, cfg->error))) {
mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
return;
}
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, (gssize)vtable);
}
}
MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
mini_reset_cast_details (cfg);
}
/**
* Handles unbox of a Nullable<T>. If context_used is non-zero, then shared
* generic code is generated.
*/
static MonoInst*
handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
{
MonoMethod* method;
if (m_class_is_enumtype (mono_class_get_nullable_param_internal (klass)))
method = get_method_nofail (klass, "UnboxExact", 1, 0);
else
method = get_method_nofail (klass, "Unbox", 1, 0);
g_assert (method);
if (context_used) {
MonoInst *rgctx, *addr;
/* FIXME: What if the class is shared? We might not
have to get the address of the method from the
RGCTX. */
if (cfg->llvm_only) {
addr = emit_get_rgctx_method (cfg, context_used, method,
MONO_RGCTX_INFO_METHOD_FTNDESC);
cfg->signatures = g_slist_prepend_mempool (cfg->mempool, cfg->signatures, mono_method_signature_internal (method));
return mini_emit_llvmonly_calli (cfg, mono_method_signature_internal (method), &val, addr);
} else {
addr = emit_get_rgctx_method (cfg, context_used, method,
MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
rgctx = emit_get_rgctx (cfg, context_used);
return mini_emit_calli (cfg, mono_method_signature_internal (method), &val, addr, NULL, rgctx);
}
} else {
gboolean pass_vtable, pass_mrgctx;
MonoInst *rgctx_arg = NULL;
check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
g_assert (!pass_mrgctx);
if (pass_vtable) {
MonoVTable *vtable = mono_class_vtable_checked (method->klass, cfg->error);
mono_error_assert_ok (cfg->error);
EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
}
return mini_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
}
}
MonoInst*
mini_handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst *val, int context_used)
{
MonoInst *add;
int obj_reg;
int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
int klass_reg = alloc_dreg (cfg ,STACK_PTR);
int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
int rank_reg = alloc_dreg (cfg ,STACK_I4);
obj_reg = val->dreg;
MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
/* FIXME: generics */
g_assert (m_class_get_rank (klass) == 0);
// Check rank == 0
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, m_class_offsetof_element_class ());
if (context_used) {
MonoInst *element_class;
/* This assertion is from the unboxcast insn */
g_assert (m_class_get_rank (klass) == 0);
element_class = mini_emit_get_rgctx_klass (cfg, context_used,
klass, MONO_RGCTX_INFO_ELEMENT_KLASS);
MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
} else {
mini_save_cast_details (cfg, m_class_get_element_class (klass), obj_reg, FALSE);
mini_emit_class_check (cfg, eclass_reg, m_class_get_element_class (klass));
mini_reset_cast_details (cfg);
}
NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), obj_reg, MONO_ABI_SIZEOF (MonoObject));
MONO_ADD_INS (cfg->cbb, add);
add->type = STACK_MP;
add->klass = klass;
return add;
}
static MonoInst*
handle_unbox_gsharedvt (MonoCompile *cfg, MonoClass *klass, MonoInst *obj)
{
MonoInst *addr, *klass_inst, *is_ref, *args[16];
MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
MonoInst *ins;
int dreg, addr_reg;
klass_inst = mini_emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_KLASS);
/* obj */
args [0] = obj;
/* klass */
args [1] = klass_inst;
/* CASTCLASS */
obj = mono_emit_jit_icall (cfg, mono_object_castclass_unbox, args);
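/*
 * The instantiation is only known at run time, so dispatch three ways on
 * the class box type: plain vtypes are unboxed by pointer arithmetic,
 * reference types are spilled to a temporary so an address can be taken,
 * and Nullable<T> goes through its Unbox () method.
 */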
NEW_BBLOCK (cfg, is_ref_bb);
NEW_BBLOCK (cfg, is_nullable_bb);
NEW_BBLOCK (cfg, end_bb);
is_ref = mini_emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_REF);
MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_NULLABLE);
MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
/* This will contain either the address of the unboxed vtype, or an address of the temporary where the ref is stored */
addr_reg = alloc_dreg (cfg, STACK_MP);
/* Non-ref case */
/* UNBOX */
NEW_BIALU_IMM (cfg, addr, OP_ADD_IMM, addr_reg, obj->dreg, MONO_ABI_SIZEOF (MonoObject));
MONO_ADD_INS (cfg->cbb, addr);
MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Ref case */
MONO_START_BB (cfg, is_ref_bb);
/* Save the ref to a temporary */
dreg = alloc_ireg (cfg);
EMIT_NEW_VARLOADA_VREG (cfg, addr, dreg, m_class_get_byval_arg (klass));
addr->dreg = addr_reg;
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, obj->dreg);
MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Nullable case */
MONO_START_BB (cfg, is_nullable_bb);
{
MonoInst *addr = mini_emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_NULLABLE_CLASS_UNBOX);
MonoInst *unbox_call;
MonoMethodSignature *unbox_sig;
unbox_sig = (MonoMethodSignature *)mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
unbox_sig->ret = m_class_get_byval_arg (klass);
unbox_sig->param_count = 1;
unbox_sig->params [0] = mono_get_object_type ();
if (cfg->llvm_only)
unbox_call = mini_emit_llvmonly_calli (cfg, unbox_sig, &obj, addr);
else
unbox_call = mini_emit_calli (cfg, unbox_sig, &obj, addr, NULL, NULL);
EMIT_NEW_VARLOADA_VREG (cfg, addr, unbox_call->dreg, m_class_get_byval_arg (klass));
addr->dreg = addr_reg;
}
MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* End */
MONO_START_BB (cfg, end_bb);
/* LDOBJ */
EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, m_class_get_byval_arg (klass), addr_reg, 0);
return ins;
}
/*
* Returns NULL and sets the cfg exception on error.
*/
static MonoInst*
handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box, int context_used)
{
MonoInst *iargs [2];
MonoJitICallId alloc_ftn;
if (mono_class_get_flags (klass) & TYPE_ATTRIBUTE_ABSTRACT) {
char* full_name = mono_type_get_full_name (klass);
mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
mono_error_set_member_access (cfg->error, "Cannot create an abstract class: %s", full_name);
g_free (full_name);
return NULL;
}
if (context_used) {
gboolean known_instance_size = !mini_is_gsharedvt_klass (klass);
MonoMethod *managed_alloc = mono_gc_get_managed_allocator (klass, for_box, known_instance_size);
iargs [0] = mini_emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_VTABLE);
alloc_ftn = MONO_JIT_ICALL_ves_icall_object_new_specific;
if (managed_alloc) {
if (known_instance_size) {
int size = mono_class_instance_size (klass);
if (size < MONO_ABI_SIZEOF (MonoObject))
g_error ("Invalid size %d for class %s", size, mono_type_get_full_name (klass));
EMIT_NEW_ICONST (cfg, iargs [1], size);
}
return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
}
return mono_emit_jit_icall_id (cfg, alloc_ftn, iargs);
}
if (cfg->compile_aot && cfg->cbb->out_of_line && m_class_get_type_token (klass) && m_class_get_image (klass) == mono_defaults.corlib && !mono_class_is_ginst (klass)) {
/* This happens often in argument checking code, eg. throw new FooException... */
/* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (m_class_get_type_token (klass)));
alloc_ftn = MONO_JIT_ICALL_mono_helper_newobj_mscorlib;
} else {
MonoVTable *vtable = mono_class_vtable_checked (klass, cfg->error);
if (!is_ok (cfg->error)) {
mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
return NULL;
}
MonoMethod *managed_alloc = mono_gc_get_managed_allocator (klass, for_box, TRUE);
if (managed_alloc) {
int size = mono_class_instance_size (klass);
if (size < MONO_ABI_SIZEOF (MonoObject))
g_error ("Invalid size %d for class %s", size, mono_type_get_full_name (klass));
EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
EMIT_NEW_ICONST (cfg, iargs [1], size);
return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
}
alloc_ftn = MONO_JIT_ICALL_ves_icall_object_new_specific;
EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
}
return mono_emit_jit_icall_id (cfg, alloc_ftn, iargs);
}
/*
* Returns NULL and sets the cfg exception on error.
*/
MonoInst*
mini_emit_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used)
{
MonoInst *alloc, *ins;
if (G_UNLIKELY (m_class_is_byreflike (klass))) {
mono_error_set_bad_image (cfg->error, m_class_get_image (cfg->method->klass), "Cannot box IsByRefLike type '%s.%s'", m_class_get_name_space (klass), m_class_get_name (klass));
mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
return NULL;
}
if (mono_class_is_nullable (klass)) {
MonoMethod* method = get_method_nofail (klass, "Box", 1, 0);
if (context_used) {
if (cfg->llvm_only) {
MonoMethodSignature *sig = mono_method_signature_internal (method);
MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
MONO_RGCTX_INFO_METHOD_FTNDESC);
cfg->interp_in_signatures = g_slist_prepend_mempool (cfg->mempool, cfg->interp_in_signatures, sig);
return mini_emit_llvmonly_calli (cfg, sig, &val, addr);
} else {
/* FIXME: What if the class is shared? We might not
have to get the method address from the RGCTX. */
MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
MonoInst *rgctx = emit_get_rgctx (cfg, context_used);
return mini_emit_calli (cfg, mono_method_signature_internal (method), &val, addr, NULL, rgctx);
}
} else {
gboolean pass_vtable, pass_mrgctx;
MonoInst *rgctx_arg = NULL;
check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
g_assert (!pass_mrgctx);
if (pass_vtable) {
MonoVTable *vtable = mono_class_vtable_checked (method->klass, cfg->error);
mono_error_assert_ok (cfg->error);
EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
}
return mini_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
}
}
if (mini_is_gsharedvt_klass (klass)) {
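/*
 * Mirror of handle_unbox_gsharedvt (): the same shared code may box a
 * plain vtype (allocate + copy), a reference type (identity) or a
 * Nullable<T> (call Nullable<T>.Box ()), so dispatch at run time on the
 * class box type.
 */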
MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
MonoInst *res, *is_ref, *src_var, *addr;
int dreg;
dreg = alloc_ireg (cfg);
NEW_BBLOCK (cfg, is_ref_bb);
NEW_BBLOCK (cfg, is_nullable_bb);
NEW_BBLOCK (cfg, end_bb);
is_ref = mini_emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_REF);
MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_NULLABLE);
MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
/* Non-ref case */
alloc = handle_alloc (cfg, klass, TRUE, context_used);
if (!alloc)
return NULL;
EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, m_class_get_byval_arg (klass), alloc->dreg, MONO_ABI_SIZEOF (MonoObject), val->dreg);
ins->opcode = OP_STOREV_MEMBASE;
EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, alloc->dreg);
res->type = STACK_OBJ;
res->klass = klass;
MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Ref case */
MONO_START_BB (cfg, is_ref_bb);
/* val is a vtype, so we have to load the value manually */
src_var = get_vreg_to_inst (cfg, val->dreg);
if (!src_var)
src_var = mono_compile_create_var_for_vreg (cfg, m_class_get_byval_arg (klass), OP_LOCAL, val->dreg);
EMIT_NEW_VARLOADA (cfg, addr, src_var, src_var->inst_vtype);
MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, addr->dreg, 0);
MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Nullable case */
MONO_START_BB (cfg, is_nullable_bb);
{
MonoInst *addr = mini_emit_get_gsharedvt_info_klass (cfg, klass,
MONO_RGCTX_INFO_NULLABLE_CLASS_BOX);
MonoInst *box_call;
MonoMethodSignature *box_sig;
/*
* klass is Nullable<T>, need to call Nullable<T>.Box () using a gsharedvt signature, but we cannot
* construct that method at JIT time, so have to do things by hand.
*/
box_sig = (MonoMethodSignature *)mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
box_sig->ret = mono_get_object_type ();
box_sig->param_count = 1;
box_sig->params [0] = m_class_get_byval_arg (klass);
if (cfg->llvm_only)
box_call = mini_emit_llvmonly_calli (cfg, box_sig, &val, addr);
else
box_call = mini_emit_calli (cfg, box_sig, &val, addr, NULL, NULL);
EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, box_call->dreg);
res->type = STACK_OBJ;
res->klass = klass;
}
MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
MONO_START_BB (cfg, end_bb);
return res;
}
alloc = handle_alloc (cfg, klass, TRUE, context_used);
if (!alloc)
return NULL;
EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, m_class_get_byval_arg (klass), alloc->dreg, MONO_ABI_SIZEOF (MonoObject), val->dreg);
return alloc;
}
static gboolean
method_needs_stack_walk (MonoCompile *cfg, MonoMethod *cmethod)
{
if (cmethod->klass == mono_defaults.systemtype_class) {
if (!strcmp (cmethod->name, "GetType"))
return TRUE;
}
/*
* In corelib code, methods which need to do a stack walk declare a StackCrawlMark local and pass it as an
* argument until it reaches an icall. It's hard to detect which methods do that, especially with
* StackCrawlMark.LookForMyCallersCaller, so for now, just hardcode the classes which contain the public
* methods whose caller is needed.
*/
if (mono_is_corlib_image (m_class_get_image (cmethod->klass))) {
const char *cname = m_class_get_name (cmethod->klass);
if (!strcmp (cname, "Assembly") ||
!strcmp (cname, "AssemblyLoadContext") ||
(!strcmp (cname, "Activator"))) {
if (!strcmp (cmethod->name, "op_Equality"))
return FALSE;
return TRUE;
}
}
return FALSE;
}
G_GNUC_UNUSED MonoInst*
mini_handle_enum_has_flag (MonoCompile *cfg, MonoClass *klass, MonoInst *enum_this, int enum_val_reg, MonoInst *enum_flag)
{
MonoType *enum_type = mono_type_get_underlying_type (m_class_get_byval_arg (klass));
guint32 load_opc = mono_type_to_load_membase (cfg, enum_type);
gboolean is_i4;
switch (enum_type->type) {
case MONO_TYPE_I8:
case MONO_TYPE_U8:
#if SIZEOF_REGISTER == 8
case MONO_TYPE_I:
case MONO_TYPE_U:
#endif
is_i4 = FALSE;
break;
default:
is_i4 = TRUE;
break;
}
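/* Expand Enum.HasFlag (flag) into ((value & flag) == flag) at the
 * underlying integer width selected above. */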
{
MonoInst *load = NULL, *and_, *cmp, *ceq;
int enum_reg = is_i4 ? alloc_ireg (cfg) : alloc_lreg (cfg);
int and_reg = is_i4 ? alloc_ireg (cfg) : alloc_lreg (cfg);
int dest_reg = alloc_ireg (cfg);
if (enum_this) {
EMIT_NEW_LOAD_MEMBASE (cfg, load, load_opc, enum_reg, enum_this->dreg, 0);
} else {
g_assert (enum_val_reg != -1);
enum_reg = enum_val_reg;
}
EMIT_NEW_BIALU (cfg, and_, is_i4 ? OP_IAND : OP_LAND, and_reg, enum_reg, enum_flag->dreg);
EMIT_NEW_BIALU (cfg, cmp, is_i4 ? OP_ICOMPARE : OP_LCOMPARE, -1, and_reg, enum_flag->dreg);
EMIT_NEW_UNALU (cfg, ceq, is_i4 ? OP_ICEQ : OP_LCEQ, dest_reg, -1);
ceq->type = STACK_I4;
if (!is_i4) {
load = load ? mono_decompose_opcode (cfg, load) : NULL;
and_ = mono_decompose_opcode (cfg, and_);
cmp = mono_decompose_opcode (cfg, cmp);
ceq = mono_decompose_opcode (cfg, ceq);
}
return ceq;
}
}
static void
emit_set_deopt_il_offset (MonoCompile *cfg, int offset)
{
MonoInst *ins;
if (!(cfg->deopt && cfg->method == cfg->current_method))
return;
EMIT_NEW_VARLOADA (cfg, ins, cfg->il_state_var, NULL);
MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, ins->dreg, MONO_STRUCT_OFFSET (MonoMethodILState, il_offset), offset);
}
static MonoInst*
emit_get_rgctx_dele_tramp (MonoCompile *cfg, int context_used,
MonoClass *klass, MonoMethod *virt_method, gboolean _virtual, MonoRgctxInfoType rgctx_type)
{
MonoDelegateClassMethodPair *info;
MonoJumpInfoRgctxEntry *entry;
info = (MonoDelegateClassMethodPair *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoDelegateClassMethodPair));
info->klass = klass;
info->method = virt_method;
info->is_virtual = _virtual;
entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used_is_mrgctx (cfg, context_used), MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, info, rgctx_type);
return emit_rgctx_fetch (cfg, context_used, entry);
}
/*
* Returns NULL and sets the cfg exception on error.
*/
static G_GNUC_UNUSED MonoInst*
handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method, int target_method_context_used, int invoke_context_used, gboolean virtual_)
{
MonoInst *ptr;
int dreg;
gpointer trampoline;
MonoInst *obj, *tramp_ins;
guint8 **code_slot;
if (virtual_ && !cfg->llvm_only) {
MonoMethod *invoke = mono_get_delegate_invoke_internal (klass);
g_assert (invoke);
// FIXME: verify & fix any issues with removing the invoke_context_used restriction
if (invoke_context_used || !mono_get_delegate_virtual_invoke_impl (mono_method_signature_internal (invoke), target_method_context_used ? NULL : method))
return NULL;
}
obj = handle_alloc (cfg, klass, FALSE, invoke_context_used);
if (!obj)
return NULL;
/* Inline the contents of mono_delegate_ctor */
/* Set target field */
/* Optimize away setting of NULL target */
if (!MONO_INS_IS_PCONST_NULL (target)) {
if (!(method->flags & METHOD_ATTRIBUTE_STATIC)) {
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, target->dreg, 0);
MONO_EMIT_NEW_COND_EXC (cfg, EQ, "NullReferenceException");
}
if (!mini_debug_options.weak_memory_model)
mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
if (cfg->gen_write_barriers) {
dreg = alloc_preg (cfg);
EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, target));
mini_emit_write_barrier (cfg, ptr, target);
}
}
/* Set method field */
if (!(target_method_context_used || invoke_context_used) && !cfg->llvm_only) {
// If compiling with gsharing enabled, it's faster to load the method from the delegate trampoline info than to use an rgctx slot
MonoInst *method_ins = emit_get_rgctx_method (cfg, target_method_context_used, method, MONO_RGCTX_INFO_METHOD);
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
}
if (cfg->llvm_only) {
if (virtual_) {
MonoInst *args [ ] = {
obj,
target,
emit_get_rgctx_method (cfg, target_method_context_used, method, MONO_RGCTX_INFO_METHOD)
};
mono_emit_jit_icall (cfg, mini_llvmonly_init_delegate_virtual, args);
return obj;
}
}
/*
* To avoid looking up the compiled code belonging to the target method
* in mono_delegate_trampoline (), we allocate a memory slot from the JIT
* memory manager to store it, and we fill it in after the method has been compiled.
*/
if (!method->dynamic && !cfg->llvm_only) {
MonoInst *code_slot_ins;
if (target_method_context_used) {
code_slot_ins = emit_get_rgctx_method (cfg, target_method_context_used, method, MONO_RGCTX_INFO_METHOD_DELEGATE_CODE);
} else {
MonoJitMemoryManager *jit_mm = (MonoJitMemoryManager*)cfg->jit_mm;
jit_mm_lock (jit_mm);
if (!jit_mm->method_code_hash)
jit_mm->method_code_hash = g_hash_table_new (NULL, NULL);
code_slot = (guint8 **)g_hash_table_lookup (jit_mm->method_code_hash, method);
if (!code_slot) {
code_slot = (guint8 **)mono_mem_manager_alloc0 (jit_mm->mem_manager, sizeof (gpointer));
g_hash_table_insert (jit_mm->method_code_hash, method, code_slot);
}
jit_mm_unlock (jit_mm);
code_slot_ins = mini_emit_runtime_constant (cfg, MONO_PATCH_INFO_METHOD_CODE_SLOT, method);
}
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
}
if (target_method_context_used || invoke_context_used) {
tramp_ins = emit_get_rgctx_dele_tramp (cfg, target_method_context_used | invoke_context_used, klass, method, virtual_, MONO_RGCTX_INFO_DELEGATE_TRAMP_INFO);
// This is emitted as a constant store for the non-shared case.
// We copy from the delegate trampoline info as it's faster than an rgctx fetch.
dreg = alloc_preg (cfg);
if (!cfg->llvm_only) {
MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, tramp_ins->dreg, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo, method));
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method), dreg);
}
} else if (cfg->compile_aot) {
MonoDelegateClassMethodPair *del_tramp;
del_tramp = (MonoDelegateClassMethodPair *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoDelegateClassMethodPair));
del_tramp->klass = klass;
del_tramp->method = method;
del_tramp->is_virtual = virtual_;
EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, del_tramp);
} else {
if (virtual_)
trampoline = mono_create_delegate_virtual_trampoline (klass, method);
else
trampoline = mono_create_delegate_trampoline_info (klass, method);
EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
}
if (cfg->llvm_only) {
MonoInst *args [ ] = {
obj,
tramp_ins
};
mono_emit_jit_icall (cfg, mini_llvmonly_init_delegate, args);
return obj;
}
/* Set invoke_impl field */
if (virtual_) {
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
} else {
dreg = alloc_preg (cfg);
MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, tramp_ins->dreg, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo, invoke_impl));
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl), dreg);
dreg = alloc_preg (cfg);
MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, tramp_ins->dreg, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo, method_ptr));
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr), dreg);
}
dreg = alloc_preg (cfg);
MONO_EMIT_NEW_ICONST (cfg, dreg, virtual_ ? 1 : 0);
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_is_virtual), dreg);
/* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
return obj;
}
/*
* handle_constrained_gsharedvt_call:
*
* Handle constrained calls where the receiver is a gsharedvt type.
* Return the instruction representing the call. Set the cfg exception on failure.
*/
static MonoInst*
handle_constrained_gsharedvt_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp, MonoClass *constrained_class,
gboolean *ref_emit_widen)
{
MonoInst *ins = NULL;
gboolean emit_widen = *ref_emit_widen;
gboolean supported;
/*
* Constrained calls need to behave differently at runtime depending on whether the receiver is instantiated as a ref type or as a vtype.
* This is hard to do with the current call code, since we would have to emit a branch and two different calls. So instead, we
* pack the arguments into an array, and do the rest of the work in an icall.
*/
supported = ((cmethod->klass == mono_defaults.object_class) || mono_class_is_interface (cmethod->klass) || (!m_class_is_valuetype (cmethod->klass) && m_class_get_image (cmethod->klass) != mono_defaults.corlib));
if (supported)
supported = (MONO_TYPE_IS_VOID (fsig->ret) || MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_IS_REFERENCE (fsig->ret) || MONO_TYPE_ISSTRUCT (fsig->ret) || m_class_is_enumtype (mono_class_from_mono_type_internal (fsig->ret)) || mini_is_gsharedvt_type (fsig->ret));
if (supported) {
if (fsig->param_count == 0 || (!fsig->hasthis && fsig->param_count == 1)) {
supported = TRUE;
} else {
supported = TRUE;
for (int i = 0; i < fsig->param_count; ++i) {
if (!(m_type_is_byref (fsig->params [i]) || MONO_TYPE_IS_PRIMITIVE (fsig->params [i]) || MONO_TYPE_IS_REFERENCE (fsig->params [i]) || MONO_TYPE_ISSTRUCT (fsig->params [i]) || mini_is_gsharedvt_type (fsig->params [i])))
supported = FALSE;
}
}
}
if (supported) {
MonoInst *args [5];
/*
* This case handles calls to
* - object:ToString()/Equals()/GetHashCode(),
* - System.IComparable<T>:CompareTo()
* - System.IEquatable<T>:Equals ()
* plus enough simple interface calls to support AsyncTaskMethodBuilder.
*/
if (fsig->hasthis)
args [0] = sp [0];
else
EMIT_NEW_PCONST (cfg, args [0], NULL);
args [1] = emit_get_rgctx_method (cfg, mono_method_check_context_used (cmethod), cmethod, MONO_RGCTX_INFO_METHOD);
args [2] = mini_emit_get_rgctx_klass (cfg, mono_class_check_context_used (constrained_class), constrained_class, MONO_RGCTX_INFO_KLASS);
/* !fsig->hasthis is for the wrapper for the Object.GetType () icall or static virtual methods */
if ((fsig->hasthis || m_method_is_static (cmethod)) && fsig->param_count) {
/* Call mono_gsharedvt_constrained_call (gpointer mp, MonoMethod *cmethod, MonoClass *klass, gboolean *deref_args, gpointer *args) */
gboolean has_gsharedvt = FALSE;
for (int i = 0; i < fsig->param_count; ++i) {
if (mini_is_gsharedvt_type (fsig->params [i]))
has_gsharedvt = TRUE;
}
/* Pass an array of bools which signal whether the corresponding argument is a gsharedvt ref type */
if (has_gsharedvt) {
MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
ins->dreg = alloc_preg (cfg);
ins->inst_imm = fsig->param_count;
MONO_ADD_INS (cfg->cbb, ins);
args [3] = ins;
} else {
EMIT_NEW_PCONST (cfg, args [3], 0);
}
/* Pass the arguments in a localloc-ed array, using the format expected by runtime_invoke () */
MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
ins->dreg = alloc_preg (cfg);
ins->inst_imm = fsig->param_count * sizeof (target_mgreg_t);
MONO_ADD_INS (cfg->cbb, ins);
args [4] = ins;
for (int i = 0; i < fsig->param_count; ++i) {
int addr_reg;
if (mini_is_gsharedvt_type (fsig->params [i])) {
MonoInst *is_deref;
int deref_arg_reg;
ins = mini_emit_get_gsharedvt_info_klass (cfg, mono_class_from_mono_type_internal (fsig->params [i]), MONO_RGCTX_INFO_CLASS_BOX_TYPE);
deref_arg_reg = alloc_preg (cfg);
/* deref_arg = BOX_TYPE != MONO_GSHAREDVT_BOX_TYPE_VTYPE */
EMIT_NEW_BIALU_IMM (cfg, is_deref, OP_ISUB_IMM, deref_arg_reg, ins->dreg, 1);
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, args [3]->dreg, i, is_deref->dreg);
} else if (has_gsharedvt) {
MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, args [3]->dreg, i, 0);
}
MonoInst *arg = sp [i + fsig->hasthis];
if (mini_is_gsharedvt_type (fsig->params [i]) || MONO_TYPE_IS_PRIMITIVE (fsig->params [i]) || MONO_TYPE_ISSTRUCT (fsig->params [i])) {
EMIT_NEW_VARLOADA_VREG (cfg, ins, arg->dreg, fsig->params [i]);
addr_reg = ins->dreg;
EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, i * sizeof (target_mgreg_t), addr_reg);
} else {
EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, i * sizeof (target_mgreg_t), arg->dreg);
}
}
} else {
EMIT_NEW_ICONST (cfg, args [3], 0);
EMIT_NEW_ICONST (cfg, args [4], 0);
}
ins = mono_emit_jit_icall (cfg, mono_gsharedvt_constrained_call, args);
emit_widen = FALSE;
if (mini_is_gsharedvt_type (fsig->ret)) {
ins = handle_unbox_gsharedvt (cfg, mono_class_from_mono_type_internal (fsig->ret), ins);
} else if (MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_ISSTRUCT (fsig->ret) || m_class_is_enumtype (mono_class_from_mono_type_internal (fsig->ret))) {
MonoInst *add;
/* Unbox */
NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), ins->dreg, MONO_ABI_SIZEOF (MonoObject));
MONO_ADD_INS (cfg->cbb, add);
/* Load value */
NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, add->dreg, 0);
MONO_ADD_INS (cfg->cbb, ins);
/* ins represents the call result */
}
} else {
GSHAREDVT_FAILURE (CEE_CALLVIRT);
}
*ref_emit_widen = emit_widen;
return ins;
exception_exit:
return NULL;
}
static void
mono_emit_load_got_addr (MonoCompile *cfg)
{
MonoInst *getaddr, *dummy_use;
if (!cfg->got_var || cfg->got_var_allocated)
return;
MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
getaddr->cil_code = cfg->header->code;
getaddr->dreg = cfg->got_var->dreg;
/* Add it to the start of the first bblock */
if (cfg->bb_entry->code) {
getaddr->next = cfg->bb_entry->code;
cfg->bb_entry->code = getaddr;
}
else
MONO_ADD_INS (cfg->bb_entry, getaddr);
cfg->got_var_allocated = TRUE;
/*
* Add a dummy use to keep the got_var alive, since real uses might
* only be generated by the back ends.
* Add it to end_bblock, so the variable's lifetime covers the whole
* method.
* It would be better to make the usage of the got var explicit in all
* cases when the backend needs it (e.g. calls, throw, etc.), so this
* wouldn't be needed.
*/
NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
MONO_ADD_INS (cfg->bb_exit, dummy_use);
}
static MonoMethod*
get_constrained_method (MonoCompile *cfg, MonoImage *image, guint32 token,
MonoMethod *cil_method, MonoClass *constrained_class,
MonoGenericContext *generic_context)
{
MonoMethod *cmethod = cil_method;
gboolean constrained_is_generic_param =
m_class_get_byval_arg (constrained_class)->type == MONO_TYPE_VAR ||
m_class_get_byval_arg (constrained_class)->type == MONO_TYPE_MVAR;
if (cfg->current_method->wrapper_type != MONO_WRAPPER_NONE) {
if (cfg->verbose_level > 2)
printf ("DM Constrained call to %s\n", mono_type_get_full_name (constrained_class));
if (!(constrained_is_generic_param &&
cfg->gshared)) {
cmethod = mono_get_method_constrained_with_method (image, cil_method, constrained_class, generic_context, cfg->error);
CHECK_CFG_ERROR;
}
} else {
if (cfg->verbose_level > 2)
printf ("Constrained call to %s\n", mono_type_get_full_name (constrained_class));
if (constrained_is_generic_param && cfg->gshared) {
/*
* This is needed since get_method_constrained can't find
* the method in klass representing a type var.
* The type var is guaranteed to be a reference type in this
* case.
*/
if (!mini_is_gsharedvt_klass (constrained_class))
g_assert (!m_class_is_valuetype (cmethod->klass));
} else {
cmethod = mono_get_method_constrained_checked (image, token, constrained_class, generic_context, &cil_method, cfg->error);
CHECK_CFG_ERROR;
}
}
return cmethod;
mono_error_exit:
return NULL;
}
static gboolean
method_does_not_return (MonoMethod *method)
{
// FIXME: Under netcore, these are decorated with the [DoesNotReturn] attribute
return m_class_get_image (method->klass) == mono_defaults.corlib &&
!strcmp (m_class_get_name (method->klass), "ThrowHelper") &&
strstr (method->name, "Throw") == method->name &&
!method->is_inflated;
}
static int inline_limit, llvm_jit_inline_limit, llvm_aot_inline_limit;
static gboolean inline_limit_inited;
static gboolean
mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
{
MonoMethodHeaderSummary header;
MonoVTable *vtable;
int limit;
#ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
MonoMethodSignature *sig = mono_method_signature_internal (method);
int i;
#endif
if (cfg->disable_inline)
return FALSE;
if (cfg->gsharedvt)
return FALSE;
if (cfg->inline_depth > 10)
return FALSE;
if (!mono_method_get_header_summary (method, &header))
return FALSE;
/* runtime, icall and pinvoke are checked by the summary call */
if ((method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
(method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
header.has_clauses)
return FALSE;
if (method->flags & METHOD_ATTRIBUTE_REQSECOBJ)
/* Used to mark methods containing StackCrawlMark locals */
return FALSE;
/* also consider num_locals? */
/* Do the size check early to avoid creating vtables */
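/*
 * The limits are backend-specific: separate values exist for the mini
 * JIT, the LLVM JIT and LLVM AOT. Setting MONO_INLINELIMIT overrides
 * all three with a single value.
 */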
if (!inline_limit_inited) {
char *inlinelimit;
if ((inlinelimit = g_getenv ("MONO_INLINELIMIT"))) {
inline_limit = atoi (inlinelimit);
llvm_jit_inline_limit = inline_limit;
llvm_aot_inline_limit = inline_limit;
g_free (inlinelimit);
} else {
inline_limit = INLINE_LENGTH_LIMIT;
llvm_jit_inline_limit = LLVM_JIT_INLINE_LENGTH_LIMIT;
llvm_aot_inline_limit = LLVM_AOT_INLINE_LENGTH_LIMIT;
}
inline_limit_inited = TRUE;
}
if (COMPILE_LLVM (cfg)) {
if (cfg->compile_aot)
limit = llvm_aot_inline_limit;
else
limit = llvm_jit_inline_limit;
} else {
limit = inline_limit;
}
if (header.code_size >= limit && !(method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))
return FALSE;
/*
* if we can initialize the class of the method right away, we do,
* otherwise we don't allow inlining if the class needs initialization,
* since it would mean inserting a call to mono_runtime_class_init()
* inside the inlined code
*/
if (cfg->gshared && m_class_has_cctor (method->klass) && mini_class_check_context_used (cfg, method->klass))
return FALSE;
{
/* The AggressiveInlining hint is a good excuse to force that cctor to run. */
if ((cfg->opt & MONO_OPT_AGGRESSIVE_INLINING) || method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING) {
if (m_class_has_cctor (method->klass)) {
ERROR_DECL (error);
vtable = mono_class_vtable_checked (method->klass, error);
if (!is_ok (error)) {
mono_error_cleanup (error);
return FALSE;
}
if (!cfg->compile_aot) {
if (!mono_runtime_class_init_full (vtable, error)) {
mono_error_cleanup (error);
return FALSE;
}
}
}
} else if (mono_class_is_before_field_init (method->klass)) {
if (cfg->run_cctors && m_class_has_cctor (method->klass)) {
ERROR_DECL (error);
/* FIXME: it would be easier and lazier to just use mono_class_try_get_vtable */
if (!m_class_get_runtime_vtable (method->klass))
/* No vtable created yet */
return FALSE;
vtable = mono_class_vtable_checked (method->klass, error);
if (!is_ok (error)) {
mono_error_cleanup (error);
return FALSE;
}
/* This makes it so that inlining cannot trigger */
/* .cctors: too many apps depend on them */
/* running with a specific order... */
if (! vtable->initialized)
return FALSE;
if (!mono_runtime_class_init_full (vtable, error)) {
mono_error_cleanup (error);
return FALSE;
}
}
} else if (mono_class_needs_cctor_run (method->klass, NULL)) {
ERROR_DECL (error);
if (!m_class_get_runtime_vtable (method->klass))
/* No vtable created yet */
return FALSE;
vtable = mono_class_vtable_checked (method->klass, error);
if (!is_ok (error)) {
mono_error_cleanup (error);
return FALSE;
}
if (!vtable->initialized)
return FALSE;
}
}
#ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
if (mono_arch_is_soft_float ()) {
/* FIXME: */
if (sig->ret && sig->ret->type == MONO_TYPE_R4)
return FALSE;
for (i = 0; i < sig->param_count; ++i)
if (!m_type_is_byref (sig->params [i]) && sig->params [i]->type == MONO_TYPE_R4)
return FALSE;
}
#endif
if (g_list_find (cfg->dont_inline, method))
return FALSE;
if (mono_profiler_get_call_instrumentation_flags (method))
return FALSE;
if (mono_profiler_coverage_instrumentation_enabled (method))
return FALSE;
if (method_does_not_return (method))
return FALSE;
return TRUE;
}
static gboolean
mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoClass *klass, MonoVTable *vtable)
{
if (!cfg->compile_aot) {
g_assert (vtable);
if (vtable->initialized)
return FALSE;
}
if (mono_class_is_before_field_init (klass)) {
if (cfg->method == method)
return FALSE;
}
if (!mono_class_needs_cctor_run (klass, method))
return FALSE;
if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (klass == method->klass))
/* The initialization is already done before the method is called */
return FALSE;
return TRUE;
}
int
mini_emit_sext_index_reg (MonoCompile *cfg, MonoInst *index)
{
int index_reg = index->dreg;
int index2_reg;
#if SIZEOF_REGISTER == 8
/* The array reg is 64 bits but the index reg is only 32 */
if (COMPILE_LLVM (cfg)) {
/*
* abcrem can't handle the OP_SEXT_I4, so add this after abcrem,
* during OP_BOUNDS_CHECK decomposition, and in the implementation
* of OP_X86_LEA for llvm.
*/
index2_reg = index_reg;
} else {
index2_reg = alloc_preg (cfg);
MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
}
#else
if (index->type == STACK_I8) {
index2_reg = alloc_preg (cfg);
MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
} else {
index2_reg = index_reg;
}
#endif
return index2_reg;
}
MonoInst*
mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index, gboolean bcheck, gboolean bounded)
{
MonoInst *ins;
guint32 size;
int mult_reg, add_reg, array_reg, index2_reg, bounds_reg, lower_bound_reg, realidx2_reg;
int context_used;
if (mini_is_gsharedvt_variable_klass (klass)) {
size = -1;
} else {
mono_class_init_internal (klass);
size = mono_class_array_element_size (klass);
}
mult_reg = alloc_preg (cfg);
array_reg = arr->dreg;
realidx2_reg = index2_reg = mini_emit_sext_index_reg (cfg, index);
if (bounded) {
bounds_reg = alloc_preg (cfg);
lower_bound_reg = alloc_preg (cfg);
realidx2_reg = alloc_preg (cfg);
MonoBasicBlock *is_null_bb = NULL;
NEW_BBLOCK (cfg, is_null_bb);
// gint32 lower_bound = 0;
// if (arr->bounds)
// lower_bound = arr->bounds [0].lower_bound;
// realidx2 = index2 - lower_bound;
MONO_EMIT_NEW_PCONST (cfg, lower_bound_reg, NULL);
MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, arr->dreg, MONO_STRUCT_OFFSET (MonoArray, bounds));
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, lower_bound_reg, bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
MONO_START_BB (cfg, is_null_bb);
MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2_reg, lower_bound_reg);
}
if (bcheck)
MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, realidx2_reg);
#if defined(TARGET_X86) || defined(TARGET_AMD64)
if (size == 1 || size == 2 || size == 4 || size == 8) {
static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
EMIT_NEW_X86_LEA (cfg, ins, array_reg, realidx2_reg, fast_log2 [size], MONO_STRUCT_OFFSET (MonoArray, vector));
ins->klass = klass;
ins->type = STACK_MP;
return ins;
}
#endif
add_reg = alloc_ireg_mp (cfg);
if (size == -1) {
MonoInst *rgctx_ins;
/* gsharedvt */
g_assert (cfg->gshared);
context_used = mini_class_check_context_used (cfg, klass);
g_assert (context_used);
rgctx_ins = mini_emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_ARRAY_ELEMENT_SIZE);
MONO_EMIT_NEW_BIALU (cfg, OP_IMUL, mult_reg, realidx2_reg, rgctx_ins->dreg);
} else {
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, realidx2_reg, size);
}
MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, MONO_STRUCT_OFFSET (MonoArray, vector));
ins->klass = klass;
ins->type = STACK_MP;
MONO_ADD_INS (cfg->cbb, ins);
return ins;
}
static MonoInst*
mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
{
int bounds_reg = alloc_preg (cfg);
int add_reg = alloc_ireg_mp (cfg);
int mult_reg = alloc_preg (cfg);
int mult2_reg = alloc_preg (cfg);
int low1_reg = alloc_preg (cfg);
int low2_reg = alloc_preg (cfg);
int high1_reg = alloc_preg (cfg);
int high2_reg = alloc_preg (cfg);
int realidx1_reg = alloc_preg (cfg);
int realidx2_reg = alloc_preg (cfg);
int sum_reg = alloc_preg (cfg);
int index1, index2;
MonoInst *ins;
guint32 size;
mono_class_init_internal (klass);
size = mono_class_array_element_size (klass);
index1 = index_ins1->dreg;
index2 = index_ins2->dreg;
#if SIZEOF_REGISTER == 8
/* The array reg is 64 bits but the index reg is only 32 */
if (COMPILE_LLVM (cfg)) {
/* Not needed */
} else {
int tmpreg = alloc_preg (cfg);
MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index1);
index1 = tmpreg;
tmpreg = alloc_preg (cfg);
MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index2);
index2 = tmpreg;
}
#else
// FIXME: Do we need to do something here for i8 indexes, like in ldelema_1_ins ?
#endif
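/*
 * Address computation for a rank-2 array (a sketch):
 *
 *   addr = &arr->vector [((idx1 - lo1) * len2 + (idx2 - lo2)) * size]
 *
 * where each normalized index is bounds-checked against the
 * corresponding MonoArrayBounds entry first.
 */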
/* range checking */
MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
arr->dreg, MONO_STRUCT_OFFSET (MonoArray, bounds));
MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, length));
MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
bounds_reg, sizeof (MonoArrayBounds) + MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
bounds_reg, sizeof (MonoArrayBounds) + MONO_STRUCT_OFFSET (MonoArrayBounds, length));
MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, MONO_STRUCT_OFFSET (MonoArray, vector));
ins->type = STACK_MP;
ins->klass = klass;
MONO_ADD_INS (cfg->cbb, ins);
return ins;
}
static MonoInst*
mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, guchar *ip, gboolean is_set)
{
int rank;
MonoInst *addr;
MonoMethod *addr_method;
int element_size;
MonoClass *eclass = m_class_get_element_class (cmethod->klass);
gboolean bounded = m_class_get_byval_arg (cmethod->klass) ? m_class_get_byval_arg (cmethod->klass)->type == MONO_TYPE_ARRAY : FALSE;
rank = mono_method_signature_internal (cmethod)->param_count - (is_set? 1: 0);
if (rank == 1)
return mini_emit_ldelema_1_ins (cfg, eclass, sp [0], sp [1], TRUE, bounded);
/* emit_ldelema_2 depends on OP_LMUL */
if (!cfg->backend->emulate_mul_div && rank == 2 && (cfg->opt & MONO_OPT_INTRINS) && !mini_is_gsharedvt_variable_klass (eclass)) {
return mini_emit_ldelema_2_ins (cfg, eclass, sp [0], sp [1], sp [2]);
}
if (mini_is_gsharedvt_variable_klass (eclass))
element_size = 0;
else
element_size = mono_class_array_element_size (eclass);
addr_method = mono_marshal_get_array_address (rank, element_size);
addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
return addr;
}
static gboolean
mini_class_is_reference (MonoClass *klass)
{
return mini_type_is_reference (m_class_get_byval_arg (klass));
}
MonoInst*
mini_emit_array_store (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, gboolean safety_checks)
{
if (safety_checks && mini_class_is_reference (klass) &&
!(MONO_INS_IS_PCONST_NULL (sp [2]))) {
MonoClass *obj_array = mono_array_class_get_cached (mono_defaults.object_class);
MonoMethod *helper;
MonoInst *iargs [3];
if (sp [0]->type != STACK_OBJ)
return NULL;
if (sp [2]->type != STACK_OBJ)
return NULL;
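/*
 * Storing a reference into an array needs a covariance check, so the
 * store goes through a virtual stelemref marshal helper; when the
 * element class is sealed the exact helper can be called non-virtually.
 */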
iargs [2] = sp [2];
iargs [1] = sp [1];
iargs [0] = sp [0];
MonoClass *array_class = sp [0]->klass;
if (array_class && m_class_get_rank (array_class) == 1) {
MonoClass *eclass = m_class_get_element_class (array_class);
if (m_class_is_sealed (eclass)) {
helper = mono_marshal_get_virtual_stelemref (array_class);
/* Make a non-virtual call if possible */
return mono_emit_method_call (cfg, helper, iargs, NULL);
}
}
helper = mono_marshal_get_virtual_stelemref (obj_array);
if (!helper->slot)
mono_class_setup_vtable (obj_array);
g_assert (helper->slot);
return mono_emit_method_call (cfg, helper, iargs, sp [0]);
} else {
MonoInst *ins;
if (mini_is_gsharedvt_variable_klass (klass)) {
MonoInst *addr;
// FIXME-VT: OP_ICONST optimization
addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE, FALSE);
EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, m_class_get_byval_arg (klass), addr->dreg, 0, sp [2]->dreg);
ins->opcode = OP_STOREV_MEMBASE;
} else if (sp [1]->opcode == OP_ICONST) {
int array_reg = sp [0]->dreg;
int index_reg = sp [1]->dreg;
int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + MONO_STRUCT_OFFSET (MonoArray, vector);
if (SIZEOF_REGISTER == 8 && COMPILE_LLVM (cfg) && sp [1]->inst_c0 < 0)
MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, index_reg, index_reg);
if (safety_checks)
MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, m_class_get_byval_arg (klass), array_reg, offset, sp [2]->dreg);
} else {
MonoInst *addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], safety_checks, FALSE);
if (!mini_debug_options.weak_memory_model && mini_class_is_reference (klass))
mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, m_class_get_byval_arg (klass), addr->dreg, 0, sp [2]->dreg);
if (mini_class_is_reference (klass))
mini_emit_write_barrier (cfg, addr, sp [2]);
}
return ins;
}
}
MonoInst*
mini_emit_memory_barrier (MonoCompile *cfg, int kind)
{
MonoInst *ins = NULL;
MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
MONO_ADD_INS (cfg->cbb, ins);
ins->backend.memory_barrier_kind = kind;
return ins;
}
/*
* This entry point could be used later for arbitrary method
* redirection.
*/
inline static MonoInst*
mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
MonoMethodSignature *signature, MonoInst **args, MonoInst *this_ins)
{
if (method->klass == mono_defaults.string_class) {
/* managed string allocation support */
if (strcmp (method->name, "FastAllocateString") == 0) {
MonoInst *iargs [2];
MonoVTable *vtable = mono_class_vtable_checked (method->klass, cfg->error);
MonoMethod *managed_alloc = NULL;
mono_error_assert_ok (cfg->error); /* Should not fail since it's System.String */
#ifndef MONO_CROSS_COMPILE
managed_alloc = mono_gc_get_managed_allocator (method->klass, FALSE, FALSE);
#endif
if (!managed_alloc)
return NULL;
EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
iargs [1] = args [0];
return mono_emit_method_call (cfg, managed_alloc, iargs, this_ins);
}
}
return NULL;
}
static void
mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
{
MonoInst *store, *temp;
int i;
for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
/*
* FIXME: We should use *args++ = sp [0], but that would mean the arg
* would be different from the MonoInsts used to represent arguments, and
* the ldelema implementation can't deal with that.
* Solution: When ldelema is used on an inline argument, create a var for
* it, emit ldelema on that var, and emit the saving code below in
* inline_method () if needed.
*/
temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
cfg->args [i] = temp;
/* This uses cfg->args [i] which is set by the preceding line */
EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
store->cil_code = sp [0]->cil_code;
sp++;
}
}
#define MONO_INLINE_CALLED_LIMITED_METHODS 1
#define MONO_INLINE_CALLER_LIMITED_METHODS 1
#if (MONO_INLINE_CALLED_LIMITED_METHODS)
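/*
 * check_inline_called_method_name_limit:
 *
 * Return TRUE if CALLED_METHOD's full name starts with the value of the
 * MONO_INLINE_CALLED_METHOD_NAME_LIMIT env var, or if the var is unset or
 * empty. For example, MONO_INLINE_CALLED_METHOD_NAME_LIMIT=System.Collections
 * restricts inlining to callees in that namespace, which is useful when
 * debugging the inliner.
 */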
static gboolean
check_inline_called_method_name_limit (MonoMethod *called_method)
{
int strncmp_result;
static const char *limit = NULL;
if (limit == NULL) {
const char *limit_string = g_getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
if (limit_string != NULL)
limit = limit_string;
else
limit = "";
}
if (limit [0] != '\0') {
char *called_method_name = mono_method_full_name (called_method, TRUE);
strncmp_result = strncmp (called_method_name, limit, strlen (limit));
g_free (called_method_name);
//return (strncmp_result <= 0);
return (strncmp_result == 0);
} else {
return TRUE;
}
}
#endif
#if (MONO_INLINE_CALLER_LIMITED_METHODS)
static gboolean
check_inline_caller_method_name_limit (MonoMethod *caller_method)
{
int strncmp_result;
static const char *limit = NULL;
if (limit == NULL) {
const char *limit_string = g_getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
if (limit_string != NULL) {
limit = limit_string;
} else {
limit = "";
}
}
if (limit [0] != '\0') {
char *caller_method_name = mono_method_full_name (caller_method, TRUE);
strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
g_free (caller_method_name);
//return (strncmp_result <= 0);
return (strncmp_result == 0);
} else {
return TRUE;
}
}
#endif
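/*
 * mini_emit_init_rvar:
 *
 * Emit IR to initialize the register DREG to the zero value of RTYPE.
 */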
void
mini_emit_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
{
static double r8_0 = 0.0;
static float r4_0 = 0.0;
MonoInst *ins;
int t;
rtype = mini_get_underlying_type (rtype);
t = rtype->type;
if (m_type_is_byref (rtype)) {
MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
} else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
} else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
MONO_EMIT_NEW_I8CONST (cfg, dreg, 0);
} else if (t == MONO_TYPE_R4) {
MONO_INST_NEW (cfg, ins, OP_R4CONST);
ins->type = STACK_R4;
ins->inst_p0 = (void*)&r4_0;
ins->dreg = dreg;
MONO_ADD_INS (cfg->cbb, ins);
} else if (t == MONO_TYPE_R8) {
MONO_INST_NEW (cfg, ins, OP_R8CONST);
ins->type = STACK_R8;
ins->inst_p0 = (void*)&r8_0;
ins->dreg = dreg;
MONO_ADD_INS (cfg->cbb, ins);
} else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type_internal (rtype));
} else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (rtype)) {
MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type_internal (rtype));
} else {
MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
}
}
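/*
 * emit_dummy_init_rvar:
 *
 * Emit a dummy initialization of DREG which keeps the IR valid without
 * generating real initialization code, falling back to a real
 * initialization for types which have no dummy opcode.
 */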
static void
emit_dummy_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
{
int t;
rtype = mini_get_underlying_type (rtype);
t = rtype->type;
if (m_type_is_byref (rtype)) {
MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_PCONST);
} else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_ICONST);
} else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_I8CONST);
} else if (t == MONO_TYPE_R4) {
MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_R4CONST);
} else if (t == MONO_TYPE_R8) {
MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_R8CONST);
} else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
} else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (rtype)) {
MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
} else {
mini_emit_init_rvar (cfg, dreg, rtype);
}
}
/* If INIT is FALSE, emit dummy initialization statements to keep the IR valid */
static void
emit_init_local (MonoCompile *cfg, int local, MonoType *type, gboolean init)
{
MonoInst *var = cfg->locals [local];
if (COMPILE_SOFT_FLOAT (cfg)) {
MonoInst *store;
int reg = alloc_dreg (cfg, (MonoStackType)var->type);
mini_emit_init_rvar (cfg, reg, type);
EMIT_NEW_LOCSTORE (cfg, store, local, cfg->cbb->last_ins);
} else {
if (init)
mini_emit_init_rvar (cfg, var->dreg, type);
else
emit_dummy_init_rvar (cfg, var->dreg, type);
}
}
int
mini_inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp, guchar *ip, guint real_offset, gboolean inline_always)
{
return inline_method (cfg, cmethod, fsig, sp, ip, real_offset, inline_always, NULL);
}
/*
* inline_method:
*
* Return the cost of inlining CMETHOD, or zero if it should not be inlined.
*/
static int
inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
guchar *ip, guint real_offset, gboolean inline_always, gboolean *is_empty)
{
ERROR_DECL (error);
MonoInst *ins, *rvar = NULL;
MonoMethodHeader *cheader;
MonoBasicBlock *ebblock, *sbblock;
int i, costs;
MonoInst **prev_locals, **prev_args;
MonoType **prev_arg_types;
guint prev_real_offset;
GHashTable *prev_cbb_hash;
MonoBasicBlock **prev_cil_offset_to_bb;
MonoBasicBlock *prev_cbb;
const guchar *prev_ip;
guchar *prev_cil_start;
guint32 prev_cil_offset_to_bb_len;
MonoMethod *prev_current_method;
MonoGenericContext *prev_generic_context;
gboolean ret_var_set, prev_ret_var_set, prev_disable_inline, virtual_ = FALSE;
g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
#if (MONO_INLINE_CALLED_LIMITED_METHODS)
if ((! inline_always) && ! check_inline_called_method_name_limit (cmethod))
return 0;
#endif
#if (MONO_INLINE_CALLER_LIMITED_METHODS)
if ((! inline_always) && ! check_inline_caller_method_name_limit (cfg->method))
return 0;
#endif
if (!fsig)
fsig = mono_method_signature_internal (cmethod);
if (cfg->verbose_level > 2)
printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
if (!cmethod->inline_info) {
cfg->stat_inlineable_methods++;
cmethod->inline_info = 1;
}
if (is_empty)
*is_empty = FALSE;
/* allocate local variables */
cheader = mono_method_get_header_checked (cmethod, error);
if (!cheader) {
if (inline_always) {
mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
mono_error_move (cfg->error, error);
} else {
mono_error_cleanup (error);
}
return 0;
}
if (is_empty && cheader->code_size == 1 && cheader->code [0] == CEE_RET)
*is_empty = TRUE;
/* allocate space to store the return value */
if (!MONO_TYPE_IS_VOID (fsig->ret)) {
rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
}
prev_locals = cfg->locals;
cfg->locals = (MonoInst **)mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
for (i = 0; i < cheader->num_locals; ++i)
cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
/* allocate start and end blocks */
/* This is needed so that if the inline is aborted, we can clean up */
NEW_BBLOCK (cfg, sbblock);
sbblock->real_offset = real_offset;
NEW_BBLOCK (cfg, ebblock);
ebblock->block_num = cfg->num_bblocks++;
ebblock->real_offset = real_offset;
prev_args = cfg->args;
prev_arg_types = cfg->arg_types;
prev_ret_var_set = cfg->ret_var_set;
prev_real_offset = cfg->real_offset;
prev_cbb_hash = cfg->cbb_hash;
prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
prev_cil_start = cfg->cil_start;
prev_ip = cfg->ip;
prev_cbb = cfg->cbb;
prev_current_method = cfg->current_method;
prev_generic_context = cfg->generic_context;
prev_disable_inline = cfg->disable_inline;
cfg->ret_var_set = FALSE;
cfg->inline_depth ++;
if (ip && *ip == CEE_CALLVIRT && !(cmethod->flags & METHOD_ATTRIBUTE_STATIC))
virtual_ = TRUE;
costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, sp, real_offset, virtual_);
ret_var_set = cfg->ret_var_set;
cfg->real_offset = prev_real_offset;
cfg->cbb_hash = prev_cbb_hash;
cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
cfg->cil_start = prev_cil_start;
cfg->ip = prev_ip;
cfg->locals = prev_locals;
cfg->args = prev_args;
cfg->arg_types = prev_arg_types;
cfg->current_method = prev_current_method;
cfg->generic_context = prev_generic_context;
cfg->ret_var_set = prev_ret_var_set;
cfg->disable_inline = prev_disable_inline;
cfg->inline_depth --;
if ((costs >= 0 && costs < 60) || inline_always || (costs >= 0 && (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))) {
if (cfg->verbose_level > 2)
printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
mono_error_assert_ok (cfg->error);
cfg->stat_inlined_methods++;
/* always add some code to avoid block split failures */
MONO_INST_NEW (cfg, ins, OP_NOP);
MONO_ADD_INS (prev_cbb, ins);
prev_cbb->next_bb = sbblock;
link_bblock (cfg, prev_cbb, sbblock);
/*
* Get rid of the begin and end bblocks if possible to aid local
* optimizations.
*/
if (prev_cbb->out_count == 1)
mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
MonoBasicBlock *prev = ebblock->in_bb [0];
if (prev->next_bb == ebblock) {
mono_merge_basic_blocks (cfg, prev, ebblock);
cfg->cbb = prev;
if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
mono_merge_basic_blocks (cfg, prev_cbb, prev);
cfg->cbb = prev_cbb;
}
} else {
/* There could be a bblock after 'prev', and making 'prev' the current bb could cause problems */
cfg->cbb = ebblock;
}
} else {
/*
* It's possible that the rvar is set in some prev bblock, but not in others.
* (#1835).
*/
if (rvar) {
MonoBasicBlock *bb;
for (i = 0; i < ebblock->in_count; ++i) {
bb = ebblock->in_bb [i];
if (bb->last_ins && bb->last_ins->opcode == OP_NOT_REACHED) {
cfg->cbb = bb;
mini_emit_init_rvar (cfg, rvar->dreg, fsig->ret);
}
}
}
cfg->cbb = ebblock;
}
if (rvar) {
/*
* If the inlined method contains only a throw, then the ret var is not
* set, so set it to a dummy value.
*/
if (!ret_var_set)
mini_emit_init_rvar (cfg, rvar->dreg, fsig->ret);
EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
*sp++ = ins;
}
cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
return costs + 1;
} else {
if (cfg->verbose_level > 2) {
const char *msg = mono_error_get_message (cfg->error);
printf ("INLINE ABORTED %s (cost %d) %s\n", mono_method_full_name (cmethod, TRUE), costs, msg ? msg : "");
}
cfg->exception_type = MONO_EXCEPTION_NONE;
clear_cfg_error (cfg);
/* This gets rid of the newly added bblocks */
cfg->cbb = prev_cbb;
}
cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
return 0;
}
/*
* Some of these comments may well be out-of-date.
* Design decisions: we do a single pass over the IL code (and we do bblock
* splitting/merging in the few cases when it's required: a back jump to an IL
* address that was not already seen as a bblock starting point).
* Code is validated as we go (full verification is still better left to metadata/verify.c).
* Complex operations are decomposed into simpler ones right away. We need to let the
* arch-specific code peek and poke inside this process somehow (except when the
* optimizations can take advantage of the full semantic info of coarse opcodes).
* All the opcodes of the form opcode.s are 'normalized' to opcode.
* MonoInst->opcode initially is the IL opcode or some simplification of that
* (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
* opcode with value bigger than OP_LAST.
* At this point the IR can be handed over to an interpreter, a dumb code generator
* or to the optimizing code generator that will translate it to SSA form.
*
* Profiling directed optimizations.
* We may compile by default with few or no optimizations and instrument the code,
* or the user may indicate what methods to optimize the most either in a config file
* or through repeated runs where the compiler applies offline the optimizations to
* each method and then decides if it was worth it.
*/
#define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
#define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
#define CHECK_STACK_OVF() if (((sp - stack_start) + 1) > header->max_stack) UNVERIFIED
#define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
#define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
#define CHECK_OPSIZE(size) if ((size) < 1 || ip + (size) > end) UNVERIFIED
#define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
#define CHECK_TYPELOAD(klass) if (!(klass) || mono_class_has_failure (klass)) TYPE_LOAD_ERROR ((klass))
/* offset from br.s -> br like opcodes */
#define BIG_BRANCH_OFFSET 13
static gboolean
ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
{
MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
return b == NULL || b == bb;
}
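/*
 * get_basic_blocks:
 *
 * Walk the IL code between START and END, creating bblocks at branch targets
 * and at the instructions following branches, and marking bblocks containing
 * a throw as out of line. Return 0 on success, or 1 on invalid IL, storing
 * the offending ip into POS.
 */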
static int
get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, guchar *start, guchar *end, guchar **pos)
{
guchar *ip = start;
guchar *target;
int i;
guint cli_addr;
MonoBasicBlock *bblock;
const MonoOpcode *opcode;
while (ip < end) {
cli_addr = ip - start;
i = mono_opcode_value ((const guint8 **)&ip, end);
if (i < 0)
UNVERIFIED;
opcode = &mono_opcodes [i];
switch (opcode->argument) {
case MonoInlineNone:
ip++;
break;
case MonoInlineString:
case MonoInlineType:
case MonoInlineField:
case MonoInlineMethod:
case MonoInlineTok:
case MonoInlineSig:
case MonoShortInlineR:
case MonoInlineI:
ip += 5;
break;
case MonoInlineVar:
ip += 3;
break;
case MonoShortInlineVar:
case MonoShortInlineI:
ip += 2;
break;
case MonoShortInlineBrTarget:
target = start + cli_addr + 2 + (signed char)ip [1];
GET_BBLOCK (cfg, bblock, target);
ip += 2;
if (ip < end)
GET_BBLOCK (cfg, bblock, ip);
break;
case MonoInlineBrTarget:
target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
GET_BBLOCK (cfg, bblock, target);
ip += 5;
if (ip < end)
GET_BBLOCK (cfg, bblock, ip);
break;
case MonoInlineSwitch: {
guint32 n = read32 (ip + 1);
guint32 j;
ip += 5;
cli_addr += 5 + 4 * n;
target = start + cli_addr;
GET_BBLOCK (cfg, bblock, target);
for (j = 0; j < n; ++j) {
target = start + cli_addr + (gint32)read32 (ip);
GET_BBLOCK (cfg, bblock, target);
ip += 4;
}
break;
}
case MonoInlineR:
case MonoInlineI8:
ip += 9;
break;
default:
g_assert_not_reached ();
}
if (i == CEE_THROW) {
guchar *bb_start = ip - 1;
/* Find the start of the bblock containing the throw */
bblock = NULL;
while ((bb_start >= start) && !bblock) {
bblock = cfg->cil_offset_to_bb [(bb_start) - start];
bb_start --;
}
if (bblock)
bblock->out_of_line = 1;
}
}
return 0;
unverified:
exception_exit:
*pos = ip;
return 1;
}
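/*
 * mini_get_method_allow_open:
 *
 * Resolve the method referenced by TOKEN, looking it up in M's wrapper data
 * if M is a wrapper, and inflating it with CONTEXT if needed. Open
 * constructed types are allowed in the result.
 */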
static MonoMethod *
mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context, MonoError *error)
{
MonoMethod *method;
error_init (error);
if (m->wrapper_type != MONO_WRAPPER_NONE) {
method = (MonoMethod *)mono_method_get_wrapper_data (m, token);
if (context) {
method = mono_class_inflate_generic_method_checked (method, context, error);
}
} else {
method = mono_get_method_checked (m_class_get_image (m->klass), token, klass, context, error);
}
return method;
}
static MonoMethod *
mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
{
ERROR_DECL (error);
MonoMethod *method = mini_get_method_allow_open (m, token, klass, context, cfg ? cfg->error : error);
if (method && cfg && !cfg->gshared && mono_class_is_open_constructed_type (m_class_get_byval_arg (method->klass))) {
mono_error_set_bad_image (cfg->error, m_class_get_image (cfg->method->klass), "Method with open type while not compiling gshared");
method = NULL;
}
if (!method && !cfg)
mono_error_cleanup (error); /* FIXME don't swallow the error */
return method;
}
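/*
 * mini_get_signature:
 *
 * Return the signature referenced by TOKEN, reading it from METHOD's wrapper
 * data if METHOD is a wrapper, inflated with CONTEXT if needed.
 */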
static MonoMethodSignature*
mini_get_signature (MonoMethod *method, guint32 token, MonoGenericContext *context, MonoError *error)
{
MonoMethodSignature *fsig;
error_init (error);
if (method->wrapper_type != MONO_WRAPPER_NONE) {
fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
} else {
fsig = mono_metadata_parse_signature_checked (m_class_get_image (method->klass), token, error);
return_val_if_nok (error, NULL);
}
if (context) {
fsig = mono_inflate_generic_signature(fsig, context, error);
}
return fsig;
}
/*
* Return the original method if a wrapper is specified. We can only access
* the custom attributes from the original method.
*/
static MonoMethod*
get_original_method (MonoMethod *method)
{
if (method->wrapper_type == MONO_WRAPPER_NONE)
return method;
/* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
return NULL;
/* in other cases we need to find the original method */
return mono_marshal_method_from_wrapper (method);
}
static guchar*
il_read_op (guchar *ip, guchar *end, guchar first_byte, MonoOpcodeEnum desired_il_op)
// If ip is desired_il_op, return the next ip, else NULL.
{
if (G_LIKELY (ip < end) && G_UNLIKELY (*ip == first_byte)) {
MonoOpcodeEnum il_op = MonoOpcodeEnum_Invalid;
// mono_opcode_value_and_size updates ip, but not in the expected way.
const guchar *temp_ip = ip;
const int size = mono_opcode_value_and_size (&temp_ip, end, &il_op);
return (G_LIKELY (size > 0) && G_UNLIKELY (il_op == desired_il_op)) ? (ip + size) : NULL;
}
return NULL;
}
static guchar*
il_read_op_and_token (guchar *ip, guchar *end, guchar first_byte, MonoOpcodeEnum desired_il_op, guint32 *token)
{
ip = il_read_op (ip, end, first_byte, desired_il_op);
if (ip)
*token = read32 (ip - 4); // could be +1 or +2 from start
return ip;
}
static guchar*
il_read_branch_and_target (guchar *ip, guchar *end, guchar first_byte, MonoOpcodeEnum desired_il_op, int size, guchar **target)
{
ip = il_read_op (ip, end, first_byte, desired_il_op);
if (ip) {
gint32 delta = 0;
switch (size) {
case 1:
delta = (signed char)ip [-1];
break;
case 4:
delta = (gint32)read32 (ip - 4);
break;
}
// FIXME verify it is within the function and start of an instruction.
*target = ip + delta;
return ip;
}
return NULL;
}
#define il_read_brtrue(ip, end, target) (il_read_branch_and_target (ip, end, CEE_BRTRUE, MONO_CEE_BRTRUE, 4, target))
#define il_read_brtrue_s(ip, end, target) (il_read_branch_and_target (ip, end, CEE_BRTRUE_S, MONO_CEE_BRTRUE_S, 1, target))
#define il_read_brfalse(ip, end, target) (il_read_branch_and_target (ip, end, CEE_BRFALSE, MONO_CEE_BRFALSE, 4, target))
#define il_read_brfalse_s(ip, end, target) (il_read_branch_and_target (ip, end, CEE_BRFALSE_S, MONO_CEE_BRFALSE_S, 1, target))
#define il_read_dup(ip, end) (il_read_op (ip, end, CEE_DUP, MONO_CEE_DUP))
#define il_read_newobj(ip, end, token) (il_read_op_and_token (ip, end, CEE_NEW_OBJ, MONO_CEE_NEWOBJ, token))
#define il_read_ldtoken(ip, end, token) (il_read_op_and_token (ip, end, CEE_LDTOKEN, MONO_CEE_LDTOKEN, token))
#define il_read_call(ip, end, token) (il_read_op_and_token (ip, end, CEE_CALL, MONO_CEE_CALL, token))
#define il_read_callvirt(ip, end, token) (il_read_op_and_token (ip, end, CEE_CALLVIRT, MONO_CEE_CALLVIRT, token))
#define il_read_initobj(ip, end, token) (il_read_op_and_token (ip, end, CEE_PREFIX1, MONO_CEE_INITOBJ, token))
#define il_read_constrained(ip, end, token) (il_read_op_and_token (ip, end, CEE_PREFIX1, MONO_CEE_CONSTRAINED_, token))
#define il_read_unbox_any(ip, end, token) (il_read_op_and_token (ip, end, CEE_UNBOX_ANY, MONO_CEE_UNBOX_ANY, token))
/*
* Check that the IL instructions at ip are the array initialization
* sequence and return the pointer to the data and the size.
*/
static const char*
initialize_array_data (MonoCompile *cfg, MonoMethod *method, gboolean aot, guchar *ip,
guchar *end, MonoClass *klass, guint32 len, int *out_size,
guint32 *out_field_token, MonoOpcodeEnum *il_op, guchar **next_ip)
{
/*
* newarr[System.Int32]
* dup
* ldtoken field valuetype ...
* call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
*/
guint32 token;
guint32 field_token;
if ((ip = il_read_dup (ip, end))
&& ip_in_bb (cfg, cfg->cbb, ip)
&& (ip = il_read_ldtoken (ip, end, &field_token))
&& IS_FIELD_DEF (field_token)
&& ip_in_bb (cfg, cfg->cbb, ip)
&& (ip = il_read_call (ip, end, &token))) {
ERROR_DECL (error);
guint32 rva;
const char *data_ptr;
int size = 0;
MonoMethod *cmethod;
MonoClass *dummy_class;
MonoClassField *field = mono_field_from_token_checked (m_class_get_image (method->klass), field_token, &dummy_class, NULL, error);
int dummy_align;
if (!field) {
mono_error_cleanup (error); /* FIXME don't swallow the error */
return NULL;
}
*out_field_token = field_token;
cmethod = mini_get_method (NULL, method, token, NULL, NULL);
if (!cmethod)
return NULL;
if (strcmp (cmethod->name, "InitializeArray") || strcmp (m_class_get_name (cmethod->klass), "RuntimeHelpers") || m_class_get_image (cmethod->klass) != mono_defaults.corlib)
return NULL;
switch (mini_get_underlying_type (m_class_get_byval_arg (klass))->type) {
case MONO_TYPE_I1:
case MONO_TYPE_U1:
size = 1; break;
/* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
#if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
case MONO_TYPE_I2:
case MONO_TYPE_U2:
size = 2; break;
case MONO_TYPE_I4:
case MONO_TYPE_U4:
case MONO_TYPE_R4:
size = 4; break;
case MONO_TYPE_R8:
case MONO_TYPE_I8:
case MONO_TYPE_U8:
size = 8; break;
#endif
default:
return NULL;
}
size *= len;
if (size > mono_type_size (field->type, &dummy_align))
return NULL;
*out_size = size;
/*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
MonoImage *method_klass_image = m_class_get_image (method->klass);
if (!image_is_dynamic (method_klass_image)) {
guint32 field_index = mono_metadata_token_index (field_token);
mono_metadata_field_info (method_klass_image, field_index - 1, NULL, &rva, NULL);
data_ptr = mono_image_rva_map (method_klass_image, rva);
/*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
/* for aot code we do the lookup on load */
if (aot && data_ptr)
data_ptr = (const char *)GUINT_TO_POINTER (rva);
} else {
/* FIXME: is it possible to AOT an SRE assembly not meant to be saved? */
g_assert (!aot);
data_ptr = mono_field_get_data (field);
}
if (!data_ptr)
return NULL;
*il_op = MONO_CEE_CALL;
*next_ip = ip;
return data_ptr;
}
return NULL;
}
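/*
 * set_exception_type_from_invalid_il:
 *
 * Record an InvalidProgramException on CFG with a message describing the
 * invalid IL at IP in METHOD.
 */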
static void
set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, guchar *ip)
{
ERROR_DECL (error);
char *method_fname = mono_method_full_name (method, TRUE);
char *method_code;
MonoMethodHeader *header = mono_method_get_header_checked (method, error);
if (!header) {
method_code = g_strdup_printf ("could not parse method body due to %s", mono_error_get_message (error));
mono_error_cleanup (error);
} else if (header->code_size == 0)
method_code = g_strdup ("method body is empty.");
else
method_code = mono_disasm_code_one (NULL, method, ip, NULL);
mono_cfg_set_exception_invalid_program (cfg, g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code));
g_free (method_fname);
g_free (method_code);
cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
}
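/*
 * mono_type_to_stloc_coerce:
 *
 * Return the conversion opcode needed to coerce a value before storing it
 * into a local of type TYPE, or 0 if no coercion is needed.
 */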
guint32
mono_type_to_stloc_coerce (MonoType *type)
{
if (m_type_is_byref (type))
return 0;
type = mini_get_underlying_type (type);
handle_enum:
switch (type->type) {
case MONO_TYPE_I1:
return OP_ICONV_TO_I1;
case MONO_TYPE_U1:
return OP_ICONV_TO_U1;
case MONO_TYPE_I2:
return OP_ICONV_TO_I2;
case MONO_TYPE_U2:
return OP_ICONV_TO_U2;
case MONO_TYPE_I4:
case MONO_TYPE_U4:
case MONO_TYPE_I:
case MONO_TYPE_U:
case MONO_TYPE_PTR:
case MONO_TYPE_FNPTR:
case MONO_TYPE_CLASS:
case MONO_TYPE_STRING:
case MONO_TYPE_OBJECT:
case MONO_TYPE_SZARRAY:
case MONO_TYPE_ARRAY:
case MONO_TYPE_I8:
case MONO_TYPE_U8:
case MONO_TYPE_R4:
case MONO_TYPE_R8:
case MONO_TYPE_TYPEDBYREF:
case MONO_TYPE_GENERICINST:
return 0;
case MONO_TYPE_VALUETYPE:
if (m_class_is_enumtype (type->data.klass)) {
type = mono_class_enum_basetype_internal (type->data.klass);
goto handle_enum;
}
return 0;
case MONO_TYPE_VAR:
case MONO_TYPE_MVAR: //TODO I believe we don't need to handle gsharedvt as there won't be a match and, for example, u1 is not covariant to u32
return 0;
default:
g_error ("unknown type 0x%02x in mono_type_to_stloc_coerce", type->type);
}
return -1;
}
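/*
 * emit_stloc_ir:
 *
 * Emit the IR for storing the value on top of the stack SP into local N,
 * coercing narrow integer types first and optimizing away reg-reg moves of
 * constants where possible.
 */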
static void
emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
{
MonoInst *ins;
guint32 coerce_op = mono_type_to_stloc_coerce (header->locals [n]);
if (coerce_op) {
if (cfg->cbb->last_ins == sp [0] && sp [0]->opcode == coerce_op) {
if (cfg->verbose_level > 2)
printf ("Found existing coercing is enough for stloc\n");
} else {
MONO_INST_NEW (cfg, ins, coerce_op);
ins->dreg = alloc_ireg (cfg);
ins->sreg1 = sp [0]->dreg;
ins->type = STACK_I4;
ins->klass = mono_class_from_mono_type_internal (header->locals [n]);
MONO_ADD_INS (cfg->cbb, ins);
*sp = mono_decompose_opcode (cfg, ins);
}
}
guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
if (!cfg->deopt && (opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
/* Optimize reg-reg moves away */
/*
* Can't optimize other opcodes, since sp[0] might point to
* the last ins of a decomposed opcode.
*/
sp [0]->dreg = (cfg)->locals [n]->dreg;
} else {
EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
}
}
static void
emit_starg_ir (MonoCompile *cfg, MonoInst **sp, int n)
{
MonoInst *ins;
guint32 coerce_op = mono_type_to_stloc_coerce (cfg->arg_types [n]);
if (coerce_op) {
if (cfg->cbb->last_ins == sp [0] && sp [0]->opcode == coerce_op) {
if (cfg->verbose_level > 2)
printf ("Found existing coercing is enough for starg\n");
} else {
MONO_INST_NEW (cfg, ins, coerce_op);
ins->dreg = alloc_ireg (cfg);
ins->sreg1 = sp [0]->dreg;
ins->type = STACK_I4;
ins->klass = mono_class_from_mono_type_internal (cfg->arg_types [n]);
MONO_ADD_INS (cfg->cbb, ins);
*sp = mono_decompose_opcode (cfg, ins);
}
}
EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
}
/*
* ldloca inhibits many optimizations so try to get rid of it in common
* cases.
*/
static guchar *
emit_optimized_ldloca_ir (MonoCompile *cfg, guchar *ip, guchar *end, int local)
{
guint32 token;
MonoClass *klass;
MonoType *type;
guchar *start = ip;
if ((ip = il_read_initobj (ip, end, &token)) && ip_in_bb (cfg, cfg->cbb, start + 1)) {
/* From the INITOBJ case */
klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
CHECK_TYPELOAD (klass);
type = mini_get_underlying_type (m_class_get_byval_arg (klass));
emit_init_local (cfg, local, type, TRUE);
return ip;
}
exception_exit:
return NULL;
}
static MonoInst*
handle_call_res_devirt (MonoCompile *cfg, MonoMethod *cmethod, MonoInst *call_res)
{
/*
* Devirt EqualityComparer.Default.Equals () calls for some types.
* The corefx code expects these calls to be devirtualized.
* This depends on the implementation of EqualityComparer.Default, which is
* in mcs/class/referencesource/mscorlib/system/collections/generic/equalitycomparer.cs
*/
if (m_class_get_image (cmethod->klass) == mono_defaults.corlib &&
!strcmp (m_class_get_name (cmethod->klass), "EqualityComparer`1") &&
!strcmp (cmethod->name, "get_Default")) {
MonoType *param_type = mono_class_get_generic_class (cmethod->klass)->context.class_inst->type_argv [0];
MonoClass *inst;
MonoGenericContext ctx;
ERROR_DECL (error);
memset (&ctx, 0, sizeof (ctx));
MonoType *args [ ] = { param_type };
ctx.class_inst = mono_metadata_get_generic_inst (1, args);
inst = mono_class_inflate_generic_class_checked (mono_class_get_iequatable_class (), &ctx, error);
mono_error_assert_ok (error);
/* EqualityComparer<T>.Default returns specific types depending on T */
// FIXME: Add more
/* 1. Implements IEquatable<T> */
/*
* Can't use this for string/byte as it might use a different comparer:
*
* // Specialize type byte for performance reasons
* if (t == typeof(byte)) {
* return (EqualityComparer<T>)(object)(new ByteEqualityComparer());
* }
* #if MOBILE
* // Breaks .net serialization compatibility
* if (t == typeof (string))
* return (EqualityComparer<T>)(object)new InternalStringComparer ();
* #endif
*/
if (mono_class_is_assignable_from_internal (inst, mono_class_from_mono_type_internal (param_type)) && param_type->type != MONO_TYPE_U1 && param_type->type != MONO_TYPE_STRING) {
MonoInst *typed_objref;
MonoClass *gcomparer_inst;
memset (&ctx, 0, sizeof (ctx));
args [0] = param_type;
ctx.class_inst = mono_metadata_get_generic_inst (1, args);
MonoClass *gcomparer = mono_class_get_geqcomparer_class ();
g_assert (gcomparer);
gcomparer_inst = mono_class_inflate_generic_class_checked (gcomparer, &ctx, error);
if (is_ok (error)) {
MONO_INST_NEW (cfg, typed_objref, OP_TYPED_OBJREF);
typed_objref->type = STACK_OBJ;
typed_objref->dreg = alloc_ireg_ref (cfg);
typed_objref->sreg1 = call_res->dreg;
typed_objref->klass = gcomparer_inst;
MONO_ADD_INS (cfg->cbb, typed_objref);
call_res = typed_objref;
/* Force decompose */
cfg->flags |= MONO_CFG_NEEDS_DECOMPOSE;
cfg->cbb->needs_decompose = TRUE;
}
}
}
return call_res;
}
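/* Return whether KLASS is System.Exception or one of its subclasses */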
static gboolean
is_exception_class (MonoClass *klass)
{
if (G_LIKELY (m_class_get_supertypes (klass)))
return mono_class_has_parent_fast (klass, mono_defaults.exception_class);
while (klass) {
if (klass == mono_defaults.exception_class)
return TRUE;
klass = m_class_get_parent (klass);
}
return FALSE;
}
/*
* is_jit_optimizer_disabled:
*
* Determine whether M's assembly has a DebuggableAttribute with the
* IsJITOptimizerDisabled flag set.
*/
static gboolean
is_jit_optimizer_disabled (MonoMethod *m)
{
MonoAssembly *ass = m_class_get_image (m->klass)->assembly;
g_assert (ass);
if (ass->jit_optimizer_disabled_inited)
return ass->jit_optimizer_disabled;
return mono_assembly_is_jit_optimizer_disabled (ass);
}
gboolean
mono_is_supported_tailcall_helper (gboolean value, const char *svalue)
{
if (!value)
mono_tailcall_print ("%s %s\n", __func__, svalue);
return value;
}
static gboolean
mono_is_not_supported_tailcall_helper (gboolean value, const char *svalue, MonoMethod *method, MonoMethod *cmethod)
{
// Return value, printing if it inhibits tailcall.
if (value && mono_tailcall_print_enabled ()) {
const char *lparen = strchr (svalue, ' ') ? "(" : "";
const char *rparen = *lparen ? ")" : "";
mono_tailcall_print ("%s %s -> %s %s%s%s:%d\n", __func__, method->name, cmethod->name, lparen, svalue, rparen, value);
}
return value;
}
#define IS_NOT_SUPPORTED_TAILCALL(x) (mono_is_not_supported_tailcall_helper((x), #x, method, cmethod))
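/*
 * is_supported_tailcall:
 *
 * Return whether the tailcall at IP from METHOD to CMETHOD with signature
 * FSIG can be made, storing the answer for the calli variant into
 * PTAILCALL_CALLI.
 */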
static gboolean
is_supported_tailcall (MonoCompile *cfg, const guint8 *ip, MonoMethod *method, MonoMethod *cmethod, MonoMethodSignature *fsig,
gboolean virtual_, gboolean extra_arg, gboolean *ptailcall_calli)
{
// Some checks apply to "regular", some to "calli", some to both.
// To ease the burden on the caller, always compute both regular and calli.
gboolean tailcall = TRUE;
gboolean tailcall_calli = TRUE;
if (IS_NOT_SUPPORTED_TAILCALL (virtual_ && !cfg->backend->have_op_tailcall_membase))
tailcall = FALSE;
if (IS_NOT_SUPPORTED_TAILCALL (!cfg->backend->have_op_tailcall_reg))
tailcall_calli = FALSE;
if (!tailcall && !tailcall_calli)
goto exit;
// FIXME in calli, there is no type for the this parameter,
// so we assume it might be a valuetype; in the future we should issue a range
// check to rule out pointing to the frame (for other reference parameters also)
if ( IS_NOT_SUPPORTED_TAILCALL (cmethod && fsig->hasthis && m_class_is_valuetype (cmethod->klass)) // This might point to the current method's stack. Emit range check?
|| IS_NOT_SUPPORTED_TAILCALL (cmethod && (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL))
|| IS_NOT_SUPPORTED_TAILCALL (fsig->pinvoke) // i.e. if !cmethod (calli)
|| IS_NOT_SUPPORTED_TAILCALL (cfg->method->save_lmf)
|| IS_NOT_SUPPORTED_TAILCALL (!cmethod && fsig->hasthis) // FIXME could be valuetype to current frame; range check
|| IS_NOT_SUPPORTED_TAILCALL (cmethod && cmethod->wrapper_type && cmethod->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD)
// http://www.mono-project.com/docs/advanced/runtime/docs/generic-sharing/
//
// 1. Non-generic non-static methods of reference types have access to the
// RGCTX via the "this" argument (this->vtable->rgctx).
// 2. (a) Non-generic static methods of reference types and (b) non-generic methods
// of value types need to be passed a pointer to the caller's class's VTable in the MONO_ARCH_RGCTX_REG register.
// 3. Generic methods need to be passed a pointer to the MRGCTX in the MONO_ARCH_RGCTX_REG register
//
// That is what vtable_arg is here (always?).
//
// Passing vtable_arg uses (requires?) a volatile non-parameter register,
// such as AMD64 rax, r10, r11, or the return register on many architectures.
// ARM32 does not always clearly have such a register. ARM32's return register
// is a parameter register.
// iPhone could use r9 except on old systems. iPhone/ARM32 is not particularly
// important. Linux/arm32 is less clear.
// ARM32's scratch r12 might work but only with much collateral change.
//
// Imagine F1 calls F2, and F2 tailcalls F3.
// F2 and F3 are managed. F1 is native.
// Without a tailcall, F2 can save and restore everything needed for F1.
// However if the extra parameter were in a non-volatile, such as ARM32 V5/R8,
// F3 cannot easily restore it for F1 in the current scheme, where the extra
// parameter is not merely an extra parameter, but is passed "outside of
// the ABI".
//
// If all native to managed transitions are intercepted and wrapped (w/o tailcall),
// then they can preserve this register and the rest of the managed callgraph
// treat it as volatile.
//
// Interface method dispatch has the same problem (imt_arg).
|| IS_NOT_SUPPORTED_TAILCALL (extra_arg && !cfg->backend->have_volatile_non_param_register)
|| IS_NOT_SUPPORTED_TAILCALL (cfg->gsharedvt)
) {
tailcall_calli = FALSE;
tailcall = FALSE;
goto exit;
}
for (int i = 0; i < fsig->param_count; ++i) {
if (IS_NOT_SUPPORTED_TAILCALL (m_type_is_byref (fsig->params [i]) || fsig->params [i]->type == MONO_TYPE_PTR || fsig->params [i]->type == MONO_TYPE_FNPTR)) {
tailcall_calli = FALSE;
tailcall = FALSE; // These can point to the current method's stack. Emit range check?
goto exit;
}
}
MonoMethodSignature *caller_signature;
MonoMethodSignature *callee_signature;
caller_signature = mono_method_signature_internal (method);
callee_signature = cmethod ? mono_method_signature_internal (cmethod) : fsig;
g_assert (caller_signature);
g_assert (callee_signature);
// Require an exact match on return type due to various conversions in emit_move_return_value that would be skipped.
// The main troublesome conversions are double <=> float.
// CoreCLR allows some conversions here, such as integer truncation.
// Also, I <=> I4/I8 and U <=> U4/U8 would be ok, for matching sizes.
if (IS_NOT_SUPPORTED_TAILCALL (mini_get_underlying_type (caller_signature->ret)->type != mini_get_underlying_type (callee_signature->ret)->type)
|| IS_NOT_SUPPORTED_TAILCALL (!mono_arch_tailcall_supported (cfg, caller_signature, callee_signature, virtual_))) {
tailcall_calli = FALSE;
tailcall = FALSE;
goto exit;
}
/* Debugging support */
#if 0
if (!mono_debug_count ()) {
tailcall_calli = FALSE;
tailcall = FALSE;
goto exit;
}
#endif
// See check_sp in mini_emit_calli_full.
if (tailcall_calli && IS_NOT_SUPPORTED_TAILCALL (mini_should_check_stack_pointer (cfg)))
tailcall_calli = FALSE;
exit:
mono_tailcall_print ("tail.%s %s -> %s tailcall:%d tailcall_calli:%d gshared:%d extra_arg:%d virtual_:%d\n",
mono_opcode_name (*ip), method->name, cmethod ? cmethod->name : "calli", tailcall, tailcall_calli,
cfg->gshared, extra_arg, virtual_);
*ptailcall_calli = tailcall_calli;
return tailcall;
}
/*
* is_addressable_valuetype_load:
*
* Returns true if a previous load can be done without an extra copy, given the new instruction IP and the type LDTYPE of the object being loaded.
*/
static gboolean
is_addressable_valuetype_load (MonoCompile* cfg, guint8* ip, MonoType* ldtype)
{
/* Avoid loading a struct just to load one of its fields */
gboolean is_load_instruction = (*ip == CEE_LDFLD);
gboolean is_in_previous_bb = ip_in_bb(cfg, cfg->cbb, ip);
gboolean is_struct = MONO_TYPE_ISSTRUCT(ldtype);
return is_load_instruction && is_in_previous_bb && is_struct;
}
/*
* handle_ctor_call:
*
* Handle calls made to ctors from NEWOBJ opcodes.
*/
static void
handle_ctor_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, int context_used,
MonoInst **sp, guint8 *ip, int *inline_costs)
{
MonoInst *vtable_arg = NULL, *callvirt_this_arg = NULL, *ins;
if (cmethod && (ins = mini_emit_inst_for_ctor (cfg, cmethod, fsig, sp))) {
g_assert (MONO_TYPE_IS_VOID (fsig->ret));
CHECK_CFG_EXCEPTION;
return;
}
if (mono_class_generic_sharing_enabled (cmethod->klass) && mono_method_is_generic_sharable (cmethod, TRUE)) {
MonoRgctxAccess access = mini_get_rgctx_access_for_method (cmethod);
if (access == MONO_RGCTX_ACCESS_MRGCTX) {
mono_class_vtable_checked (cmethod->klass, cfg->error);
CHECK_CFG_ERROR;
CHECK_TYPELOAD (cmethod->klass);
vtable_arg = emit_get_rgctx_method (cfg, context_used,
cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
} else if (access == MONO_RGCTX_ACCESS_VTABLE) {
vtable_arg = mini_emit_get_rgctx_klass (cfg, context_used,
cmethod->klass, MONO_RGCTX_INFO_VTABLE);
CHECK_CFG_ERROR;
CHECK_TYPELOAD (cmethod->klass);
} else {
g_assert (access == MONO_RGCTX_ACCESS_THIS);
}
}
/* Avoid virtual calls to ctors if possible */
if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
mono_method_check_inlining (cfg, cmethod) &&
!mono_class_is_subclass_of_internal (cmethod->klass, mono_defaults.exception_class, FALSE)) {
int costs;
if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, FALSE, NULL))) {
cfg->real_offset += 5;
*inline_costs += costs - 5;
} else {
INLINE_FAILURE ("inline failure");
// FIXME-VT: Clean this up
if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig))
GSHAREDVT_FAILURE(*ip);
mini_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, callvirt_this_arg, NULL, NULL);
}
} else if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) {
MonoInst *addr;
addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE);
if (cfg->llvm_only) {
// FIXME: Avoid initializing vtable_arg
mini_emit_llvmonly_calli (cfg, fsig, sp, addr);
} else {
mini_emit_calli (cfg, fsig, sp, addr, NULL, vtable_arg);
}
} else if (context_used &&
((!mono_method_is_generic_sharable_full (cmethod, TRUE, FALSE, FALSE) ||
!mono_class_generic_sharing_enabled (cmethod->klass)) || cfg->gsharedvt)) {
MonoInst *cmethod_addr;
/* Generic calls made out of gsharedvt methods cannot be patched, so use an indirect call */
if (cfg->llvm_only) {
MonoInst *addr = emit_get_rgctx_method (cfg, context_used, cmethod,
MONO_RGCTX_INFO_METHOD_FTNDESC);
mini_emit_llvmonly_calli (cfg, fsig, sp, addr);
} else {
cmethod_addr = emit_get_rgctx_method (cfg, context_used,
cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
mini_emit_calli (cfg, fsig, sp, cmethod_addr, NULL, vtable_arg);
}
} else {
INLINE_FAILURE ("ctor call");
ins = mini_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp,
callvirt_this_arg, NULL, vtable_arg);
}
exception_exit:
mono_error_exit:
return;
}
typedef struct {
MonoMethod *method;
gboolean inst_tailcall;
} HandleCallData;
/*
* handle_constrained_call:
*
* Handle constrained calls. Return a MonoInst* representing the call or NULL.
* May overwrite sp [0] and modify the ref_... parameters.
*/
static MonoInst*
handle_constrained_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoClass *constrained_class, MonoInst **sp,
HandleCallData *cdata, MonoMethod **ref_cmethod, gboolean *ref_virtual, gboolean *ref_emit_widen)
{
MonoInst *ins, *addr;
MonoMethod *method = cdata->method;
gboolean constrained_partial_call = FALSE;
gboolean constrained_is_generic_param =
m_class_get_byval_arg (constrained_class)->type == MONO_TYPE_VAR ||
m_class_get_byval_arg (constrained_class)->type == MONO_TYPE_MVAR;
MonoType *gshared_constraint = NULL;
if (constrained_is_generic_param && cfg->gshared) {
if (!mini_is_gsharedvt_klass (constrained_class)) {
g_assert (!m_class_is_valuetype (cmethod->klass));
if (!mini_type_is_reference (m_class_get_byval_arg (constrained_class)))
constrained_partial_call = TRUE;
MonoType *t = m_class_get_byval_arg (constrained_class);
MonoGenericParam *gparam = t->data.generic_param;
gshared_constraint = gparam->gshared_constraint;
}
}
if (mini_is_gsharedvt_klass (constrained_class)) {
if ((cmethod->klass != mono_defaults.object_class) && m_class_is_valuetype (constrained_class) && m_class_is_valuetype (cmethod->klass)) {
/* The 'Own method' case below */
} else if (m_class_get_image (cmethod->klass) != mono_defaults.corlib && !mono_class_is_interface (cmethod->klass) && !m_class_is_valuetype (cmethod->klass)) {
/* 'The type parameter is instantiated as a reference type' case below. */
} else {
ins = handle_constrained_gsharedvt_call (cfg, cmethod, fsig, sp, constrained_class, ref_emit_widen);
CHECK_CFG_EXCEPTION;
g_assert (ins);
if (cdata->inst_tailcall) // FIXME
mono_tailcall_print ("missed tailcall constrained_class %s -> %s\n", method->name, cmethod->name);
return ins;
}
}
if (m_method_is_static (cmethod)) {
/* Call to an abstract static method, handled normally */
return NULL;
} else if (constrained_partial_call) {
gboolean need_box = TRUE;
/*
* The receiver is a valuetype, but the exact type is not known at compile time. This means the
* called method is not known at compile time either. The called method could end up being
* one of the methods on the parent classes (object/valuetype/enum), in which case we need
* to box the receiver.
* A simple solution would be to box always and make a normal virtual call, but that would
* be bad performance-wise.
*/
if (mono_class_is_interface (cmethod->klass) && mono_class_is_ginst (cmethod->klass) &&
(cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT)) {
/*
* The parent classes implement no generic interfaces, so the called method will be a vtype method and no boxing is necessary.
*/
/* If the method is not abstract, it's a default interface method, and we need to box */
need_box = FALSE;
}
if (gshared_constraint && MONO_TYPE_IS_PRIMITIVE (gshared_constraint) && cmethod->klass == mono_defaults.object_class &&
!strcmp (cmethod->name, "GetHashCode")) {
/*
* The receiver is constrained to a primitive type or an enum with the same basetype.
* Enum.GetHashCode () returns the hash code of the underlying type (see comments in Enum.cs),
* so the constrained call can be replaced with a normal call to the basetype GetHashCode ()
* method.
*/
MonoClass *gshared_constraint_class = mono_class_from_mono_type_internal (gshared_constraint);
cmethod = get_method_nofail (gshared_constraint_class, cmethod->name, 0, 0);
g_assert (cmethod);
*ref_cmethod = cmethod;
*ref_virtual = FALSE;
if (cfg->verbose_level)
printf (" -> %s\n", mono_method_get_full_name (cmethod));
return NULL;
}
if (!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) && (cmethod->klass == mono_defaults.object_class || cmethod->klass == m_class_get_parent (mono_defaults.enum_class) || cmethod->klass == mono_defaults.enum_class)) {
/* The called method is not virtual, i.e. Object:GetType (), the receiver is a vtype, has to box */
EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, m_class_get_byval_arg (constrained_class), sp [0]->dreg, 0);
ins->klass = constrained_class;
sp [0] = mini_emit_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
CHECK_CFG_EXCEPTION;
} else if (need_box) {
MonoInst *box_type;
MonoBasicBlock *is_ref_bb, *end_bb;
MonoInst *nonbox_call, *addr;
/*
* Determine at runtime whether the called method is defined on object/valuetype/enum, and emit a boxing call
* if needed.
* FIXME: It is possible to inline the called method in a lot of cases, e.g. for T_INT,
* the no-box case goes to a method in Int32, while the box case goes to a method in Enum.
*/
addr = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_CODE);
NEW_BBLOCK (cfg, is_ref_bb);
NEW_BBLOCK (cfg, end_bb);
box_type = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_BOX_TYPE);
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, box_type->dreg, MONO_GSHAREDVT_BOX_TYPE_REF);
MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
/* Non-ref case */
if (cfg->llvm_only)
/* addr is an ftndesc in this case */
nonbox_call = mini_emit_llvmonly_calli (cfg, fsig, sp, addr);
else
nonbox_call = (MonoInst*)mini_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Ref case */
MONO_START_BB (cfg, is_ref_bb);
EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, m_class_get_byval_arg (constrained_class), sp [0]->dreg, 0);
ins->klass = constrained_class;
sp [0] = mini_emit_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
CHECK_CFG_EXCEPTION;
if (cfg->llvm_only)
ins = mini_emit_llvmonly_calli (cfg, fsig, sp, addr);
else
ins = (MonoInst*)mini_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
MONO_START_BB (cfg, end_bb);
cfg->cbb = end_bb;
nonbox_call->dreg = ins->dreg;
if (cdata->inst_tailcall) // FIXME
mono_tailcall_print ("missed tailcall constrained_partial_need_box %s -> %s\n", method->name, cmethod->name);
return ins;
} else {
g_assert (mono_class_is_interface (cmethod->klass));
addr = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_CODE);
if (cfg->llvm_only)
ins = mini_emit_llvmonly_calli (cfg, fsig, sp, addr);
else
ins = (MonoInst*)mini_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
if (cdata->inst_tailcall) // FIXME
mono_tailcall_print ("missed tailcall constrained_partial %s -> %s\n", method->name, cmethod->name);
return ins;
}
} else if (!m_class_is_valuetype (constrained_class)) {
int dreg = alloc_ireg_ref (cfg);
/*
* The type parameter is instantiated as a reference
* type. We have a managed pointer on the stack, so
* we need to dereference it here.
*/
EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
ins->type = STACK_OBJ;
sp [0] = ins;
} else if (cmethod->klass == mono_defaults.object_class || cmethod->klass == m_class_get_parent (mono_defaults.enum_class) || cmethod->klass == mono_defaults.enum_class) {
/*
* The type parameter is instantiated as a valuetype,
* but that type doesn't override the method we're
* calling, so we need to box `this'.
*/
EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, m_class_get_byval_arg (constrained_class), sp [0]->dreg, 0);
ins->klass = constrained_class;
sp [0] = mini_emit_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
CHECK_CFG_EXCEPTION;
} else {
if (cmethod->klass != constrained_class) {
/* Enums/default interface methods */
EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, m_class_get_byval_arg (constrained_class), sp [0]->dreg, 0);
ins->klass = constrained_class;
sp [0] = mini_emit_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
CHECK_CFG_EXCEPTION;
}
*ref_virtual = FALSE;
}
exception_exit:
return NULL;
}
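/*
 * emit_setret:
 *
 * Emit the IR to return VAL from the current method, storing valuetype
 * returns through the hidden return address argument when one is used, and
 * handling the soft-float R4 case.
 */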
static void
emit_setret (MonoCompile *cfg, MonoInst *val)
{
MonoType *ret_type = mini_get_underlying_type (mono_method_signature_internal (cfg->method)->ret);
MonoInst *ins;
if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
MonoInst *ret_addr;
if (!cfg->vret_addr) {
EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, val);
} else {
EMIT_NEW_RETLOADA (cfg, ret_addr);
MonoClass *ret_class = mono_class_from_mono_type_internal (ret_type);
if (MONO_CLASS_IS_SIMD (cfg, ret_class))
EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREX_MEMBASE, ret_addr->dreg, 0, val->dreg);
else
EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, val->dreg);
ins->klass = ret_class;
}
} else {
#ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
if (COMPILE_SOFT_FLOAT (cfg) && !m_type_is_byref (ret_type) && ret_type->type == MONO_TYPE_R4) {
MonoInst *conv;
MonoInst *iargs [ ] = { val };
conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
mono_arch_emit_setret (cfg, cfg->method, conv);
} else {
mono_arch_emit_setret (cfg, cfg->method, val);
}
#else
mono_arch_emit_setret (cfg, cfg->method, val);
#endif
}
}
/*
* Emit a call to enter the interpreter for methods with filter clauses.
*/
static void
emit_llvmonly_interp_entry (MonoCompile *cfg, MonoMethodHeader *header)
{
MonoInst *ins;
MonoInst **iargs;
MonoMethodSignature *sig = mono_method_signature_internal (cfg->method);
MonoInst *ftndesc;
cfg->interp_in_signatures = g_slist_prepend_mempool (cfg->mempool, cfg->interp_in_signatures, sig);
/*
* Emit a call to the interp entry function. We emit it here instead of the llvm backend since
* calling conventions etc. are easier to handle here. The LLVM backend will only emit the
* entry/exit bblocks.
*/
g_assert (cfg->cbb == cfg->bb_init);
if (cfg->gsharedvt && mini_is_gsharedvt_variable_signature (sig)) {
/*
* Would have to generate a gsharedvt out wrapper which calls the interp entry wrapper, but
* the gsharedvt out wrapper might not exist if the caller is also a gsharedvt method since
* the concrete signature of the call might not exist in the program.
* So transition directly to the interpreter without the wrappers.
*/
MonoInst *args_ins;
MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
ins->dreg = alloc_preg (cfg);
ins->inst_imm = sig->param_count * sizeof (target_mgreg_t);
MONO_ADD_INS (cfg->cbb, ins);
args_ins = ins;
for (int i = 0; i < sig->hasthis + sig->param_count; ++i) {
MonoInst *arg_addr_ins;
EMIT_NEW_VARLOADA ((cfg), arg_addr_ins, cfg->args [i], cfg->arg_types [i]);
EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args_ins->dreg, i * sizeof (target_mgreg_t), arg_addr_ins->dreg);
}
MonoInst *ret_var = NULL;
MonoInst *ret_arg_ins;
if (!MONO_TYPE_IS_VOID (sig->ret)) {
ret_var = mono_compile_create_var (cfg, sig->ret, OP_LOCAL);
EMIT_NEW_VARLOADA (cfg, ret_arg_ins, ret_var, sig->ret);
} else {
EMIT_NEW_PCONST (cfg, ret_arg_ins, NULL);
}
iargs = g_newa (MonoInst*, 3);
iargs [0] = emit_get_rgctx_method (cfg, -1, cfg->method, MONO_RGCTX_INFO_INTERP_METHOD);
iargs [1] = ret_arg_ins;
iargs [2] = args_ins;
mono_emit_jit_icall_id (cfg, MONO_JIT_ICALL_mini_llvmonly_interp_entry_gsharedvt, iargs);
if (!MONO_TYPE_IS_VOID (sig->ret))
EMIT_NEW_VARLOAD (cfg, ins, ret_var, sig->ret);
else
ins = NULL;
} else {
/* Obtain the interp entry function */
ftndesc = emit_get_rgctx_method (cfg, -1, cfg->method, MONO_RGCTX_INFO_LLVMONLY_INTERP_ENTRY);
/* Call it */
iargs = g_newa (MonoInst*, sig->param_count + 1);
for (int i = 0; i < sig->param_count + sig->hasthis; ++i)
EMIT_NEW_ARGLOAD (cfg, iargs [i], i);
ins = mini_emit_llvmonly_calli (cfg, sig, iargs, ftndesc);
}
/* Do a normal return */
if (cfg->ret) {
emit_setret (cfg, ins);
/*
* Since only bb_entry/bb_exit is emitted if interp_entry_only is set,
* it's possible that the return value becomes an OP_PHI node whose inputs
* are not emitted. Make it volatile to prevent that.
*/
cfg->ret->flags |= MONO_INST_VOLATILE;
}
MONO_INST_NEW (cfg, ins, OP_BR);
ins->inst_target_bb = cfg->bb_exit;
MONO_ADD_INS (cfg->cbb, ins);
link_bblock (cfg, cfg->cbb, cfg->bb_exit);
}
typedef union _MonoOpcodeParameter {
gint32 i32;
gint64 i64;
float f;
double d;
guchar *branch_target;
} MonoOpcodeParameter;
typedef struct _MonoOpcodeInfo {
guint constant : 4; // private
gint pops : 3; // public -1 means variable
gint pushes : 3; // public -1 means variable
} MonoOpcodeInfo;
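/*
 * mono_opcode_decode:
 *
 * Decode the inline parameter of the IL opcode IL_OP, whose encoding ends at
 * IP + OP_SIZE, into PARAMETER, and return its static stack behaviour
 * (pops/pushes, -1 meaning variable).
 */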
static const MonoOpcodeInfo*
mono_opcode_decode (guchar *ip, guint op_size, MonoOpcodeEnum il_op, MonoOpcodeParameter *parameter)
{
#define Push0 (0)
#define Pop0 (0)
#define Push1 (1)
#define Pop1 (1)
#define PushI (1)
#define PopI (1)
#define PushI8 (1)
#define PopI8 (1)
#define PushRef (1)
#define PopRef (1)
#define PushR4 (1)
#define PopR4 (1)
#define PushR8 (1)
#define PopR8 (1)
#define VarPush (-1)
#define VarPop (-1)
static const MonoOpcodeInfo mono_opcode_info [ ] = {
#define OPDEF(name, str, pops, pushes, param, param_constant, a, b, c, flow) {param_constant + 1, pops, pushes },
#include "mono/cil/opcode.def"
#undef OPDEF
};
#undef Push0
#undef Pop0
#undef Push1
#undef Pop1
#undef PushI
#undef PopI
#undef PushI8
#undef PopI8
#undef PushRef
#undef PopRef
#undef PushR4
#undef PopR4
#undef PushR8
#undef PopR8
#undef VarPush
#undef VarPop
gint32 delta;
guchar *next_ip = ip + op_size;
const MonoOpcodeInfo *info = &mono_opcode_info [il_op];
switch (mono_opcodes [il_op].argument) {
case MonoInlineNone:
parameter->i32 = (int)info->constant - 1;
break;
case MonoInlineString:
case MonoInlineType:
case MonoInlineField:
case MonoInlineMethod:
case MonoInlineTok:
case MonoInlineSig:
case MonoShortInlineR:
case MonoInlineI:
parameter->i32 = read32 (next_ip - 4);
// FIXME check token type?
break;
case MonoShortInlineI:
parameter->i32 = (signed char)next_ip [-1];
break;
case MonoInlineVar:
parameter->i32 = read16 (next_ip - 2);
break;
case MonoShortInlineVar:
parameter->i32 = next_ip [-1];
break;
case MonoInlineR:
case MonoInlineI8:
parameter->i64 = read64 (next_ip - 8);
break;
case MonoShortInlineBrTarget:
delta = (signed char)next_ip [-1];
goto branch_target;
case MonoInlineBrTarget:
delta = (gint32)read32 (next_ip - 4);
branch_target:
parameter->branch_target = delta + next_ip;
break;
case MonoInlineSwitch: // complicated
break;
default:
g_error ("%s %d %d\n", __func__, il_op, mono_opcodes [il_op].argument);
}
return info;
}
/*
* mono_method_to_ir:
*
* Translate the .NET IL into linear IR.
*
* @start_bblock: if not NULL, the starting basic block, used during inlining.
* @end_bblock: if not NULL, the ending basic block, used during inlining.
* @return_var: if not NULL, the place where the return value is stored, used during inlining.
* @inline_args: if not NULL, contains the arguments to the inline call
* @inline_offset: the real offset from the inline call, or zero when not inlining.
* @is_virtual_call: whether this method is being called as a result of a call to callvirt
*
* This method is used to turn ECMA IL into Mono's internal Linear IR
* representation. It is used both for entire methods and for
* inlining existing methods. In the former case, the @start_bblock,
* @end_bblock, @return_var, @inline_args are all set to NULL, and the
* inline_offset is set to zero.
*
* Returns: the inline cost, or -1 if there was an error processing this method.
*/
int
mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
MonoInst *return_var, MonoInst **inline_args,
guint inline_offset, gboolean is_virtual_call)
{
ERROR_DECL (error);
// Buffer to hold parameters to mono_new_array, instead of varargs.
MonoInst *array_new_localalloc_ins = NULL;
MonoInst *ins, **sp, **stack_start;
MonoBasicBlock *tblock = NULL;
MonoBasicBlock *init_localsbb = NULL, *init_localsbb2 = NULL;
MonoSimpleBasicBlock *bb = NULL, *original_bb = NULL;
MonoMethod *method_definition;
MonoInst **arg_array;
MonoMethodHeader *header;
MonoImage *image;
guint32 token, ins_flag;
MonoClass *klass;
MonoClass *constrained_class = NULL;
gboolean save_last_error = FALSE;
guchar *ip, *end, *target, *err_pos;
MonoMethodSignature *sig;
MonoGenericContext *generic_context = NULL;
MonoGenericContainer *generic_container = NULL;
MonoType **param_types;
int i, n, start_new_bblock, dreg;
int num_calls = 0, inline_costs = 0;
guint num_args;
GSList *class_inits = NULL;
gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
int context_used;
gboolean init_locals, seq_points, skip_dead_blocks;
gboolean sym_seq_points = FALSE;
MonoDebugMethodInfo *minfo;
MonoBitSet *seq_point_locs = NULL;
MonoBitSet *seq_point_set_locs = NULL;
const char *ovf_exc = NULL;
gboolean emitted_funccall_seq_point = FALSE;
gboolean detached_before_ret = FALSE;
gboolean ins_has_side_effect;
if (!cfg->disable_inline)
cfg->disable_inline = (method->iflags & METHOD_IMPL_ATTRIBUTE_NOOPTIMIZATION) || is_jit_optimizer_disabled (method);
cfg->current_method = method;
image = m_class_get_image (method->klass);
/* serialization and xdomain stuff may need access to private fields and methods */
dont_verify = FALSE;
dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
/* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_OTHER;
dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_STELEMREF;
header = mono_method_get_header_checked (method, cfg->error);
if (!header) {
mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
goto exception_exit;
} else {
cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
}
generic_container = mono_method_get_generic_container (method);
sig = mono_method_signature_internal (method);
num_args = sig->hasthis + sig->param_count;
ip = (guchar*)header->code;
cfg->cil_start = ip;
end = ip + header->code_size;
cfg->stat_cil_code_size += header->code_size;
seq_points = cfg->gen_seq_points && cfg->method == method;
if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED) {
/* We could hit a seq point before attaching to the JIT (#8338) */
seq_points = FALSE;
}
if (method->wrapper_type == MONO_WRAPPER_OTHER) {
WrapperInfo *info = mono_marshal_get_wrapper_info (method);
if (info->subtype == WRAPPER_SUBTYPE_INTERP_IN) {
/* We could hit a seq point before attaching to the JIT (#8338) */
seq_points = FALSE;
}
}
if (cfg->prof_coverage) {
if (cfg->compile_aot)
g_error ("Coverage profiling is not supported with AOT.");
INLINE_FAILURE ("coverage profiling");
cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
}
if ((cfg->gen_sdb_seq_points && cfg->method == method) || cfg->prof_coverage) {
minfo = mono_debug_lookup_method (method);
if (minfo) {
MonoSymSeqPoint *sps;
int i, n_il_offsets;
mono_debug_get_seq_points (minfo, NULL, NULL, NULL, &sps, &n_il_offsets);
seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
sym_seq_points = TRUE;
for (i = 0; i < n_il_offsets; ++i) {
if (sps [i].il_offset < header->code_size)
mono_bitset_set_fast (seq_point_locs, sps [i].il_offset);
}
g_free (sps);
MonoDebugMethodAsyncInfo* asyncMethod = mono_debug_lookup_method_async_debug_info (method);
if (asyncMethod) {
for (i = 0; asyncMethod != NULL && i < asyncMethod->num_awaits; i++)
{
mono_bitset_set_fast (seq_point_locs, asyncMethod->resume_offsets[i]);
mono_bitset_set_fast (seq_point_locs, asyncMethod->yield_offsets[i]);
}
mono_debug_free_method_async_debug_info (asyncMethod);
}
} else if (!method->wrapper_type && !method->dynamic && mono_debug_image_has_debug_info (m_class_get_image (method->klass))) {
/* Methods without line number info like auto-generated property accessors */
seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
sym_seq_points = TRUE;
}
}
/*
* Methods without init_locals set could cause asserts in various passes
* (#497220). To work around this, we emit dummy initialization opcodes
* (OP_DUMMY_ICONST etc.) which generate no code. These are only supported
* on some platforms.
*/
if (cfg->opt & MONO_OPT_UNSAFE)
init_locals = header->init_locals;
else
init_locals = TRUE;
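/* Walk back from an inflated instantiation to its generic definition, e.g. from List<int>.Add to List<T>.Add. */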
method_definition = method;
while (method_definition->is_inflated) {
MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
method_definition = imethod->declaring;
}
/* SkipVerification is not allowed if core-clr is enabled */
if (!dont_verify && mini_assembly_can_skip_verification (method)) {
dont_verify = TRUE;
dont_verify_stloc = TRUE;
}
if (sig->is_inflated)
generic_context = mono_method_get_context (method);
else if (generic_container)
generic_context = &generic_container->context;
cfg->generic_context = generic_context;
if (!cfg->gshared)
g_assert (!sig->has_type_parameters);
if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
g_assert (method->is_inflated);
g_assert (mono_method_get_context (method)->method_inst);
}
if (method->is_inflated && mono_method_get_context (method)->method_inst)
g_assert (sig->generic_param_count);
if (cfg->method == method) {
cfg->real_offset = 0;
} else {
cfg->real_offset = inline_offset;
}
cfg->cil_offset_to_bb = (MonoBasicBlock **)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
cfg->cil_offset_to_bb_len = header->code_size;
if (cfg->verbose_level > 2)
printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
param_types = (MonoType **)mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
if (sig->hasthis)
param_types [0] = m_class_is_valuetype (method->klass) ? m_class_get_this_arg (method->klass) : m_class_get_byval_arg (method->klass);
for (n = 0; n < sig->param_count; ++n)
param_types [n + sig->hasthis] = sig->params [n];
cfg->arg_types = param_types;
cfg->dont_inline = g_list_prepend (cfg->dont_inline, method);
if (cfg->method == method) {
/* ENTRY BLOCK */
NEW_BBLOCK (cfg, start_bblock);
cfg->bb_entry = start_bblock;
start_bblock->cil_code = NULL;
start_bblock->cil_length = 0;
/* EXIT BLOCK */
NEW_BBLOCK (cfg, end_bblock);
cfg->bb_exit = end_bblock;
end_bblock->cil_code = NULL;
end_bblock->cil_length = 0;
end_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
g_assert (cfg->num_bblocks == 2);
arg_array = cfg->args;
if (header->num_clauses) {
cfg->spvars = g_hash_table_new (NULL, NULL);
cfg->exvars = g_hash_table_new (NULL, NULL);
}
cfg->clause_is_dead = mono_mempool_alloc0 (cfg->mempool, sizeof (gboolean) * header->num_clauses);
/* handle exception clauses */
for (i = 0; i < header->num_clauses; ++i) {
MonoBasicBlock *try_bb;
MonoExceptionClause *clause = &header->clauses [i];
GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
try_bb->real_offset = clause->try_offset;
try_bb->try_start = TRUE;
GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
tblock->real_offset = clause->handler_offset;
tblock->flags |= BB_EXCEPTION_HANDLER;
if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
mono_create_exvar_for_offset (cfg, clause->handler_offset);
/*
* Linking the try block with the EH block hinders inlining as we won't be able to
* merge the bblocks from inlining and produce an artificial hole for no good reason.
*/
if (COMPILE_LLVM (cfg))
link_bblock (cfg, try_bb, tblock);
if (*(ip + clause->handler_offset) == CEE_POP)
tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
MONO_ADD_INS (tblock, ins);
if (seq_points && clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY && clause->flags != MONO_EXCEPTION_CLAUSE_FILTER) {
/* finally clauses already have a seq point */
/* seq points for filter clauses are emitted below */
NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
MONO_ADD_INS (tblock, ins);
}
/* todo: is a fault block unsafe to optimize? */
if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
tblock->flags |= BB_EXCEPTION_UNSAFE;
}
/*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
while (p < end) {
printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
}*/
/* catch and filter blocks get the exception object on the stack */
if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
/* mostly like handle_stack_args (), but just sets the input args */
/* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
tblock->in_scount = 1;
tblock->in_stack = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
cfg->cbb = tblock;
#ifdef MONO_CONTEXT_SET_LLVM_EXC_REG
/* The EH code passes in the exception in a register to both JITted and LLVM compiled code */
if (!cfg->compile_llvm) {
MONO_INST_NEW (cfg, ins, OP_GET_EX_OBJ);
ins->dreg = tblock->in_stack [0]->dreg;
MONO_ADD_INS (tblock, ins);
}
#else
MonoInst *dummy_use;
/*
* Add a dummy use for the exvar so its liveness info will be
* correct.
*/
EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
#endif
if (seq_points && clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
MONO_ADD_INS (tblock, ins);
}
if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
tblock->flags |= BB_EXCEPTION_HANDLER;
tblock->real_offset = clause->data.filter_offset;
tblock->in_scount = 1;
tblock->in_stack = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
/* The filter block shares the exvar with the handler block */
tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
MONO_ADD_INS (tblock, ins);
}
}
if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
clause->data.catch_class &&
cfg->gshared &&
mono_class_check_context_used (clause->data.catch_class)) {
/*
* In shared generic code with catch
* clauses containing type variables
* the exception handling code has to
* be able to get to the rgctx.
* Therefore we have to make sure that
* the vtable/mrgctx argument (for
* static or generic methods) or the
* "this" argument (for non-static
* methods) are live.
*/
if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
mini_method_get_context (method)->method_inst ||
m_class_is_valuetype (method->klass)) {
mono_get_vtable_var (cfg);
} else {
MonoInst *dummy_use;
EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
}
}
}
} else {
arg_array = g_newa (MonoInst*, num_args);
cfg->cbb = start_bblock;
cfg->args = arg_array;
mono_save_args (cfg, sig, inline_args);
}
if (cfg->method == method && cfg->self_init && cfg->compile_aot && !COMPILE_LLVM (cfg)) {
MonoMethod *wrapper;
MonoInst *args [2];
int idx;
/*
* Emit code to initialize this method by calling the init wrapper emitted by LLVM.
 * This is not efficient right now, but it's only used for the methods which fail
* LLVM compilation.
* FIXME: Optimize this
*/
g_assert (!cfg->gshared);
wrapper = mono_marshal_get_aot_init_wrapper (AOT_INIT_METHOD);
/* Emit this into the entry bb so it comes before the GC safe point which depends on an inited GOT */
cfg->cbb = cfg->bb_entry;
idx = mono_aot_get_method_index (cfg->method);
EMIT_NEW_ICONST (cfg, args [0], idx);
/* Dummy */
EMIT_NEW_ICONST (cfg, args [1], 0);
mono_emit_method_call (cfg, wrapper, args, NULL);
}
if (cfg->llvm_only && cfg->interp && cfg->method == method && !cfg->deopt) {
if (header->num_clauses) {
for (int i = 0; i < header->num_clauses; ++i) {
MonoExceptionClause *clause = &header->clauses [i];
/* Finally clauses are checked after the remove_finally pass */
if (clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY)
cfg->interp_entry_only = TRUE;
}
}
}
/* we use a separate basic block for the initialization code */
NEW_BBLOCK (cfg, init_localsbb);
if (cfg->method == method)
cfg->bb_init = init_localsbb;
init_localsbb->real_offset = cfg->real_offset;
start_bblock->next_bb = init_localsbb;
link_bblock (cfg, start_bblock, init_localsbb);
init_localsbb2 = init_localsbb;
cfg->cbb = init_localsbb;
if (cfg->gsharedvt && cfg->method == method) {
MonoGSharedVtMethodInfo *info;
MonoInst *var, *locals_var;
int dreg;
info = (MonoGSharedVtMethodInfo *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoGSharedVtMethodInfo));
info->method = cfg->method;
info->count_entries = 16;
info->entries = (MonoRuntimeGenericContextInfoTemplate *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
cfg->gsharedvt_info = info;
var = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL);
/* prevent it from being register allocated */
//var->flags |= MONO_INST_VOLATILE;
cfg->gsharedvt_info_var = var;
ins = emit_get_rgctx_gsharedvt_method (cfg, mini_method_check_context_used (cfg, method), method, info);
MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, var->dreg, ins->dreg);
/* Allocate locals */
locals_var = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL);
/* prevent it from being register allocated */
//locals_var->flags |= MONO_INST_VOLATILE;
cfg->gsharedvt_locals_var = locals_var;
dreg = alloc_ireg (cfg);
MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, dreg, var->dreg, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, locals_size));
MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
ins->dreg = locals_var->dreg;
ins->sreg1 = dreg;
MONO_ADD_INS (cfg->cbb, ins);
cfg->gsharedvt_locals_var_ins = ins;
cfg->flags |= MONO_CFG_HAS_ALLOCA;
/*
if (init_locals)
ins->flags |= MONO_INST_INIT;
*/
if (cfg->llvm_only) {
init_localsbb = cfg->cbb;
init_localsbb2 = cfg->cbb;
}
}
if (cfg->deopt) {
/*
* Push an LMFExt frame which points to a MonoMethodILState structure.
*/
emit_push_lmf (cfg);
/* The type doesn't matter, the llvm backend will use the correct type */
MonoInst *il_state_var = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL);
il_state_var->flags |= MONO_INST_VOLATILE;
cfg->il_state_var = il_state_var;
EMIT_NEW_VARLOADA (cfg, ins, cfg->il_state_var, NULL);
int il_state_addr_reg = ins->dreg;
/* il_state->method = method */
MonoInst *method_ins = emit_get_rgctx_method (cfg, -1, cfg->method, MONO_RGCTX_INFO_METHOD);
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, il_state_addr_reg, MONO_STRUCT_OFFSET (MonoMethodILState, method), method_ins->dreg);
EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
int lmf_reg = ins->dreg;
/* lmf->kind = MONO_LMFEXT_IL_STATE */
MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, lmf_reg, MONO_STRUCT_OFFSET (MonoLMFExt, kind), MONO_LMFEXT_IL_STATE);
/* lmf->il_state = il_state */
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, lmf_reg, MONO_STRUCT_OFFSET (MonoLMFExt, il_state), il_state_addr_reg);
/* emit_get_rgctx_method () might create new bblocks */
if (cfg->llvm_only) {
init_localsbb = cfg->cbb;
init_localsbb2 = cfg->cbb;
}
}
if (cfg->llvm_only && cfg->interp && cfg->method == method) {
if (cfg->interp_entry_only)
emit_llvmonly_interp_entry (cfg, header);
}
/* FIRST CODE BLOCK */
NEW_BBLOCK (cfg, tblock);
tblock->cil_code = ip;
cfg->cbb = tblock;
cfg->ip = ip;
init_localsbb->next_bb = cfg->cbb;
link_bblock (cfg, init_localsbb, cfg->cbb);
ADD_BBLOCK (cfg, tblock);
CHECK_CFG_EXCEPTION;
if (header->code_size == 0)
UNVERIFIED;
if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
ip = err_pos;
UNVERIFIED;
}
if (cfg->method == method) {
int breakpoint_id = mono_debugger_method_has_breakpoint (method);
if (breakpoint_id) {
MONO_INST_NEW (cfg, ins, OP_BREAK);
MONO_ADD_INS (cfg->cbb, ins);
}
mono_debug_init_method (cfg, cfg->cbb, breakpoint_id);
}
for (n = 0; n < header->num_locals; ++n) {
if (header->locals [n]->type == MONO_TYPE_VOID && !m_type_is_byref (header->locals [n]))
UNVERIFIED;
}
class_inits = NULL;
/* We force the vtable variable here for all shared methods
for the possibility that they might show up in a stack
trace where their exact instantiation is needed. */
if (cfg->gshared && method == cfg->method) {
if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
mini_method_get_context (method)->method_inst ||
m_class_is_valuetype (method->klass)) {
mono_get_vtable_var (cfg);
} else {
/* FIXME: Is there a better way to do this?
We need the variable live for the duration
of the whole method. */
cfg->args [0]->flags |= MONO_INST_VOLATILE;
}
}
/* add a check for this != NULL to inlined methods */
if (is_virtual_call) {
MonoInst *arg_ins;
//
// This is just a hack to avoid checks in empty methods, which could get inlined
// into finally clauses and prevent the removal of empty finally clauses: all
// variables in finally clauses are marked volatile, so the check couldn't be removed.
//
if (!(cfg->llvm_only && m_class_is_valuetype (method->klass) && header->code_size == 1 && header->code [0] == CEE_RET)) {
NEW_ARGLOAD (cfg, arg_ins, 0);
MONO_ADD_INS (cfg->cbb, arg_ins);
MONO_EMIT_NEW_CHECK_THIS (cfg, arg_ins->dreg);
}
}
skip_dead_blocks = !dont_verify;
if (skip_dead_blocks) {
original_bb = bb = mono_basic_block_split (method, cfg->error, header);
CHECK_CFG_ERROR;
g_assert (bb);
}
/* we use a spare stack slot in SWITCH and NEWOBJ and others */
stack_start = sp = (MonoInst **)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
ins_flag = 0;
start_new_bblock = 0;
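/* Declaration and assignment are split so that the many gotos in this function never jump over an initialization. */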
MonoOpcodeEnum il_op; il_op = MonoOpcodeEnum_Invalid;
emit_set_deopt_il_offset (cfg, ip - cfg->cil_start);
for (guchar *next_ip = ip; ip < end; ip = next_ip) {
MonoOpcodeEnum previous_il_op = il_op;
const guchar *tmp_ip = ip;
const int op_size = mono_opcode_value_and_size (&tmp_ip, end, &il_op);
CHECK_OPSIZE (op_size);
next_ip += op_size;
if (cfg->method == method)
cfg->real_offset = ip - header->code;
else
cfg->real_offset = inline_offset;
cfg->ip = ip;
context_used = 0;
if (start_new_bblock) {
cfg->cbb->cil_length = ip - cfg->cbb->cil_code;
if (start_new_bblock == 2) {
g_assert (ip == tblock->cil_code);
} else {
GET_BBLOCK (cfg, tblock, ip);
}
cfg->cbb->next_bb = tblock;
cfg->cbb = tblock;
start_new_bblock = 0;
for (i = 0; i < cfg->cbb->in_scount; ++i) {
if (cfg->verbose_level > 3)
printf ("loading %d from temp %d\n", i, (int)cfg->cbb->in_stack [i]->inst_c0);
EMIT_NEW_TEMPLOAD (cfg, ins, cfg->cbb->in_stack [i]->inst_c0);
*sp++ = ins;
}
if (class_inits)
g_slist_free (class_inits);
class_inits = NULL;
emit_set_deopt_il_offset (cfg, ip - cfg->cil_start);
} else {
if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != cfg->cbb)) {
link_bblock (cfg, cfg->cbb, tblock);
if (sp != stack_start) {
handle_stack_args (cfg, stack_start, sp - stack_start);
sp = stack_start;
CHECK_UNVERIFIABLE (cfg);
}
cfg->cbb->next_bb = tblock;
cfg->cbb = tblock;
for (i = 0; i < cfg->cbb->in_scount; ++i) {
if (cfg->verbose_level > 3)
printf ("loading %d from temp %d\n", i, (int)cfg->cbb->in_stack [i]->inst_c0);
EMIT_NEW_TEMPLOAD (cfg, ins, cfg->cbb->in_stack [i]->inst_c0);
*sp++ = ins;
}
g_slist_free (class_inits);
class_inits = NULL;
emit_set_deopt_il_offset (cfg, ip - cfg->cil_start);
}
}
/*
* Methods with AggressiveInline flag could be inlined even if the class has a cctor.
* This might create a branch so emit it in the first code bblock instead of into initlocals_bb.
*/
if (ip - header->code == 0 && cfg->method != method && cfg->compile_aot && (method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING) && mono_class_needs_cctor_run (method->klass, method)) {
emit_class_init (cfg, method->klass);
}
if (skip_dead_blocks) {
int ip_offset = ip - header->code;
if (ip_offset == bb->end)
bb = bb->next;
if (bb->dead) {
g_assert (op_size > 0); /* The BB formation pass must catch all bad ops */
if (cfg->verbose_level > 3) printf ("SKIPPING DEAD OP at %x\n", ip_offset);
if (ip_offset + op_size == bb->end) {
MONO_INST_NEW (cfg, ins, OP_NOP);
MONO_ADD_INS (cfg->cbb, ins);
start_new_bblock = 1;
}
continue;
}
}
/*
* Sequence points are points where the debugger can place a breakpoint.
* Currently, we generate these automatically at points where the IL
* stack is empty.
*/
if (seq_points && ((!sym_seq_points && (sp == stack_start)) || (sym_seq_points && mono_bitset_test_fast (seq_point_locs, ip - header->code)))) {
/*
 * Make methods interruptible at the beginning, and at the targets of
 * backward branches.
 * Also, do this at the start of every bblock in methods with clauses,
 * to be able to handle instructions with imprecise control flow like
 * throw/endfinally.
* Backward branches are handled at the end of method-to-ir ().
*/
gboolean intr_loc = ip == header->code || (!cfg->cbb->last_ins && cfg->header->num_clauses);
gboolean sym_seq_point = sym_seq_points && mono_bitset_test_fast (seq_point_locs, ip - header->code);
/* Avoid sequence points on empty IL like .volatile */
// FIXME: Enable this
//if (!(cfg->cbb->last_ins && cfg->cbb->last_ins->opcode == OP_SEQ_POINT)) {
NEW_SEQ_POINT (cfg, ins, ip - header->code, intr_loc);
if ((sp != stack_start) && !sym_seq_point)
ins->flags |= MONO_INST_NONEMPTY_STACK;
MONO_ADD_INS (cfg->cbb, ins);
if (sym_seq_points)
mono_bitset_set_fast (seq_point_set_locs, ip - header->code);
if (cfg->prof_coverage) {
guint32 cil_offset = ip - header->code;
gpointer counter = &cfg->coverage_info->data [cil_offset].count;
cfg->coverage_info->data [cil_offset].cil_code = ip;
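/* Bump the per-offset hit counter atomically where supported; otherwise just store 1 to mark the offset as hit. */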
if (mono_arch_opcode_supported (OP_ATOMIC_ADD_I4)) {
MonoInst *one_ins, *load_ins;
EMIT_NEW_PCONST (cfg, load_ins, counter);
EMIT_NEW_ICONST (cfg, one_ins, 1);
MONO_INST_NEW (cfg, ins, OP_ATOMIC_ADD_I4);
ins->dreg = mono_alloc_ireg (cfg);
ins->inst_basereg = load_ins->dreg;
ins->inst_offset = 0;
ins->sreg2 = one_ins->dreg;
ins->type = STACK_I4;
MONO_ADD_INS (cfg->cbb, ins);
} else {
EMIT_NEW_PCONST (cfg, ins, counter);
MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
}
}
}
cfg->cbb->real_offset = cfg->real_offset;
if (cfg->verbose_level > 3)
printf ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
/*
 * This is used to compute BB_HAS_SIDE_EFFECTS, which is used for the elimination
 * of finally clauses generated by foreach statements, so only IL opcodes which
 * occur in such clauses need to set this.
*/
ins_has_side_effect = TRUE;
// Variables shared by CEE_CALLI CEE_CALL CEE_CALLVIRT CEE_JMP.
// Initialize to either what they all need or zero.
gboolean emit_widen = TRUE;
gboolean tailcall = FALSE;
gboolean common_call = FALSE;
MonoInst *keep_this_alive = NULL;
MonoMethod *cmethod = NULL;
MonoMethodSignature *fsig = NULL;
// These are used only in CALL/CALLVIRT but must be initialized also for CALLI,
// since it jumps into CALL/CALLVIRT.
gboolean need_seq_point = FALSE;
gboolean push_res = TRUE;
gboolean skip_ret = FALSE;
gboolean tailcall_remove_ret = FALSE;
// FIXME split 500 lines load/store field into separate file/function.
MonoOpcodeParameter parameter;
const MonoOpcodeInfo* info = mono_opcode_decode (ip, op_size, il_op, ¶meter);
g_assert (info);
n = parameter.i32;
token = parameter.i32;
target = parameter.branch_target;
// Check stack size for push/pop, except for the variable cases (pushes/pops == -1), e.g. call/ret/newobj.
const int pushes = info->pushes;
const int pops = info->pops;
if (pushes >= 0 && pops >= 0) {
g_assert (pushes - pops <= 1);
if (pushes - pops == 1)
CHECK_STACK_OVF ();
}
if (pops >= 0)
CHECK_STACK (pops);
switch (il_op) {
case MONO_CEE_NOP:
if (seq_points && !sym_seq_points && sp != stack_start) {
/*
* The C# compiler uses these nops to notify the JIT that it should
* insert seq points.
*/
NEW_SEQ_POINT (cfg, ins, ip - header->code, FALSE);
MONO_ADD_INS (cfg->cbb, ins);
}
if (cfg->keep_cil_nops)
MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
else
MONO_INST_NEW (cfg, ins, OP_NOP);
MONO_ADD_INS (cfg->cbb, ins);
emitted_funccall_seq_point = FALSE;
ins_has_side_effect = FALSE;
break;
case MONO_CEE_BREAK:
if (mini_should_insert_breakpoint (cfg->method)) {
ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
} else {
MONO_INST_NEW (cfg, ins, OP_NOP);
MONO_ADD_INS (cfg->cbb, ins);
}
break;
case MONO_CEE_LDARG_0:
case MONO_CEE_LDARG_1:
case MONO_CEE_LDARG_2:
case MONO_CEE_LDARG_3:
case MONO_CEE_LDARG_S:
case MONO_CEE_LDARG:
CHECK_ARG (n);
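/* If the next opcode can consume the address of this value type directly, load the address instead of the value. */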
if (next_ip < end && is_addressable_valuetype_load (cfg, next_ip, cfg->arg_types[n])) {
EMIT_NEW_ARGLOADA (cfg, ins, n);
} else {
EMIT_NEW_ARGLOAD (cfg, ins, n);
}
*sp++ = ins;
break;
case MONO_CEE_LDLOC_0:
case MONO_CEE_LDLOC_1:
case MONO_CEE_LDLOC_2:
case MONO_CEE_LDLOC_3:
case MONO_CEE_LDLOC_S:
case MONO_CEE_LDLOC:
CHECK_LOCAL (n);
if (next_ip < end && is_addressable_valuetype_load (cfg, next_ip, header->locals[n])) {
EMIT_NEW_LOCLOADA (cfg, ins, n);
} else {
EMIT_NEW_LOCLOAD (cfg, ins, n);
}
*sp++ = ins;
break;
case MONO_CEE_STLOC_0:
case MONO_CEE_STLOC_1:
case MONO_CEE_STLOC_2:
case MONO_CEE_STLOC_3:
case MONO_CEE_STLOC_S:
case MONO_CEE_STLOC:
CHECK_LOCAL (n);
--sp;
*sp = convert_value (cfg, header->locals [n], *sp);
if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
UNVERIFIED;
emit_stloc_ir (cfg, sp, header, n);
inline_costs += 1;
break;
case MONO_CEE_LDARGA_S:
case MONO_CEE_LDARGA:
CHECK_ARG (n);
NEW_ARGLOADA (cfg, ins, n);
MONO_ADD_INS (cfg->cbb, ins);
*sp++ = ins;
break;
case MONO_CEE_STARG_S:
case MONO_CEE_STARG:
--sp;
CHECK_ARG (n);
*sp = convert_value (cfg, param_types [n], *sp);
if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
UNVERIFIED;
emit_starg_ir (cfg, sp, n);
break;
case MONO_CEE_LDLOCA:
case MONO_CEE_LDLOCA_S: {
guchar *tmp_ip;
CHECK_LOCAL (n);
if ((tmp_ip = emit_optimized_ldloca_ir (cfg, next_ip, end, n))) {
next_ip = tmp_ip;
il_op = MONO_CEE_INITOBJ;
inline_costs += 1;
break;
}
ins_has_side_effect = FALSE;
EMIT_NEW_LOCLOADA (cfg, ins, n);
*sp++ = ins;
break;
}
case MONO_CEE_LDNULL:
EMIT_NEW_PCONST (cfg, ins, NULL);
ins->type = STACK_OBJ;
*sp++ = ins;
break;
case MONO_CEE_LDC_I4_M1:
case MONO_CEE_LDC_I4_0:
case MONO_CEE_LDC_I4_1:
case MONO_CEE_LDC_I4_2:
case MONO_CEE_LDC_I4_3:
case MONO_CEE_LDC_I4_4:
case MONO_CEE_LDC_I4_5:
case MONO_CEE_LDC_I4_6:
case MONO_CEE_LDC_I4_7:
case MONO_CEE_LDC_I4_8:
case MONO_CEE_LDC_I4_S:
case MONO_CEE_LDC_I4:
EMIT_NEW_ICONST (cfg, ins, n);
*sp++ = ins;
break;
case MONO_CEE_LDC_I8:
MONO_INST_NEW (cfg, ins, OP_I8CONST);
ins->type = STACK_I8;
ins->dreg = alloc_dreg (cfg, STACK_I8);
ins->inst_l = parameter.i64;
MONO_ADD_INS (cfg->cbb, ins);
*sp++ = ins;
break;
case MONO_CEE_LDC_R4: {
float *f;
gboolean use_aotconst = FALSE;
#ifdef TARGET_POWERPC
/* FIXME: Clean this up */
if (cfg->compile_aot)
use_aotconst = TRUE;
#endif
/* FIXME: we should really allocate this only late in the compilation process */
f = (float *)mono_mem_manager_alloc (cfg->mem_manager, sizeof (float));
if (use_aotconst) {
MonoInst *cons;
int dreg;
EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);
dreg = alloc_freg (cfg);
EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
ins->type = cfg->r4_stack_type;
} else {
MONO_INST_NEW (cfg, ins, OP_R4CONST);
ins->type = cfg->r4_stack_type;
ins->dreg = alloc_dreg (cfg, STACK_R8);
ins->inst_p0 = f;
MONO_ADD_INS (cfg->cbb, ins);
}
*f = parameter.f;
*sp++ = ins;
break;
}
case MONO_CEE_LDC_R8: {
double *d;
gboolean use_aotconst = FALSE;
#ifdef TARGET_POWERPC
/* FIXME: Clean this up */
if (cfg->compile_aot)
use_aotconst = TRUE;
#endif
/* FIXME: we should really allocate this only late in the compilation process */
d = (double *)mono_mem_manager_alloc (cfg->mem_manager, sizeof (double));
if (use_aotconst) {
MonoInst *cons;
int dreg;
EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);
dreg = alloc_freg (cfg);
EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
ins->type = STACK_R8;
} else {
MONO_INST_NEW (cfg, ins, OP_R8CONST);
ins->type = STACK_R8;
ins->dreg = alloc_dreg (cfg, STACK_R8);
ins->inst_p0 = d;
MONO_ADD_INS (cfg->cbb, ins);
}
*d = parameter.d;
*sp++ = ins;
break;
}
case MONO_CEE_DUP: {
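/* Implement dup by spilling the value to a temporary and loading it back twice, so each copy is a separate instruction. */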
MonoInst *temp, *store;
MonoClass *klass;
sp--;
ins = *sp;
klass = ins->klass;
temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
ins->klass = klass;
*sp++ = ins;
EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
ins->klass = klass;
*sp++ = ins;
inline_costs += 2;
break;
}
case MONO_CEE_POP:
--sp;
#ifdef TARGET_X86
if (sp [0]->type == STACK_R8)
/* we need to pop the value from the x86 FP stack */
MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
#endif
break;
case MONO_CEE_JMP: {
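/* jmp exits the current method and transfers control to the target method, reusing the caller's arguments; the IL stack must be empty. */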
MonoCallInst *call;
int i, n;
INLINE_FAILURE ("jmp");
GSHAREDVT_FAILURE (il_op);
if (stack_start != sp)
UNVERIFIED;
/* FIXME: check the signature matches */
cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
CHECK_CFG_ERROR;
if (cfg->gshared && mono_method_check_context_used (cmethod))
GENERIC_SHARING_FAILURE (CEE_JMP);
mini_profiler_emit_tail_call (cfg, cmethod);
fsig = mono_method_signature_internal (cmethod);
n = fsig->param_count + fsig->hasthis;
if (cfg->llvm_only) {
MonoInst **args;
args = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
for (i = 0; i < n; ++i)
EMIT_NEW_ARGLOAD (cfg, args [i], i);
ins = mini_emit_method_call_full (cfg, cmethod, fsig, TRUE, args, NULL, NULL, NULL);
/*
* The code in mono-basic-block.c treats the rest of the code as dead, but we
* have to emit a normal return since llvm expects it.
*/
if (cfg->ret)
emit_setret (cfg, ins);
MONO_INST_NEW (cfg, ins, OP_BR);
ins->inst_target_bb = end_bblock;
MONO_ADD_INS (cfg->cbb, ins);
link_bblock (cfg, cfg->cbb, end_bblock);
break;
} else {
/* Handle tailcalls similarly to calls */
DISABLE_AOT (cfg);
mini_emit_tailcall_parameters (cfg, fsig);
MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
call->method = cmethod;
// FIXME Other initialization of the tailcall field occurs after
// it is used. So this is the only "real" use and needs more attention.
call->tailcall = TRUE;
call->signature = fsig;
call->args = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
call->inst.inst_p0 = cmethod;
for (i = 0; i < n; ++i)
EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
if (mini_type_is_vtype (mini_get_underlying_type (call->signature->ret)))
call->vret_var = cfg->vret_addr;
mono_arch_emit_call (cfg, call);
cfg->param_area = MAX(cfg->param_area, call->stack_usage);
MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
}
start_new_bblock = 1;
break;
}
case MONO_CEE_CALLI: {
// FIXME tail.calli is problematic because the this pointer's type
// is not in the signature, and we cannot check for a byref valuetype.
MonoInst *addr;
MonoInst *callee = NULL;
// Variables shared by CEE_CALLI and CEE_CALL/CEE_CALLVIRT.
common_call = TRUE; // i.e. skip_ret/push_res/seq_point logic
cmethod = NULL;
gboolean const inst_tailcall = G_UNLIKELY (debug_tailcall_try_all
? (next_ip < end && next_ip [0] == CEE_RET)
: ((ins_flag & MONO_INST_TAILCALL) != 0));
ins = NULL;
//GSHAREDVT_FAILURE (il_op);
CHECK_STACK (1);
--sp;
addr = *sp;
g_assert (addr);
fsig = mini_get_signature (method, token, generic_context, cfg->error);
CHECK_CFG_ERROR;
if (method->dynamic && fsig->pinvoke) {
MonoInst *args [3];
/*
* This is a call through a function pointer using a pinvoke
* signature. Have to create a wrapper and call that instead.
* FIXME: This is very slow, need to create a wrapper at JIT time
* instead based on the signature.
*/
EMIT_NEW_IMAGECONST (cfg, args [0], ((MonoDynamicMethod*)method)->assembly->image);
EMIT_NEW_PCONST (cfg, args [1], fsig);
args [2] = addr;
// FIXME tailcall?
addr = mono_emit_jit_icall (cfg, mono_get_native_calli_wrapper, args);
}
if (!method->dynamic && fsig->pinvoke &&
!method->wrapper_type) {
/* MONO_WRAPPER_DYNAMIC_METHOD dynamic method handled above in the
method->dynamic case; for other wrapper types assume the code knows
what it's doing and added its own GC transitions */
gboolean skip_gc_trans = fsig->suppress_gc_transition;
if (!skip_gc_trans) {
#if 0
fprintf (stderr, "generating wrapper for calli in method %s with wrapper type %s\n", method->name, mono_wrapper_type_to_str (method->wrapper_type));
#endif
/* Call the wrapper that will do the GC transition instead */
MonoMethod *wrapper = mono_marshal_get_native_func_wrapper_indirect (method->klass, fsig, cfg->compile_aot);
fsig = mono_method_signature_internal (wrapper);
n = fsig->param_count - 1; /* wrapper has extra fnptr param */
CHECK_STACK (n);
/* move the args to allow room for 'this' in the first position */
while (n--) {
--sp;
sp [1] = sp [0];
}
sp[0] = addr; /* n+1 args, first arg is the address of the indirect method to call */
g_assert (!fsig->hasthis && !fsig->pinvoke);
ins = mono_emit_method_call (cfg, wrapper, /*args*/sp, NULL);
goto calli_end;
}
}
n = fsig->param_count + fsig->hasthis;
CHECK_STACK (n);
//g_assert (!virtual_ || fsig->hasthis);
sp -= n;
if (!(cfg->method->wrapper_type && cfg->method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD) && check_call_signature (cfg, fsig, sp)) {
if (break_on_unverified ())
check_call_signature (cfg, fsig, sp); // Again, step through it.
UNVERIFIED;
}
inline_costs += CALL_COST * MIN(10, num_calls++);
/*
* Making generic calls out of gsharedvt methods.
* This needs to be used for all generic calls, not just ones with a gsharedvt signature, to avoid
* patching gshared method addresses into a gsharedvt method.
*/
if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) {
/*
* We pass the address to the gsharedvt trampoline in the rgctx reg
*/
callee = addr;
g_assert (addr); // Doubles as boolean after tailcall check.
}
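/* The && short-circuits: is_supported_tailcall () runs, and sets 'tailcall', only when the tail. prefix was present. */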
inst_tailcall && is_supported_tailcall (cfg, ip, method, NULL, fsig,
FALSE/*virtual irrelevant*/, addr != NULL, &tailcall);
if (save_last_error)
mono_emit_jit_icall (cfg, mono_marshal_clear_last_error, NULL);
if (callee) {
if (method->wrapper_type != MONO_WRAPPER_DELEGATE_INVOKE)
/* Not tested */
GSHAREDVT_FAILURE (il_op);
if (cfg->llvm_only)
// FIXME:
GSHAREDVT_FAILURE (il_op);
addr = emit_get_rgctx_sig (cfg, context_used, fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
ins = (MonoInst*)mini_emit_calli_full (cfg, fsig, sp, addr, NULL, callee, tailcall);
goto calli_end;
}
/* Prevent inlining of methods with indirect calls */
INLINE_FAILURE ("indirect call");
if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST || addr->opcode == OP_GOT_ENTRY) {
MonoJumpInfoType info_type;
gpointer info_data;
/*
* Instead of emitting an indirect call, emit a direct call
* with the contents of the aotconst as the patch info.
*/
if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST) {
info_type = (MonoJumpInfoType)addr->inst_c1;
info_data = addr->inst_p0;
} else {
info_type = (MonoJumpInfoType)addr->inst_right->inst_c1;
info_data = addr->inst_right->inst_left;
}
if (info_type == MONO_PATCH_INFO_ICALL_ADDR) {
// non-JIT icall, mostly builtin, but also user-extensible
tailcall = FALSE;
ins = (MonoInst*)mini_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR_CALL, info_data, fsig, sp);
NULLIFY_INS (addr);
goto calli_end;
} else if (info_type == MONO_PATCH_INFO_JIT_ICALL_ADDR
|| info_type == MONO_PATCH_INFO_SPECIFIC_TRAMPOLINE_LAZY_FETCH_ADDR) {
tailcall = FALSE;
ins = (MonoInst*)mini_emit_abs_call (cfg, info_type, info_data, fsig, sp);
NULLIFY_INS (addr);
goto calli_end;
}
}
if (cfg->llvm_only && !(cfg->method->wrapper_type && cfg->method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD))
ins = mini_emit_llvmonly_calli (cfg, fsig, sp, addr);
else
ins = (MonoInst*)mini_emit_calli_full (cfg, fsig, sp, addr, NULL, NULL, tailcall);
goto calli_end;
}
case MONO_CEE_CALL:
case MONO_CEE_CALLVIRT: {
MonoInst *addr; addr = NULL;
int array_rank; array_rank = 0;
gboolean virtual_; virtual_ = il_op == MONO_CEE_CALLVIRT;
gboolean pass_imt_from_rgctx; pass_imt_from_rgctx = FALSE;
MonoInst *imt_arg; imt_arg = NULL;
gboolean pass_vtable; pass_vtable = FALSE;
gboolean pass_mrgctx; pass_mrgctx = FALSE;
MonoInst *vtable_arg; vtable_arg = NULL;
gboolean check_this; check_this = FALSE;
gboolean delegate_invoke; delegate_invoke = FALSE;
gboolean direct_icall; direct_icall = FALSE;
gboolean tailcall_calli; tailcall_calli = FALSE;
gboolean noreturn; noreturn = FALSE;
gboolean gshared_static_virtual; gshared_static_virtual = FALSE;
#ifdef TARGET_WASM
gboolean needs_stack_walk; needs_stack_walk = FALSE;
#endif
// Variables shared by CEE_CALLI and CEE_CALL/CEE_CALLVIRT.
common_call = FALSE;
// variables to help in assertions
gboolean called_is_supported_tailcall; called_is_supported_tailcall = FALSE;
MonoMethod *tailcall_method; tailcall_method = NULL;
MonoMethod *tailcall_cmethod; tailcall_cmethod = NULL;
MonoMethodSignature *tailcall_fsig; tailcall_fsig = NULL;
gboolean tailcall_virtual; tailcall_virtual = FALSE;
gboolean tailcall_extra_arg; tailcall_extra_arg = FALSE;
gboolean inst_tailcall; inst_tailcall = G_UNLIKELY (debug_tailcall_try_all
? (next_ip < end && next_ip [0] == CEE_RET)
: ((ins_flag & MONO_INST_TAILCALL) != 0));
ins = NULL;
/* Used to pass arguments to called functions */
HandleCallData cdata;
memset (&cdata, 0, sizeof (HandleCallData));
cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
CHECK_CFG_ERROR;
if (cfg->verbose_level > 3)
printf ("cmethod = %s\n", mono_method_get_full_name (cmethod));
MonoMethod *cil_method; cil_method = cmethod;
if (constrained_class) {
if (m_method_is_static (cil_method) && mini_class_check_context_used (cfg, constrained_class)) {
/* get_constrained_method () doesn't work on the gparams used by generic sharing */
// FIXME: Other configurations
//if (!cfg->gsharedvt)
// GENERIC_SHARING_FAILURE (CEE_CALL);
gshared_static_virtual = TRUE;
} else {
cmethod = get_constrained_method (cfg, image, token, cil_method, constrained_class, generic_context);
CHECK_CFG_ERROR;
if (m_class_is_enumtype (constrained_class) && !strcmp (cmethod->name, "GetHashCode")) {
/* Use the corresponding method from the base type to avoid boxing */
MonoType *base_type = mono_class_enum_basetype_internal (constrained_class);
g_assert (base_type);
constrained_class = mono_class_from_mono_type_internal (base_type);
cmethod = get_method_nofail (constrained_class, cmethod->name, 0, 0);
g_assert (cmethod);
}
}
}
if (!dont_verify && !cfg->skip_visibility) {
MonoMethod *target_method = cil_method;
if (method->is_inflated) {
MonoGenericContainer *container = mono_method_get_generic_container(method_definition);
MonoGenericContext *context = (container != NULL ? &container->context : NULL);
target_method = mini_get_method_allow_open (method, token, NULL, context, cfg->error);
CHECK_CFG_ERROR;
}
if (!mono_method_can_access_method (method_definition, target_method) &&
!mono_method_can_access_method (method, cil_method))
emit_method_access_failure (cfg, method, cil_method);
}
if (cfg->llvm_only && cmethod && method_needs_stack_walk (cfg, cmethod)) {
if (cfg->interp && !cfg->interp_entry_only) {
/* Use the interpreter instead */
cfg->exception_message = g_strdup ("stack walk");
cfg->disable_llvm = TRUE;
}
#ifdef TARGET_WASM
else {
needs_stack_walk = TRUE;
}
#endif
}
if (!virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT) && !gshared_static_virtual) {
if (!mono_class_is_interface (method->klass))
emit_bad_image_failure (cfg, method, cil_method);
else
virtual_ = TRUE;
}
if (!m_class_is_inited (cmethod->klass))
if (!mono_class_init_internal (cmethod->klass))
TYPE_LOAD_ERROR (cmethod->klass);
fsig = mono_method_signature_internal (cmethod);
if (!fsig)
LOAD_ERROR;
if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
mini_class_is_system_array (cmethod->klass)) {
array_rank = m_class_get_rank (cmethod->klass);
} else if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) && direct_icalls_enabled (cfg, cmethod)) {
direct_icall = TRUE;
} else if (fsig->pinvoke) {
if (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL) {
/*
* Avoid calling mono_marshal_get_native_wrapper () too early, it might call managed
* callbacks on netcore.
*/
fsig = mono_metadata_signature_dup_mempool (cfg->mempool, fsig);
fsig->pinvoke = FALSE;
} else {
MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod, TRUE, cfg->compile_aot);
fsig = mono_method_signature_internal (wrapper);
}
} else if (constrained_class) {
} else {
fsig = mono_method_get_signature_checked (cmethod, image, token, generic_context, cfg->error);
CHECK_CFG_ERROR;
}
if (cfg->llvm_only && !cfg->method->wrapper_type && (!cmethod || cmethod->is_inflated))
cfg->signatures = g_slist_prepend_mempool (cfg->mempool, cfg->signatures, fsig);
/* See code below */
if (cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature_internal (cmethod)->param_count == 1) {
MonoBasicBlock *tbb;
GET_BBLOCK (cfg, tbb, next_ip);
if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
/*
* We want to extend the try block to cover the call, but we can't do it if the
* call is made directly since its followed by an exception check.
*/
direct_icall = FALSE;
}
}
mono_save_token_info (cfg, image, token, cil_method);
if (!(seq_point_locs && mono_bitset_test_fast (seq_point_locs, next_ip - header->code)))
need_seq_point = TRUE;
/* Don't support calls made using type arguments for now */
/*
if (cfg->gsharedvt) {
if (mini_is_gsharedvt_signature (fsig))
GSHAREDVT_FAILURE (il_op);
}
*/
if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
g_assert_not_reached ();
n = fsig->param_count + fsig->hasthis;
if (!cfg->gshared && mono_class_is_gtd (cmethod->klass))
UNVERIFIED;
if (!cfg->gshared)
g_assert (!mono_method_check_context_used (cmethod));
CHECK_STACK (n);
//g_assert (!virtual_ || fsig->hasthis);
sp -= n;
if (virtual_ && cmethod && sp [0] && sp [0]->opcode == OP_TYPED_OBJREF) {
ERROR_DECL (error);
MonoMethod *new_cmethod = mono_class_get_virtual_method (sp [0]->klass, cmethod, error);
if (is_ok (error)) {
cmethod = new_cmethod;
virtual_ = FALSE;
} else {
mono_error_cleanup (error);
}
}
if (cmethod && method_does_not_return (cmethod)) {
cfg->cbb->out_of_line = TRUE;
noreturn = TRUE;
}
cdata.method = method;
cdata.inst_tailcall = inst_tailcall;
/*
* We have the `constrained.' prefix opcode.
*/
if (constrained_class) {
ins = handle_constrained_call (cfg, cmethod, fsig, constrained_class, sp, &cdata, &cmethod, &virtual_, &emit_widen);
CHECK_CFG_EXCEPTION;
if (!gshared_static_virtual)
constrained_class = NULL;
if (ins)
goto call_end;
}
for (int i = 0; i < fsig->param_count; ++i)
sp [i + fsig->hasthis] = convert_value (cfg, fsig->params [i], sp [i + fsig->hasthis]);
if (check_call_signature (cfg, fsig, sp)) {
if (break_on_unverified ())
check_call_signature (cfg, fsig, sp); // Again, step through it.
UNVERIFIED;
}
if ((m_class_get_parent (cmethod->klass) == mono_defaults.multicastdelegate_class) && !strcmp (cmethod->name, "Invoke"))
delegate_invoke = TRUE;
/*
* Implement a workaround for the inherent races involved in locking:
* Monitor.Enter ()
* try {
* } finally {
* Monitor.Exit ()
* }
* If a thread abort happens between the call to Monitor.Enter () and the start of the
* try block, the Exit () won't be executed, see:
* http://www.bluebytesoftware.com/blog/2007/01/30/MonitorEnterThreadAbortsAndOrphanedLocks.aspx
* To work around this, we extend such try blocks to include the last x bytes
* of the Monitor.Enter () call.
*/
if (cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature_internal (cmethod)->param_count == 1) {
MonoBasicBlock *tbb;
GET_BBLOCK (cfg, tbb, next_ip);
/*
* Only extend try blocks with a finally, to avoid catching exceptions thrown
* from Monitor.Enter like ArgumentNullException.
*/
if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
/* Mark this bblock as needing to be extended */
tbb->extend_try_block = TRUE;
}
}
/* Conversion to a JIT intrinsic */
gboolean ins_type_initialized;
if ((ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp, &ins_type_initialized))) {
if (!MONO_TYPE_IS_VOID (fsig->ret)) {
if (!ins_type_initialized)
mini_type_to_eval_stack_type ((cfg), fsig->ret, ins);
emit_widen = FALSE;
}
// FIXME This is only missed if in fact the intrinsic involves a call.
if (inst_tailcall) // FIXME
mono_tailcall_print ("missed tailcall intrins %s -> %s\n", method->name, cmethod->name);
goto call_end;
}
CHECK_CFG_ERROR;
/*
* If the callee is a shared method, then its static cctor
* might not get called after the call was patched.
*/
if (cfg->gshared && cmethod->klass != method->klass && mono_class_is_ginst (cmethod->klass) && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
emit_class_init (cfg, cmethod->klass);
CHECK_TYPELOAD (cmethod->klass);
}
/* Inlining */
if ((cfg->opt & MONO_OPT_INLINE) && !inst_tailcall &&
(!virtual_ || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
mono_method_check_inlining (cfg, cmethod)) {
int costs;
gboolean always = FALSE;
gboolean is_empty = FALSE;
if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) {
/* Prevent inlining of methods that call wrappers */
INLINE_FAILURE ("wrapper call");
// FIXME? Does this write to cmethod impact tailcall_supported? Probably not.
// Neither pinvoke or icall are likely to be tailcalled.
cmethod = mono_marshal_get_native_wrapper (cmethod, TRUE, FALSE);
always = TRUE;
}
costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, always, &is_empty);
if (costs) {
cfg->real_offset += 5;
if (!MONO_TYPE_IS_VOID (fsig->ret))
/* *sp is already set by inline_method */
ins = *sp;
inline_costs += costs;
// FIXME This is missed if the inlinee contains tail calls that
// would work, but not once inlined into caller.
// This matchingness could be a factor in inlining.
// i.e. Do not inline if it hurts tailcall, do inline
// if it helps and/or is neutral, and helps performance
// using usual heuristics.
// Note that inlining will expose multiple tailcall opportunities
// so the tradeoff is not obvious. If we can tailcall anything
// like desktop, then this factor mostly falls away, except
// that inlining can affect tailcall performance due to
// signature match/mismatch.
if (inst_tailcall) // FIXME
mono_tailcall_print ("missed tailcall inline %s -> %s\n", method->name, cmethod->name);
if (is_empty)
ins_has_side_effect = FALSE;
goto call_end;
}
}
check_method_sharing (cfg, cmethod, &pass_vtable, &pass_mrgctx);
if (cfg->gshared) {
MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
context_used = mini_method_check_context_used (cfg, cmethod);
if (!context_used && gshared_static_virtual)
context_used = mini_class_check_context_used (cfg, constrained_class);
if (context_used && mono_class_is_interface (cmethod->klass) && !m_method_is_static (cmethod)) {
/* Generic method interface
calls are resolved via a
helper function and don't
need an imt. */
if (!cmethod_context || !cmethod_context->method_inst)
pass_imt_from_rgctx = TRUE;
}
/*
* If a shared method calls another
* shared method then the caller must
* have a generic sharing context
* because the magic trampoline
* requires it. FIXME: We shouldn't
* have to force the vtable/mrgctx
* variable here. Instead there
* should be a flag in the cfg to
* request a generic sharing context.
*/
if (context_used &&
((cfg->method->flags & METHOD_ATTRIBUTE_STATIC) || m_class_is_valuetype (cfg->method->klass)))
mono_get_vtable_var (cfg);
}
if (pass_vtable) {
if (context_used) {
vtable_arg = mini_emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
} else {
MonoVTable *vtable = mono_class_vtable_checked (cmethod->klass, cfg->error);
CHECK_CFG_ERROR;
CHECK_TYPELOAD (cmethod->klass);
EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
}
}
if (pass_mrgctx) {
g_assert (!vtable_arg);
if (!cfg->compile_aot) {
/*
* emit_get_rgctx_method () calls mono_class_vtable () so check
* for type load errors before.
*/
mono_class_setup_vtable (cmethod->klass);
CHECK_TYPELOAD (cmethod->klass);
}
vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
if ((!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod))) {
if (virtual_)
check_this = TRUE;
virtual_ = FALSE;
}
}
if (pass_imt_from_rgctx) {
g_assert (!pass_vtable);
imt_arg = emit_get_rgctx_method (cfg, context_used,
cmethod, MONO_RGCTX_INFO_METHOD);
g_assert (imt_arg);
}
if (check_this)
MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
/* Calling virtual generic methods */
// These temporaries help detangle "pure" computation of
// inputs to is_supported_tailcall from side effects, so that
// is_supported_tailcall can be computed just once.
gboolean virtual_generic; virtual_generic = FALSE;
gboolean virtual_generic_imt; virtual_generic_imt = FALSE;
if (virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
!MONO_METHOD_IS_FINAL (cmethod) &&
fsig->generic_param_count &&
!(cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) &&
!cfg->llvm_only) {
g_assert (fsig->is_inflated);
virtual_generic = TRUE;
/* Prevent inlining of methods that contain indirect calls */
INLINE_FAILURE ("virtual generic call");
if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig))
GSHAREDVT_FAILURE (il_op);
if (cfg->backend->have_generalized_imt_trampoline && cfg->backend->gshared_supported && cmethod->wrapper_type == MONO_WRAPPER_NONE) {
virtual_generic_imt = TRUE;
g_assert (!imt_arg);
if (!context_used)
g_assert (cmethod->is_inflated);
imt_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
g_assert (imt_arg);
virtual_ = TRUE;
vtable_arg = NULL;
}
}
// Capture some intent before computing tailcall.
gboolean make_generic_call_out_of_gsharedvt_method;
gboolean will_have_imt_arg;
make_generic_call_out_of_gsharedvt_method = FALSE;
will_have_imt_arg = FALSE;
/*
* Making generic calls out of gsharedvt methods.
* This needs to be used for all generic calls, not just ones with a gsharedvt signature, to avoid
* patching gshared method addresses into a gsharedvt method.
*/
if (cfg->gsharedvt && (mini_is_gsharedvt_signature (fsig) || cmethod->is_inflated || mono_class_is_ginst (cmethod->klass)) &&
!(m_class_get_rank (cmethod->klass) && m_class_get_byval_arg (cmethod->klass)->type != MONO_TYPE_SZARRAY) &&
(!(cfg->llvm_only && virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL)))) {
make_generic_call_out_of_gsharedvt_method = TRUE;
if (virtual_) {
if (fsig->generic_param_count) {
will_have_imt_arg = TRUE;
} else if (mono_class_is_interface (cmethod->klass) && !imt_arg) {
will_have_imt_arg = TRUE;
}
}
}
/* Tail prefix / tailcall optimization */
/* FIXME: Enabling TAILC breaks some inlining/stack trace/etc tests.
Inlining and stack traces are not guaranteed however. */
/* FIXME: runtime generic context pointer for jumps? */
/* FIXME: handle this for generic sharing eventually */
// tailcall means "the backend can and will handle it".
// inst_tailcall means the tail. prefix is present.
tailcall_extra_arg = vtable_arg || imt_arg || will_have_imt_arg || mono_class_is_interface (cmethod->klass);
tailcall = inst_tailcall && is_supported_tailcall (cfg, ip, method, cmethod, fsig,
virtual_, tailcall_extra_arg, &tailcall_calli);
// Writes to imt_arg, vtable_arg, virtual_, cmethod, must not occur from here (inputs to is_supported_tailcall).
// Capture values to later assert they don't change.
called_is_supported_tailcall = TRUE;
tailcall_method = method;
tailcall_cmethod = cmethod;
tailcall_fsig = fsig;
tailcall_virtual = virtual_;
if (virtual_generic) {
if (virtual_generic_imt) {
if (tailcall) {
/* Prevent inlining of methods with tailcalls (the call stack would be altered) */
INLINE_FAILURE ("tailcall");
}
common_call = TRUE;
goto call_end;
}
MonoInst *this_temp, *this_arg_temp, *store;
MonoInst *iargs [4];
this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
MONO_ADD_INS (cfg->cbb, store);
/* FIXME: This should be a managed pointer */
this_arg_temp = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL);
EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
iargs [1] = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
addr = mono_emit_jit_icall (cfg, mono_helper_compile_generic_method, iargs);
EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
ins = (MonoInst*)mini_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
if (inst_tailcall) // FIXME
mono_tailcall_print ("missed tailcall virtual generic %s -> %s\n", method->name, cmethod->name);
goto call_end;
}
CHECK_CFG_ERROR;
/* Tail recursion elimination */
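/*
 * A sketch of the IL shape this recognizes, for a hypothetical method Loop:
 *
 *   ldarg.0
 *   ldc.i4.1
 *   sub
 *   call void Loop (int32)   <- cmethod == method
 *   ret
 *
 * The call/ret pair is replaced by stores to the argument slots and an OP_BR
 * back to the first bblock of the method.
 */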
if (((cfg->opt & MONO_OPT_TAILCALL) || inst_tailcall) && il_op == MONO_CEE_CALL && cmethod == method && next_ip < end && next_ip [0] == CEE_RET && !vtable_arg) {
gboolean has_vtargs = FALSE;
int i;
/* Prevent inlining of methods with tailcalls (the call stack would be altered) */
INLINE_FAILURE ("tailcall");
/* keep it simple */
for (i = fsig->param_count - 1; !has_vtargs && i >= 0; i--)
has_vtargs = MONO_TYPE_ISSTRUCT (mono_method_signature_internal (cmethod)->params [i]);
if (!has_vtargs) {
if (need_seq_point) {
emit_seq_point (cfg, method, ip, FALSE, TRUE);
need_seq_point = FALSE;
}
for (i = 0; i < n; ++i)
EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
mini_profiler_emit_tail_call (cfg, cmethod);
MONO_INST_NEW (cfg, ins, OP_BR);
MONO_ADD_INS (cfg->cbb, ins);
tblock = start_bblock->out_bb [0];
link_bblock (cfg, cfg->cbb, tblock);
ins->inst_target_bb = tblock;
start_new_bblock = 1;
/* skip the CEE_RET, too */
if (ip_in_bb (cfg, cfg->cbb, next_ip))
skip_ret = TRUE;
push_res = FALSE;
need_seq_point = FALSE;
goto call_end;
}
}
inline_costs += CALL_COST * MIN(10, num_calls++);
/*
* Synchronized wrappers.
 * It's hard to determine where to replace a method with its synchronized
* wrapper without causing an infinite recursion. The current solution is
* to add the synchronized wrapper in the trampolines, and to
* change the called method to a dummy wrapper, and resolve that wrapper
* to the real method in mono_jit_compile_method ().
*/
if (cfg->method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
MonoMethod *orig = mono_marshal_method_from_wrapper (cfg->method);
if (cmethod == orig || (cmethod->is_inflated && mono_method_get_declaring_generic_method (cmethod) == orig)) {
// FIXME? Does this write to cmethod impact tailcall_supported? Probably not.
cmethod = mono_marshal_get_synchronized_inner_wrapper (cmethod);
}
}
/*
* Making generic calls out of gsharedvt methods.
* This needs to be used for all generic calls, not just ones with a gsharedvt signature, to avoid
* patching gshared method addresses into a gsharedvt method.
*/
if (make_generic_call_out_of_gsharedvt_method) {
if (virtual_) {
//if (mono_class_is_interface (cmethod->klass))
//GSHAREDVT_FAILURE (il_op);
// disable for possible remoting calls
if (fsig->hasthis && method->klass == mono_defaults.object_class)
GSHAREDVT_FAILURE (il_op);
if (fsig->generic_param_count) {
/* virtual generic call */
g_assert (!imt_arg);
g_assert (will_have_imt_arg);
/* Same as the virtual generic case above */
imt_arg = emit_get_rgctx_method (cfg, context_used,
cmethod, MONO_RGCTX_INFO_METHOD);
g_assert (imt_arg);
} else if (mono_class_is_interface (cmethod->klass) && !imt_arg) {
/* This can happen when we call a fully instantiated iface method */
g_assert (will_have_imt_arg);
imt_arg = emit_get_rgctx_method (cfg, context_used,
cmethod, MONO_RGCTX_INFO_METHOD);
g_assert (imt_arg);
}
/* This is not needed, as the trampoline code will pass one, and it might be passed in the same reg as the imt arg */
vtable_arg = NULL;
}
if ((m_class_get_parent (cmethod->klass) == mono_defaults.multicastdelegate_class) && (!strcmp (cmethod->name, "Invoke")))
keep_this_alive = sp [0];
MonoRgctxInfoType info_type;
if (virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))
info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE_VIRT;
else
info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE;
addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, info_type);
if (cfg->llvm_only) {
// FIXME: Avoid initializing vtable_arg
ins = mini_emit_llvmonly_calli (cfg, fsig, sp, addr);
if (inst_tailcall) // FIXME
mono_tailcall_print ("missed tailcall llvmonly gsharedvt %s -> %s\n", method->name, cmethod->name);
} else {
tailcall = tailcall_calli;
ins = (MonoInst*)mini_emit_calli_full (cfg, fsig, sp, addr, imt_arg, vtable_arg, tailcall);
tailcall_remove_ret |= tailcall;
}
goto call_end;
}
/* Generic sharing */
/*
* Calls to generic methods from shared code cannot go through the trampoline infrastructure
* in some cases, because the called method might end up being different on every call.
* Load the called method address from the rgctx and do an indirect call in these cases.
* Use this if the callee is gsharedvt sharable too, since
* at runtime we might find an instantiation so the call cannot
* be patched (the 'no_patch' code path in mini-trampolines.c).
*/
gboolean gshared_indirect;
gshared_indirect = context_used && !imt_arg && !array_rank && !delegate_invoke;
if (gshared_indirect)
gshared_indirect = (!mono_method_is_generic_sharable_full (cmethod, TRUE, FALSE, FALSE) ||
!mono_class_generic_sharing_enabled (cmethod->klass) ||
gshared_static_virtual);
if (gshared_indirect)
gshared_indirect = (!virtual_ || MONO_METHOD_IS_FINAL (cmethod) ||
!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL));
if (gshared_indirect) {
INLINE_FAILURE ("gshared");
g_assert (cfg->gshared && cmethod);
g_assert (!addr);
if (fsig->hasthis)
MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
if (cfg->llvm_only) {
if (cfg->gsharedvt && mini_is_gsharedvt_variable_signature (fsig)) {
/* Handled in handle_constrained_gsharedvt_call () */
g_assert (!gshared_static_virtual);
addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GSHAREDVT_OUT_WRAPPER);
} else {
if (gshared_static_virtual)
addr = emit_get_rgctx_virt_method (cfg, -1, constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_CODE);
else
addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_FTNDESC);
}
// FIXME: Avoid initializing imt_arg/vtable_arg
ins = mini_emit_llvmonly_calli (cfg, fsig, sp, addr);
if (inst_tailcall) // FIXME
mono_tailcall_print ("missed tailcall context_used_llvmonly %s -> %s\n", method->name, cmethod->name);
} else {
if (gshared_static_virtual) {
/*
* cmethod is a static interface method, the actual called method at runtime
* needs to be computed using constrained_class and cmethod.
*/
addr = emit_get_rgctx_virt_method (cfg, -1, constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_CODE);
} else {
addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
}
if (inst_tailcall)
mono_tailcall_print ("%s tailcall_calli#2 %s -> %s\n", tailcall_calli ? "making" : "missed", method->name, cmethod->name);
tailcall = tailcall_calli;
ins = (MonoInst*)mini_emit_calli_full (cfg, fsig, sp, addr, imt_arg, vtable_arg, tailcall);
tailcall_remove_ret |= tailcall;
}
goto call_end;
}
/* Direct calls to icalls */
if (direct_icall) {
MonoMethod *wrapper;
int costs;
/* Inline the wrapper */
wrapper = mono_marshal_get_native_wrapper (cmethod, TRUE, cfg->compile_aot);
costs = inline_method (cfg, wrapper, fsig, sp, ip, cfg->real_offset, TRUE, NULL);
g_assert (costs > 0);
cfg->real_offset += 5;
if (!MONO_TYPE_IS_VOID (fsig->ret))
/* *sp is already set by inline_method */
ins = *sp;
inline_costs += costs;
if (inst_tailcall) // FIXME
mono_tailcall_print ("missed tailcall direct_icall %s -> %s\n", method->name, cmethod->name);
goto call_end;
}
/* Array methods */
if (array_rank) {
MonoInst *addr;
if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
MonoInst *val = sp [fsig->param_count];
if (val->type == STACK_OBJ) {
MonoInst *iargs [ ] = { sp [0], val };
mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
}
addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
if (!mini_debug_options.weak_memory_model && val->type == STACK_OBJ)
mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, val->dreg);
if (cfg->gen_write_barriers && val->type == STACK_OBJ && !MONO_INS_IS_PCONST_NULL (val))
mini_emit_write_barrier (cfg, addr, val);
if (cfg->gen_write_barriers && mini_is_gsharedvt_klass (cmethod->klass))
GSHAREDVT_FAILURE (il_op);
} else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
} else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
if (!m_class_is_valuetype (m_class_get_element_class (cmethod->klass)) && !readonly)
mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
CHECK_TYPELOAD (cmethod->klass);
readonly = FALSE;
addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
ins = addr;
} else {
g_assert_not_reached ();
}
emit_widen = FALSE;
if (inst_tailcall) // FIXME
mono_tailcall_print ("missed tailcall array_rank %s -> %s\n", method->name, cmethod->name);
goto call_end;
}
ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual_ ? sp [0] : NULL);
if (ins) {
if (inst_tailcall) // FIXME
mono_tailcall_print ("missed tailcall redirect %s -> %s\n", method->name, cmethod->name);
goto call_end;
}
/* Tail prefix / tailcall optimization */
if (tailcall) {
/* Prevent inlining of methods with tailcalls (the call stack would be altered) */
INLINE_FAILURE ("tailcall");
}
/*
* Virtual calls in llvm-only mode.
*/
if (cfg->llvm_only && virtual_ && cmethod && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL)) {
ins = mini_emit_llvmonly_virtual_call (cfg, cmethod, fsig, context_used, sp);
goto call_end;
}
/* Common call */
if (!(cfg->opt & MONO_OPT_AGGRESSIVE_INLINING) && !(method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING) && !(cmethod->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING) && !method_does_not_return (cmethod))
INLINE_FAILURE ("call");
common_call = TRUE;
#ifdef TARGET_WASM
/* Push an LMF so these frames can be enumerated during stack walks by mono_arch_unwind_frame () */
if (needs_stack_walk && !cfg->deopt) {
MonoInst *method_ins;
int lmf_reg;
emit_push_lmf (cfg);
EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
lmf_reg = ins->dreg;
/* The lmf->method field will be used to look up the MonoJitInfo for this method */
method_ins = emit_get_rgctx_method (cfg, mono_method_check_context_used (cfg->method), cfg->method, MONO_RGCTX_INFO_METHOD);
EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, method), method_ins->dreg);
}
#endif
call_end:
// Check that the decision to tailcall would not have changed.
g_assert (!called_is_supported_tailcall || tailcall_method == method);
// FIXME? cmethod does change, weaken the assert if we weren't tailcalling anyway.
// If this still fails, restructure the code, or call tailcall_supported again and assert no change.
g_assert (!called_is_supported_tailcall || !tailcall || tailcall_cmethod == cmethod);
g_assert (!called_is_supported_tailcall || tailcall_fsig == fsig);
g_assert (!called_is_supported_tailcall || tailcall_virtual == virtual_);
g_assert (!called_is_supported_tailcall || tailcall_extra_arg == (vtable_arg || imt_arg || will_have_imt_arg || mono_class_is_interface (cmethod->klass)));
if (common_call) // FIXME goto call_end && !common_call often skips tailcall processing.
ins = mini_emit_method_call_full (cfg, cmethod, fsig, tailcall, sp, virtual_ ? sp [0] : NULL,
imt_arg, vtable_arg);
/*
* Handle devirt of some A.B.C calls by replacing the result of A.B with an OP_TYPED_OBJREF instruction, so the .C
* call can be devirtualized above.
*/
if (cmethod)
ins = handle_call_res_devirt (cfg, cmethod, ins);
#ifdef TARGET_WASM
if (common_call && needs_stack_walk && !cfg->deopt)
/* If an exception is thrown, the LMF is popped by a call to mini_llvmonly_pop_lmf () */
emit_pop_lmf (cfg);
#endif
if (noreturn) {
MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
MONO_ADD_INS (cfg->cbb, ins);
}
calli_end:
if ((tailcall_remove_ret || (common_call && tailcall)) && !cfg->llvm_only) {
link_bblock (cfg, cfg->cbb, end_bblock);
start_new_bblock = 1;
// FIXME: Eliminate unreachable epilogs
/*
* OP_TAILCALL has no return value, so skip the CEE_RET if it is
* only reachable from this call.
*/
GET_BBLOCK (cfg, tblock, next_ip);
if (tblock == cfg->cbb || tblock->in_count == 0)
skip_ret = TRUE;
push_res = FALSE;
need_seq_point = FALSE;
}
if (ins_flag & MONO_INST_TAILCALL)
mini_test_tailcall (cfg, tailcall);
/* End of call, INS should contain the result of the call, if any */
if (push_res && !MONO_TYPE_IS_VOID (fsig->ret)) {
g_assert (ins);
if (emit_widen)
*sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
else
*sp++ = ins;
}
if (save_last_error) {
save_last_error = FALSE;
#ifdef TARGET_WIN32
// Making icalls etc could clobber the value so emit inline code
// to read last error on Windows.
MONO_INST_NEW (cfg, ins, OP_GET_LAST_ERROR);
ins->dreg = alloc_dreg (cfg, STACK_I4);
ins->type = STACK_I4;
MONO_ADD_INS (cfg->cbb, ins);
mono_emit_jit_icall (cfg, mono_marshal_set_last_error_windows, &ins);
#else
mono_emit_jit_icall (cfg, mono_marshal_set_last_error, NULL);
#endif
}
if (keep_this_alive) {
MonoInst *dummy_use;
/* See mini_emit_method_call_full () */
EMIT_NEW_DUMMY_USE (cfg, dummy_use, keep_this_alive);
}
if (cfg->llvm_only && cmethod && method_needs_stack_walk (cfg, cmethod)) {
/*
* Clang can convert these calls to tailcalls which screw up the stack
* walk. This happens even when the -fno-optimize-sibling-calls
* option is passed to clang.
* Work around this by emitting a dummy call.
*/
mono_emit_jit_icall (cfg, mono_dummy_jit_icall, NULL);
}
CHECK_CFG_EXCEPTION;
if (skip_ret) {
// FIXME When not followed by CEE_RET, correct behavior is to raise an exception.
g_assert (next_ip [0] == CEE_RET);
next_ip += 1;
il_op = MonoOpcodeEnum_Invalid; // Call or ret? Unclear.
}
ins_flag = 0;
constrained_class = NULL;
if (need_seq_point) {
//check if this is a nested call and remove the non_empty_stack flag of the last call, only for non-native methods
if (!(method->flags & METHOD_IMPL_ATTRIBUTE_NATIVE)) {
if (emitted_funccall_seq_point) {
if (cfg->last_seq_point)
cfg->last_seq_point->flags |= MONO_INST_NESTED_CALL;
}
else
emitted_funccall_seq_point = TRUE;
}
emit_seq_point (cfg, method, next_ip, FALSE, TRUE);
}
break;
}
case MONO_CEE_RET:
if (!detached_before_ret)
mini_profiler_emit_leave (cfg, sig->ret->type != MONO_TYPE_VOID ? sp [-1] : NULL);
g_assert (!method_does_not_return (method));
if (cfg->method != method) {
/* return from inlined method */
/*
* If in_count == 0, that means the ret is unreachable due to
* being preceded by a throw. In that case, inline_method () will
* handle setting the return value
* (test case: test_0_inline_throw ()).
*/
if (return_var && cfg->cbb->in_count) {
MonoType *ret_type = mono_method_signature_internal (method)->ret;
MonoInst *store;
CHECK_STACK (1);
--sp;
*sp = convert_value (cfg, ret_type, *sp);
if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
UNVERIFIED;
//g_assert (returnvar != -1);
EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
cfg->ret_var_set = TRUE;
}
} else {
if (cfg->lmf_var && cfg->cbb->in_count && (!cfg->llvm_only || cfg->deopt))
emit_pop_lmf (cfg);
if (cfg->ret) {
MonoType *ret_type = mini_get_underlying_type (mono_method_signature_internal (method)->ret);
if (seq_points && !sym_seq_points) {
/*
* Place a seq point here too even though the IL stack is not
* empty, so a step over on
* call <FOO>
* ret
* will work correctly.
*/
NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
MONO_ADD_INS (cfg->cbb, ins);
}
g_assert (!return_var);
CHECK_STACK (1);
--sp;
*sp = convert_value (cfg, ret_type, *sp);
if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
UNVERIFIED;
emit_setret (cfg, *sp);
}
}
if (sp != stack_start)
UNVERIFIED;
MONO_INST_NEW (cfg, ins, OP_BR);
ins->inst_target_bb = end_bblock;
MONO_ADD_INS (cfg->cbb, ins);
link_bblock (cfg, cfg->cbb, end_bblock);
start_new_bblock = 1;
break;
case MONO_CEE_BR_S:
MONO_INST_NEW (cfg, ins, OP_BR);
GET_BBLOCK (cfg, tblock, target);
link_bblock (cfg, cfg->cbb, tblock);
ins->inst_target_bb = tblock;
if (sp != stack_start) {
handle_stack_args (cfg, stack_start, sp - stack_start);
sp = stack_start;
CHECK_UNVERIFIABLE (cfg);
}
MONO_ADD_INS (cfg->cbb, ins);
start_new_bblock = 1;
inline_costs += BRANCH_COST;
break;
case MONO_CEE_BEQ_S:
case MONO_CEE_BGE_S:
case MONO_CEE_BGT_S:
case MONO_CEE_BLE_S:
case MONO_CEE_BLT_S:
case MONO_CEE_BNE_UN_S:
case MONO_CEE_BGE_UN_S:
case MONO_CEE_BGT_UN_S:
case MONO_CEE_BLE_UN_S:
case MONO_CEE_BLT_UN_S:
MONO_INST_NEW (cfg, ins, il_op + BIG_BRANCH_OFFSET);
ADD_BINCOND (NULL);
sp = stack_start;
inline_costs += BRANCH_COST;
break;
case MONO_CEE_BR:
MONO_INST_NEW (cfg, ins, OP_BR);
GET_BBLOCK (cfg, tblock, target);
link_bblock (cfg, cfg->cbb, tblock);
ins->inst_target_bb = tblock;
if (sp != stack_start) {
handle_stack_args (cfg, stack_start, sp - stack_start);
sp = stack_start;
CHECK_UNVERIFIABLE (cfg);
}
MONO_ADD_INS (cfg->cbb, ins);
start_new_bblock = 1;
inline_costs += BRANCH_COST;
break;
case MONO_CEE_BRFALSE_S:
case MONO_CEE_BRTRUE_S:
case MONO_CEE_BRFALSE:
case MONO_CEE_BRTRUE: {
MonoInst *cmp;
gboolean is_true = il_op == MONO_CEE_BRTRUE_S || il_op == MONO_CEE_BRTRUE;
if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
UNVERIFIED;
sp--;
GET_BBLOCK (cfg, tblock, target);
link_bblock (cfg, cfg->cbb, tblock);
GET_BBLOCK (cfg, tblock, next_ip);
link_bblock (cfg, cfg->cbb, tblock);
if (sp != stack_start) {
handle_stack_args (cfg, stack_start, sp - stack_start);
CHECK_UNVERIFIABLE (cfg);
}
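/* brtrue/brfalse is lowered to a compare against zero followed by a conditional
 * branch: brtrue becomes CEE_BNE_UN, brfalse becomes CEE_BEQ. */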
MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
cmp->sreg1 = sp [0]->dreg;
type_from_op (cfg, cmp, sp [0], NULL);
CHECK_TYPE (cmp);
#if SIZEOF_REGISTER == 4
if (cmp->opcode == OP_LCOMPARE_IMM) {
/* 32 bit targets have no long-compare-immediate, so materialize a zero constant and convert it to OP_LCOMPARE */
MONO_INST_NEW (cfg, ins, OP_I8CONST);
ins->type = STACK_I8;
ins->dreg = alloc_dreg (cfg, STACK_I8);
ins->inst_l = 0;
MONO_ADD_INS (cfg->cbb, ins);
cmp->opcode = OP_LCOMPARE;
cmp->sreg2 = ins->dreg;
}
#endif
MONO_ADD_INS (cfg->cbb, cmp);
MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
type_from_op (cfg, ins, sp [0], NULL);
MONO_ADD_INS (cfg->cbb, ins);
ins->inst_many_bb = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * 2);
GET_BBLOCK (cfg, tblock, target);
ins->inst_true_bb = tblock;
GET_BBLOCK (cfg, tblock, next_ip);
ins->inst_false_bb = tblock;
start_new_bblock = 2;
sp = stack_start;
inline_costs += BRANCH_COST;
break;
}
case MONO_CEE_BEQ:
case MONO_CEE_BGE:
case MONO_CEE_BGT:
case MONO_CEE_BLE:
case MONO_CEE_BLT:
case MONO_CEE_BNE_UN:
case MONO_CEE_BGE_UN:
case MONO_CEE_BGT_UN:
case MONO_CEE_BLE_UN:
case MONO_CEE_BLT_UN:
MONO_INST_NEW (cfg, ins, il_op);
ADD_BINCOND (NULL);
sp = stack_start;
inline_costs += BRANCH_COST;
break;
case MONO_CEE_SWITCH: {
MonoInst *src1;
MonoBasicBlock **targets;
MonoBasicBlock *default_bblock;
MonoJumpInfoBBTable *table;
int offset_reg = alloc_preg (cfg);
int target_reg = alloc_preg (cfg);
int table_reg = alloc_preg (cfg);
int sum_reg = alloc_preg (cfg);
gboolean use_op_switch;
n = read32 (ip + 1);
--sp;
src1 = sp [0];
if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
UNVERIFIED;
ip += 5;
GET_BBLOCK (cfg, default_bblock, next_ip);
default_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
targets = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
for (i = 0; i < n; ++i) {
GET_BBLOCK (cfg, tblock, next_ip + (gint32)read32 (ip));
targets [i] = tblock;
targets [i]->flags |= BB_INDIRECT_JUMP_TARGET;
ip += 4;
}
if (sp != stack_start) {
/*
* Link the current bb with the targets as well, so handle_stack_args
* will set their in_stack correctly.
*/
link_bblock (cfg, cfg->cbb, default_bblock);
for (i = 0; i < n; ++i)
link_bblock (cfg, cfg->cbb, targets [i]);
handle_stack_args (cfg, stack_start, sp - stack_start);
sp = stack_start;
CHECK_UNVERIFIABLE (cfg);
/* Undo the links */
mono_unlink_bblock (cfg, cfg->cbb, default_bblock);
for (i = 0; i < n; ++i)
mono_unlink_bblock (cfg, cfg->cbb, targets [i]);
}
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
for (i = 0; i < n; ++i)
link_bblock (cfg, cfg->cbb, targets [i]);
table = (MonoJumpInfoBBTable *)mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
table->table = targets;
table->table_size = n;
use_op_switch = FALSE;
#ifdef TARGET_ARM
/* ARM implements SWITCH statements differently */
/* FIXME: Make it use the generic implementation */
if (!cfg->compile_aot)
use_op_switch = TRUE;
#endif
if (COMPILE_LLVM (cfg))
use_op_switch = TRUE;
cfg->cbb->has_jump_table = 1;
if (use_op_switch) {
MONO_INST_NEW (cfg, ins, OP_SWITCH);
ins->sreg1 = src1->dreg;
ins->inst_p0 = table;
ins->inst_many_bb = targets;
ins->klass = (MonoClass *)GUINT_TO_POINTER (n);
MONO_ADD_INS (cfg->cbb, ins);
} else {
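/* Generic lowering: compute &table [index] as table_reg + (index << ptr_shift),
 * load the target address from there, and do an indirect branch. */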
if (TARGET_SIZEOF_VOID_P == 8)
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
else
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
#if SIZEOF_REGISTER == 8
/* The upper word might not be zero, and we add it to a 64 bit address later */
MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
#endif
if (cfg->compile_aot) {
MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
} else {
MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
ins->inst_p0 = table;
ins->dreg = table_reg;
MONO_ADD_INS (cfg->cbb, ins);
}
/* FIXME: Use load_memindex */
MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
}
start_new_bblock = 1;
inline_costs += BRANCH_COST * 2;
break;
}
case MONO_CEE_LDIND_I1:
case MONO_CEE_LDIND_U1:
case MONO_CEE_LDIND_I2:
case MONO_CEE_LDIND_U2:
case MONO_CEE_LDIND_I4:
case MONO_CEE_LDIND_U4:
case MONO_CEE_LDIND_I8:
case MONO_CEE_LDIND_I:
case MONO_CEE_LDIND_R4:
case MONO_CEE_LDIND_R8:
case MONO_CEE_LDIND_REF:
--sp;
if (!(ins_flag & MONO_INST_NONULLCHECK))
MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg, FALSE);
ins = mini_emit_memory_load (cfg, m_class_get_byval_arg (ldind_to_type (il_op)), sp [0], 0, ins_flag);
*sp++ = ins;
ins_flag = 0;
break;
case MONO_CEE_STIND_REF:
case MONO_CEE_STIND_I1:
case MONO_CEE_STIND_I2:
case MONO_CEE_STIND_I4:
case MONO_CEE_STIND_I8:
case MONO_CEE_STIND_R4:
case MONO_CEE_STIND_R8:
case MONO_CEE_STIND_I: {
sp -= 2;
if (il_op == MONO_CEE_STIND_REF && sp [1]->type != STACK_OBJ) {
/* stind.ref must only be used with object references. */
UNVERIFIED;
}
if (il_op == MONO_CEE_STIND_R4 && sp [1]->type == STACK_R8)
sp [1] = convert_value (cfg, m_class_get_byval_arg (mono_defaults.single_class), sp [1]);
mini_emit_memory_store (cfg, m_class_get_byval_arg (stind_to_type (il_op)), sp [0], sp [1], ins_flag);
ins_flag = 0;
inline_costs += 1;
break;
}
case MONO_CEE_MUL:
MONO_INST_NEW (cfg, ins, il_op);
sp -= 2;
ins->sreg1 = sp [0]->dreg;
ins->sreg2 = sp [1]->dreg;
type_from_op (cfg, ins, sp [0], sp [1]);
CHECK_TYPE (ins);
ins->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type);
/* Use the immediate opcodes if possible */
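/* e.g. an OP_IMUL whose second operand is an OP_ICONST becomes OP_IMUL_IMM,
 * dropping the second source register. */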
int imm_opcode; imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (ins->opcode, imm_opcode, sp [1]->inst_c0)) {
if (imm_opcode != -1) {
ins->opcode = imm_opcode;
ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
ins->sreg2 = -1;
NULLIFY_INS (sp [1]);
}
}
MONO_ADD_INS ((cfg)->cbb, (ins));
*sp++ = mono_decompose_opcode (cfg, ins);
break;
case MONO_CEE_ADD:
case MONO_CEE_SUB:
case MONO_CEE_DIV:
case MONO_CEE_DIV_UN:
case MONO_CEE_REM:
case MONO_CEE_REM_UN:
case MONO_CEE_AND:
case MONO_CEE_OR:
case MONO_CEE_XOR:
case MONO_CEE_SHL:
case MONO_CEE_SHR:
case MONO_CEE_SHR_UN: {
MONO_INST_NEW (cfg, ins, il_op);
sp -= 2;
ins->sreg1 = sp [0]->dreg;
ins->sreg2 = sp [1]->dreg;
type_from_op (cfg, ins, sp [0], sp [1]);
CHECK_TYPE (ins);
add_widen_op (cfg, ins, &sp [0], &sp [1]);
ins->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type);
/* Use the immediate opcodes if possible */
int imm_opcode; imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) &&
mono_arch_is_inst_imm (ins->opcode, imm_opcode, sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
if (imm_opcode != -1) {
ins->opcode = imm_opcode;
if (sp [1]->opcode == OP_I8CONST) {
#if SIZEOF_REGISTER == 8
ins->inst_imm = sp [1]->inst_l;
#else
ins->inst_l = sp [1]->inst_l;
#endif
} else {
ins->inst_imm = (gssize)(sp [1]->inst_c0);
}
ins->sreg2 = -1;
/* Might be followed by an instruction added by add_widen_op */
if (sp [1]->next == NULL)
NULLIFY_INS (sp [1]);
}
}
MONO_ADD_INS ((cfg)->cbb, (ins));
*sp++ = mono_decompose_opcode (cfg, ins);
break;
}
case MONO_CEE_NEG:
case MONO_CEE_NOT:
case MONO_CEE_CONV_I1:
case MONO_CEE_CONV_I2:
case MONO_CEE_CONV_I4:
case MONO_CEE_CONV_R4:
case MONO_CEE_CONV_R8:
case MONO_CEE_CONV_U4:
case MONO_CEE_CONV_I8:
case MONO_CEE_CONV_U8:
case MONO_CEE_CONV_OVF_I8:
case MONO_CEE_CONV_OVF_U8:
case MONO_CEE_CONV_R_UN:
/* Special case this earlier so we have long constants in the IR */
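/* e.g. ldc.i4.5 followed by conv.i8 turns the existing OP_ICONST into a single
 * OP_I8CONST holding 5, instead of a constant load plus a widening op. */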
if ((il_op == MONO_CEE_CONV_I8 || il_op == MONO_CEE_CONV_U8) && (sp [-1]->opcode == OP_ICONST)) {
int data = sp [-1]->inst_c0;
sp [-1]->opcode = OP_I8CONST;
sp [-1]->type = STACK_I8;
#if SIZEOF_REGISTER == 8
if (il_op == MONO_CEE_CONV_U8)
sp [-1]->inst_c0 = (guint32)data;
else
sp [-1]->inst_c0 = data;
#else
if (il_op == MONO_CEE_CONV_U8)
sp [-1]->inst_l = (guint32)data;
else
sp [-1]->inst_l = data;
#endif
sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
}
else {
ADD_UNOP (il_op);
}
break;
case MONO_CEE_CONV_OVF_I4:
case MONO_CEE_CONV_OVF_I1:
case MONO_CEE_CONV_OVF_I2:
case MONO_CEE_CONV_OVF_I:
case MONO_CEE_CONV_OVF_I1_UN:
case MONO_CEE_CONV_OVF_I2_UN:
case MONO_CEE_CONV_OVF_I4_UN:
case MONO_CEE_CONV_OVF_I8_UN:
case MONO_CEE_CONV_OVF_I_UN:
if (sp [-1]->type == STACK_R8 || sp [-1]->type == STACK_R4) {
/* floats are always signed, _UN has no effect */
ADD_UNOP (CEE_CONV_OVF_I8);
if (il_op == MONO_CEE_CONV_OVF_I1_UN)
ADD_UNOP (MONO_CEE_CONV_OVF_I1);
else if (il_op == MONO_CEE_CONV_OVF_I2_UN)
ADD_UNOP (MONO_CEE_CONV_OVF_I2);
else if (il_op == MONO_CEE_CONV_OVF_I4_UN)
ADD_UNOP (MONO_CEE_CONV_OVF_I4);
else if (il_op == MONO_CEE_CONV_OVF_I8_UN)
;
else
ADD_UNOP (il_op);
} else {
ADD_UNOP (il_op);
}
break;
case MONO_CEE_CONV_OVF_U1:
case MONO_CEE_CONV_OVF_U2:
case MONO_CEE_CONV_OVF_U4:
case MONO_CEE_CONV_OVF_U:
case MONO_CEE_CONV_OVF_U1_UN:
case MONO_CEE_CONV_OVF_U2_UN:
case MONO_CEE_CONV_OVF_U4_UN:
case MONO_CEE_CONV_OVF_U8_UN:
case MONO_CEE_CONV_OVF_U_UN:
if (sp [-1]->type == STACK_R8 || sp [-1]->type == STACK_R4) {
/* floats are always signed, _UN has no effect */
ADD_UNOP (CEE_CONV_OVF_U8);
ADD_UNOP (il_op);
} else {
ADD_UNOP (il_op);
}
break;
case MONO_CEE_CONV_U2:
case MONO_CEE_CONV_U1:
case MONO_CEE_CONV_I:
case MONO_CEE_CONV_U:
ADD_UNOP (il_op);
CHECK_CFG_EXCEPTION;
break;
case MONO_CEE_ADD_OVF:
case MONO_CEE_ADD_OVF_UN:
case MONO_CEE_MUL_OVF:
case MONO_CEE_MUL_OVF_UN:
case MONO_CEE_SUB_OVF:
case MONO_CEE_SUB_OVF_UN:
MONO_INST_NEW (cfg, ins, il_op);
sp -= 2;
ins->sreg1 = sp [0]->dreg;
ins->sreg2 = sp [1]->dreg;
type_from_op (cfg, ins, sp [0], sp [1]);
CHECK_TYPE (ins);
if (ovf_exc)
ins->inst_exc_name = ovf_exc;
else
ins->inst_exc_name = "OverflowException";
/* Have to insert a widening op */
add_widen_op (cfg, ins, &sp [0], &sp [1]);
ins->dreg = alloc_dreg (cfg, (MonoStackType)(ins)->type);
MONO_ADD_INS ((cfg)->cbb, ins);
/* The opcode might be emulated, so we need to special-case this */
if (ovf_exc && mono_find_jit_opcode_emulation (ins->opcode)) {
switch (ins->opcode) {
case OP_IMUL_OVF_UN:
/* This opcode is just a placeholder; it will also be emulated */
ins->opcode = OP_IMUL_OVF_UN_OOM;
break;
case OP_LMUL_OVF_UN:
/* This opcode is just a placeholder; it will also be emulated */
ins->opcode = OP_LMUL_OVF_UN_OOM;
break;
default:
g_assert_not_reached ();
}
}
ovf_exc = NULL;
*sp++ = mono_decompose_opcode (cfg, ins);
break;
case MONO_CEE_CPOBJ:
GSHAREDVT_FAILURE (il_op);
GSHAREDVT_FAILURE (*ip);
klass = mini_get_class (method, token, generic_context);
CHECK_TYPELOAD (klass);
sp -= 2;
mini_emit_memory_copy (cfg, sp [0], sp [1], klass, FALSE, ins_flag);
ins_flag = 0;
break;
case MONO_CEE_LDOBJ: {
int loc_index = -1;
int stloc_len = 0;
--sp;
klass = mini_get_class (method, token, generic_context);
CHECK_TYPELOAD (klass);
/* Optimize the common ldobj+stloc combination */
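/* e.g. ldobj T; stloc.0 loads directly into the local's vreg, skipping the
 * intermediate stack slot. */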
if (next_ip < end) {
switch (next_ip [0]) {
case MONO_CEE_STLOC_S:
CHECK_OPSIZE (7);
loc_index = next_ip [1];
stloc_len = 2;
break;
case MONO_CEE_STLOC_0:
case MONO_CEE_STLOC_1:
case MONO_CEE_STLOC_2:
case MONO_CEE_STLOC_3:
loc_index = next_ip [0] - CEE_STLOC_0;
stloc_len = 1;
break;
default:
break;
}
}
if ((loc_index != -1) && ip_in_bb (cfg, cfg->cbb, next_ip)) {
CHECK_LOCAL (loc_index);
EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, m_class_get_byval_arg (klass), sp [0]->dreg, 0);
ins->dreg = cfg->locals [loc_index]->dreg;
ins->flags |= ins_flag;
il_op = (MonoOpcodeEnum)next_ip [0];
next_ip += stloc_len;
if (ins_flag & MONO_INST_VOLATILE) {
/* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
}
ins_flag = 0;
break;
}
/* Optimize the ldobj+stobj combination */
if (next_ip + 4 < end && next_ip [0] == CEE_STOBJ && ip_in_bb (cfg, cfg->cbb, next_ip) && read32 (next_ip + 1) == token) {
CHECK_STACK (1);
sp --;
mini_emit_memory_copy (cfg, sp [0], sp [1], klass, FALSE, ins_flag);
il_op = (MonoOpcodeEnum)next_ip [0];
next_ip += 5;
ins_flag = 0;
break;
}
ins = mini_emit_memory_load (cfg, m_class_get_byval_arg (klass), sp [0], 0, ins_flag);
*sp++ = ins;
ins_flag = 0;
inline_costs += 1;
break;
}
case MONO_CEE_LDSTR:
if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
ins->type = STACK_OBJ;
*sp = ins;
}
else if (method->wrapper_type != MONO_WRAPPER_NONE) {
MonoInst *iargs [1];
char *str = (char *)mono_method_get_wrapper_data (method, n);
if (cfg->compile_aot)
EMIT_NEW_LDSTRLITCONST (cfg, iargs [0], str);
else
EMIT_NEW_PCONST (cfg, iargs [0], str);
*sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper_internal, iargs);
} else {
{
if (cfg->cbb->out_of_line) {
MonoInst *iargs [2];
if (image == mono_defaults.corlib) {
/*
* Avoid relocations in AOT and save some space by using a
* version of helper_ldstr specialized to mscorlib.
*/
EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
*sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
} else {
/* Avoid creating the string object */
EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
*sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
}
}
else
if (cfg->compile_aot) {
NEW_LDSTRCONST (cfg, ins, image, n);
*sp = ins;
MONO_ADD_INS (cfg->cbb, ins);
}
else {
NEW_PCONST (cfg, ins, NULL);
ins->type = STACK_OBJ;
ins->inst_p0 = mono_ldstr_checked (image, mono_metadata_token_index (n), cfg->error);
CHECK_CFG_ERROR;
if (!ins->inst_p0)
OUT_OF_MEMORY_FAILURE;
*sp = ins;
MONO_ADD_INS (cfg->cbb, ins);
}
}
}
sp++;
break;
case MONO_CEE_NEWOBJ: {
MonoInst *iargs [2];
MonoMethodSignature *fsig;
MonoInst this_ins;
MonoInst *alloc;
MonoInst *vtable_arg = NULL;
cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
CHECK_CFG_ERROR;
fsig = mono_method_get_signature_checked (cmethod, image, token, generic_context, cfg->error);
CHECK_CFG_ERROR;
mono_save_token_info (cfg, image, token, cmethod);
if (!mono_class_init_internal (cmethod->klass))
TYPE_LOAD_ERROR (cmethod->klass);
context_used = mini_method_check_context_used (cfg, cmethod);
if (!dont_verify && !cfg->skip_visibility) {
MonoMethod *cil_method = cmethod;
MonoMethod *target_method = cil_method;
if (method->is_inflated) {
MonoGenericContainer *container = mono_method_get_generic_container(method_definition);
MonoGenericContext *context = (container != NULL ? &container->context : NULL);
target_method = mini_get_method_allow_open (method, token, NULL, context, cfg->error);
CHECK_CFG_ERROR;
}
if (!mono_method_can_access_method (method_definition, target_method) &&
!mono_method_can_access_method (method, cil_method))
emit_method_access_failure (cfg, method, cil_method);
}
if (cfg->gshared && cmethod && cmethod->klass != method->klass && mono_class_is_ginst (cmethod->klass) && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
emit_class_init (cfg, cmethod->klass);
CHECK_TYPELOAD (cmethod->klass);
}
/*
if (cfg->gsharedvt) {
if (mini_is_gsharedvt_variable_signature (sig))
GSHAREDVT_FAILURE (il_op);
}
*/
n = fsig->param_count;
CHECK_STACK (n);
/*
* Generate smaller code for the common newobj <exception> instruction in
* argument checking code.
*/
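/*
 * E.g. newobj instance void [mscorlib]System.ArgumentException::.ctor(string)
 * becomes a call to mono_create_corlib_exception_1 with the class type token
 * and the message string as arguments.
 */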
if (cfg->cbb->out_of_line && m_class_get_image (cmethod->klass) == mono_defaults.corlib &&
is_exception_class (cmethod->klass) && n <= 2 &&
((n < 1) || (!m_type_is_byref (fsig->params [0]) && fsig->params [0]->type == MONO_TYPE_STRING)) &&
((n < 2) || (!m_type_is_byref (fsig->params [1]) && fsig->params [1]->type == MONO_TYPE_STRING))) {
MonoInst *iargs [3];
sp -= n;
EMIT_NEW_ICONST (cfg, iargs [0], m_class_get_type_token (cmethod->klass));
switch (n) {
case 0:
*sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
break;
case 1:
iargs [1] = sp [0];
*sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
break;
case 2:
iargs [1] = sp [0];
iargs [2] = sp [1];
*sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
break;
default:
g_assert_not_reached ();
}
inline_costs += 5;
break;
}
/* move the args to allow room for 'this' in the first position */
while (n--) {
--sp;
sp [1] = sp [0];
}
for (int i = 0; i < fsig->param_count; ++i)
sp [i + fsig->hasthis] = convert_value (cfg, fsig->params [i], sp [i + fsig->hasthis]);
/* check_call_signature () requires sp[0] to be set */
this_ins.type = STACK_OBJ;
sp [0] = &this_ins;
if (check_call_signature (cfg, fsig, sp))
UNVERIFIED;
iargs [0] = NULL;
if (mini_class_is_system_array (cmethod->klass)) {
*sp = emit_get_rgctx_method (cfg, context_used,
cmethod, MONO_RGCTX_INFO_METHOD);
MonoJitICallId function = MONO_JIT_ICALL_ZeroIsReserved;
int rank = m_class_get_rank (cmethod->klass);
int n = fsig->param_count;
/* Optimize the common cases: ctors taking one length per rank (no lbounds). */
if (n == rank) {
switch (n) {
case 1: function = MONO_JIT_ICALL_mono_array_new_1;
break;
case 2: function = MONO_JIT_ICALL_mono_array_new_2;
break;
case 3: function = MONO_JIT_ICALL_mono_array_new_3;
break;
case 4: function = MONO_JIT_ICALL_mono_array_new_4;
break;
default:
break;
}
}
/* Regular case: rank > 4, or length and lbound specified per rank. */
if (function == MONO_JIT_ICALL_ZeroIsReserved) {
// FIXME Maximum value of param_count? Realistically 64. Fits in imm?
if (!array_new_localalloc_ins) {
MONO_INST_NEW (cfg, array_new_localalloc_ins, OP_LOCALLOC_IMM);
array_new_localalloc_ins->dreg = alloc_preg (cfg);
cfg->flags |= MONO_CFG_HAS_ALLOCA;
MONO_ADD_INS (init_localsbb, array_new_localalloc_ins);
}
array_new_localalloc_ins->inst_imm = MAX (array_new_localalloc_ins->inst_imm, n * sizeof (target_mgreg_t));
int dreg = array_new_localalloc_ins->dreg;
if (2 * rank == n) {
/* [lbound, length, lbound, length, ...]
* mono_array_new_n_icall expects a non-interleaved list of
* lbounds and lengths, so deinterleave here.
*/
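/* E.g. for rank 2 the incoming args are [lbound0, length0, lbound1, length1]
 * and the buffer is filled as [lbound0, lbound1, length0, length1]. */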
for (int l = 0; l < 2; ++l) {
int src = l;
int dst = l * rank;
for (int r = 0; r < rank; ++r, src += 2, ++dst) {
NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, dreg, dst * sizeof (target_mgreg_t), sp [src + 1]->dreg);
MONO_ADD_INS (cfg->cbb, ins);
}
}
} else {
/* [length, length, length, ...] */
for (int i = 0; i < n; ++i) {
NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, dreg, i * sizeof (target_mgreg_t), sp [i + 1]->dreg);
MONO_ADD_INS (cfg->cbb, ins);
}
}
EMIT_NEW_ICONST (cfg, ins, n);
sp [1] = ins;
EMIT_NEW_UNALU (cfg, ins, OP_MOVE, alloc_preg (cfg), dreg);
ins->type = STACK_PTR;
sp [2] = ins;
// FIXME Adjust sp by n - 3? Attempts failed.
function = MONO_JIT_ICALL_mono_array_new_n_icall;
}
alloc = mono_emit_jit_icall_id (cfg, function, sp);
} else if (cmethod->string_ctor) {
g_assert (!context_used);
g_assert (!vtable_arg);
/* we simply pass a null pointer */
EMIT_NEW_PCONST (cfg, *sp, NULL);
/* now call the string ctor */
alloc = mini_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, NULL, NULL, NULL);
} else {
if (m_class_is_valuetype (cmethod->klass)) {
iargs [0] = mono_compile_create_var (cfg, m_class_get_byval_arg (cmethod->klass), OP_LOCAL);
mini_emit_init_rvar (cfg, iargs [0]->dreg, m_class_get_byval_arg (cmethod->klass));
EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
alloc = NULL;
/*
* The code generated by mini_emit_virtual_call () expects
* iargs [0] to be a boxed instance, but luckily the vcall
* will be transformed into a normal call there.
*/
} else if (context_used) {
alloc = handle_alloc (cfg, cmethod->klass, FALSE, context_used);
*sp = alloc;
} else {
MonoVTable *vtable = NULL;
if (!cfg->compile_aot)
vtable = mono_class_vtable_checked (cmethod->klass, cfg->error);
CHECK_CFG_ERROR;
CHECK_TYPELOAD (cmethod->klass);
/*
* TypeInitializationExceptions thrown from the mono_runtime_class_init
* call in mono_jit_runtime_invoke () can abort the finalizer thread.
* As a workaround, we call class cctors before allocating objects.
*/
if (mini_field_access_needs_cctor_run (cfg, method, cmethod->klass, vtable) && !(g_slist_find (class_inits, cmethod->klass))) {
emit_class_init (cfg, cmethod->klass);
if (cfg->verbose_level > 2)
printf ("class %s.%s needs init call for ctor\n", m_class_get_name_space (cmethod->klass), m_class_get_name (cmethod->klass));
class_inits = g_slist_prepend (class_inits, cmethod->klass);
}
alloc = handle_alloc (cfg, cmethod->klass, FALSE, 0);
*sp = alloc;
}
CHECK_CFG_EXCEPTION; /*for handle_alloc*/
if (alloc)
MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
/* Now call the actual ctor */
int ctor_inline_costs = 0;
handle_ctor_call (cfg, cmethod, fsig, context_used, sp, ip, &ctor_inline_costs);
// don't contribute to inline_costs if the ctor has [MethodImpl(MethodImplOptions.AggressiveInlining)]
if (!COMPILE_LLVM(cfg) || !(cmethod->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))
inline_costs += ctor_inline_costs;
CHECK_CFG_EXCEPTION;
}
if (alloc == NULL) {
/* Valuetype */
EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
mini_type_to_eval_stack_type (cfg, m_class_get_byval_arg (ins->klass), ins);
*sp++= ins;
} else {
*sp++ = alloc;
}
inline_costs += 5;
if (!(seq_point_locs && mono_bitset_test_fast (seq_point_locs, next_ip - header->code)))
emit_seq_point (cfg, method, next_ip, FALSE, TRUE);
break;
}
case MONO_CEE_CASTCLASS:
case MONO_CEE_ISINST: {
--sp;
klass = mini_get_class (method, token, generic_context);
CHECK_TYPELOAD (klass);
if (sp [0]->type != STACK_OBJ)
UNVERIFIED;
MONO_INST_NEW (cfg, ins, (il_op == MONO_CEE_ISINST) ? OP_ISINST : OP_CASTCLASS);
ins->dreg = alloc_preg (cfg);
ins->sreg1 = (*sp)->dreg;
ins->klass = klass;
ins->type = STACK_OBJ;
MONO_ADD_INS (cfg->cbb, ins);
CHECK_CFG_EXCEPTION;
*sp++ = ins;
cfg->flags |= MONO_CFG_HAS_TYPE_CHECK;
break;
}
case MONO_CEE_UNBOX_ANY: {
MonoInst *res, *addr;
--sp;
klass = mini_get_class (method, token, generic_context);
CHECK_TYPELOAD (klass);
mono_save_token_info (cfg, image, token, klass);
context_used = mini_class_check_context_used (cfg, klass);
if (mini_is_gsharedvt_klass (klass)) {
res = handle_unbox_gsharedvt (cfg, klass, *sp);
inline_costs += 2;
} else if (mini_class_is_reference (klass)) {
if (MONO_INS_IS_PCONST_NULL (*sp)) {
EMIT_NEW_PCONST (cfg, res, NULL);
res->type = STACK_OBJ;
} else {
MONO_INST_NEW (cfg, res, OP_CASTCLASS);
res->dreg = alloc_preg (cfg);
res->sreg1 = (*sp)->dreg;
res->klass = klass;
res->type = STACK_OBJ;
MONO_ADD_INS (cfg->cbb, res);
cfg->flags |= MONO_CFG_HAS_TYPE_CHECK;
}
} else if (mono_class_is_nullable (klass)) {
res = handle_unbox_nullable (cfg, *sp, klass, context_used);
} else {
addr = mini_handle_unbox (cfg, klass, *sp, context_used);
/* LDOBJ */
EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, m_class_get_byval_arg (klass), addr->dreg, 0);
res = ins;
inline_costs += 2;
}
*sp ++ = res;
break;
}
case MONO_CEE_BOX: {
MonoInst *val;
MonoClass *enum_class;
MonoMethod *has_flag;
MonoMethodSignature *has_flag_sig;
--sp;
val = *sp;
klass = mini_get_class (method, token, generic_context);
CHECK_TYPELOAD (klass);
mono_save_token_info (cfg, image, token, klass);
context_used = mini_class_check_context_used (cfg, klass);
if (mini_class_is_reference (klass)) {
*sp++ = val;
break;
}
val = convert_value (cfg, m_class_get_byval_arg (klass), val);
if (klass == mono_defaults.void_class)
UNVERIFIED;
if (target_type_is_incompatible (cfg, m_class_get_byval_arg (klass), val))
UNVERIFIED;
/* frequent check in generic code: box (struct), brtrue */
/*
* Look for:
*
* <push int/long ptr>
* <push int/long>
* box MyFlags
* constrained. MyFlags
* callvirt instance bool class [mscorlib] System.Enum::HasFlag (class [mscorlib] System.Enum)
*
* If we find this sequence and the operand types on box and constrained
* are equal, we can emit a specialized instruction sequence instead of
* the very slow HasFlag () call.
* This code sequence is generated by older mcs/csc; the newer one is handled in
* emit_inst_for_method ().
*/
guint32 constrained_token;
guint32 callvirt_token;
if ((cfg->opt & MONO_OPT_INTRINS) &&
// FIXME ip_in_bb as we go?
next_ip < end && ip_in_bb (cfg, cfg->cbb, next_ip) &&
(ip = il_read_constrained (next_ip, end, &constrained_token)) &&
ip_in_bb (cfg, cfg->cbb, ip) &&
(ip = il_read_callvirt (ip, end, &callvirt_token)) &&
ip_in_bb (cfg, cfg->cbb, ip) &&
m_class_is_enumtype (klass) &&
(enum_class = mini_get_class (method, constrained_token, generic_context)) &&
(has_flag = mini_get_method (cfg, method, callvirt_token, NULL, generic_context)) &&
has_flag->klass == mono_defaults.enum_class &&
!strcmp (has_flag->name, "HasFlag") &&
(has_flag_sig = mono_method_signature_internal (has_flag)) &&
has_flag_sig->hasthis &&
has_flag_sig->param_count == 1) {
CHECK_TYPELOAD (enum_class);
if (enum_class == klass) {
MonoInst *enum_this, *enum_flag;
next_ip = ip;
il_op = MONO_CEE_CALLVIRT;
--sp;
enum_this = sp [0];
enum_flag = sp [1];
*sp++ = mini_handle_enum_has_flag (cfg, klass, enum_this, -1, enum_flag);
break;
}
}
guint32 unbox_any_token;
/*
* Common in generic code:
* box T1, unbox.any T2.
*/
if ((cfg->opt & MONO_OPT_INTRINS) &&
next_ip < end && ip_in_bb (cfg, cfg->cbb, next_ip) &&
(ip = il_read_unbox_any (next_ip, end, &unbox_any_token))) {
MonoClass *unbox_klass = mini_get_class (method, unbox_any_token, generic_context);
CHECK_TYPELOAD (unbox_klass);
if (klass == unbox_klass) {
next_ip = ip;
*sp++ = val;
break;
}
}
// Optimize
//
// box
// call object::GetType()
//
guint32 gettype_token;
if ((ip = il_read_call(next_ip, end, &gettype_token)) && ip_in_bb (cfg, cfg->cbb, ip)) {
MonoMethod* gettype_method = mini_get_method (cfg, method, gettype_token, NULL, generic_context);
if (!strcmp (gettype_method->name, "GetType") && gettype_method->klass == mono_defaults.object_class) {
mono_class_init_internal(klass);
if (mono_class_get_checked (m_class_get_image (klass), m_class_get_type_token (klass), error) == klass) {
if (cfg->compile_aot) {
EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, m_class_get_image (klass), m_class_get_type_token (klass), generic_context);
} else {
MonoType *klass_type = m_class_get_byval_arg (klass);
MonoReflectionType* reflection_type = mono_type_get_object_checked (klass_type, cfg->error);
EMIT_NEW_PCONST (cfg, ins, reflection_type);
}
ins->type = STACK_OBJ;
ins->klass = mono_defaults.systemtype_class;
*sp++ = ins;
next_ip = ip;
break;
}
}
}
// Optimize
//
// box
// ldnull
// ceq (or cgt.un)
//
// to just
//
// ldc.i4.0 (or 1)
guchar* ldnull_ip;
if ((ldnull_ip = il_read_op (next_ip, end, CEE_LDNULL, MONO_CEE_LDNULL)) && ip_in_bb (cfg, cfg->cbb, ldnull_ip)) {
gboolean is_eq = FALSE, is_neq = FALSE;
if ((ip = il_read_op (ldnull_ip, end, CEE_PREFIX1, MONO_CEE_CEQ)))
is_eq = TRUE;
else if ((ip = il_read_op (ldnull_ip, end, CEE_PREFIX1, MONO_CEE_CGT_UN)))
is_neq = TRUE;
if ((is_eq || is_neq) && ip_in_bb (cfg, cfg->cbb, ip) &&
!mono_class_is_nullable (klass) && !mini_is_gsharedvt_klass (klass)) {
next_ip = ip;
il_op = (MonoOpcodeEnum) (is_eq ? CEE_LDC_I4_0 : CEE_LDC_I4_1);
EMIT_NEW_ICONST (cfg, ins, is_eq ? 0 : 1);
ins->type = STACK_I4;
*sp++ = ins;
break;
}
}
guint32 isinst_tk = 0;
if ((ip = il_read_op_and_token (next_ip, end, CEE_ISINST, MONO_CEE_ISINST, &isinst_tk)) &&
ip_in_bb (cfg, cfg->cbb, ip)) {
MonoClass *isinst_class = mini_get_class (method, isinst_tk, generic_context);
if (!mono_class_is_nullable (klass) && !mono_class_is_nullable (isinst_class) &&
!mini_is_gsharedvt_variable_klass (klass) && !mini_is_gsharedvt_variable_klass (isinst_class) &&
!mono_class_is_open_constructed_type (m_class_get_byval_arg (klass)) &&
!mono_class_is_open_constructed_type (m_class_get_byval_arg (isinst_class))) {
// Optimize
//
// box
// isinst [Type]
// brfalse/brtrue
//
// to
//
// ldc.i4.0 (or 1)
// brfalse/brtrue
//
guchar* br_ip = NULL;
if ((br_ip = il_read_brtrue (ip, end, &target)) || (br_ip = il_read_brtrue_s (ip, end, &target)) ||
(br_ip = il_read_brfalse (ip, end, &target)) || (br_ip = il_read_brfalse_s (ip, end, &target))) {
gboolean isinst = mono_class_is_assignable_from_internal (isinst_class, klass);
next_ip = ip;
il_op = (MonoOpcodeEnum) (isinst ? CEE_LDC_I4_1 : CEE_LDC_I4_0);
EMIT_NEW_ICONST (cfg, ins, isinst ? 1 : 0);
ins->type = STACK_I4;
*sp++ = ins;
break;
}
// Optimize
//
// box
// isinst [Type]
// ldnull
// ceq/cgt.un
//
// to
//
// ldc.i4.0 (or 1)
//
guchar* ldnull_ip = NULL;
if ((ldnull_ip = il_read_op (ip, end, CEE_LDNULL, MONO_CEE_LDNULL)) && ip_in_bb (cfg, cfg->cbb, ldnull_ip)) {
gboolean is_eq = FALSE, is_neq = FALSE;
if ((ip = il_read_op (ldnull_ip, end, CEE_PREFIX1, MONO_CEE_CEQ)))
is_eq = TRUE;
else if ((ip = il_read_op (ldnull_ip, end, CEE_PREFIX1, MONO_CEE_CGT_UN)))
is_neq = TRUE;
if ((is_eq || is_neq) && ip_in_bb (cfg, cfg->cbb, ip) &&
!mono_class_is_nullable (klass) && !mini_is_gsharedvt_klass (klass)) {
gboolean isinst = mono_class_is_assignable_from_internal (isinst_class, klass);
next_ip = ip;
if (is_eq)
isinst = !isinst;
il_op = (MonoOpcodeEnum) (isinst ? CEE_LDC_I4_1 : CEE_LDC_I4_0);
EMIT_NEW_ICONST (cfg, ins, isinst ? 1 : 0);
ins->type = STACK_I4;
*sp++ = ins;
break;
}
}
// Optimize
//
// box
// isinst [Type]
// unbox.any
//
// to
//
// nop
//
guchar* unbox_ip = NULL;
guint32 unbox_token = 0;
if ((unbox_ip = il_read_unbox_any (ip, end, &unbox_token)) && ip_in_bb (cfg, cfg->cbb, unbox_ip)) {
MonoClass *unbox_klass = mini_get_class (method, unbox_token, generic_context);
CHECK_TYPELOAD (unbox_klass);
if (!mono_class_is_nullable (unbox_klass) &&
!mini_is_gsharedvt_klass (unbox_klass) &&
klass == isinst_class &&
klass == unbox_klass)
{
*sp++ = val;
next_ip = unbox_ip;
break;
}
}
}
}
gboolean is_true;
// FIXME: LLVM can't handle the inconsistent bb linking
if (!mono_class_is_nullable (klass) &&
!mini_is_gsharedvt_klass (klass) &&
next_ip < end && ip_in_bb (cfg, cfg->cbb, next_ip) &&
( (is_true = !!(ip = il_read_brtrue (next_ip, end, &target))) ||
(is_true = !!(ip = il_read_brtrue_s (next_ip, end, &target))) ||
(ip = il_read_brfalse (next_ip, end, &target)) ||
(ip = il_read_brfalse_s (next_ip, end, &target)))) {
int dreg;
MonoBasicBlock *true_bb, *false_bb;
il_op = (MonoOpcodeEnum)next_ip [0];
next_ip = ip;
if (cfg->verbose_level > 3) {
printf ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
printf ("<box+brtrue opt>\n");
}
/*
* We need to link both bblocks, since that is needed for handling stack
* arguments correctly (see test_0_box_brtrue_opt_regress_81102).
* Branching to only one of them would lead to inconsistencies, so
* generate an ICONST+BRTRUE; the branch opts will get rid of them.
*/
GET_BBLOCK (cfg, true_bb, target);
GET_BBLOCK (cfg, false_bb, next_ip);
mono_link_bblock (cfg, cfg->cbb, true_bb);
mono_link_bblock (cfg, cfg->cbb, false_bb);
if (sp != stack_start) {
handle_stack_args (cfg, stack_start, sp - stack_start);
sp = stack_start;
CHECK_UNVERIFIABLE (cfg);
}
if (COMPILE_LLVM (cfg)) {
dreg = alloc_ireg (cfg);
MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, dreg, is_true ? 0 : 1);
MONO_EMIT_NEW_BRANCH_BLOCK2 (cfg, OP_IBEQ, true_bb, false_bb);
} else {
/* The JIT can't eliminate the iconst+compare */
MONO_INST_NEW (cfg, ins, OP_BR);
ins->inst_target_bb = is_true ? true_bb : false_bb;
MONO_ADD_INS (cfg->cbb, ins);
}
start_new_bblock = 1;
break;
}
if (m_class_is_enumtype (klass) && !mini_is_gsharedvt_klass (klass) && !(val->type == STACK_I8 && TARGET_SIZEOF_VOID_P == 4)) {
/* Can't do this with 64 bit enums on 32 bit since the vtype decomp pass is run after the long decomp pass */
if (val->opcode == OP_ICONST) {
MONO_INST_NEW (cfg, ins, OP_BOX_ICONST);
ins->type = STACK_OBJ;
ins->klass = klass;
ins->inst_c0 = val->inst_c0;
ins->dreg = alloc_dreg (cfg, (MonoStackType)val->type);
} else {
MONO_INST_NEW (cfg, ins, OP_BOX);
ins->type = STACK_OBJ;
ins->klass = klass;
ins->sreg1 = val->dreg;
ins->dreg = alloc_dreg (cfg, (MonoStackType)val->type);
}
MONO_ADD_INS (cfg->cbb, ins);
*sp++ = ins;
} else {
*sp++ = mini_emit_box (cfg, val, klass, context_used);
}
CHECK_CFG_EXCEPTION;
inline_costs += 1;
break;
}
case MONO_CEE_UNBOX: {
--sp;
klass = mini_get_class (method, token, generic_context);
CHECK_TYPELOAD (klass);
mono_save_token_info (cfg, image, token, klass);
context_used = mini_class_check_context_used (cfg, klass);
if (mono_class_is_nullable (klass)) {
MonoInst *val;
val = handle_unbox_nullable (cfg, *sp, klass, context_used);
EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), m_class_get_byval_arg (val->klass));
*sp++= ins;
} else {
ins = mini_handle_unbox (cfg, klass, *sp, context_used);
*sp++ = ins;
}
inline_costs += 2;
break;
}
case MONO_CEE_LDFLD:
case MONO_CEE_LDFLDA:
case MONO_CEE_STFLD:
case MONO_CEE_LDSFLD:
case MONO_CEE_LDSFLDA:
case MONO_CEE_STSFLD: {
MonoClassField *field;
guint foffset;
gboolean is_instance;
gpointer addr = NULL;
gboolean is_special_static;
MonoType *ftype;
MonoInst *store_val = NULL;
MonoInst *thread_ins;
is_instance = (il_op == MONO_CEE_LDFLD || il_op == MONO_CEE_LDFLDA || il_op == MONO_CEE_STFLD);
if (is_instance) {
if (il_op == MONO_CEE_STFLD) {
sp -= 2;
store_val = sp [1];
} else {
--sp;
}
if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
UNVERIFIED;
if (il_op != MONO_CEE_LDFLD && sp [0]->type == STACK_VTYPE)
UNVERIFIED;
} else {
if (il_op == MONO_CEE_STSFLD) {
sp--;
store_val = sp [0];
}
}
if (method->wrapper_type != MONO_WRAPPER_NONE) {
field = (MonoClassField *)mono_method_get_wrapper_data (method, token);
klass = m_field_get_parent (field);
}
else {
klass = NULL;
field = mono_field_from_token_checked (image, token, &klass, generic_context, cfg->error);
if (!field)
CHECK_TYPELOAD (klass);
CHECK_CFG_ERROR;
}
if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
FIELD_ACCESS_FAILURE (method, field);
mono_class_init_internal (klass);
mono_class_setup_fields (klass);
ftype = mono_field_get_type_internal (field);
/*
* LDFLD etc. is usable on static fields as well, so convert those cases to
* the static case.
*/
if (is_instance && ftype->attrs & FIELD_ATTRIBUTE_STATIC) {
switch (il_op) {
case MONO_CEE_LDFLD:
il_op = MONO_CEE_LDSFLD;
break;
case MONO_CEE_STFLD:
il_op = MONO_CEE_STSFLD;
break;
case MONO_CEE_LDFLDA:
il_op = MONO_CEE_LDSFLDA;
break;
default:
g_assert_not_reached ();
}
is_instance = FALSE;
}
context_used = mini_class_check_context_used (cfg, klass);
if (il_op == MONO_CEE_LDSFLD) {
ins = mini_emit_inst_for_field_load (cfg, field);
if (ins) {
*sp++ = ins;
goto field_access_end;
}
}
/* INSTANCE CASE */
if (is_instance)
g_assert (field->offset);
foffset = m_class_is_valuetype (klass) ? field->offset - MONO_ABI_SIZEOF (MonoObject): field->offset;
if (il_op == MONO_CEE_STFLD) {
sp [1] = convert_value (cfg, field->type, sp [1]);
if (target_type_is_incompatible (cfg, field->type, sp [1]))
UNVERIFIED;
{
MonoInst *store;
MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg, foffset > mono_target_pagesize ());
if (ins_flag & MONO_INST_VOLATILE) {
/* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
}
if (mini_is_gsharedvt_klass (klass)) {
MonoInst *offset_ins;
context_used = mini_class_check_context_used (cfg, klass);
offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
/* The value is offset by 1 */
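/* (the rgctx slot holds offset + 1, presumably so that 0 can be reserved for an uninitialized slot) */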
EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
dreg = alloc_ireg_mp (cfg);
EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
if (cfg->gen_write_barriers && mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !MONO_INS_IS_PCONST_NULL (sp [1])) {
store = mini_emit_storing_write_barrier (cfg, ins, sp [1]);
} else {
/* The decomposition will call mini_emit_memory_copy () which will emit a wbarrier if needed */
EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, dreg, 0, sp [1]->dreg);
}
} else {
if (cfg->gen_write_barriers && mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !MONO_INS_IS_PCONST_NULL (sp [1])) {
/* insert call to write barrier */
MonoInst *ptr;
int dreg;
dreg = alloc_ireg_mp (cfg);
EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
store = mini_emit_storing_write_barrier (cfg, ptr, sp [1]);
} else {
EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
}
}
if (sp [0]->opcode != OP_LDADDR)
store->flags |= MONO_INST_FAULT;
store->flags |= ins_flag;
}
goto field_access_end;
}
if (is_instance) {
if (sp [0]->type == STACK_VTYPE) {
MonoInst *var;
/* Have to compute the address of the variable */
var = get_vreg_to_inst (cfg, sp [0]->dreg);
if (!var)
var = mono_compile_create_var_for_vreg (cfg, m_class_get_byval_arg (klass), OP_LOCAL, sp [0]->dreg);
else
g_assert (var->klass == klass);
EMIT_NEW_VARLOADA (cfg, ins, var, m_class_get_byval_arg (var->klass));
sp [0] = ins;
}
if (il_op == MONO_CEE_LDFLDA) {
if (sp [0]->type == STACK_OBJ) {
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
MONO_EMIT_NEW_COND_EXC (cfg, EQ, "NullReferenceException");
}
dreg = alloc_ireg_mp (cfg);
if (mini_is_gsharedvt_klass (klass)) {
MonoInst *offset_ins;
offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
/* The value is offset by 1 */
EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
} else {
EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
}
ins->klass = mono_class_from_mono_type_internal (field->type);
ins->type = STACK_MP;
*sp++ = ins;
} else {
MonoInst *load;
MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg, foffset > mono_target_pagesize ());
#ifdef MONO_ARCH_SIMD_INTRINSICS
if (sp [0]->opcode == OP_LDADDR && m_class_is_simd_type (klass) && cfg->opt & MONO_OPT_SIMD) {
ins = mono_emit_simd_field_load (cfg, field, sp [0]);
if (ins) {
*sp++ = ins;
goto field_access_end;
}
}
#endif
MonoInst *field_add_inst = sp [0];
if (mini_is_gsharedvt_klass (klass)) {
MonoInst *offset_ins;
offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
/* The value is offset by 1 */
EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
EMIT_NEW_BIALU (cfg, field_add_inst, OP_PADD, alloc_ireg_mp (cfg), sp [0]->dreg, offset_ins->dreg);
foffset = 0;
}
load = mini_emit_memory_load (cfg, field->type, field_add_inst, foffset, ins_flag);
if (sp [0]->opcode != OP_LDADDR)
load->flags |= MONO_INST_FAULT;
*sp++ = load;
}
}
if (is_instance)
goto field_access_end;
/* STATIC CASE */
context_used = mini_class_check_context_used (cfg, klass);
if (ftype->attrs & FIELD_ATTRIBUTE_LITERAL) {
mono_error_set_field_missing (cfg->error, m_field_get_parent (field), field->name, NULL, "Using static instructions with literal field");
CHECK_CFG_ERROR;
}
/* The special_static_fields data is initialized in mono_class_vtable (), which is
 * why mono_class_vtable () needs to be called here.
*/
if (!context_used) {
mono_class_vtable_checked (klass, cfg->error);
CHECK_CFG_ERROR;
CHECK_TYPELOAD (klass);
}
addr = mono_special_static_field_get_offset (field, cfg->error);
CHECK_CFG_ERROR;
CHECK_TYPELOAD (klass);
is_special_static = mono_class_field_is_special_static (field);
if (is_special_static && ((gsize)addr & 0x80000000) == 0)
thread_ins = mono_create_tls_get (cfg, TLS_KEY_THREAD);
else
thread_ins = NULL;
/* Generate IR to compute the field address */
if (is_special_static && ((gsize)addr & 0x80000000) == 0 && thread_ins &&
!(context_used && cfg->gsharedvt && mini_is_gsharedvt_klass (klass))) {
/*
* Fast access to TLS data
* Inline version of get_thread_static_data () in
* threads.c.
*/
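/*
 * The encoded offset appears to pack the index into thread->static_data in
 * its low 6 bits and the offset within that chunk in bits 6-30; bit 31 is
 * used as a flag (see the 0x80000000 checks above).
 */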
guint32 offset;
int idx, static_data_reg, array_reg, dreg;
static_data_reg = alloc_ireg (cfg);
MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, MONO_STRUCT_OFFSET (MonoInternalThread, static_data));
if (cfg->compile_aot || context_used) {
int offset_reg, offset2_reg, idx_reg;
/* For TLS variables, this will return the TLS offset */
if (context_used) {
MonoInst *addr_ins = emit_get_rgctx_field (cfg, context_used, field, MONO_RGCTX_INFO_FIELD_OFFSET);
/* The value is offset by 1 */
EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, addr_ins->dreg, addr_ins->dreg, 1);
} else {
EMIT_NEW_SFLDACONST (cfg, ins, field);
}
offset_reg = ins->dreg;
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset_reg, offset_reg, 0x7fffffff);
idx_reg = alloc_ireg (cfg);
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, idx_reg, offset_reg, 0x3f);
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, TARGET_SIZEOF_VOID_P == 8 ? 3 : 2);
MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
array_reg = alloc_ireg (cfg);
MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
offset2_reg = alloc_ireg (cfg);
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_UN_IMM, offset2_reg, offset_reg, 6);
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset2_reg, 0x1ffffff);
dreg = alloc_ireg (cfg);
EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, array_reg, offset2_reg);
} else {
offset = (gsize)addr & 0x7fffffff;
idx = offset & 0x3f;
array_reg = alloc_ireg (cfg);
MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, idx * TARGET_SIZEOF_VOID_P);
dreg = alloc_ireg (cfg);
EMIT_NEW_BIALU_IMM (cfg, ins, OP_ADD_IMM, dreg, array_reg, ((offset >> 6) & 0x1ffffff));
}
} else if ((cfg->compile_aot && is_special_static) ||
(context_used && is_special_static)) {
MonoInst *iargs [1];
g_assert (m_field_get_parent (field));
if (context_used) {
iargs [0] = emit_get_rgctx_field (cfg, context_used,
field, MONO_RGCTX_INFO_CLASS_FIELD);
} else {
EMIT_NEW_FIELDCONST (cfg, iargs [0], field);
}
ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
} else if (context_used) {
MonoInst *static_data;
/*
g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
method->klass->name_space, method->klass->name, method->name,
depth, field->offset);
*/
if (mono_class_needs_cctor_run (klass, method))
emit_class_init (cfg, klass);
/*
* The pointer we're computing here is
*
* super_info.static_data + field->offset
*/
static_data = mini_emit_get_rgctx_klass (cfg, context_used,
klass, MONO_RGCTX_INFO_STATIC_DATA);
if (mini_is_gsharedvt_klass (klass)) {
MonoInst *offset_ins;
offset_ins = emit_get_rgctx_field (cfg, context_used, field, MONO_RGCTX_INFO_FIELD_OFFSET);
/* The value is offset by 1 */
EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
dreg = alloc_ireg_mp (cfg);
EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, static_data->dreg, offset_ins->dreg);
} else if (field->offset == 0) {
ins = static_data;
} else {
int addr_reg = mono_alloc_preg (cfg);
EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
}
} else if (cfg->compile_aot && addr) {
MonoInst *iargs [1];
g_assert (m_field_get_parent (field));
EMIT_NEW_FIELDCONST (cfg, iargs [0], field);
ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
} else {
MonoVTable *vtable = NULL;
if (!cfg->compile_aot)
vtable = mono_class_vtable_checked (klass, cfg->error);
CHECK_CFG_ERROR;
CHECK_TYPELOAD (klass);
if (!addr) {
if (mini_field_access_needs_cctor_run (cfg, method, klass, vtable)) {
if (!(g_slist_find (class_inits, klass))) {
emit_class_init (cfg, klass);
if (cfg->verbose_level > 2)
printf ("class %s.%s needs init call for %s\n", m_class_get_name_space (klass), m_class_get_name (klass), mono_field_get_name (field));
class_inits = g_slist_prepend (class_inits, klass);
}
} else {
if (cfg->run_cctors) {
/* This makes it so that inlining cannot trigger */
/* .cctors: too many apps depend on them */
/* running in a specific order... */
g_assert (vtable);
if (!vtable->initialized && m_class_has_cctor (vtable->klass))
INLINE_FAILURE ("class init");
if (!mono_runtime_class_init_full (vtable, cfg->error)) {
mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
goto exception_exit;
}
}
}
if (cfg->compile_aot)
EMIT_NEW_SFLDACONST (cfg, ins, field);
else {
g_assert (vtable);
addr = mono_static_field_get_addr (vtable, field);
g_assert (addr);
EMIT_NEW_PCONST (cfg, ins, addr);
}
} else {
MonoInst *iargs [1];
EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
}
}
/* Generate IR to do the actual load/store operation */
if ((il_op == MONO_CEE_STFLD || il_op == MONO_CEE_STSFLD)) {
if (ins_flag & MONO_INST_VOLATILE) {
/* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
} else if (!mini_debug_options.weak_memory_model && mini_type_is_reference (ftype)) {
mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
}
}
if (il_op == MONO_CEE_LDSFLDA) {
ins->klass = mono_class_from_mono_type_internal (ftype);
ins->type = STACK_PTR;
*sp++ = ins;
} else if (il_op == MONO_CEE_STSFLD) {
MonoInst *store;
EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, ftype, ins->dreg, 0, store_val->dreg);
store->flags |= ins_flag;
} else {
gboolean is_const = FALSE;
MonoVTable *vtable = NULL;
gpointer addr = NULL;
if (!context_used) {
vtable = mono_class_vtable_checked (klass, cfg->error);
CHECK_CFG_ERROR;
CHECK_TYPELOAD (klass);
}
if ((ftype->attrs & FIELD_ATTRIBUTE_INIT_ONLY) && (((addr = mono_aot_readonly_field_override (field)) != NULL) ||
(!context_used && !cfg->compile_aot && vtable->initialized))) {
int ro_type = ftype->type;
if (!addr)
addr = mono_static_field_get_addr (vtable, field);
if (ro_type == MONO_TYPE_VALUETYPE && m_class_is_enumtype (ftype->data.klass)) {
ro_type = mono_class_enum_basetype_internal (ftype->data.klass)->type;
}
GSHAREDVT_FAILURE (il_op);
/* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
is_const = TRUE;
switch (ro_type) {
case MONO_TYPE_BOOLEAN:
case MONO_TYPE_U1:
EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
sp++;
break;
case MONO_TYPE_I1:
EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
sp++;
break;
case MONO_TYPE_CHAR:
case MONO_TYPE_U2:
EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
sp++;
break;
case MONO_TYPE_I2:
EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
sp++;
break;
case MONO_TYPE_I4:
EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
sp++;
break;
case MONO_TYPE_U4:
EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
sp++;
break;
case MONO_TYPE_I:
case MONO_TYPE_U:
case MONO_TYPE_PTR:
case MONO_TYPE_FNPTR:
EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
mini_type_to_eval_stack_type ((cfg), field->type, *sp);
sp++;
break;
case MONO_TYPE_STRING:
case MONO_TYPE_OBJECT:
case MONO_TYPE_CLASS:
case MONO_TYPE_SZARRAY:
case MONO_TYPE_ARRAY:
if (!mono_gc_is_moving ()) {
EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
mini_type_to_eval_stack_type ((cfg), field->type, *sp);
sp++;
} else {
is_const = FALSE;
}
break;
case MONO_TYPE_I8:
case MONO_TYPE_U8:
EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
sp++;
break;
case MONO_TYPE_R4:
case MONO_TYPE_R8:
case MONO_TYPE_VALUETYPE:
default:
is_const = FALSE;
break;
}
}
if (!is_const) {
MonoInst *load;
EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
load->flags |= ins_flag;
*sp++ = load;
}
}
field_access_end:
if ((il_op == MONO_CEE_LDFLD || il_op == MONO_CEE_LDSFLD) && (ins_flag & MONO_INST_VOLATILE)) {
/* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
}
ins_flag = 0;
break;
}
case MONO_CEE_STOBJ:
sp -= 2;
klass = mini_get_class (method, token, generic_context);
CHECK_TYPELOAD (klass);
/* FIXME: should check item at sp [1] is compatible with the type of the store. */
mini_emit_memory_store (cfg, m_class_get_byval_arg (klass), sp [0], sp [1], ins_flag);
ins_flag = 0;
inline_costs += 1;
break;
/*
* Array opcodes
*/
case MONO_CEE_NEWARR: {
MonoInst *len_ins;
const char *data_ptr;
int data_size = 0;
guint32 field_token;
--sp;
klass = mini_get_class (method, token, generic_context);
CHECK_TYPELOAD (klass);
if (m_class_get_byval_arg (klass)->type == MONO_TYPE_VOID)
UNVERIFIED;
context_used = mini_class_check_context_used (cfg, klass);
#ifndef TARGET_S390X
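/* On 32-bit targets a 64-bit array length must be overflow-checked and narrowed to 32 bits. */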
if (sp [0]->type == STACK_I8 && TARGET_SIZEOF_VOID_P == 4) {
MONO_INST_NEW (cfg, ins, OP_LCONV_TO_OVF_U4);
ins->sreg1 = sp [0]->dreg;
ins->type = STACK_I4;
ins->dreg = alloc_ireg (cfg);
MONO_ADD_INS (cfg->cbb, ins);
*sp = mono_decompose_opcode (cfg, ins);
}
#else
/* The array allocator expects a 64-bit input, and we cannot rely
on the high bits of a 32-bit result, so we have to extend. */
if (sp [0]->type == STACK_I4 && TARGET_SIZEOF_VOID_P == 8) {
MONO_INST_NEW (cfg, ins, OP_ICONV_TO_I8);
ins->sreg1 = sp [0]->dreg;
ins->type = STACK_I8;
ins->dreg = alloc_ireg (cfg);
MONO_ADD_INS (cfg->cbb, ins);
*sp = mono_decompose_opcode (cfg, ins);
}
#endif
if (context_used) {
MonoInst *args [3];
MonoClass *array_class = mono_class_create_array (klass, 1);
MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class);
/* FIXME: Use OP_NEWARR and decompose later to help abcrem */
/* vtable */
args [0] = mini_emit_get_rgctx_klass (cfg, context_used,
array_class, MONO_RGCTX_INFO_VTABLE);
/* array len */
args [1] = sp [0];
if (managed_alloc)
ins = mono_emit_method_call (cfg, managed_alloc, args, NULL);
else
ins = mono_emit_jit_icall (cfg, ves_icall_array_new_specific, args);
} else {
/* Decompose later since it is needed by abcrem */
MonoClass *array_type = mono_class_create_array (klass, 1);
mono_class_vtable_checked (array_type, cfg->error);
CHECK_CFG_ERROR;
CHECK_TYPELOAD (array_type);
MONO_INST_NEW (cfg, ins, OP_NEWARR);
ins->dreg = alloc_ireg_ref (cfg);
ins->sreg1 = sp [0]->dreg;
ins->inst_newa_class = klass;
ins->type = STACK_OBJ;
ins->klass = array_type;
MONO_ADD_INS (cfg->cbb, ins);
cfg->flags |= MONO_CFG_NEEDS_DECOMPOSE;
cfg->cbb->needs_decompose = TRUE;
/* Needed so mono_emit_load_get_addr () gets called */
mono_get_got_var (cfg);
}
len_ins = sp [0];
ip += 5;
*sp++ = ins;
inline_costs += 1;
/*
* We inline/optimize the initialization sequence if possible.
* We should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing.
* For small sizes, open code the memcpy.
* Ensure the rva field is big enough.
*/
if ((cfg->opt & MONO_OPT_INTRINS) && next_ip < end
&& ip_in_bb (cfg, cfg->cbb, next_ip)
&& (len_ins->opcode == OP_ICONST)
&& (data_ptr = initialize_array_data (cfg, method,
cfg->compile_aot, next_ip, end, klass,
len_ins->inst_c0, &data_size, &field_token,
&il_op, &next_ip))) {
MonoMethod *memcpy_method = mini_get_memcpy_method ();
MonoInst *iargs [3];
int add_reg = alloc_ireg_mp (cfg);
EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, MONO_STRUCT_OFFSET (MonoArray, vector));
if (cfg->compile_aot) {
EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, m_class_get_image (method->klass), GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
} else {
EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
}
EMIT_NEW_ICONST (cfg, iargs [2], data_size);
mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
}
break;
}
case MONO_CEE_LDLEN:
--sp;
if (sp [0]->type != STACK_OBJ)
UNVERIFIED;
MONO_INST_NEW (cfg, ins, OP_LDLEN);
ins->dreg = alloc_preg (cfg);
ins->sreg1 = sp [0]->dreg;
ins->inst_imm = MONO_STRUCT_OFFSET (MonoArray, max_length);
ins->type = STACK_I4;
/* This flag will be inherited by the decomposition */
ins->flags |= MONO_INST_FAULT | MONO_INST_INVARIANT_LOAD;
MONO_ADD_INS (cfg->cbb, ins);
cfg->flags |= MONO_CFG_NEEDS_DECOMPOSE;
cfg->cbb->needs_decompose = TRUE;
MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, sp [0]->dreg);
*sp++ = ins;
break;
case MONO_CEE_LDELEMA:
sp -= 2;
if (sp [0]->type != STACK_OBJ)
UNVERIFIED;
cfg->flags |= MONO_CFG_HAS_LDELEMA;
klass = mini_get_class (method, token, generic_context);
CHECK_TYPELOAD (klass);
/* We need to make sure that this array is exactly the type it needs
* to be for correctness. The wrappers are lax with their usage,
* so we need to ignore them here.
*/
if (!m_class_is_valuetype (klass) && method->wrapper_type == MONO_WRAPPER_NONE && !readonly) {
MonoClass *array_class = mono_class_create_array (klass, 1);
mini_emit_check_array_type (cfg, sp [0], array_class);
CHECK_TYPELOAD (array_class);
}
readonly = FALSE;
ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE, FALSE);
*sp++ = ins;
break;
case MONO_CEE_LDELEM:
case MONO_CEE_LDELEM_I1:
case MONO_CEE_LDELEM_U1:
case MONO_CEE_LDELEM_I2:
case MONO_CEE_LDELEM_U2:
case MONO_CEE_LDELEM_I4:
case MONO_CEE_LDELEM_U4:
case MONO_CEE_LDELEM_I8:
case MONO_CEE_LDELEM_I:
case MONO_CEE_LDELEM_R4:
case MONO_CEE_LDELEM_R8:
case MONO_CEE_LDELEM_REF: {
MonoInst *addr;
sp -= 2;
if (il_op == MONO_CEE_LDELEM) {
klass = mini_get_class (method, token, generic_context);
CHECK_TYPELOAD (klass);
mono_class_init_internal (klass);
}
else
klass = array_access_to_klass (il_op);
if (sp [0]->type != STACK_OBJ)
UNVERIFIED;
cfg->flags |= MONO_CFG_HAS_LDELEMA;
if (mini_is_gsharedvt_variable_klass (klass)) {
// FIXME-VT: OP_ICONST optimization
addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE, FALSE);
EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, m_class_get_byval_arg (klass), addr->dreg, 0);
ins->opcode = OP_LOADV_MEMBASE;
} else if (sp [1]->opcode == OP_ICONST) {
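/* Constant index: keep the bounds check but fold the element address into the load displacement. */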
int array_reg = sp [0]->dreg;
int index_reg = sp [1]->dreg;
int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + MONO_STRUCT_OFFSET (MonoArray, vector);
if (SIZEOF_REGISTER == 8 && COMPILE_LLVM (cfg))
MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, index_reg, index_reg);
MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, m_class_get_byval_arg (klass), array_reg, offset);
} else {
addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE, FALSE);
EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, m_class_get_byval_arg (klass), addr->dreg, 0);
}
*sp++ = ins;
break;
}
case MONO_CEE_STELEM_I:
case MONO_CEE_STELEM_I1:
case MONO_CEE_STELEM_I2:
case MONO_CEE_STELEM_I4:
case MONO_CEE_STELEM_I8:
case MONO_CEE_STELEM_R4:
case MONO_CEE_STELEM_R8:
case MONO_CEE_STELEM_REF:
case MONO_CEE_STELEM: {
sp -= 3;
cfg->flags |= MONO_CFG_HAS_LDELEMA;
if (il_op == MONO_CEE_STELEM) {
klass = mini_get_class (method, token, generic_context);
CHECK_TYPELOAD (klass);
mono_class_init_internal (klass);
}
else
klass = array_access_to_klass (il_op);
if (sp [0]->type != STACK_OBJ)
UNVERIFIED;
sp [2] = convert_value (cfg, m_class_get_byval_arg (klass), sp [2]);
mini_emit_array_store (cfg, klass, sp, TRUE);
inline_costs += 1;
break;
}
case MONO_CEE_CKFINITE: {
--sp;
if (cfg->llvm_only) {
MonoInst *iargs [1];
iargs [0] = sp [0];
*sp++ = mono_emit_jit_icall (cfg, mono_ckfinite, iargs);
} else {
sp [0] = convert_value (cfg, m_class_get_byval_arg (mono_defaults.double_class), sp [0]);
MONO_INST_NEW (cfg, ins, OP_CKFINITE);
ins->sreg1 = sp [0]->dreg;
ins->dreg = alloc_freg (cfg);
ins->type = STACK_R8;
MONO_ADD_INS (cfg->cbb, ins);
*sp++ = mono_decompose_opcode (cfg, ins);
}
break;
}
case MONO_CEE_REFANYVAL: {
MonoInst *src_var, *src;
int klass_reg = alloc_preg (cfg);
int dreg = alloc_preg (cfg);
GSHAREDVT_FAILURE (il_op);
MONO_INST_NEW (cfg, ins, il_op);
--sp;
klass = mini_get_class (method, token, generic_context);
CHECK_TYPELOAD (klass);
context_used = mini_class_check_context_used (cfg, klass);
// FIXME:
src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
if (!src_var)
src_var = mono_compile_create_var_for_vreg (cfg, m_class_get_byval_arg (mono_defaults.typed_reference_class), OP_LOCAL, sp [0]->dreg);
EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass));
if (context_used) {
MonoInst *klass_ins;
klass_ins = mini_emit_get_rgctx_klass (cfg, context_used,
klass, MONO_RGCTX_INFO_KLASS);
// FIXME:
MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
} else {
mini_emit_class_check (cfg, klass_reg, klass);
}
EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, value));
ins->type = STACK_MP;
ins->klass = klass;
*sp++ = ins;
break;
}
case MONO_CEE_MKREFANY: {
MonoInst *loc, *addr;
GSHAREDVT_FAILURE (il_op);
MONO_INST_NEW (cfg, ins, il_op);
--sp;
klass = mini_get_class (method, token, generic_context);
CHECK_TYPELOAD (klass);
context_used = mini_class_check_context_used (cfg, klass);
loc = mono_compile_create_var (cfg, m_class_get_byval_arg (mono_defaults.typed_reference_class), OP_LOCAL);
EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
MonoInst *const_ins = mini_emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
int type_reg = alloc_preg (cfg);
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
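/* The byval MonoType is embedded in the MonoClass, so its address is klass + offsetof (byval_arg). */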
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, m_class_offsetof_byval_arg ());
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
ins->type = STACK_VTYPE;
ins->klass = mono_defaults.typed_reference_class;
*sp++ = ins;
break;
}
case MONO_CEE_LDTOKEN: {
gpointer handle;
MonoClass *handle_class;
if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
handle = mono_method_get_wrapper_data (method, n);
handle_class = (MonoClass *)mono_method_get_wrapper_data (method, n + 1);
if (handle_class == mono_defaults.typehandle_class)
handle = m_class_get_byval_arg ((MonoClass*)handle);
}
else {
handle = mono_ldtoken_checked (image, n, &handle_class, generic_context, cfg->error);
CHECK_CFG_ERROR;
}
if (!handle)
LOAD_ERROR;
mono_class_init_internal (handle_class);
if (cfg->gshared) {
if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
/* This case handles ldtoken
of an open type, like for
typeof(Gen<>). */
context_used = 0;
} else if (handle_class == mono_defaults.typehandle_class) {
context_used = mini_class_check_context_used (cfg, mono_class_from_mono_type_internal ((MonoType *)handle));
} else if (handle_class == mono_defaults.fieldhandle_class)
context_used = mini_class_check_context_used (cfg, m_field_get_parent (((MonoClassField*)handle)));
else if (handle_class == mono_defaults.methodhandle_class)
context_used = mini_method_check_context_used (cfg, (MonoMethod *)handle);
else
g_assert_not_reached ();
}
{
if ((next_ip + 4 < end) && ip_in_bb (cfg, cfg->cbb, next_ip) &&
((next_ip [0] == CEE_CALL) || (next_ip [0] == CEE_CALLVIRT)) &&
(cmethod = mini_get_method (cfg, method, read32 (next_ip + 1), NULL, generic_context)) &&
(cmethod->klass == mono_defaults.systemtype_class) &&
(strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
MonoClass *tclass = mono_class_from_mono_type_internal ((MonoType *)handle);
mono_class_init_internal (tclass);
// Optimize to true/false if next instruction is `call instance bool Type::get_IsValueType()`
guchar *is_vt_ip;
guint32 is_vt_token;
if ((is_vt_ip = il_read_call (next_ip + 5, end, &is_vt_token)) && ip_in_bb (cfg, cfg->cbb, is_vt_ip)) {
MonoMethod *is_vt_method = mini_get_method (cfg, method, is_vt_token, NULL, generic_context);
if (is_vt_method->klass == mono_defaults.systemtype_class &&
!mini_is_gsharedvt_variable_klass (tclass) &&
!mono_class_is_open_constructed_type (m_class_get_byval_arg (tclass)) &&
!strcmp ("get_IsValueType", is_vt_method->name)) {
next_ip = is_vt_ip;
EMIT_NEW_ICONST (cfg, ins, m_class_is_valuetype (tclass) ? 1 : 0);
ins->type = STACK_I4;
*sp++ = ins;
break;
}
}
if (context_used) {
MONO_INST_NEW (cfg, ins, OP_RTTYPE);
ins->dreg = alloc_ireg_ref (cfg);
ins->inst_p0 = tclass;
ins->type = STACK_OBJ;
MONO_ADD_INS (cfg->cbb, ins);
cfg->flags |= MONO_CFG_NEEDS_DECOMPOSE;
cfg->cbb->needs_decompose = TRUE;
} else if (cfg->compile_aot) {
if (method->wrapper_type) {
error_init (error); // got to do this since there are multiple conditionals below
if (mono_class_get_checked (m_class_get_image (tclass), m_class_get_type_token (tclass), error) == tclass && !generic_context) {
/* Special case for static synchronized wrappers */
EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, m_class_get_image (tclass), m_class_get_type_token (tclass), generic_context);
} else {
mono_error_cleanup (error); /* FIXME don't swallow the error */
/* FIXME: n is not a normal token */
DISABLE_AOT (cfg);
EMIT_NEW_PCONST (cfg, ins, NULL);
}
} else {
EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
}
} else {
MonoReflectionType *rt = mono_type_get_object_checked ((MonoType *)handle, cfg->error);
CHECK_CFG_ERROR;
EMIT_NEW_PCONST (cfg, ins, rt);
}
ins->type = STACK_OBJ;
ins->klass = mono_defaults.runtimetype_class;
il_op = (MonoOpcodeEnum)next_ip [0];
next_ip += 5;
} else {
MonoInst *addr, *vtvar;
vtvar = mono_compile_create_var (cfg, m_class_get_byval_arg (handle_class), OP_LOCAL);
if (context_used) {
if (handle_class == mono_defaults.typehandle_class) {
ins = mini_emit_get_rgctx_klass (cfg, context_used,
mono_class_from_mono_type_internal ((MonoType *)handle),
MONO_RGCTX_INFO_TYPE);
} else if (handle_class == mono_defaults.methodhandle_class) {
ins = emit_get_rgctx_method (cfg, context_used,
(MonoMethod *)handle, MONO_RGCTX_INFO_METHOD);
} else if (handle_class == mono_defaults.fieldhandle_class) {
ins = emit_get_rgctx_field (cfg, context_used,
(MonoClassField *)handle, MONO_RGCTX_INFO_CLASS_FIELD);
} else {
g_assert_not_reached ();
}
} else if (cfg->compile_aot) {
EMIT_NEW_LDTOKENCONST (cfg, ins, image, n, generic_context);
} else {
EMIT_NEW_PCONST (cfg, ins, handle);
}
EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
}
}
*sp++ = ins;
break;
}
case MONO_CEE_THROW:
if (sp [-1]->type != STACK_OBJ)
UNVERIFIED;
MONO_INST_NEW (cfg, ins, OP_THROW);
--sp;
ins->sreg1 = sp [0]->dreg;
cfg->cbb->out_of_line = TRUE;
MONO_ADD_INS (cfg->cbb, ins);
MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
MONO_ADD_INS (cfg->cbb, ins);
sp = stack_start;
link_bblock (cfg, cfg->cbb, end_bblock);
start_new_bblock = 1;
/* This can complicate code generation for llvm since the return value might not be defined */
if (COMPILE_LLVM (cfg))
INLINE_FAILURE ("throw");
break;
case MONO_CEE_ENDFINALLY:
if (!ip_in_finally_clause (cfg, ip - header->code))
UNVERIFIED;
/* mono_save_seq_point_info () depends on this */
if (sp != stack_start)
emit_seq_point (cfg, method, ip, FALSE, FALSE);
MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
MONO_ADD_INS (cfg->cbb, ins);
start_new_bblock = 1;
ins_has_side_effect = FALSE;
/*
* Control will leave the method so empty the stack, otherwise
* the next basic block will start with a nonempty stack.
*/
while (sp != stack_start) {
sp--;
}
break;
case MONO_CEE_LEAVE:
case MONO_CEE_LEAVE_S: {
GList *handlers;
/* empty the stack */
g_assert (sp >= stack_start);
sp = stack_start;
/*
* If this leave statement is in a catch block, check for a
* pending exception, and rethrow it if necessary.
* We avoid doing this in runtime invoke wrappers, since those are called
* by native code which expects the wrapper to catch all exceptions.
*/
for (i = 0; i < header->num_clauses; ++i) {
MonoExceptionClause *clause = &header->clauses [i];
/*
* Use <= in the final comparison to handle clauses with multiple
* leave statements, like in bug #78024.
* The ordering of the exception clauses guarantees that we find the
* innermost clause.
*/
if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((il_op == MONO_CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len) && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE) {
MonoInst *exc_ins;
MonoBasicBlock *dont_throw;
/*
MonoInst *load;
NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
*/
exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
NEW_BBLOCK (cfg, dont_throw);
/*
* Currently, we always rethrow the abort exception, despite the
* fact that this is not correct. See thread6.cs for an example.
* But propagating the abort exception is more important than
* getting the semantics right.
*/
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
MONO_START_BB (cfg, dont_throw);
}
}
#ifdef ENABLE_LLVM
cfg->cbb->try_end = (intptr_t)(ip - header->code);
#endif
if ((handlers = mono_find_leave_clauses (cfg, ip, target))) {
GList *tmp;
/*
* For each finally clause that we exit we need to invoke the finally block.
* After each invocation we need to add try holes for all the clauses that
* we already exited.
*/
for (tmp = handlers; tmp; tmp = tmp->next) {
MonoLeaveClause *leave = (MonoLeaveClause *) tmp->data;
MonoExceptionClause *clause = leave->clause;
if (clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY)
continue;
MonoInst *abort_exc = (MonoInst *)mono_find_exvar_for_offset (cfg, clause->handler_offset);
MonoBasicBlock *dont_throw;
/*
* Emit instrumentation code before linking the basic blocks below as this
* will alter cfg->cbb.
*/
mini_profiler_emit_call_finally (cfg, header, ip, leave->index, clause);
tblock = cfg->cil_offset_to_bb [clause->handler_offset];
g_assert (tblock);
link_bblock (cfg, cfg->cbb, tblock);
MONO_EMIT_NEW_PCONST (cfg, abort_exc->dreg, 0);
MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
ins->inst_target_bb = tblock;
ins->inst_eh_blocks = tmp;
MONO_ADD_INS (cfg->cbb, ins);
cfg->cbb->has_call_handler = 1;
/* Throw exception if exvar is set */
/* FIXME Do we need this for calls from catch/filter ? */
NEW_BBLOCK (cfg, dont_throw);
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, abort_exc->dreg, 0);
MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
mono_emit_jit_icall (cfg, ves_icall_thread_finish_async_abort, NULL);
cfg->cbb->clause_holes = tmp;
MONO_START_BB (cfg, dont_throw);
cfg->cbb->clause_holes = tmp;
if (COMPILE_LLVM (cfg)) {
MonoBasicBlock *target_bb;
/*
* Link the finally bblock with the target, since it will
* conceptually branch there.
*/
GET_BBLOCK (cfg, tblock, cfg->cil_start + clause->handler_offset + clause->handler_len - 1);
GET_BBLOCK (cfg, target_bb, target);
link_bblock (cfg, tblock, target_bb);
}
}
}
MONO_INST_NEW (cfg, ins, OP_BR);
MONO_ADD_INS (cfg->cbb, ins);
GET_BBLOCK (cfg, tblock, target);
link_bblock (cfg, cfg->cbb, tblock);
ins->inst_target_bb = tblock;
start_new_bblock = 1;
break;
}
/*
* Mono specific opcodes
*/
case MONO_CEE_MONO_ICALL: {
g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
const MonoJitICallId jit_icall_id = (MonoJitICallId)token;
MonoJitICallInfo * const info = mono_find_jit_icall_info (jit_icall_id);
CHECK_STACK (info->sig->param_count);
sp -= info->sig->param_count;
if (token == MONO_JIT_ICALL_mono_threads_attach_coop) {
MonoInst *addr;
MonoBasicBlock *next_bb;
if (cfg->compile_aot) {
/*
* This is called on unattached threads, so it cannot go through the trampoline
* infrastructure. Use an indirect call through a got slot initialized at load time
* instead.
*/
EMIT_NEW_AOTCONST (cfg, addr, MONO_PATCH_INFO_JIT_ICALL_ADDR_NOCALL, GUINT_TO_POINTER (jit_icall_id));
ins = mini_emit_calli (cfg, info->sig, sp, addr, NULL, NULL);
} else {
ins = mono_emit_jit_icall_id (cfg, jit_icall_id, sp);
}
/*
* Parts of the initlocals code need to come after this, since they might call methods like memset.
* Also profiling needs to be after attach.
*/
init_localsbb2 = cfg->cbb;
NEW_BBLOCK (cfg, next_bb);
MONO_START_BB (cfg, next_bb);
} else {
if (token == MONO_JIT_ICALL_mono_threads_detach_coop) {
/* can't emit profiling code after a detach, so emit it now */
mini_profiler_emit_leave (cfg, NULL);
detached_before_ret = TRUE;
}
ins = mono_emit_jit_icall_id (cfg, jit_icall_id, sp);
}
if (!MONO_TYPE_IS_VOID (info->sig->ret))
*sp++ = ins;
inline_costs += CALL_COST * MIN(10, num_calls++);
break;
}
MonoJumpInfoType ldptr_type;
case MONO_CEE_MONO_LDPTR_CARD_TABLE:
ldptr_type = MONO_PATCH_INFO_GC_CARD_TABLE_ADDR;
goto mono_ldptr;
case MONO_CEE_MONO_LDPTR_NURSERY_START:
ldptr_type = MONO_PATCH_INFO_GC_NURSERY_START;
goto mono_ldptr;
case MONO_CEE_MONO_LDPTR_NURSERY_BITS:
ldptr_type = MONO_PATCH_INFO_GC_NURSERY_BITS;
goto mono_ldptr;
case MONO_CEE_MONO_LDPTR_INT_REQ_FLAG:
ldptr_type = MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG;
goto mono_ldptr;
case MONO_CEE_MONO_LDPTR_PROFILER_ALLOCATION_COUNT:
ldptr_type = MONO_PATCH_INFO_PROFILER_ALLOCATION_COUNT;
mono_ldptr:
g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
ins = mini_emit_runtime_constant (cfg, ldptr_type, NULL);
*sp++ = ins;
inline_costs += CALL_COST * MIN(10, num_calls++);
break;
case MONO_CEE_MONO_LDPTR: {
gpointer ptr;
g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
ptr = mono_method_get_wrapper_data (method, token);
EMIT_NEW_PCONST (cfg, ins, ptr);
*sp++ = ins;
inline_costs += CALL_COST * MIN(10, num_calls++);
/* Can't embed random pointers into AOT code */
DISABLE_AOT (cfg);
break;
}
case MONO_CEE_MONO_JIT_ICALL_ADDR:
g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
EMIT_NEW_JIT_ICALL_ADDRCONST (cfg, ins, GUINT_TO_POINTER (token));
*sp++ = ins;
inline_costs += CALL_COST * MIN(10, num_calls++);
break;
case MONO_CEE_MONO_ICALL_ADDR: {
MonoMethod *cmethod;
gpointer ptr;
g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
cmethod = (MonoMethod *)mono_method_get_wrapper_data (method, token);
if (cfg->compile_aot) {
if (cfg->direct_pinvoke && ip + 6 < end && (ip [6] == CEE_POP)) {
/*
* This is generated by emit_native_wrapper () to resolve the pinvoke address
* before the call; it's not needed when using direct pinvoke.
* This is not an optimization, but it's used to avoid looking up pinvokes
* on platforms which don't support dlopen ().
*/
EMIT_NEW_PCONST (cfg, ins, NULL);
} else {
EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
}
} else {
ptr = mono_lookup_internal_call (cmethod);
g_assert (ptr);
EMIT_NEW_PCONST (cfg, ins, ptr);
}
*sp++ = ins;
break;
}
case MONO_CEE_MONO_VTADDR: {
g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
MonoInst *src_var, *src;
--sp;
// FIXME:
src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
*sp++ = src;
break;
}
case MONO_CEE_MONO_NEWOBJ: {
g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
MonoInst *iargs [2];
klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
mono_class_init_internal (klass);
NEW_CLASSCONST (cfg, iargs [0], klass);
MONO_ADD_INS (cfg->cbb, iargs [0]);
*sp++ = mono_emit_jit_icall (cfg, ves_icall_object_new, iargs);
inline_costs += CALL_COST * MIN(10, num_calls++);
break;
}
case MONO_CEE_MONO_OBJADDR:
g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
--sp;
MONO_INST_NEW (cfg, ins, OP_MOVE);
ins->dreg = alloc_ireg_mp (cfg);
ins->sreg1 = sp [0]->dreg;
ins->type = STACK_MP;
MONO_ADD_INS (cfg->cbb, ins);
*sp++ = ins;
break;
case MONO_CEE_MONO_LDNATIVEOBJ:
/*
* Similar to LDOBJ, but instead load the unmanaged
* representation of the vtype to the stack.
*/
g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
--sp;
klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
g_assert (m_class_is_valuetype (klass));
mono_class_init_internal (klass);
{
MonoInst *src, *dest, *temp;
src = sp [0];
temp = mono_compile_create_var (cfg, m_class_get_byval_arg (klass), OP_LOCAL);
temp->backend.is_pinvoke = 1;
EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
mini_emit_memory_copy (cfg, dest, src, klass, TRUE, 0);
EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
dest->type = STACK_VTYPE;
dest->klass = klass;
*sp ++ = dest;
}
break;
case MONO_CEE_MONO_RETOBJ: {
/*
* Same as RET, but return the native representation of a vtype
* to the caller.
*/
g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
g_assert (cfg->ret);
g_assert (mono_method_signature_internal (method)->pinvoke);
--sp;
klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
if (!cfg->vret_addr) {
g_assert (cfg->ret_var_is_local);
EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
} else {
EMIT_NEW_RETLOADA (cfg, ins);
}
mini_emit_memory_copy (cfg, ins, sp [0], klass, TRUE, 0);
if (sp != stack_start)
UNVERIFIED;
if (!detached_before_ret)
mini_profiler_emit_leave (cfg, sp [0]);
MONO_INST_NEW (cfg, ins, OP_BR);
ins->inst_target_bb = end_bblock;
MONO_ADD_INS (cfg->cbb, ins);
link_bblock (cfg, cfg->cbb, end_bblock);
start_new_bblock = 1;
break;
}
case MONO_CEE_MONO_SAVE_LMF:
case MONO_CEE_MONO_RESTORE_LMF:
g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
break;
case MONO_CEE_MONO_CLASSCONST:
g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
*sp++ = ins;
inline_costs += CALL_COST * MIN(10, num_calls++);
break;
case MONO_CEE_MONO_METHODCONST:
g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
EMIT_NEW_METHODCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
*sp++ = ins;
break;
case MONO_CEE_MONO_PINVOKE_ADDR_CACHE: {
g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
MonoMethod *pinvoke_method = (MonoMethod*)mono_method_get_wrapper_data (method, token);
/* This is a memory slot used by the wrapper */
if (cfg->compile_aot) {
EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_METHOD_PINVOKE_ADDR_CACHE, pinvoke_method);
} else {
gpointer addr = mono_mem_manager_alloc0 (cfg->mem_manager, sizeof (gpointer));
EMIT_NEW_PCONST (cfg, ins, addr);
}
*sp++ = ins;
break;
}
case MONO_CEE_MONO_NOT_TAKEN:
g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
cfg->cbb->out_of_line = TRUE;
break;
case MONO_CEE_MONO_TLS: {
MonoTlsKey key;
g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
key = (MonoTlsKey)n;
g_assert (key < TLS_KEY_NUM);
ins = mono_create_tls_get (cfg, key);
g_assert (ins);
ins->type = STACK_PTR;
*sp++ = ins;
break;
}
case MONO_CEE_MONO_DYN_CALL: {
MonoCallInst *call;
/* It would be easier to call a trampoline, but that would put an
* extra frame on the stack, confusing exception handling. So
* implement it inline using an opcode for now.
*/
g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
if (!cfg->dyn_call_var) {
cfg->dyn_call_var = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL);
/* prevent it from being register allocated */
cfg->dyn_call_var->flags |= MONO_INST_VOLATILE;
}
/* Has to use a call inst since local regalloc expects it */
MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL);
ins = (MonoInst*)call;
sp -= 2;
ins->sreg1 = sp [0]->dreg;
ins->sreg2 = sp [1]->dreg;
MONO_ADD_INS (cfg->cbb, ins);
cfg->param_area = MAX (cfg->param_area, cfg->backend->dyn_call_param_area);
/* OP_DYN_CALL might need to allocate a dynamically sized param area */
cfg->flags |= MONO_CFG_HAS_ALLOCA;
inline_costs += CALL_COST * MIN(10, num_calls++);
break;
}
case MONO_CEE_MONO_MEMORY_BARRIER: {
g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
mini_emit_memory_barrier (cfg, (int)n);
break;
}
case MONO_CEE_MONO_ATOMIC_STORE_I4: {
g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
g_assert (mono_arch_opcode_supported (OP_ATOMIC_STORE_I4));
sp -= 2;
MONO_INST_NEW (cfg, ins, OP_ATOMIC_STORE_I4);
ins->dreg = sp [0]->dreg;
ins->sreg1 = sp [1]->dreg;
ins->backend.memory_barrier_kind = (int)n;
MONO_ADD_INS (cfg->cbb, ins);
break;
}
case MONO_CEE_MONO_LD_DELEGATE_METHOD_PTR: {
CHECK_STACK (1);
--sp;
dreg = alloc_preg (cfg);
EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr));
*sp++ = ins;
break;
}
case MONO_CEE_MONO_CALLI_EXTRA_ARG: {
MonoInst *addr;
MonoMethodSignature *fsig;
MonoInst *arg;
/*
* This is the same as CEE_CALLI, but passes an additional argument
* to the called method in llvmonly mode.
* This is only used by delegate invoke wrappers to call the
* actual delegate method.
*/
g_assert (method->wrapper_type == MONO_WRAPPER_DELEGATE_INVOKE);
ins = NULL;
cmethod = NULL;
CHECK_STACK (1);
--sp;
addr = *sp;
fsig = mini_get_signature (method, token, generic_context, cfg->error);
CHECK_CFG_ERROR;
if (cfg->llvm_only)
cfg->signatures = g_slist_prepend_mempool (cfg->mempool, cfg->signatures, fsig);
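/* The extra argument is passed last, on top of the IL stack. */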
n = fsig->param_count + fsig->hasthis + 1;
CHECK_STACK (n);
sp -= n;
arg = sp [n - 1];
if (cfg->llvm_only) {
/*
* The lowest bit of 'arg' determines whether the callee uses the gsharedvt
* cconv. This is set by mono_init_delegate ().
*/
if (cfg->gsharedvt && mini_is_gsharedvt_variable_signature (fsig)) {
MonoInst *callee = addr;
MonoInst *call, *localloc_ins;
MonoBasicBlock *is_gsharedvt_bb, *end_bb;
int low_bit_reg = alloc_preg (cfg);
NEW_BBLOCK (cfg, is_gsharedvt_bb);
NEW_BBLOCK (cfg, end_bb);
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, low_bit_reg, arg->dreg, 1);
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, low_bit_reg, 0);
MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, is_gsharedvt_bb);
/* Normal case: callee uses a normal cconv, have to add an out wrapper */
addr = emit_get_rgctx_sig (cfg, context_used,
fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
/*
* ADDR points to a gsharedvt-out wrapper, have to pass <callee, arg> as an extra arg.
*/
MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
ins->dreg = alloc_preg (cfg);
ins->inst_imm = 2 * TARGET_SIZEOF_VOID_P;
MONO_ADD_INS (cfg->cbb, ins);
localloc_ins = ins;
cfg->flags |= MONO_CFG_HAS_ALLOCA;
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, 0, callee->dreg);
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, TARGET_SIZEOF_VOID_P, arg->dreg);
call = mini_emit_extra_arg_calli (cfg, fsig, sp, localloc_ins->dreg, addr);
MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Gsharedvt case: callee uses a gsharedvt cconv, no conversion is needed */
MONO_START_BB (cfg, is_gsharedvt_bb);
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PXOR_IMM, arg->dreg, arg->dreg, 1);
ins = mini_emit_extra_arg_calli (cfg, fsig, sp, arg->dreg, callee);
ins->dreg = call->dreg;
MONO_START_BB (cfg, end_bb);
} else {
/* Caller uses a normal calling conv */
MonoInst *callee = addr;
MonoInst *call, *localloc_ins;
MonoBasicBlock *is_gsharedvt_bb, *end_bb;
int low_bit_reg = alloc_preg (cfg);
NEW_BBLOCK (cfg, is_gsharedvt_bb);
NEW_BBLOCK (cfg, end_bb);
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, low_bit_reg, arg->dreg, 1);
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, low_bit_reg, 0);
MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, is_gsharedvt_bb);
/* Normal case: callee uses a normal cconv, no conversion is needed */
call = mini_emit_extra_arg_calli (cfg, fsig, sp, arg->dreg, callee);
MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Gsharedvt case: callee uses a gsharedvt cconv, have to add an in wrapper */
MONO_START_BB (cfg, is_gsharedvt_bb);
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PXOR_IMM, arg->dreg, arg->dreg, 1);
NEW_AOTCONST (cfg, addr, MONO_PATCH_INFO_GSHAREDVT_IN_WRAPPER, fsig);
MONO_ADD_INS (cfg->cbb, addr);
/*
* ADDR points to a gsharedvt-in wrapper, have to pass <callee, arg> as an extra arg.
*/
MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
ins->dreg = alloc_preg (cfg);
ins->inst_imm = 2 * TARGET_SIZEOF_VOID_P;
MONO_ADD_INS (cfg->cbb, ins);
localloc_ins = ins;
cfg->flags |= MONO_CFG_HAS_ALLOCA;
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, 0, callee->dreg);
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, TARGET_SIZEOF_VOID_P, arg->dreg);
ins = mini_emit_extra_arg_calli (cfg, fsig, sp, localloc_ins->dreg, addr);
ins->dreg = call->dreg;
MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
MONO_START_BB (cfg, end_bb);
}
} else {
/* Same as CEE_CALLI */
if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) {
/*
* We pass the address to the gsharedvt trampoline in the rgctx reg
*/
MonoInst *callee = addr;
addr = emit_get_rgctx_sig (cfg, context_used,
fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
ins = (MonoInst*)mini_emit_calli (cfg, fsig, sp, addr, NULL, callee);
} else {
ins = (MonoInst*)mini_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
}
}
if (!MONO_TYPE_IS_VOID (fsig->ret))
*sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
CHECK_CFG_EXCEPTION;
ins_flag = 0;
constrained_class = NULL;
break;
}
case MONO_CEE_MONO_LDDOMAIN: {
MonoDomain *domain = mono_get_root_domain ();
g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
EMIT_NEW_PCONST (cfg, ins, cfg->compile_aot ? NULL : domain);
*sp++ = ins;
break;
}
case MONO_CEE_MONO_SAVE_LAST_ERROR:
g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
// Just an IL prefix, setting this flag, picked up by call instructions.
save_last_error = TRUE;
break;
case MONO_CEE_MONO_GET_RGCTX_ARG:
g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
mono_create_rgctx_var (cfg);
MONO_INST_NEW (cfg, ins, OP_MOVE);
ins->dreg = alloc_dreg (cfg, STACK_PTR);
ins->sreg1 = cfg->rgctx_var->dreg;
ins->type = STACK_PTR;
MONO_ADD_INS (cfg->cbb, ins);
*sp++ = ins;
break;
case MONO_CEE_MONO_GET_SP: {
/* Used by COOP only, so this is good enough */
MonoInst *var = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL);
EMIT_NEW_VARLOADA (cfg, ins, var, NULL);
*sp++ = ins;
break;
}
case MONO_CEE_MONO_REMAP_OVF_EXC:
/* Remap the exception thrown by the next _OVF opcode */
g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
ovf_exc = (const char*)mono_method_get_wrapper_data (method, token);
break;
case MONO_CEE_ARGLIST: {
/* somewhat similar to LDTOKEN */
MonoInst *addr, *vtvar;
vtvar = mono_compile_create_var (cfg, m_class_get_byval_arg (mono_defaults.argumenthandle_class), OP_LOCAL);
EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
ins->type = STACK_VTYPE;
ins->klass = mono_defaults.argumenthandle_class;
*sp++ = ins;
break;
}
case MONO_CEE_CEQ:
case MONO_CEE_CGT:
case MONO_CEE_CGT_UN:
case MONO_CEE_CLT:
case MONO_CEE_CLT_UN: {
MonoInst *cmp, *arg1, *arg2;
sp -= 2;
arg1 = sp [0];
arg2 = sp [1];
/*
* The following transforms:
* CEE_CEQ into OP_CEQ
* CEE_CGT into OP_CGT
* CEE_CGT_UN into OP_CGT_UN
* CEE_CLT into OP_CLT
* CEE_CLT_UN into OP_CLT_UN
*/
MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
MONO_INST_NEW (cfg, ins, cmp->opcode);
cmp->sreg1 = arg1->dreg;
cmp->sreg2 = arg2->dreg;
type_from_op (cfg, cmp, arg1, arg2);
CHECK_TYPE (cmp);
add_widen_op (cfg, cmp, &arg1, &arg2);
if ((arg1->type == STACK_I8) || ((TARGET_SIZEOF_VOID_P == 8) && ((arg1->type == STACK_PTR) || (arg1->type == STACK_OBJ) || (arg1->type == STACK_MP))))
cmp->opcode = OP_LCOMPARE;
else if (arg1->type == STACK_R4)
cmp->opcode = OP_RCOMPARE;
else if (arg1->type == STACK_R8)
cmp->opcode = OP_FCOMPARE;
else
cmp->opcode = OP_ICOMPARE;
MONO_ADD_INS (cfg->cbb, cmp);
ins->type = STACK_I4;
ins->dreg = alloc_dreg (cfg, (MonoStackType)ins->type);
type_from_op (cfg, ins, arg1, arg2);
if (cmp->opcode == OP_FCOMPARE || cmp->opcode == OP_RCOMPARE) {
/*
* The backends expect the fceq opcodes to do the
* comparison too.
*/
ins->sreg1 = cmp->sreg1;
ins->sreg2 = cmp->sreg2;
NULLIFY_INS (cmp);
}
MONO_ADD_INS (cfg->cbb, ins);
*sp++ = ins;
break;
}
case MONO_CEE_LDFTN: {
MonoInst *argconst;
MonoMethod *cil_method;
cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
CHECK_CFG_ERROR;
if (constrained_class) {
if (m_method_is_static (cmethod) && mini_class_check_context_used (cfg, constrained_class))
// FIXME:
GENERIC_SHARING_FAILURE (CEE_LDFTN);
cmethod = get_constrained_method (cfg, image, n, cmethod, constrained_class, generic_context);
constrained_class = NULL;
CHECK_CFG_ERROR;
}
mono_class_init_internal (cmethod->klass);
mono_save_token_info (cfg, image, n, cmethod);
context_used = mini_method_check_context_used (cfg, cmethod);
cil_method = cmethod;
if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
emit_method_access_failure (cfg, method, cil_method);
const gboolean has_unmanaged_callers_only =
cmethod->wrapper_type == MONO_WRAPPER_NONE &&
mono_method_has_unmanaged_callers_only_attribute (cmethod);
/*
* Optimize the common case of ldftn+delegate creation
*/
if ((sp > stack_start) && (next_ip + 4 < end) && ip_in_bb (cfg, cfg->cbb, next_ip) && (next_ip [0] == CEE_NEWOBJ)) {
MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (next_ip + 1), NULL, generic_context);
if (ctor_method && (m_class_get_parent (ctor_method->klass) == mono_defaults.multicastdelegate_class)) {
MonoInst *target_ins, *handle_ins;
MonoMethod *invoke;
int invoke_context_used;
if (G_UNLIKELY (has_unmanaged_callers_only)) {
mono_error_set_not_supported (cfg->error, "Cannot create delegate from method with UnmanagedCallersOnlyAttribute");
CHECK_CFG_ERROR;
}
invoke = mono_get_delegate_invoke_internal (ctor_method->klass);
if (!invoke || !mono_method_signature_internal (invoke))
LOAD_ERROR;
invoke_context_used = mini_method_check_context_used (cfg, invoke);
target_ins = sp [-1];
if (!(cmethod->flags & METHOD_ATTRIBUTE_STATIC)) {
/*BAD IMPL: We must not add a null check for virtual invoke delegates.*/
if (mono_method_signature_internal (invoke)->param_count == mono_method_signature_internal (cmethod)->param_count) {
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, target_ins->dreg, 0);
MONO_EMIT_NEW_COND_EXC (cfg, EQ, "ArgumentException");
}
}
if ((invoke_context_used == 0 || !cfg->gsharedvt) || cfg->llvm_only) {
if (cfg->verbose_level > 3)
g_print ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip + 6, NULL));
if ((handle_ins = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used, invoke_context_used, FALSE))) {
sp --;
*sp = handle_ins;
CHECK_CFG_EXCEPTION;
sp ++;
next_ip += 5;
il_op = MONO_CEE_NEWOBJ;
break;
} else {
CHECK_CFG_ERROR;
}
}
}
}
/* UnmanagedCallersOnlyAttribute means ldftn should return a method callable from native */
if (G_UNLIKELY (has_unmanaged_callers_only)) {
if (G_UNLIKELY (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
// Follow CoreCLR, disallow [UnmanagedCallersOnly] and [DllImport] to be used
// together
emit_not_supported_failure (cfg);
EMIT_NEW_PCONST (cfg, ins, NULL);
*sp++ = ins;
inline_costs += CALL_COST * MIN(10, num_calls++);
break;
}
MonoClass *delegate_klass = NULL;
MonoGCHandle target_handle = 0;
ERROR_DECL (wrapper_error);
MonoMethod *wrapped_cmethod;
wrapped_cmethod = mono_marshal_get_managed_wrapper (cmethod, delegate_klass, target_handle, wrapper_error);
if (!is_ok (wrapper_error)) {
/* if we couldn't create a wrapper because cmethod isn't supposed to have an
UnmanagedCallersOnly attribute, follow CoreCLR behavior and throw when the
method with the ldftn is executing, not when it is being compiled. */
emit_invalid_program_with_msg (cfg, wrapper_error, method, cmethod);
mono_error_cleanup (wrapper_error);
EMIT_NEW_PCONST (cfg, ins, NULL);
*sp++ = ins;
inline_costs += CALL_COST * MIN(10, num_calls++);
break;
} else {
cmethod = wrapped_cmethod;
}
}
argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
*sp++ = ins;
inline_costs += CALL_COST * MIN(10, num_calls++);
break;
}
case MONO_CEE_LDVIRTFTN: {
MonoInst *args [2];
cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
CHECK_CFG_ERROR;
mono_class_init_internal (cmethod->klass);
context_used = mini_method_check_context_used (cfg, cmethod);
/*
* Optimize the common case of ldvirtftn+delegate creation
*/
if (previous_il_op == MONO_CEE_DUP && (sp > stack_start) && (next_ip + 4 < end) && ip_in_bb (cfg, cfg->cbb, next_ip) && (next_ip [0] == CEE_NEWOBJ)) {
MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (next_ip + 1), NULL, generic_context);
if (ctor_method && (m_class_get_parent (ctor_method->klass) == mono_defaults.multicastdelegate_class)) {
MonoInst *target_ins, *handle_ins;
MonoMethod *invoke;
int invoke_context_used;
const gboolean is_virtual = (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) != 0;
invoke = mono_get_delegate_invoke_internal (ctor_method->klass);
if (!invoke || !mono_method_signature_internal (invoke))
LOAD_ERROR;
invoke_context_used = mini_method_check_context_used (cfg, invoke);
target_ins = sp [-1];
if (invoke_context_used == 0 || !cfg->gsharedvt || cfg->llvm_only) {
if (cfg->verbose_level > 3)
g_print ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip + 6, NULL));
if ((handle_ins = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used, invoke_context_used, is_virtual))) {
sp -= 2;
*sp = handle_ins;
CHECK_CFG_EXCEPTION;
next_ip += 5;
previous_il_op = MONO_CEE_NEWOBJ;
sp ++;
break;
} else {
CHECK_CFG_ERROR;
}
}
}
}
--sp;
args [0] = *sp;
args [1] = emit_get_rgctx_method (cfg, context_used,
cmethod, MONO_RGCTX_INFO_METHOD);
if (context_used)
*sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
else
*sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
inline_costs += CALL_COST * MIN(10, num_calls++);
break;
}
case MONO_CEE_LOCALLOC: {
MonoBasicBlock *non_zero_bb, *end_bb;
int alloc_ptr = alloc_preg (cfg);
--sp;
if (sp != stack_start)
UNVERIFIED;
if (cfg->method != method)
/*
* Inlining this into a loop in a parent could lead to
* stack overflows, which differs from the behavior of the
* non-inlined case, so disable inlining in this case.
*/
INLINE_FAILURE("localloc");
NEW_BBLOCK (cfg, non_zero_bb);
NEW_BBLOCK (cfg, end_bb);
/* if size != zero */
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, non_zero_bb);
//size is zero, so result is NULL
MONO_EMIT_NEW_PCONST (cfg, alloc_ptr, NULL);
MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
MONO_START_BB (cfg, non_zero_bb);
MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
ins->dreg = alloc_ptr;
ins->sreg1 = sp [0]->dreg;
ins->type = STACK_PTR;
MONO_ADD_INS (cfg->cbb, ins);
cfg->flags |= MONO_CFG_HAS_ALLOCA;
if (header->init_locals)
ins->flags |= MONO_INST_INIT;
MONO_START_BB (cfg, end_bb);
EMIT_NEW_UNALU (cfg, ins, OP_MOVE, alloc_preg (cfg), alloc_ptr);
ins->type = STACK_PTR;
*sp++ = ins;
break;
}
case MONO_CEE_ENDFILTER: {
MonoExceptionClause *clause, *nearest;
int cc;
--sp;
if ((sp != stack_start) || (sp [0]->type != STACK_I4))
UNVERIFIED;
MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
ins->sreg1 = (*sp)->dreg;
MONO_ADD_INS (cfg->cbb, ins);
start_new_bblock = 1;
nearest = NULL;
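/* Find the filter clause whose filter block contains this endfilter. */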
for (cc = 0; cc < header->num_clauses; ++cc) {
clause = &header->clauses [cc];
if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
((next_ip - header->code) > clause->data.filter_offset && (next_ip - header->code) <= clause->handler_offset) &&
(!nearest || (clause->data.filter_offset < nearest->data.filter_offset)))
nearest = clause;
}
g_assert (nearest);
if ((next_ip - header->code) != nearest->handler_offset)
UNVERIFIED;
break;
}
case MONO_CEE_UNALIGNED_:
ins_flag |= MONO_INST_UNALIGNED;
/* FIXME: record alignment? we can assume 1 for now */
break;
case MONO_CEE_VOLATILE_:
ins_flag |= MONO_INST_VOLATILE;
break;
case MONO_CEE_TAIL_:
ins_flag |= MONO_INST_TAILCALL;
cfg->flags |= MONO_CFG_HAS_TAILCALL;
/* Can't inline tailcalls at this time */
inline_costs += 100000;
break;
case MONO_CEE_INITOBJ:
--sp;
klass = mini_get_class (method, token, generic_context);
CHECK_TYPELOAD (klass);
if (mini_class_is_reference (klass))
MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
else
mini_emit_initobj (cfg, *sp, NULL, klass);
inline_costs += 1;
break;
case MONO_CEE_CONSTRAINED_:
constrained_class = mini_get_class (method, token, generic_context);
CHECK_TYPELOAD (constrained_class);
ins_has_side_effect = FALSE;
break;
case MONO_CEE_CPBLK:
sp -= 3;
mini_emit_memory_copy_bytes (cfg, sp [0], sp [1], sp [2], ins_flag);
ins_flag = 0;
inline_costs += 1;
break;
case MONO_CEE_INITBLK:
sp -= 3;
mini_emit_memory_init_bytes (cfg, sp [0], sp [1], sp [2], ins_flag);
ins_flag = 0;
inline_costs += 1;
break;
case MONO_CEE_NO_:
if (ip [2] & CEE_NO_TYPECHECK)
ins_flag |= MONO_INST_NOTYPECHECK;
if (ip [2] & CEE_NO_RANGECHECK)
ins_flag |= MONO_INST_NORANGECHECK;
if (ip [2] & CEE_NO_NULLCHECK)
ins_flag |= MONO_INST_NONULLCHECK;
break;
case MONO_CEE_RETHROW: {
MonoInst *load;
int handler_offset = -1;
for (i = 0; i < header->num_clauses; ++i) {
MonoExceptionClause *clause = &header->clauses [i];
if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
handler_offset = clause->handler_offset;
break;
}
}
cfg->cbb->flags |= BB_EXCEPTION_UNSAFE;
if (handler_offset == -1)
UNVERIFIED;
EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
MONO_INST_NEW (cfg, ins, OP_RETHROW);
ins->sreg1 = load->dreg;
MONO_ADD_INS (cfg->cbb, ins);
MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
MONO_ADD_INS (cfg->cbb, ins);
sp = stack_start;
link_bblock (cfg, cfg->cbb, end_bblock);
start_new_bblock = 1;
break;
}
case MONO_CEE_MONO_RETHROW: {
if (sp [-1]->type != STACK_OBJ)
UNVERIFIED;
MONO_INST_NEW (cfg, ins, OP_RETHROW);
--sp;
ins->sreg1 = sp [0]->dreg;
cfg->cbb->out_of_line = TRUE;
MONO_ADD_INS (cfg->cbb, ins);
MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
MONO_ADD_INS (cfg->cbb, ins);
sp = stack_start;
link_bblock (cfg, cfg->cbb, end_bblock);
start_new_bblock = 1;
/* This can complicate code generation for llvm since the return value might not be defined */
if (COMPILE_LLVM (cfg))
INLINE_FAILURE ("mono_rethrow");
break;
}
case MONO_CEE_SIZEOF: {
guint32 val;
int ialign;
if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC && !image_is_dynamic (m_class_get_image (method->klass)) && !generic_context) {
MonoType *type = mono_type_create_from_typespec_checked (image, token, cfg->error);
CHECK_CFG_ERROR;
val = mono_type_size (type, &ialign);
EMIT_NEW_ICONST (cfg, ins, val);
} else {
MonoClass *klass = mini_get_class (method, token, generic_context);
CHECK_TYPELOAD (klass);
if (mini_is_gsharedvt_klass (klass)) {
ins = mini_emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_SIZEOF);
ins->type = STACK_I4;
} else {
val = mono_type_size (m_class_get_byval_arg (klass), &ialign);
EMIT_NEW_ICONST (cfg, ins, val);
}
}
*sp++ = ins;
break;
}
case MONO_CEE_REFANYTYPE: {
MonoInst *src_var, *src;
GSHAREDVT_FAILURE (il_op);
--sp;
// FIXME:
src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
if (!src_var)
src_var = mono_compile_create_var_for_vreg (cfg, m_class_get_byval_arg (mono_defaults.typed_reference_class), OP_LOCAL, sp [0]->dreg);
EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, m_class_get_byval_arg (mono_defaults.typehandle_class), src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type));
*sp++ = ins;
break;
}
case MONO_CEE_READONLY_:
readonly = TRUE;
break;
case MONO_CEE_UNUSED56:
case MONO_CEE_UNUSED57:
case MONO_CEE_UNUSED70:
case MONO_CEE_UNUSED:
case MONO_CEE_UNUSED99:
case MONO_CEE_UNUSED58:
case MONO_CEE_UNUSED1:
UNVERIFIED;
default:
g_warning ("opcode 0x%02x not handled", il_op);
UNVERIFIED;
}
if (ins_has_side_effect)
cfg->cbb->flags |= BB_HAS_SIDE_EFFECTS;
}
if (start_new_bblock != 1)
UNVERIFIED;
cfg->cbb->cil_length = ip - cfg->cbb->cil_code;
if (cfg->cbb->next_bb) {
/* This could already be set because of inlining, #693905 */
MonoBasicBlock *bb = cfg->cbb;
while (bb->next_bb)
bb = bb->next_bb;
bb->next_bb = end_bblock;
} else {
cfg->cbb->next_bb = end_bblock;
}
#if defined(TARGET_POWERPC) || defined(TARGET_X86)
if (cfg->compile_aot)
/* FIXME: The plt slots require a GOT var even if the method doesn't use it */
mono_get_got_var (cfg);
#endif
#ifdef TARGET_WASM
if (cfg->lmf_var && !cfg->deopt) {
// mini_llvmonly_pop_lmf () might be called before emit_push_lmf () so initialize the LMF
cfg->cbb = init_localsbb;
EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
int lmf_reg = ins->dreg;
EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_IMM, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), 0);
}
#endif
if (cfg->method == method && cfg->got_var)
mono_emit_load_got_addr (cfg);
if (init_localsbb) {
cfg->cbb = init_localsbb;
cfg->ip = NULL;
for (i = 0; i < header->num_locals; ++i) {
/*
* Vtype initialization might need to be done after CEE_JIT_ATTACH, since it can make calls to memset (),
* which need the trampoline code to work.
*/
if (MONO_TYPE_ISSTRUCT (header->locals [i]))
cfg->cbb = init_localsbb2;
else
cfg->cbb = init_localsbb;
emit_init_local (cfg, i, header->locals [i], init_locals);
}
}
if (cfg->init_ref_vars && cfg->method == method) {
/* Emit initialization for ref vars */
// FIXME: Avoid duplicate initialization for IL locals.
for (i = 0; i < cfg->num_varinfo; ++i) {
MonoInst *ins = cfg->varinfo [i];
if (ins->opcode == OP_LOCAL && ins->type == STACK_OBJ)
MONO_EMIT_NEW_PCONST (cfg, ins->dreg, NULL);
}
}
if (cfg->lmf_var && cfg->method == method && !cfg->llvm_only) {
cfg->cbb = init_localsbb;
emit_push_lmf (cfg);
}
/* emit profiler enter code after a jit attach if there is one */
cfg->cbb = init_localsbb2;
mini_profiler_emit_enter (cfg);
cfg->cbb = init_localsbb;
if (seq_points) {
MonoBasicBlock *bb;
/*
* Make seq points at backward branch targets interruptable.
*/
for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
if (bb->code && bb->in_count > 1 && bb->code->opcode == OP_SEQ_POINT)
bb->code->flags |= MONO_INST_SINGLE_STEP_LOC;
}
/* Add a sequence point for method entry/exit events */
if (seq_points && cfg->gen_sdb_seq_points) {
NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE);
MONO_ADD_INS (init_localsbb, ins);
NEW_SEQ_POINT (cfg, ins, METHOD_EXIT_IL_OFFSET, FALSE);
MONO_ADD_INS (cfg->bb_exit, ins);
}
/*
* Add seq points for IL offsets which have line number info but for which no seq point was generated
* during JITting because the code they refer to was dead (#11880).
*/
if (sym_seq_points) {
for (i = 0; i < header->code_size; ++i) {
if (mono_bitset_test_fast (seq_point_locs, i) && !mono_bitset_test_fast (seq_point_set_locs, i)) {
MonoInst *ins;
NEW_SEQ_POINT (cfg, ins, i, FALSE);
mono_add_seq_point (cfg, NULL, ins, SEQ_POINT_NATIVE_OFFSET_DEAD_CODE);
}
}
}
cfg->ip = NULL;
if (cfg->method == method) {
compute_bb_regions (cfg);
} else {
MonoBasicBlock *bb;
/* get_most_deep_clause () in mini-llvm.c depends on this for inlined bblocks */
for (bb = start_bblock; bb != end_bblock; bb = bb->next_bb) {
bb->real_offset = inline_offset;
}
}
if (inline_costs < 0) {
char *mname;
/* Method is too large */
mname = mono_method_full_name (method, TRUE);
mono_cfg_set_exception_invalid_program (cfg, g_strdup_printf ("Method %s is too complex.", mname));
g_free (mname);
}
if ((cfg->verbose_level > 2) && (cfg->method == method))
mono_print_code (cfg, "AFTER METHOD-TO-IR");
goto cleanup;
mono_error_exit:
if (cfg->verbose_level > 3)
g_print ("exiting due to error");
g_assert (!is_ok (cfg->error));
goto cleanup;
exception_exit:
if (cfg->verbose_level > 3)
g_print ("exiting due to exception");
g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
goto cleanup;
unverified:
if (cfg->verbose_level > 3)
g_print ("exiting due to invalid il");
set_exception_type_from_invalid_il (cfg, method, ip);
goto cleanup;
cleanup:
g_slist_free (class_inits);
mono_basic_block_free (original_bb);
cfg->dont_inline = g_list_remove (cfg->dont_inline, method);
if (cfg->exception_type)
return -1;
else
return inline_costs;
}
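/* Map a _MEMBASE_REG store opcode to its _MEMBASE_IMM variant. */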
static int
store_membase_reg_to_store_membase_imm (int opcode)
{
switch (opcode) {
case OP_STORE_MEMBASE_REG:
return OP_STORE_MEMBASE_IMM;
case OP_STOREI1_MEMBASE_REG:
return OP_STOREI1_MEMBASE_IMM;
case OP_STOREI2_MEMBASE_REG:
return OP_STOREI2_MEMBASE_IMM;
case OP_STOREI4_MEMBASE_REG:
return OP_STOREI4_MEMBASE_IMM;
case OP_STOREI8_MEMBASE_REG:
return OP_STOREI8_MEMBASE_IMM;
default:
g_assert_not_reached ();
}
return -1;
}
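/* Return the immediate variant of OPCODE, or -1 if it has none. */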
int
mono_op_to_op_imm (int opcode)
{
switch (opcode) {
case OP_IADD:
return OP_IADD_IMM;
case OP_ISUB:
return OP_ISUB_IMM;
case OP_IDIV:
return OP_IDIV_IMM;
case OP_IDIV_UN:
return OP_IDIV_UN_IMM;
case OP_IREM:
return OP_IREM_IMM;
case OP_IREM_UN:
return OP_IREM_UN_IMM;
case OP_IMUL:
return OP_IMUL_IMM;
case OP_IAND:
return OP_IAND_IMM;
case OP_IOR:
return OP_IOR_IMM;
case OP_IXOR:
return OP_IXOR_IMM;
case OP_ISHL:
return OP_ISHL_IMM;
case OP_ISHR:
return OP_ISHR_IMM;
case OP_ISHR_UN:
return OP_ISHR_UN_IMM;
case OP_LADD:
return OP_LADD_IMM;
case OP_LSUB:
return OP_LSUB_IMM;
case OP_LAND:
return OP_LAND_IMM;
case OP_LOR:
return OP_LOR_IMM;
case OP_LXOR:
return OP_LXOR_IMM;
case OP_LSHL:
return OP_LSHL_IMM;
case OP_LSHR:
return OP_LSHR_IMM;
case OP_LSHR_UN:
return OP_LSHR_UN_IMM;
#if SIZEOF_REGISTER == 8
case OP_LMUL:
return OP_LMUL_IMM;
case OP_LREM:
return OP_LREM_IMM;
#endif
case OP_COMPARE:
return OP_COMPARE_IMM;
case OP_ICOMPARE:
return OP_ICOMPARE_IMM;
case OP_LCOMPARE:
return OP_LCOMPARE_IMM;
case OP_STORE_MEMBASE_REG:
return OP_STORE_MEMBASE_IMM;
case OP_STOREI1_MEMBASE_REG:
return OP_STOREI1_MEMBASE_IMM;
case OP_STOREI2_MEMBASE_REG:
return OP_STOREI2_MEMBASE_IMM;
case OP_STOREI4_MEMBASE_REG:
return OP_STOREI4_MEMBASE_IMM;
#if defined(TARGET_X86) || defined (TARGET_AMD64)
case OP_X86_PUSH:
return OP_X86_PUSH_IMM;
case OP_X86_COMPARE_MEMBASE_REG:
return OP_X86_COMPARE_MEMBASE_IMM;
#endif
#if defined(TARGET_AMD64)
case OP_AMD64_ICOMPARE_MEMBASE_REG:
return OP_AMD64_ICOMPARE_MEMBASE_IMM;
#endif
case OP_VOIDCALL_REG:
return OP_VOIDCALL;
case OP_CALL_REG:
return OP_CALL;
case OP_LCALL_REG:
return OP_LCALL;
case OP_FCALL_REG:
return OP_FCALL;
case OP_LOCALLOC:
return OP_LOCALLOC_IMM;
}
return -1;
}
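/*
* Schematic example of what this mapping enables (hypothetical IR, for
* illustration only): when local optimizations see
*
*     iconst R12 <- [5]
*     int_add R10 <- R11 R12
*
* they can rewrite the add into its immediate form
*
*     int_add_imm R10 <- R11 [5]
*
* provided mono_op_to_op_imm (OP_IADD) returns a valid opcode; -1 means no
* immediate variant exists for this opcode/configuration.
*/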
int
mono_load_membase_to_load_mem (int opcode)
{
// FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
#if defined(TARGET_X86) || defined(TARGET_AMD64)
switch (opcode) {
case OP_LOAD_MEMBASE:
return OP_LOAD_MEM;
case OP_LOADU1_MEMBASE:
return OP_LOADU1_MEM;
case OP_LOADU2_MEMBASE:
return OP_LOADU2_MEM;
case OP_LOADI4_MEMBASE:
return OP_LOADI4_MEM;
case OP_LOADU4_MEMBASE:
return OP_LOADU4_MEM;
#if SIZEOF_REGISTER == 8
case OP_LOADI8_MEMBASE:
return OP_LOADI8_MEM;
#endif
}
#endif
return -1;
}
static int
op_to_op_dest_membase (int store_opcode, int opcode)
{
#if defined(TARGET_X86)
if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
return -1;
switch (opcode) {
case OP_IADD:
return OP_X86_ADD_MEMBASE_REG;
case OP_ISUB:
return OP_X86_SUB_MEMBASE_REG;
case OP_IAND:
return OP_X86_AND_MEMBASE_REG;
case OP_IOR:
return OP_X86_OR_MEMBASE_REG;
case OP_IXOR:
return OP_X86_XOR_MEMBASE_REG;
case OP_ADD_IMM:
case OP_IADD_IMM:
return OP_X86_ADD_MEMBASE_IMM;
case OP_SUB_IMM:
case OP_ISUB_IMM:
return OP_X86_SUB_MEMBASE_IMM;
case OP_AND_IMM:
case OP_IAND_IMM:
return OP_X86_AND_MEMBASE_IMM;
case OP_OR_IMM:
case OP_IOR_IMM:
return OP_X86_OR_MEMBASE_IMM;
case OP_XOR_IMM:
case OP_IXOR_IMM:
return OP_X86_XOR_MEMBASE_IMM;
case OP_MOVE:
return OP_NOP;
}
#endif
#if defined(TARGET_AMD64)
if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
return -1;
switch (opcode) {
case OP_IADD:
return OP_X86_ADD_MEMBASE_REG;
case OP_ISUB:
return OP_X86_SUB_MEMBASE_REG;
case OP_IAND:
return OP_X86_AND_MEMBASE_REG;
case OP_IOR:
return OP_X86_OR_MEMBASE_REG;
case OP_IXOR:
return OP_X86_XOR_MEMBASE_REG;
case OP_IADD_IMM:
return OP_X86_ADD_MEMBASE_IMM;
case OP_ISUB_IMM:
return OP_X86_SUB_MEMBASE_IMM;
case OP_IAND_IMM:
return OP_X86_AND_MEMBASE_IMM;
case OP_IOR_IMM:
return OP_X86_OR_MEMBASE_IMM;
case OP_IXOR_IMM:
return OP_X86_XOR_MEMBASE_IMM;
case OP_LADD:
return OP_AMD64_ADD_MEMBASE_REG;
case OP_LSUB:
return OP_AMD64_SUB_MEMBASE_REG;
case OP_LAND:
return OP_AMD64_AND_MEMBASE_REG;
case OP_LOR:
return OP_AMD64_OR_MEMBASE_REG;
case OP_LXOR:
return OP_AMD64_XOR_MEMBASE_REG;
case OP_ADD_IMM:
case OP_LADD_IMM:
return OP_AMD64_ADD_MEMBASE_IMM;
case OP_SUB_IMM:
case OP_LSUB_IMM:
return OP_AMD64_SUB_MEMBASE_IMM;
case OP_AND_IMM:
case OP_LAND_IMM:
return OP_AMD64_AND_MEMBASE_IMM;
case OP_OR_IMM:
case OP_LOR_IMM:
return OP_AMD64_OR_MEMBASE_IMM;
case OP_XOR_IMM:
case OP_LXOR_IMM:
return OP_AMD64_XOR_MEMBASE_IMM;
case OP_MOVE:
return OP_NOP;
}
#endif
return -1;
}
static int
op_to_op_store_membase (int store_opcode, int opcode)
{
#if defined(TARGET_X86) || defined(TARGET_AMD64)
switch (opcode) {
case OP_ICEQ:
if (store_opcode == OP_STOREI1_MEMBASE_REG)
return OP_X86_SETEQ_MEMBASE;
break;
case OP_CNE:
if (store_opcode == OP_STOREI1_MEMBASE_REG)
return OP_X86_SETNE_MEMBASE;
break;
}
#endif
return -1;
}
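/*
* Illustration (hypothetical IR): a compare result stored straight into a
* byte-sized stack slot, e.g.
*
*     iceq R10
*     storei1_membase_reg [%fp + 0x8] <- R10
*
* can be fused on x86/amd64 into a single
*
*     x86_seteq_membase [%fp + 0x8]
*
* which is how mono_spill_global_vars () below uses this mapping.
*/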
static int
op_to_op_src1_membase (MonoCompile *cfg, int load_opcode, int opcode)
{
#ifdef TARGET_X86
/* FIXME: This has sign extension issues */
/*
if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
return OP_X86_COMPARE_MEMBASE8_IMM;
*/
if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
return -1;
switch (opcode) {
case OP_X86_PUSH:
return OP_X86_PUSH_MEMBASE;
case OP_COMPARE_IMM:
case OP_ICOMPARE_IMM:
return OP_X86_COMPARE_MEMBASE_IMM;
case OP_COMPARE:
case OP_ICOMPARE:
return OP_X86_COMPARE_MEMBASE_REG;
}
#endif
#ifdef TARGET_AMD64
/* FIXME: This has sign extension issues */
/*
if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
return OP_X86_COMPARE_MEMBASE8_IMM;
*/
switch (opcode) {
case OP_X86_PUSH:
if ((load_opcode == OP_LOAD_MEMBASE && !cfg->backend->ilp32) || (load_opcode == OP_LOADI8_MEMBASE))
return OP_X86_PUSH_MEMBASE;
break;
/* FIXME: This only works for 32 bit immediates
case OP_COMPARE_IMM:
case OP_LCOMPARE_IMM:
if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
return OP_AMD64_COMPARE_MEMBASE_IMM;
*/
case OP_ICOMPARE_IMM:
if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
return OP_AMD64_ICOMPARE_MEMBASE_IMM;
break;
case OP_COMPARE:
case OP_LCOMPARE:
if (cfg->backend->ilp32 && load_opcode == OP_LOAD_MEMBASE)
return OP_AMD64_ICOMPARE_MEMBASE_REG;
if ((load_opcode == OP_LOAD_MEMBASE && !cfg->backend->ilp32) || (load_opcode == OP_LOADI8_MEMBASE))
return OP_AMD64_COMPARE_MEMBASE_REG;
break;
case OP_ICOMPARE:
if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
return OP_AMD64_ICOMPARE_MEMBASE_REG;
break;
}
#endif
return -1;
}
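/*
* Illustration (hypothetical IR): when the first source of an instruction is
* a freshly loaded stack variable, e.g.
*
*     loadi4_membase R10 <- [%fp + 0x10]
*     icompare_imm R10 [5]
*
* the load can be folded into the consumer, giving on x86 (similarly on
* amd64 with the AMD64_ variants)
*
*     x86_compare_membase_imm [%fp + 0x10] [5]
*/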
static int
op_to_op_src2_membase (MonoCompile *cfg, int load_opcode, int opcode)
{
#ifdef TARGET_X86
if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
return -1;
switch (opcode) {
case OP_COMPARE:
case OP_ICOMPARE:
return OP_X86_COMPARE_REG_MEMBASE;
case OP_IADD:
return OP_X86_ADD_REG_MEMBASE;
case OP_ISUB:
return OP_X86_SUB_REG_MEMBASE;
case OP_IAND:
return OP_X86_AND_REG_MEMBASE;
case OP_IOR:
return OP_X86_OR_REG_MEMBASE;
case OP_IXOR:
return OP_X86_XOR_REG_MEMBASE;
}
#endif
#ifdef TARGET_AMD64
if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE && cfg->backend->ilp32)) {
switch (opcode) {
case OP_ICOMPARE:
return OP_AMD64_ICOMPARE_REG_MEMBASE;
case OP_IADD:
return OP_X86_ADD_REG_MEMBASE;
case OP_ISUB:
return OP_X86_SUB_REG_MEMBASE;
case OP_IAND:
return OP_X86_AND_REG_MEMBASE;
case OP_IOR:
return OP_X86_OR_REG_MEMBASE;
case OP_IXOR:
return OP_X86_XOR_REG_MEMBASE;
}
} else if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE && !cfg->backend->ilp32)) {
switch (opcode) {
case OP_COMPARE:
case OP_LCOMPARE:
return OP_AMD64_COMPARE_REG_MEMBASE;
case OP_LADD:
return OP_AMD64_ADD_REG_MEMBASE;
case OP_LSUB:
return OP_AMD64_SUB_REG_MEMBASE;
case OP_LAND:
return OP_AMD64_AND_REG_MEMBASE;
case OP_LOR:
return OP_AMD64_OR_REG_MEMBASE;
case OP_LXOR:
return OP_AMD64_XOR_REG_MEMBASE;
}
}
#endif
return -1;
}
int
mono_op_to_op_imm_noemul (int opcode)
{
MONO_DISABLE_WARNING(4065) // switch with default but no case
switch (opcode) {
#if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
case OP_LSHR:
case OP_LSHL:
case OP_LSHR_UN:
return -1;
#endif
#if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
case OP_IDIV:
case OP_IDIV_UN:
case OP_IREM:
case OP_IREM_UN:
return -1;
#endif
#if defined(MONO_ARCH_EMULATE_MUL_DIV)
case OP_IMUL:
return -1;
#endif
default:
return mono_op_to_op_imm (opcode);
}
MONO_RESTORE_WARNING
}
gboolean
mono_op_no_side_effects (int opcode)
{
/* FIXME: Add more instructions */
/* INEG sets the condition codes, and the OP_LNEG decomposition depends on this on x86 */
switch (opcode) {
case OP_MOVE:
case OP_FMOVE:
case OP_VMOVE:
case OP_XMOVE:
case OP_RMOVE:
case OP_VZERO:
case OP_XZERO:
case OP_ICONST:
case OP_I8CONST:
case OP_ADD_IMM:
case OP_R8CONST:
case OP_LADD_IMM:
case OP_ISUB_IMM:
case OP_IADD_IMM:
case OP_LNEG:
case OP_ISUB:
case OP_CMOV_IGE:
case OP_ISHL_IMM:
case OP_ISHR_IMM:
case OP_ISHR_UN_IMM:
case OP_IAND_IMM:
case OP_ICONV_TO_U1:
case OP_ICONV_TO_I1:
case OP_SEXT_I4:
case OP_LCONV_TO_U1:
case OP_ICONV_TO_U2:
case OP_ICONV_TO_I2:
case OP_LCONV_TO_I2:
case OP_LDADDR:
case OP_PHI:
case OP_NOP:
case OP_ZEXT_I4:
case OP_NOT_NULL:
case OP_IL_SEQ_POINT:
case OP_RTTYPE:
return TRUE;
default:
return FALSE;
}
}
gboolean
mono_ins_no_side_effects (MonoInst *ins)
{
if (mono_op_no_side_effects (ins->opcode))
return TRUE;
if (ins->opcode == OP_AOTCONST) {
MonoJumpInfoType type = (MonoJumpInfoType)(intptr_t)ins->inst_p1;
// Some AOTCONSTs have side effects
switch (type) {
case MONO_PATCH_INFO_TYPE_FROM_HANDLE:
case MONO_PATCH_INFO_LDSTR:
case MONO_PATCH_INFO_VTABLE:
case MONO_PATCH_INFO_METHOD_RGCTX:
return TRUE;
}
}
return FALSE;
}
/**
* mono_handle_global_vregs:
*
* Make vregs used in more than one bblock 'global', i.e. allocate a variable
* for them.
*/
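/*
* Example (hypothetical vregs): with
*
*     BB1: iconst R20 <- [1]
*     BB2: int_add R21 <- R20 R22
*
* R20 is live across bblocks, so this pass creates an OP_LOCAL variable for
* it, while vregs touched in a single bblock stay local; variables that turn
* out to be used in only one bblock are demoted back to local vregs at the
* end of the pass.
*/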
void
mono_handle_global_vregs (MonoCompile *cfg)
{
gint32 *vreg_to_bb;
MonoBasicBlock *bb;
int i, pos;
vreg_to_bb = (gint32 *)mono_mempool_alloc0 (cfg->mempool, sizeof (gint32) * (cfg->next_vreg + 1));
#ifdef MONO_ARCH_SIMD_INTRINSICS
if (cfg->uses_simd_intrinsics & MONO_CFG_USES_SIMD_INTRINSICS_SIMPLIFY_INDIRECTION)
mono_simd_simplify_indirection (cfg);
#endif
/* Find local vregs used in more than one bb */
for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
MonoInst *ins = bb->code;
int block_num = bb->block_num;
if (cfg->verbose_level > 2)
printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
cfg->cbb = bb;
for (; ins; ins = ins->next) {
const char *spec = INS_INFO (ins->opcode);
int regtype = 0, regindex;
gint32 prev_bb;
if (G_UNLIKELY (cfg->verbose_level > 2))
mono_print_ins (ins);
g_assert (ins->opcode >= MONO_CEE_LAST);
for (regindex = 0; regindex < 4; regindex ++) {
int vreg = 0;
if (regindex == 0) {
regtype = spec [MONO_INST_DEST];
if (regtype == ' ')
continue;
vreg = ins->dreg;
} else if (regindex == 1) {
regtype = spec [MONO_INST_SRC1];
if (regtype == ' ')
continue;
vreg = ins->sreg1;
} else if (regindex == 2) {
regtype = spec [MONO_INST_SRC2];
if (regtype == ' ')
continue;
vreg = ins->sreg2;
} else if (regindex == 3) {
regtype = spec [MONO_INST_SRC3];
if (regtype == ' ')
continue;
vreg = ins->sreg3;
}
#if SIZEOF_REGISTER == 4
/* In the LLVM case, the long opcodes are not decomposed */
if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
/*
* Since some instructions reference the original long vreg,
* and some reference the two component vregs, it is quite hard
* to determine when it needs to be global. So be conservative.
*/
if (!get_vreg_to_inst (cfg, vreg)) {
mono_compile_create_var_for_vreg (cfg, m_class_get_byval_arg (mono_defaults.int64_class), OP_LOCAL, vreg);
if (cfg->verbose_level > 2)
printf ("LONG VREG R%d made global.\n", vreg);
}
/*
* Make the component vregs volatile since the optimizations can
* get confused otherwise.
*/
get_vreg_to_inst (cfg, MONO_LVREG_LS (vreg))->flags |= MONO_INST_VOLATILE;
get_vreg_to_inst (cfg, MONO_LVREG_MS (vreg))->flags |= MONO_INST_VOLATILE;
}
#endif
g_assert (vreg != -1);
prev_bb = vreg_to_bb [vreg];
if (prev_bb == 0) {
/* block_num 0 is valid, so store block_num + 1 (0 in the array means unseen) */
vreg_to_bb [vreg] = block_num + 1;
} else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
continue;
if (!get_vreg_to_inst (cfg, vreg)) {
if (G_UNLIKELY (cfg->verbose_level > 2))
printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
switch (regtype) {
case 'i':
if (vreg_is_ref (cfg, vreg))
mono_compile_create_var_for_vreg (cfg, mono_get_object_type (), OP_LOCAL, vreg);
else
mono_compile_create_var_for_vreg (cfg, mono_get_int_type (), OP_LOCAL, vreg);
break;
case 'l':
mono_compile_create_var_for_vreg (cfg, m_class_get_byval_arg (mono_defaults.int64_class), OP_LOCAL, vreg);
break;
case 'f':
mono_compile_create_var_for_vreg (cfg, m_class_get_byval_arg (mono_defaults.double_class), OP_LOCAL, vreg);
break;
case 'v':
case 'x':
mono_compile_create_var_for_vreg (cfg, m_class_get_byval_arg (ins->klass), OP_LOCAL, vreg);
break;
default:
g_assert_not_reached ();
}
}
/* Flag as having been used in more than one bb */
vreg_to_bb [vreg] = -1;
}
}
}
}
/* If a variable is used in only one bblock, convert it into a local vreg */
for (i = 0; i < cfg->num_varinfo; i++) {
MonoInst *var = cfg->varinfo [i];
MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
switch (var->type) {
case STACK_I4:
case STACK_OBJ:
case STACK_PTR:
case STACK_MP:
case STACK_VTYPE:
#if SIZEOF_REGISTER == 8
case STACK_I8:
#endif
#if !defined(TARGET_X86)
/* Enabling this screws up the fp stack on x86 */
case STACK_R8:
#endif
if (mono_arch_is_soft_float ())
break;
/*
if (var->type == STACK_VTYPE && cfg->gsharedvt && mini_is_gsharedvt_variable_type (var->inst_vtype))
break;
*/
/* Arguments are implicitly global */
/* Putting R4 vars into registers doesn't work currently */
/* The gsharedvt vars are implicitly referenced by ldaddr opcodes, but those opcodes are only generated later */
if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (m_class_get_byval_arg (var->klass)->type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg && var != cfg->gsharedvt_info_var && var != cfg->gsharedvt_locals_var && var != cfg->lmf_addr_var) {
/*
* Make sure that the variable's liveness interval doesn't contain a call, since
* that would cause the lvreg to be spilled, making the whole optimization
* useless.
*/
/* This is too slow for JIT compilation */
#if 0
if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
MonoInst *ins;
int def_index, call_index, ins_index;
gboolean spilled = FALSE;
def_index = -1;
call_index = -1;
ins_index = 0;
for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
const char *spec = INS_INFO (ins->opcode);
if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
def_index = ins_index;
if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
((spec [MONO_INST_SRC2] != ' ') && (ins->sreg2 == var->dreg))) {
if (call_index > def_index) {
spilled = TRUE;
break;
}
}
if (MONO_IS_CALL (ins))
call_index = ins_index;
ins_index ++;
}
if (spilled)
break;
}
#endif
if (G_UNLIKELY (cfg->verbose_level > 2))
printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
var->flags |= MONO_INST_IS_DEAD;
cfg->vreg_to_inst [var->dreg] = NULL;
}
break;
}
}
/*
* Compress the varinfo and vars tables so the liveness computation is faster and
* takes up less space.
*/
pos = 0;
for (i = 0; i < cfg->num_varinfo; ++i) {
MonoInst *var = cfg->varinfo [i];
if (pos < i && cfg->locals_start == i)
cfg->locals_start = pos;
if (!(var->flags & MONO_INST_IS_DEAD)) {
if (pos < i) {
cfg->varinfo [pos] = cfg->varinfo [i];
cfg->varinfo [pos]->inst_c0 = pos;
memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
cfg->vars [pos].idx = pos;
#if SIZEOF_REGISTER == 4
if (cfg->varinfo [pos]->type == STACK_I8) {
/* Modify the two component vars too */
MonoInst *var1;
var1 = get_vreg_to_inst (cfg, MONO_LVREG_LS (cfg->varinfo [pos]->dreg));
var1->inst_c0 = pos;
var1 = get_vreg_to_inst (cfg, MONO_LVREG_MS (cfg->varinfo [pos]->dreg));
var1->inst_c0 = pos;
}
#endif
}
pos ++;
}
}
cfg->num_varinfo = pos;
if (cfg->locals_start > cfg->num_varinfo)
cfg->locals_start = cfg->num_varinfo;
}
/*
* mono_allocate_gsharedvt_vars:
*
* Allocate variables with gsharedvt types to entries in the MonoGSharedVtMethodRuntimeInfo.entries array.
* Initialize cfg->gsharedvt_vreg_to_idx with the mapping between vregs and indexes.
*/
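/*
* E.g. (hypothetical) a gsharedvt local with vreg R40 assigned to entry 3 of
* the runtime info array gets cfg->gsharedvt_vreg_to_idx [R40] = 4, i.e.
* idx + 1 since 0 means "no entry", while a gsharedvt argument passed by ref
* is marked with -1.
*/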
void
mono_allocate_gsharedvt_vars (MonoCompile *cfg)
{
int i;
cfg->gsharedvt_vreg_to_idx = (int *)mono_mempool_alloc0 (cfg->mempool, sizeof (int) * cfg->next_vreg);
for (i = 0; i < cfg->num_varinfo; ++i) {
MonoInst *ins = cfg->varinfo [i];
int idx;
if (mini_is_gsharedvt_variable_type (ins->inst_vtype)) {
if (i >= cfg->locals_start) {
/* Local */
idx = get_gsharedvt_info_slot (cfg, ins->inst_vtype, MONO_RGCTX_INFO_LOCAL_OFFSET);
cfg->gsharedvt_vreg_to_idx [ins->dreg] = idx + 1;
ins->opcode = OP_GSHAREDVT_LOCAL;
ins->inst_imm = idx;
} else {
/* Arg */
cfg->gsharedvt_vreg_to_idx [ins->dreg] = -1;
ins->opcode = OP_GSHAREDVT_ARG_REGOFFSET;
}
}
}
}
/**
* mono_spill_global_vars:
*
* Generate spill code for variables which are not allocated to registers,
* and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
* code is generated which could be optimized by the local optimization passes.
*/
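/*
* Example of the generated spill code (hypothetical IR): if R20 was given a
* stack slot [%fp - 0x10] (OP_REGOFFSET), then
*
*     int_add R20 <- R20 R21
*
* becomes, unless one of the _membase fusions below applies,
*
*     loadi4_membase R30 <- [%fp - 0x10]
*     int_add R31 <- R30 R21
*     storei4_membase_reg [%fp - 0x10] <- R31
*
* with R30/R31 fresh local vregs for the local register allocator.
*/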
void
mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
{
MonoBasicBlock *bb;
char spec2 [16];
int orig_next_vreg;
guint32 *vreg_to_lvreg;
guint32 *lvregs;
guint32 i, lvregs_len, lvregs_size;
gboolean dest_has_lvreg = FALSE;
MonoStackType stacktypes [128];
MonoInst **live_range_start, **live_range_end;
MonoBasicBlock **live_range_start_bb, **live_range_end_bb;
*need_local_opts = FALSE;
memset (spec2, 0, sizeof (spec2));
/* FIXME: Move this function to mini.c */
stacktypes [(int)'i'] = STACK_PTR;
stacktypes [(int)'l'] = STACK_I8;
stacktypes [(int)'f'] = STACK_R8;
#ifdef MONO_ARCH_SIMD_INTRINSICS
stacktypes [(int)'x'] = STACK_VTYPE;
#endif
#if SIZEOF_REGISTER == 4
/* Create MonoInsts for longs */
for (i = 0; i < cfg->num_varinfo; i++) {
MonoInst *ins = cfg->varinfo [i];
if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
switch (ins->type) {
case STACK_R8:
case STACK_I8: {
MonoInst *tree;
if (ins->type == STACK_R8 && !COMPILE_SOFT_FLOAT (cfg))
break;
g_assert (ins->opcode == OP_REGOFFSET);
tree = get_vreg_to_inst (cfg, MONO_LVREG_LS (ins->dreg));
g_assert (tree);
tree->opcode = OP_REGOFFSET;
tree->inst_basereg = ins->inst_basereg;
tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
tree = get_vreg_to_inst (cfg, MONO_LVREG_MS (ins->dreg));
g_assert (tree);
tree->opcode = OP_REGOFFSET;
tree->inst_basereg = ins->inst_basereg;
tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
break;
}
default:
break;
}
}
}
#endif
if (cfg->compute_gc_maps) {
/* registers need liveness info even for non-refs */
for (i = 0; i < cfg->num_varinfo; i++) {
MonoInst *ins = cfg->varinfo [i];
if (ins->opcode == OP_REGVAR)
ins->flags |= MONO_INST_GC_TRACK;
}
}
/* FIXME: widening and truncation */
/*
* As an optimization, when a variable allocated to the stack is first loaded into
* an lvreg, we will remember the lvreg and use it the next time instead of loading
* the variable again.
*/
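/*
* E.g. (hypothetical) after emitting
*
*     loadi4_membase R30 <- [%fp - 0x10]   ; first read of stack var R20
*
* vreg_to_lvreg [R20] is set to R30, so a second read of R20 in the same
* bblock reuses R30 instead of emitting another load; the cache is flushed
* at bblock boundaries and after calls (see below).
*/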
orig_next_vreg = cfg->next_vreg;
vreg_to_lvreg = (guint32 *)mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
lvregs_size = 1024;
lvregs = (guint32 *)mono_mempool_alloc (cfg->mempool, sizeof (guint32) * lvregs_size);
lvregs_len = 0;
/*
* These arrays contain the first and last instructions accessing a given
* variable.
* Since we emit bblocks in the same order we process them here, and we
* don't split live ranges, these will precisely describe the live range of
* the variable, i.e. the instruction range where a valid value can be found
* in the variables location.
* The live range is computed using the liveness info computed by the liveness pass.
* We can't use vmv->range, since that is an abstract live range, and we need
* one which is instruction precise.
* FIXME: Variables used in out-of-line bblocks have a hole in their live range.
*/
/* FIXME: Only do this if debugging info is requested */
live_range_start = g_new0 (MonoInst*, cfg->next_vreg);
live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
/* Add spill loads/stores */
for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
MonoInst *ins;
if (cfg->verbose_level > 2)
printf ("\nSPILL BLOCK %d:\n", bb->block_num);
/* Clear vreg_to_lvreg array */
for (i = 0; i < lvregs_len; i++)
vreg_to_lvreg [lvregs [i]] = 0;
lvregs_len = 0;
cfg->cbb = bb;
MONO_BB_FOR_EACH_INS (bb, ins) {
const char *spec = INS_INFO (ins->opcode);
int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
gboolean store, no_lvreg;
int sregs [MONO_MAX_SRC_REGS];
if (G_UNLIKELY (cfg->verbose_level > 2))
mono_print_ins (ins);
if (ins->opcode == OP_NOP)
continue;
/*
* We handle LDADDR here as well, since it can only be decomposed
* when variable addresses are known.
*/
if (ins->opcode == OP_LDADDR) {
MonoInst *var = (MonoInst *)ins->inst_p0;
if (var->opcode == OP_VTARG_ADDR) {
/* Happens on SPARC/S390 where vtypes are passed by reference */
MonoInst *vtaddr = var->inst_left;
if (vtaddr->opcode == OP_REGVAR) {
ins->opcode = OP_MOVE;
ins->sreg1 = vtaddr->dreg;
}
else if (vtaddr->opcode == OP_REGOFFSET) {
ins->opcode = OP_LOAD_MEMBASE;
ins->inst_basereg = vtaddr->inst_basereg;
ins->inst_offset = vtaddr->inst_offset;
} else
NOT_IMPLEMENTED;
} else if (cfg->gsharedvt && cfg->gsharedvt_vreg_to_idx [var->dreg] < 0) {
/* gsharedvt arg passed by ref */
g_assert (var->opcode == OP_GSHAREDVT_ARG_REGOFFSET);
ins->opcode = OP_LOAD_MEMBASE;
ins->inst_basereg = var->inst_basereg;
ins->inst_offset = var->inst_offset;
} else if (cfg->gsharedvt && cfg->gsharedvt_vreg_to_idx [var->dreg]) {
MonoInst *load, *load2, *load3;
int idx = cfg->gsharedvt_vreg_to_idx [var->dreg] - 1;
int reg1, reg2, reg3;
MonoInst *info_var = cfg->gsharedvt_info_var;
MonoInst *locals_var = cfg->gsharedvt_locals_var;
/*
* gsharedvt local.
* Compute the address of the local as gsharedvt_locals_var + gsharedvt_info_var->locals_offsets [idx].
*/
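/*
* Schematically (hypothetical IR), for info_var/locals_var living on the
* stack this expands to:
*
*     load_membase   R1 <- [info_var]           ; MonoGSharedVtMethodRuntimeInfo
*     loadi4_membase R2 <- [R1 + entries + idx * ptr_size]
*     load_membase   R3 <- [locals_var]         ; locals area base
*     padd           dreg <- R3 R2
*/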
g_assert (var->opcode == OP_GSHAREDVT_LOCAL);
g_assert (info_var);
g_assert (locals_var);
/* Mark the instruction used to compute the locals var as used */
cfg->gsharedvt_locals_var_ins = NULL;
/* Load the offset */
if (info_var->opcode == OP_REGOFFSET) {
reg1 = alloc_ireg (cfg);
NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, reg1, info_var->inst_basereg, info_var->inst_offset);
} else if (info_var->opcode == OP_REGVAR) {
load = NULL;
reg1 = info_var->dreg;
} else {
g_assert_not_reached ();
}
reg2 = alloc_ireg (cfg);
NEW_LOAD_MEMBASE (cfg, load2, OP_LOADI4_MEMBASE, reg2, reg1, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * TARGET_SIZEOF_VOID_P));
/* Load the locals area address */
reg3 = alloc_ireg (cfg);
if (locals_var->opcode == OP_REGOFFSET) {
NEW_LOAD_MEMBASE (cfg, load3, OP_LOAD_MEMBASE, reg3, locals_var->inst_basereg, locals_var->inst_offset);
} else if (locals_var->opcode == OP_REGVAR) {
NEW_UNALU (cfg, load3, OP_MOVE, reg3, locals_var->dreg);
} else {
g_assert_not_reached ();
}
/* Compute the address */
ins->opcode = OP_PADD;
ins->sreg1 = reg3;
ins->sreg2 = reg2;
mono_bblock_insert_before_ins (bb, ins, load3);
mono_bblock_insert_before_ins (bb, load3, load2);
if (load)
mono_bblock_insert_before_ins (bb, load2, load);
} else {
g_assert (var->opcode == OP_REGOFFSET);
ins->opcode = OP_ADD_IMM;
ins->sreg1 = var->inst_basereg;
ins->inst_imm = var->inst_offset;
}
*need_local_opts = TRUE;
spec = INS_INFO (ins->opcode);
}
if (ins->opcode < MONO_CEE_LAST) {
mono_print_ins (ins);
g_assert_not_reached ();
}
/*
* Store opcodes have destbasereg in the dreg, but in reality, it is a
* src register.
* FIXME:
*/
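/*
* I.e. for a (hypothetical)
*
*     storei4_membase_reg [R20 + 0x8] <- R21
*
* the base register R20 sits in ins->dreg even though it is read, not
* written, so it is temporarily swapped into sreg2 to be handled by the
* SREGS processing below and swapped back afterwards.
*/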
if (MONO_IS_STORE_MEMBASE (ins)) {
tmp_reg = ins->dreg;
ins->dreg = ins->sreg2;
ins->sreg2 = tmp_reg;
store = TRUE;
spec2 [MONO_INST_DEST] = ' ';
spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
spec2 [MONO_INST_SRC3] = ' ';
spec = spec2;
} else if (MONO_IS_STORE_MEMINDEX (ins))
g_assert_not_reached ();
else
store = FALSE;
no_lvreg = FALSE;
if (G_UNLIKELY (cfg->verbose_level > 2)) {
printf ("\t %.3s %d", spec, ins->dreg);
num_sregs = mono_inst_get_src_registers (ins, sregs);
for (srcindex = 0; srcindex < num_sregs; ++srcindex)
printf (" %d", sregs [srcindex]);
printf ("\n");
}
/***************/
/* DREG */
/***************/
regtype = spec [MONO_INST_DEST];
g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
prev_dreg = -1;
int dreg_using_dest_to_membase_op = -1;
if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
MonoInst *store_ins;
int store_opcode;
MonoInst *def_ins = ins;
int dreg = ins->dreg; /* The original vreg */
store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
if (var->opcode == OP_REGVAR) {
ins->dreg = var->dreg;
} else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
/*
* Instead of emitting a load+store, use a _membase opcode.
*/
g_assert (var->opcode == OP_REGOFFSET);
if (ins->opcode == OP_MOVE) {
NULLIFY_INS (ins);
def_ins = NULL;
} else {
dreg_using_dest_to_membase_op = ins->dreg;
ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
ins->inst_basereg = var->inst_basereg;
ins->inst_offset = var->inst_offset;
ins->dreg = -1;
}
spec = INS_INFO (ins->opcode);
} else {
guint32 lvreg;
g_assert (var->opcode == OP_REGOFFSET);
prev_dreg = ins->dreg;
/* Invalidate any previous lvreg for this vreg */
vreg_to_lvreg [ins->dreg] = 0;
lvreg = 0;
if (COMPILE_SOFT_FLOAT (cfg) && store_opcode == OP_STORER8_MEMBASE_REG) {
regtype = 'l';
store_opcode = OP_STOREI8_MEMBASE_REG;
}
ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
#if SIZEOF_REGISTER != 8
if (regtype == 'l') {
NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, MONO_LVREG_LS (ins->dreg));
mono_bblock_insert_after_ins (bb, ins, store_ins);
NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, MONO_LVREG_MS (ins->dreg));
mono_bblock_insert_after_ins (bb, ins, store_ins);
def_ins = store_ins;
}
else
#endif
{
g_assert (store_opcode != OP_STOREV_MEMBASE);
/* Try to fuse the store into the instruction itself */
/* FIXME: Add more instructions */
if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
ins->inst_imm = ins->inst_c0;
ins->inst_destbasereg = var->inst_basereg;
ins->inst_offset = var->inst_offset;
spec = INS_INFO (ins->opcode);
} else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE) || (ins->opcode == OP_RMOVE))) {
ins->opcode = store_opcode;
ins->inst_destbasereg = var->inst_basereg;
ins->inst_offset = var->inst_offset;
no_lvreg = TRUE;
tmp_reg = ins->dreg;
ins->dreg = ins->sreg2;
ins->sreg2 = tmp_reg;
store = TRUE;
spec2 [MONO_INST_DEST] = ' ';
spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
spec2 [MONO_INST_SRC3] = ' ';
spec = spec2;
} else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
// FIXME: The backends expect the base reg to be in inst_basereg
ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
ins->dreg = -1;
ins->inst_basereg = var->inst_basereg;
ins->inst_offset = var->inst_offset;
spec = INS_INFO (ins->opcode);
} else {
/* printf ("INS: "); mono_print_ins (ins); */
/* Create a store instruction */
NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
/* Insert it after the instruction */
mono_bblock_insert_after_ins (bb, ins, store_ins);
def_ins = store_ins;
/*
* We can't assign ins->dreg to var->dreg here, since the
* sregs could use it. So set a flag, and do it after
* the sregs.
*/
if ((!cfg->backend->use_fpstack || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
dest_has_lvreg = TRUE;
}
}
}
if (def_ins && !live_range_start [dreg]) {
live_range_start [dreg] = def_ins;
live_range_start_bb [dreg] = bb;
}
if (cfg->compute_gc_maps && def_ins && (var->flags & MONO_INST_GC_TRACK)) {
MonoInst *tmp;
MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_DEF);
tmp->inst_c1 = dreg;
mono_bblock_insert_after_ins (bb, def_ins, tmp);
}
}
/************/
/* SREGS */
/************/
num_sregs = mono_inst_get_src_registers (ins, sregs);
for (srcindex = 0; srcindex < 3; ++srcindex) {
regtype = spec [MONO_INST_SRC1 + srcindex];
sreg = sregs [srcindex];
g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
MonoInst *var = get_vreg_to_inst (cfg, sreg);
MonoInst *use_ins = ins;
MonoInst *load_ins;
guint32 load_opcode;
if (var->opcode == OP_REGVAR) {
sregs [srcindex] = var->dreg;
//mono_inst_set_src_registers (ins, sregs);
live_range_end [sreg] = use_ins;
live_range_end_bb [sreg] = bb;
if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
MonoInst *tmp;
MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
/* var->dreg is a hreg */
tmp->inst_c1 = sreg;
mono_bblock_insert_after_ins (bb, ins, tmp);
}
continue;
}
g_assert (var->opcode == OP_REGOFFSET);
load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
g_assert (load_opcode != OP_LOADV_MEMBASE);
if (vreg_to_lvreg [sreg]) {
g_assert (vreg_to_lvreg [sreg] != -1);
/* The variable is already loaded to an lvreg */
if (G_UNLIKELY (cfg->verbose_level > 2))
printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
sregs [srcindex] = vreg_to_lvreg [sreg];
//mono_inst_set_src_registers (ins, sregs);
continue;
}
/* Try to fuse the load into the instruction */
if ((srcindex == 0) && (op_to_op_src1_membase (cfg, load_opcode, ins->opcode) != -1)) {
ins->opcode = op_to_op_src1_membase (cfg, load_opcode, ins->opcode);
sregs [0] = var->inst_basereg;
//mono_inst_set_src_registers (ins, sregs);
ins->inst_offset = var->inst_offset;
} else if ((srcindex == 1) && (op_to_op_src2_membase (cfg, load_opcode, ins->opcode) != -1)) {
ins->opcode = op_to_op_src2_membase (cfg, load_opcode, ins->opcode);
sregs [1] = var->inst_basereg;
//mono_inst_set_src_registers (ins, sregs);
ins->inst_offset = var->inst_offset;
} else {
if (MONO_IS_REAL_MOVE (ins)) {
ins->opcode = OP_NOP;
sreg = ins->dreg;
} else {
//printf ("%d ", srcindex); mono_print_ins (ins);
sreg = alloc_dreg (cfg, stacktypes [regtype]);
if ((!cfg->backend->use_fpstack || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
if (var->dreg == prev_dreg) {
/*
* sreg refers to the value loaded by the load
* emitted below, but we need to use ins->dreg
* since it refers to the store emitted earlier.
*/
sreg = ins->dreg;
}
g_assert (sreg != -1);
if (var->dreg == dreg_using_dest_to_membase_op) {
if (cfg->verbose_level > 2)
printf ("\tCan't cache R%d because it's part of a dreg dest_membase optimization\n", var->dreg);
} else {
vreg_to_lvreg [var->dreg] = sreg;
}
if (lvregs_len >= lvregs_size) {
guint32 *new_lvregs = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * lvregs_size * 2);
memcpy (new_lvregs, lvregs, sizeof (guint32) * lvregs_size);
lvregs = new_lvregs;
lvregs_size *= 2;
}
lvregs [lvregs_len ++] = var->dreg;
}
}
sregs [srcindex] = sreg;
//mono_inst_set_src_registers (ins, sregs);
#if SIZEOF_REGISTER != 8
if (regtype == 'l') {
NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, MONO_LVREG_MS (sreg), var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
mono_bblock_insert_before_ins (bb, ins, load_ins);
NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, MONO_LVREG_LS (sreg), var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
mono_bblock_insert_before_ins (bb, ins, load_ins);
use_ins = load_ins;
}
else
#endif
{
#if SIZEOF_REGISTER == 4
g_assert (load_opcode != OP_LOADI8_MEMBASE);
#endif
NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
mono_bblock_insert_before_ins (bb, ins, load_ins);
use_ins = load_ins;
}
if (cfg->verbose_level > 2)
mono_print_ins_index (0, use_ins);
}
if (var->dreg < orig_next_vreg) {
live_range_end [var->dreg] = use_ins;
live_range_end_bb [var->dreg] = bb;
}
if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
MonoInst *tmp;
MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
tmp->inst_c1 = var->dreg;
mono_bblock_insert_after_ins (bb, ins, tmp);
}
}
}
mono_inst_set_src_registers (ins, sregs);
if (dest_has_lvreg) {
g_assert (ins->dreg != -1);
vreg_to_lvreg [prev_dreg] = ins->dreg;
if (lvregs_len >= lvregs_size) {
guint32 *new_lvregs = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * lvregs_size * 2);
memcpy (new_lvregs, lvregs, sizeof (guint32) * lvregs_size);
lvregs = new_lvregs;
lvregs_size *= 2;
}
lvregs [lvregs_len ++] = prev_dreg;
dest_has_lvreg = FALSE;
}
if (store) {
tmp_reg = ins->dreg;
ins->dreg = ins->sreg2;
ins->sreg2 = tmp_reg;
}
if (MONO_IS_CALL (ins)) {
/* Clear vreg_to_lvreg array */
for (i = 0; i < lvregs_len; i++)
vreg_to_lvreg [lvregs [i]] = 0;
lvregs_len = 0;
} else if (ins->opcode == OP_NOP) {
ins->dreg = -1;
MONO_INST_NULLIFY_SREGS (ins);
}
if (cfg->verbose_level > 2)
mono_print_ins_index (1, ins);
}
/* Extend the live range based on the liveness info */
if (cfg->compute_precise_live_ranges && bb->live_out_set && bb->code) {
for (i = 0; i < cfg->num_varinfo; i ++) {
MonoMethodVar *vi = MONO_VARINFO (cfg, i);
if (vreg_is_volatile (cfg, vi->vreg))
/* The liveness info is incomplete */
continue;
if (mono_bitset_test_fast (bb->live_in_set, i) && !live_range_start [vi->vreg]) {
/* Live from at least the first ins of this bb */
live_range_start [vi->vreg] = bb->code;
live_range_start_bb [vi->vreg] = bb;
}
if (mono_bitset_test_fast (bb->live_out_set, i)) {
/* Live at least until the last ins of this bb */
live_range_end [vi->vreg] = bb->last_ins;
live_range_end_bb [vi->vreg] = bb;
}
}
}
}
/*
* Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
* by storing the current native offset into MonoMethodVar->live_range_start/end.
*/
if (cfg->compute_precise_live_ranges && cfg->comp_done & MONO_COMP_LIVENESS) {
for (i = 0; i < cfg->num_varinfo; ++i) {
int vreg = MONO_VARINFO (cfg, i)->vreg;
MonoInst *ins;
if (live_range_start [vreg]) {
MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
ins->inst_c0 = i;
ins->inst_c1 = vreg;
mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
}
if (live_range_end [vreg]) {
MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
ins->inst_c0 = i;
ins->inst_c1 = vreg;
if (live_range_end [vreg] == live_range_end_bb [vreg]->last_ins)
mono_add_ins_to_end (live_range_end_bb [vreg], ins);
else
mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
}
}
}
if (cfg->gsharedvt_locals_var_ins) {
/* Nullify if unused */
cfg->gsharedvt_locals_var_ins->opcode = OP_PCONST;
cfg->gsharedvt_locals_var_ins->inst_imm = 0;
}
g_free (live_range_start);
g_free (live_range_end);
g_free (live_range_start_bb);
g_free (live_range_end_bb);
}
/**
* FIXME:
* - use 'iadd' instead of 'int_add'
* - handling ovf opcodes: decompose in method_to_ir.
* - unify iregs/fregs
* -> partly done, the missing parts are:
* - a more complete unification would involve unifying the hregs as well, so
* code wouldn't need if (fp) all over the place. but that would mean the hregs
* would no longer map to the machine hregs, so the code generators would need to
* be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
* wouldn't work any more. Duplicating the code in mono_local_regalloc () into
* fp/non-fp branches speeds it up by about 15%.
* - use sext/zext opcodes instead of shifts
* - add OP_ICALL
* - get rid of TEMPLOADs if possible and use vregs instead
* - clean up usage of OP_P/OP_ opcodes
* - cleanup usage of DUMMY_USE
* - cleanup the setting of ins->type for MonoInst's which are pushed on the
* stack
* - set the stack type and allocate a dreg in the EMIT_NEW macros
* - get rid of all the <foo>2 stuff when the new JIT is ready.
* - make sure handle_stack_args () is called before the branch is emitted
* - when the new IR is done, get rid of all unused stuff
* - COMPARE/BEQ as separate instructions or unify them ?
* - keeping them separate allows specialized compare instructions like
* compare_imm, compare_membase
* - most back ends unify fp compare+branch, fp compare+ceq
* - integrate mono_save_args into inline_method
* - get rid of the empty bblocks created by MONO_EMIT_NEW_BRACH_BLOCK2
* - handle long shift opts on 32 bit platforms somehow: they require
* 3 sregs (2 for arg1 and 1 for arg2)
* - make byref a 'normal' type.
* - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
* variable if needed.
* - do not start a new IL level bblock when cfg->cbb is changed by a function call
* like inline_method.
* - remove inlining restrictions
* - fix LNEG and enable cfold of INEG
* - generalize x86 optimizations like ldelema as a peephole optimization
* - add store_mem_imm for amd64
* - optimize the loading of the interruption flag in the managed->native wrappers
* - avoid special handling of OP_NOP in passes
* - move code inserting instructions into one function/macro.
* - try a coalescing phase after liveness analysis
* - add float -> vreg conversion + local optimizations on !x86
* - figure out how to handle decomposed branches during optimizations, ie.
* compare+branch, op_jump_table+op_br etc.
* - promote RuntimeXHandles to vregs
* - vtype cleanups:
* - add a NEW_VARLOADA_VREG macro
* - the vtype optimizations are blocked by the LDADDR opcodes generated for
* accessing vtype fields.
* - get rid of I8CONST on 64 bit platforms
* - dealing with the increase in code size due to branches created during opcode
* decomposition:
* - use extended basic blocks
* - all parts of the JIT
* - handle_global_vregs () && local regalloc
* - avoid introducing global vregs during decomposition, like 'vtable' in isinst
* - sources of increase in code size:
* - vtypes
* - long compares
* - isinst and castclass
* - lvregs not allocated to global registers even if used multiple times
* - call cctors outside the JIT, to make -v output more readable and JIT timings more
* meaningful.
* - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
* - add all micro optimizations from the old JIT
* - put tree optimizations into the deadce pass
* - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
* specific function.
* - unify the float comparison opcodes with the other comparison opcodes, i.e.
* fcompare + branchCC.
* - create a helper function for allocating a stack slot, taking into account
* MONO_CFG_HAS_SPILLUP.
* - merge r68207.
* - optimize mono_regstate2_alloc_int/float.
* - fix the pessimistic handling of variables accessed in exception handler blocks.
* - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
* parts of the tree could be separated by other instructions, killing the tree
* arguments, or stores killing loads etc. Also, should we fold loads into other
* instructions if the result of the load is used multiple times ?
* - make the REM_IMM optimization in mini-x86.c arch-independent.
* - LAST MERGE: 108395.
* - when returning vtypes in registers, generate IR and append it to the end of the
* last bb instead of doing it in the epilog.
* - change the store opcodes so they use sreg1 instead of dreg to store the base register.
*/
/*
NOTES
-----
- When to decompose opcodes:
- earlier: this makes some optimizations hard to implement, since the low level IR
no longer contains the necessary information. But it is easier to do.
- later: harder to implement, enables more optimizations.
- Branches inside bblocks:
- created when decomposing complex opcodes.
- branches to another bblock: harmless, but not tracked by the branch
optimizations, so need to branch to a label at the start of the bblock.
- branches to inside the same bblock: very problematic, trips up the local
reg allocator. Can be fixed by splitting the current bblock, but that is a
complex operation, since some local vregs can become global vregs etc.
- Local/global vregs:
- local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
local register allocator.
- global vregs: used in more than one bblock. Have an associated MonoMethodVar
structure, created by mono_create_var (). Assigned to hregs or the stack by
the global register allocator.
- When to do optimizations like alu->alu_imm:
- earlier -> saves work later on since the IR will be smaller/simpler
- later -> can work on more instructions
- Handling of valuetypes:
- When a vtype is pushed on the stack, a new temporary is created, an
instruction computing its address (LDADDR) is emitted and pushed on
the stack. Need to optimize cases when the vtype is used immediately as in
argument passing, stloc etc.
- Instead of the to_end stuff in the old JIT, simply call the function handling
the values on the stack before emitting the last instruction of the bb.
*/
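/*
* Example for the valuetype note above (hypothetical IL):
*
*     ldloc.0                  // a struct local
*     call void Foo (MyStruct)
*
* pushes an LDADDR of a temporary on the evaluation stack; the call lowering
* then copies through that address, which is the case worth optimizing when
* the vtype is consumed immediately.
*/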
#else /* !DISABLE_JIT */
MONO_EMPTY_SOURCE_FILE (method_to_ir);
#endif /* !DISABLE_JIT */
| /**
* \file
* Convert CIL to the JIT internal representation
*
* Author:
* Paolo Molaro ([email protected])
* Dietmar Maurer ([email protected])
*
* (C) 2002 Ximian, Inc.
* Copyright 2003-2010 Novell, Inc (http://www.novell.com)
* Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
* Licensed under the MIT license. See LICENSE file in the project root for full license information.
*/
#include <config.h>
#include <glib.h>
#include <mono/utils/mono-compiler.h>
#include "mini.h"
#ifndef DISABLE_JIT
#include <signal.h>
#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif
#include <math.h>
#include <string.h>
#include <ctype.h>
#ifdef HAVE_SYS_TIME_H
#include <sys/time.h>
#endif
#ifdef HAVE_ALLOCA_H
#include <alloca.h>
#endif
#include <mono/utils/memcheck.h>
#include <mono/metadata/abi-details.h>
#include <mono/metadata/assembly.h>
#include <mono/metadata/assembly-internals.h>
#include <mono/metadata/attrdefs.h>
#include <mono/metadata/loader.h>
#include <mono/metadata/tabledefs.h>
#include <mono/metadata/class.h>
#include <mono/metadata/class-abi-details.h>
#include <mono/metadata/object.h>
#include <mono/metadata/exception.h>
#include <mono/metadata/exception-internals.h>
#include <mono/metadata/opcodes.h>
#include <mono/metadata/mono-endian.h>
#include <mono/metadata/tokentype.h>
#include <mono/metadata/tabledefs.h>
#include <mono/metadata/marshal.h>
#include <mono/metadata/debug-helpers.h>
#include <mono/metadata/debug-internals.h>
#include <mono/metadata/gc-internals.h>
#include <mono/metadata/threads-types.h>
#include <mono/metadata/profiler-private.h>
#include <mono/metadata/profiler.h>
#include <mono/metadata/monitor.h>
#include <mono/utils/mono-memory-model.h>
#include <mono/utils/mono-error-internals.h>
#include <mono/metadata/mono-basic-block.h>
#include <mono/metadata/reflection-internals.h>
#include <mono/utils/mono-threads-coop.h>
#include <mono/utils/mono-utils-debug.h>
#include <mono/utils/mono-logger-internals.h>
#include <mono/metadata/verify-internals.h>
#include <mono/metadata/icall-decl.h>
#include "mono/metadata/icall-signatures.h"
#include "trace.h"
#include "ir-emit.h"
#include "jit-icalls.h"
#include <mono/jit/jit.h>
#include "seq-points.h"
#include "aot-compiler.h"
#include "mini-llvm.h"
#include "mini-runtime.h"
#include "llvmonly-runtime.h"
#include "mono/utils/mono-tls-inline.h"
#define BRANCH_COST 10
#define CALL_COST 10
/* Used for the JIT */
#define INLINE_LENGTH_LIMIT 20
/*
* The aot and jit inline limits should be different:
* aot sees the whole program, so we can let opt inline methods for us,
* while the jit only sees one method, so we have to inline things ourselves.
*/
/* Used by LLVM AOT */
#define LLVM_AOT_INLINE_LENGTH_LIMIT 30
/* Used by the LLVM JIT */
#define LLVM_JIT_INLINE_LENGTH_LIMIT 100
static const gboolean debug_tailcall = FALSE; // logging
static const gboolean debug_tailcall_try_all = FALSE; // consider any call followed by ret
gboolean
mono_tailcall_print_enabled (void)
{
return debug_tailcall || MONO_TRACE_IS_TRACED (G_LOG_LEVEL_DEBUG, MONO_TRACE_TAILCALL);
}
void
mono_tailcall_print (const char *format, ...)
{
if (!mono_tailcall_print_enabled ())
return;
va_list args;
va_start (args, format);
g_printv (format, args);
va_end (args);
}
/* These have 'cfg' as an implicit argument */
#define INLINE_FAILURE(msg) do { \
if ((cfg->method != cfg->current_method) && (cfg->current_method->wrapper_type == MONO_WRAPPER_NONE)) { \
inline_failure (cfg, msg); \
goto exception_exit; \
} \
} while (0)
#define CHECK_CFG_EXCEPTION do {\
if (cfg->exception_type != MONO_EXCEPTION_NONE) \
goto exception_exit; \
} while (0)
#define FIELD_ACCESS_FAILURE(method, field) do { \
field_access_failure ((cfg), (method), (field)); \
goto exception_exit; \
} while (0)
#define GENERIC_SHARING_FAILURE(opcode) do { \
if (cfg->gshared) { \
gshared_failure (cfg, opcode, __FILE__, __LINE__); \
goto exception_exit; \
} \
} while (0)
#define GSHAREDVT_FAILURE(opcode) do { \
if (cfg->gsharedvt) { \
gsharedvt_failure (cfg, opcode, __FILE__, __LINE__); \
goto exception_exit; \
} \
} while (0)
#define OUT_OF_MEMORY_FAILURE do { \
mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR); \
mono_error_set_out_of_memory (cfg->error, ""); \
goto exception_exit; \
} while (0)
#define DISABLE_AOT(cfg) do { \
if ((cfg)->verbose_level >= 2) \
printf ("AOT disabled: %s:%d\n", __FILE__, __LINE__); \
(cfg)->disable_aot = TRUE; \
} while (0)
#define LOAD_ERROR do { \
break_on_unverified (); \
mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD); \
goto exception_exit; \
} while (0)
#define TYPE_LOAD_ERROR(klass) do { \
cfg->exception_ptr = klass; \
LOAD_ERROR; \
} while (0)
#define CHECK_CFG_ERROR do {\
if (!is_ok (cfg->error)) { \
mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR); \
goto mono_error_exit; \
} \
} while (0)
int mono_op_to_op_imm (int opcode);
int mono_op_to_op_imm_noemul (int opcode);
static int inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
guchar *ip, guint real_offset, gboolean inline_always, gboolean *is_empty);
static MonoInst*
convert_value (MonoCompile *cfg, MonoType *type, MonoInst *ins);
/* helper methods signatures */
/* type loading helpers */
static GENERATE_GET_CLASS_WITH_CACHE (iequatable, "System", "IEquatable`1")
static GENERATE_GET_CLASS_WITH_CACHE (geqcomparer, "System.Collections.Generic", "GenericEqualityComparer`1");
/*
* Instruction metadata
*/
#ifdef MINI_OP
#undef MINI_OP
#endif
#ifdef MINI_OP3
#undef MINI_OP3
#endif
#define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
#define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
#define NONE ' '
#define IREG 'i'
#define FREG 'f'
#define VREG 'v'
#define XREG 'x'
#if SIZEOF_REGISTER == 8 && SIZEOF_REGISTER == TARGET_SIZEOF_VOID_P
#define LREG IREG
#else
#define LREG 'l'
#endif
/* keep in sync with the enum in mini.h */
const char
mini_ins_info[] = {
#include "mini-ops.h"
};
#undef MINI_OP
#undef MINI_OP3
#define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
#define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
/*
* This should contain the index of the last sreg + 1. This is not the same
* as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
*/
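/*
* E.g. a (hypothetical) opcode whose spec uses src2 but not src1 gets a
* count of 2, not 1, since the count is derived from the position of the
* last used sreg, as the MINI_OP expansions above show.
*/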
const gint8 mini_ins_sreg_counts[] = {
#include "mini-ops.h"
};
#undef MINI_OP
#undef MINI_OP3
guint32
mono_alloc_ireg (MonoCompile *cfg)
{
return alloc_ireg (cfg);
}
guint32
mono_alloc_lreg (MonoCompile *cfg)
{
return alloc_lreg (cfg);
}
guint32
mono_alloc_freg (MonoCompile *cfg)
{
return alloc_freg (cfg);
}
guint32
mono_alloc_preg (MonoCompile *cfg)
{
return alloc_preg (cfg);
}
guint32
mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
{
return alloc_dreg (cfg, stack_type);
}
/*
* mono_alloc_ireg_ref:
*
* Allocate an IREG, and mark it as holding a GC ref.
*/
guint32
mono_alloc_ireg_ref (MonoCompile *cfg)
{
return alloc_ireg_ref (cfg);
}
/*
* mono_alloc_ireg_mp:
*
* Allocate an IREG, and mark it as holding a managed pointer.
*/
guint32
mono_alloc_ireg_mp (MonoCompile *cfg)
{
return alloc_ireg_mp (cfg);
}
/*
* mono_alloc_ireg_copy:
*
* Allocate an IREG with the same GC type as VREG.
*/
guint32
mono_alloc_ireg_copy (MonoCompile *cfg, guint32 vreg)
{
if (vreg_is_ref (cfg, vreg))
return alloc_ireg_ref (cfg);
else if (vreg_is_mp (cfg, vreg))
return alloc_ireg_mp (cfg);
else
return alloc_ireg (cfg);
}
guint
mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
{
if (m_type_is_byref (type))
return OP_MOVE;
type = mini_get_underlying_type (type);
handle_enum:
switch (type->type) {
case MONO_TYPE_I1:
case MONO_TYPE_U1:
return OP_MOVE;
case MONO_TYPE_I2:
case MONO_TYPE_U2:
return OP_MOVE;
case MONO_TYPE_I4:
case MONO_TYPE_U4:
return OP_MOVE;
case MONO_TYPE_I:
case MONO_TYPE_U:
case MONO_TYPE_PTR:
case MONO_TYPE_FNPTR:
return OP_MOVE;
case MONO_TYPE_CLASS:
case MONO_TYPE_STRING:
case MONO_TYPE_OBJECT:
case MONO_TYPE_SZARRAY:
case MONO_TYPE_ARRAY:
return OP_MOVE;
case MONO_TYPE_I8:
case MONO_TYPE_U8:
#if SIZEOF_REGISTER == 8
return OP_MOVE;
#else
return OP_LMOVE;
#endif
case MONO_TYPE_R4:
return cfg->r4fp ? OP_RMOVE : OP_FMOVE;
case MONO_TYPE_R8:
return OP_FMOVE;
case MONO_TYPE_VALUETYPE:
if (m_class_is_enumtype (type->data.klass)) {
type = mono_class_enum_basetype_internal (type->data.klass);
goto handle_enum;
}
if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type_internal (type)))
return OP_XMOVE;
return OP_VMOVE;
case MONO_TYPE_TYPEDBYREF:
return OP_VMOVE;
case MONO_TYPE_GENERICINST:
if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type_internal (type)))
return OP_XMOVE;
type = m_class_get_byval_arg (type->data.generic_class->container_class);
goto handle_enum;
case MONO_TYPE_VAR:
case MONO_TYPE_MVAR:
g_assert (cfg->gshared);
if (mini_type_var_is_vt (type))
return OP_VMOVE;
else
return mono_type_to_regmove (cfg, mini_get_underlying_type (type));
default:
g_error ("unknown type 0x%02x in type_to_regstore", type->type);
}
return -1;
}
void
mono_print_bb (MonoBasicBlock *bb, const char *msg)
{
int i;
MonoInst *tree;
GString *str = g_string_new ("");
g_string_append_printf (str, "%s %d: [IN: ", msg, bb->block_num);
for (i = 0; i < bb->in_count; ++i)
g_string_append_printf (str, " BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
g_string_append_printf (str, ", OUT: ");
for (i = 0; i < bb->out_count; ++i)
g_string_append_printf (str, " BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
g_string_append_printf (str, " ]\n");
g_print ("%s", str->str);
g_string_free (str, TRUE);
for (tree = bb->code; tree; tree = tree->next)
mono_print_ins_index (-1, tree);
}
static MONO_NEVER_INLINE gboolean
break_on_unverified (void)
{
if (mini_debug_options.break_on_unverified) {
G_BREAKPOINT ();
return TRUE;
}
return FALSE;
}
static void
clear_cfg_error (MonoCompile *cfg)
{
mono_error_cleanup (cfg->error);
error_init (cfg->error);
}
static MONO_NEVER_INLINE void
field_access_failure (MonoCompile *cfg, MonoMethod *method, MonoClassField *field)
{
char *method_fname = mono_method_full_name (method, TRUE);
char *field_fname = mono_field_full_name (field);
mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
mono_error_set_generic_error (cfg->error, "System", "FieldAccessException", "Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname);
g_free (method_fname);
g_free (field_fname);
}
static MONO_NEVER_INLINE void
inline_failure (MonoCompile *cfg, const char *msg)
{
if (cfg->verbose_level >= 2)
printf ("inline failed: %s\n", msg);
mono_cfg_set_exception (cfg, MONO_EXCEPTION_INLINE_FAILED);
}
static MONO_NEVER_INLINE void
gshared_failure (MonoCompile *cfg, int opcode, const char *file, int line)
{
if (cfg->verbose_level > 2)
printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", m_class_get_name_space (cfg->current_method->klass), m_class_get_name (cfg->current_method->klass), cfg->current_method->name, cfg->current_method->signature->param_count, mono_opcode_name (opcode), line);
mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED);
}
static MONO_NEVER_INLINE void
gsharedvt_failure (MonoCompile *cfg, int opcode, const char *file, int line)
{
cfg->exception_message = g_strdup_printf ("gsharedvt failed for method %s.%s.%s/%d opcode %s %s:%d", m_class_get_name_space (cfg->current_method->klass), m_class_get_name (cfg->current_method->klass), cfg->current_method->name, cfg->current_method->signature->param_count, mono_opcode_name ((opcode)), file, line);
if (cfg->verbose_level >= 2)
printf ("%s\n", cfg->exception_message);
mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED);
}
void
mini_set_inline_failure (MonoCompile *cfg, const char *msg)
{
if (cfg->verbose_level >= 2)
printf ("inline failed: %s\n", msg);
mono_cfg_set_exception (cfg, MONO_EXCEPTION_INLINE_FAILED);
}
/*
* When using gsharedvt, some instantiations might be verifiable and some might not be, i.e.
* foo<T> (int i) { ldarg.0; box T; }
*/
#define UNVERIFIED do { \
if (cfg->gsharedvt) { \
if (cfg->verbose_level > 2) \
printf ("gsharedvt method failed to verify, falling back to instantiation.\n"); \
mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
goto exception_exit; \
} \
break_on_unverified (); \
goto unverified; \
} while (0)
#define GET_BBLOCK(cfg,tblock,ip) do { \
(tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
if (!(tblock)) { \
if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
NEW_BBLOCK (cfg, (tblock)); \
(tblock)->cil_code = (ip); \
ADD_BBLOCK (cfg, (tblock)); \
} \
} while (0)
/* Emit conversions so both operands of a binary opcode are of the same type */
static void
add_widen_op (MonoCompile *cfg, MonoInst *ins, MonoInst **arg1_ref, MonoInst **arg2_ref)
{
MonoInst *arg1 = *arg1_ref;
MonoInst *arg2 = *arg2_ref;
if (cfg->r4fp &&
((arg1->type == STACK_R4 && arg2->type == STACK_R8) ||
(arg1->type == STACK_R8 && arg2->type == STACK_R4))) {
MonoInst *conv;
/* Mixing r4/r8 is allowed by the spec */
if (arg1->type == STACK_R4) {
int dreg = alloc_freg (cfg);
EMIT_NEW_UNALU (cfg, conv, OP_RCONV_TO_R8, dreg, arg1->dreg);
conv->type = STACK_R8;
ins->sreg1 = dreg;
*arg1_ref = conv;
}
if (arg2->type == STACK_R4) {
int dreg = alloc_freg (cfg);
EMIT_NEW_UNALU (cfg, conv, OP_RCONV_TO_R8, dreg, arg2->dreg);
conv->type = STACK_R8;
ins->sreg2 = dreg;
*arg2_ref = conv;
}
}
#if SIZEOF_REGISTER == 8
/* FIXME: Need to add many more cases */
if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) {
MonoInst *widen;
int dr = alloc_preg (cfg);
EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg);
(ins)->sreg2 = widen->dreg;
}
#endif
}
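/*
* Example (hypothetical IR): comparing a native-size value with an int32 on
* a 64-bit target inserts a sign extension first,
*
*     sext_i4 R30 <- R21
*     compare R20 R30
*
* and mixing r4/r8 operands similarly promotes the r4 side via rconv_to_r8.
*/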
#define ADD_UNOP(op) do { \
MONO_INST_NEW (cfg, ins, (op)); \
sp--; \
ins->sreg1 = sp [0]->dreg; \
type_from_op (cfg, ins, sp [0], NULL); \
CHECK_TYPE (ins); \
(ins)->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type); \
MONO_ADD_INS ((cfg)->cbb, (ins)); \
*sp++ = mono_decompose_opcode (cfg, ins); \
} while (0)
#define ADD_BINCOND(next_block) do { \
MonoInst *cmp; \
sp -= 2; \
MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
cmp->sreg1 = sp [0]->dreg; \
cmp->sreg2 = sp [1]->dreg; \
add_widen_op (cfg, cmp, &sp [0], &sp [1]); \
type_from_op (cfg, cmp, sp [0], sp [1]); \
CHECK_TYPE (cmp); \
type_from_op (cfg, ins, sp [0], sp [1]); \
ins->inst_many_bb = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
GET_BBLOCK (cfg, tblock, target); \
link_bblock (cfg, cfg->cbb, tblock); \
ins->inst_true_bb = tblock; \
if ((next_block)) { \
link_bblock (cfg, cfg->cbb, (next_block)); \
ins->inst_false_bb = (next_block); \
start_new_bblock = 1; \
} else { \
GET_BBLOCK (cfg, tblock, next_ip); \
link_bblock (cfg, cfg->cbb, tblock); \
ins->inst_false_bb = tblock; \
start_new_bblock = 2; \
} \
if (sp != stack_start) { \
handle_stack_args (cfg, stack_start, sp - stack_start); \
CHECK_UNVERIFIABLE (cfg); \
} \
MONO_ADD_INS (cfg->cbb, cmp); \
MONO_ADD_INS (cfg->cbb, ins); \
} while (0)
/**
* link_bblock: Links two basic blocks
*
* Links two basic blocks in the control flow graph: the 'from'
* argument is the starting block and the 'to' argument is the block
* control flow transfers to after 'from'.
*/
static void
link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
{
MonoBasicBlock **newa;
int i, found;
#if 0
if (from->cil_code) {
if (to->cil_code)
printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
else
printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
} else {
if (to->cil_code)
printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
else
printf ("edge from entry to exit\n");
}
#endif
found = FALSE;
for (i = 0; i < from->out_count; ++i) {
if (to == from->out_bb [i]) {
found = TRUE;
break;
}
}
if (!found) {
newa = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
for (i = 0; i < from->out_count; ++i) {
newa [i] = from->out_bb [i];
}
newa [i] = to;
from->out_count++;
from->out_bb = newa;
}
found = FALSE;
for (i = 0; i < to->in_count; ++i) {
if (from == to->in_bb [i]) {
found = TRUE;
break;
}
}
if (!found) {
newa = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
for (i = 0; i < to->in_count; ++i) {
newa [i] = to->in_bb [i];
}
newa [i] = from;
to->in_count++;
to->in_bb = newa;
}
}
void
mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
{
link_bblock (cfg, from, to);
}
static void
mono_create_spvar_for_region (MonoCompile *cfg, int region);
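/*
 * mark_bb_in_region:
 *
 * Assign REGION to every basic block in [start, end) which is not already
 * claimed by a more specific clause; a try region is only overwritten by a
 * handler region.
 */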
static void
mark_bb_in_region (MonoCompile *cfg, guint region, uint32_t start, uint32_t end)
{
MonoBasicBlock *bb = cfg->cil_offset_to_bb [start];
	//start must exist in cil_offset_to_bb since these are IL offsets used by EH, for which GET_BBLOCK was done early.
g_assert (bb);
if (cfg->verbose_level > 1)
g_print ("FIRST BB for %d is BB_%d\n", start, bb->block_num);
for (; bb && bb->real_offset < end; bb = bb->next_bb) {
//no one claimed this bb, take it.
if (bb->region == -1) {
bb->region = region;
continue;
}
//current region is an early handler, bail
if ((bb->region & (0xf << 4)) != MONO_REGION_TRY) {
continue;
}
//current region is a try, only overwrite if new region is a handler
if ((region & (0xf << 4)) != MONO_REGION_TRY) {
bb->region = region;
}
}
if (cfg->spvars)
mono_create_spvar_for_region (cfg, region);
}
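/*
 * compute_bb_regions:
 *
 * Compute the EH region of every basic block from the exception clauses in
 * the method header.
 */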
static void
compute_bb_regions (MonoCompile *cfg)
{
MonoBasicBlock *bb;
MonoMethodHeader *header = cfg->header;
int i;
for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
bb->region = -1;
for (i = 0; i < header->num_clauses; ++i) {
MonoExceptionClause *clause = &header->clauses [i];
if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER)
mark_bb_in_region (cfg, ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags, clause->data.filter_offset, clause->handler_offset);
guint handler_region;
if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
handler_region = ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
handler_region = ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
else
handler_region = ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
mark_bb_in_region (cfg, handler_region, clause->handler_offset, clause->handler_offset + clause->handler_len);
mark_bb_in_region (cfg, ((i + 1) << 8) | clause->flags, clause->try_offset, clause->try_offset + clause->try_len);
}
if (cfg->verbose_level > 2) {
MonoBasicBlock *bb;
for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
g_print ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
}
}
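/*
 * ip_in_finally_clause:
 *
 * Return TRUE if OFFSET is inside the handler of a finally or fault clause.
 */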
static gboolean
ip_in_finally_clause (MonoCompile *cfg, int offset)
{
MonoMethodHeader *header = cfg->header;
MonoExceptionClause *clause;
int i;
for (i = 0; i < header->num_clauses; ++i) {
clause = &header->clauses [i];
if (clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY && clause->flags != MONO_EXCEPTION_CLAUSE_FAULT)
continue;
if (MONO_OFFSET_IN_HANDLER (clause, offset))
return TRUE;
}
return FALSE;
}
/* Find clauses between ip and target, from inner to outer */
static GList*
mono_find_leave_clauses (MonoCompile *cfg, guchar *ip, guchar *target)
{
MonoMethodHeader *header = cfg->header;
MonoExceptionClause *clause;
int i;
GList *res = NULL;
for (i = 0; i < header->num_clauses; ++i) {
clause = &header->clauses [i];
if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
(!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
MonoLeaveClause *leave = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoLeaveClause));
leave->index = i;
leave->clause = clause;
res = g_list_append_mempool (cfg->mempool, res, leave);
}
}
return res;
}
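/*
 * mono_create_spvar_for_region:
 *
 * Create the stack pointer variable for REGION if it doesn't exist yet,
 * keeping it out of the register allocator.
 */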
static void
mono_create_spvar_for_region (MonoCompile *cfg, int region)
{
MonoInst *var;
var = (MonoInst *)g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
if (var)
return;
var = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL);
/* prevent it from being register allocated */
var->flags |= MONO_INST_VOLATILE;
g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
}
MonoInst *
mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
{
return (MonoInst *)g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
}
static MonoInst*
mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
{
MonoInst *var;
var = (MonoInst *)g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
if (var)
return var;
var = mono_compile_create_var (cfg, mono_get_object_type (), OP_LOCAL);
/* prevent it from being register allocated */
var->flags |= MONO_INST_VOLATILE;
g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
return var;
}
/*
* Returns the type used in the eval stack when @type is loaded.
* FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
*/
void
mini_type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
{
MonoClass *klass;
type = mini_get_underlying_type (type);
inst->klass = klass = mono_class_from_mono_type_internal (type);
if (m_type_is_byref (type)) {
inst->type = STACK_MP;
return;
}
handle_enum:
switch (type->type) {
case MONO_TYPE_VOID:
inst->type = STACK_INV;
return;
case MONO_TYPE_I1:
case MONO_TYPE_U1:
case MONO_TYPE_I2:
case MONO_TYPE_U2:
case MONO_TYPE_I4:
case MONO_TYPE_U4:
inst->type = STACK_I4;
return;
case MONO_TYPE_I:
case MONO_TYPE_U:
case MONO_TYPE_PTR:
case MONO_TYPE_FNPTR:
inst->type = STACK_PTR;
return;
case MONO_TYPE_CLASS:
case MONO_TYPE_STRING:
case MONO_TYPE_OBJECT:
case MONO_TYPE_SZARRAY:
case MONO_TYPE_ARRAY:
inst->type = STACK_OBJ;
return;
case MONO_TYPE_I8:
case MONO_TYPE_U8:
inst->type = STACK_I8;
return;
case MONO_TYPE_R4:
inst->type = cfg->r4_stack_type;
break;
case MONO_TYPE_R8:
inst->type = STACK_R8;
return;
case MONO_TYPE_VALUETYPE:
if (m_class_is_enumtype (type->data.klass)) {
type = mono_class_enum_basetype_internal (type->data.klass);
goto handle_enum;
} else {
inst->klass = klass;
inst->type = STACK_VTYPE;
return;
}
case MONO_TYPE_TYPEDBYREF:
inst->klass = mono_defaults.typed_reference_class;
inst->type = STACK_VTYPE;
return;
case MONO_TYPE_GENERICINST:
type = m_class_get_byval_arg (type->data.generic_class->container_class);
goto handle_enum;
case MONO_TYPE_VAR:
case MONO_TYPE_MVAR:
g_assert (cfg->gshared);
if (mini_is_gsharedvt_type (type)) {
g_assert (cfg->gsharedvt);
inst->type = STACK_VTYPE;
} else {
mini_type_to_eval_stack_type (cfg, mini_get_underlying_type (type), inst);
}
return;
default:
g_error ("unknown type 0x%02x in eval stack type", type->type);
}
}
/*
* The following tables are used to quickly validate the IL code in type_from_op ().
*/
#define IF_P8(v) (SIZEOF_VOID_P == 8 ? v : STACK_INV)
#define IF_P8_I8 IF_P8(STACK_I8)
#define IF_P8_PTR IF_P8(STACK_PTR)
static const char
bin_num_table [STACK_MAX] [STACK_MAX] = {
{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
{STACK_INV, STACK_I4, IF_P8_I8, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
{STACK_INV, IF_P8_I8, STACK_I8, IF_P8_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
{STACK_INV, STACK_PTR, IF_P8_PTR, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R8},
{STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R4}
};
static const char
neg_table [] = {
STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R4
};
/* reduce the size of this table */
static const char
bin_int_table [STACK_MAX] [STACK_MAX] = {
{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
{STACK_INV, STACK_I4, IF_P8_I8, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
{STACK_INV, IF_P8_I8, STACK_I8, IF_P8_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
{STACK_INV, STACK_PTR, IF_P8_PTR, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
};
#define P1 (SIZEOF_VOID_P == 8)
static const char
bin_comp_table [STACK_MAX] [STACK_MAX] = {
/* Inv i L p F & O vt r4 */
{0},
{0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
{0, 0, 1,P1, 0, 0, 0, 0}, /* L, int64 */
{0, 1,P1, 1, 0, 2, 4, 0}, /* p, ptr */
{0, 0, 0, 0, 1, 0, 0, 0, 1}, /* F, R8 */
{0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
{0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
{0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
{0, 0, 0, 0, 1, 0, 0, 0, 1}, /* r, r4 */
};
#undef P1
/* reduce the size of this table */
static const char
shift_table [STACK_MAX] [STACK_MAX] = {
{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
{STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
{STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
{STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
};
/*
* Tables to map from the non-specific opcode to the matching
* type-specific opcode.
*/
/* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
static const guint16
binops_op_map [STACK_MAX] = {
0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD, 0, 0, OP_RADD-CEE_ADD
};
/* handles from CEE_NEG to CEE_CONV_U8 */
static const guint16
unops_op_map [STACK_MAX] = {
0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG, 0, 0, OP_RNEG-CEE_NEG
};
/* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
static const guint16
ovfops_op_map [STACK_MAX] = {
0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, 0, OP_RCONV_TO_U2-CEE_CONV_U2
};
/* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
static const guint16
ovf2ops_op_map [STACK_MAX] = {
0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, 0, 0, OP_RCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
};
/* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
static const guint16
ovf3ops_op_map [STACK_MAX] = {
0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, 0, 0, OP_RCONV_TO_OVF_I1-CEE_CONV_OVF_I1
};
/* handles from CEE_BEQ to CEE_BLT_UN */
static const guint16
beqops_op_map [STACK_MAX] = {
0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, 0, OP_FBEQ-CEE_BEQ
};
/* handles from CEE_CEQ to CEE_CLT_UN */
static const guint16
ceqops_op_map [STACK_MAX] = {
0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, 0, OP_RCEQ-OP_CEQ
};
/*
* Sets ins->type (the type on the eval stack) according to the
* type of the opcode and the arguments to it.
* Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
*
* FIXME: this function sets ins->type unconditionally in some cases, but
* it should set it to invalid for some types (a conv.x on an object)
*/
static void
type_from_op (MonoCompile *cfg, MonoInst *ins, MonoInst *src1, MonoInst *src2)
{
switch (ins->opcode) {
/* binops */
case MONO_CEE_ADD:
case MONO_CEE_SUB:
case MONO_CEE_MUL:
case MONO_CEE_DIV:
case MONO_CEE_REM:
/* FIXME: check unverifiable args for STACK_MP */
ins->type = bin_num_table [src1->type] [src2->type];
ins->opcode += binops_op_map [ins->type];
break;
case MONO_CEE_DIV_UN:
case MONO_CEE_REM_UN:
case MONO_CEE_AND:
case MONO_CEE_OR:
case MONO_CEE_XOR:
ins->type = bin_int_table [src1->type] [src2->type];
ins->opcode += binops_op_map [ins->type];
break;
case MONO_CEE_SHL:
case MONO_CEE_SHR:
case MONO_CEE_SHR_UN:
ins->type = shift_table [src1->type] [src2->type];
ins->opcode += binops_op_map [ins->type];
break;
case OP_COMPARE:
case OP_LCOMPARE:
case OP_ICOMPARE:
ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
if ((src1->type == STACK_I8) || ((TARGET_SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
ins->opcode = OP_LCOMPARE;
else if (src1->type == STACK_R4)
ins->opcode = OP_RCOMPARE;
else if (src1->type == STACK_R8)
ins->opcode = OP_FCOMPARE;
else
ins->opcode = OP_ICOMPARE;
break;
case OP_ICOMPARE_IMM:
ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
if ((src1->type == STACK_I8) || ((TARGET_SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
ins->opcode = OP_LCOMPARE_IMM;
break;
case MONO_CEE_BEQ:
case MONO_CEE_BGE:
case MONO_CEE_BGT:
case MONO_CEE_BLE:
case MONO_CEE_BLT:
case MONO_CEE_BNE_UN:
case MONO_CEE_BGE_UN:
case MONO_CEE_BGT_UN:
case MONO_CEE_BLE_UN:
case MONO_CEE_BLT_UN:
ins->opcode += beqops_op_map [src1->type];
break;
case OP_CEQ:
ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
ins->opcode += ceqops_op_map [src1->type];
break;
case OP_CGT:
case OP_CGT_UN:
case OP_CLT:
case OP_CLT_UN:
ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
ins->opcode += ceqops_op_map [src1->type];
break;
/* unops */
case MONO_CEE_NEG:
ins->type = neg_table [src1->type];
ins->opcode += unops_op_map [ins->type];
break;
case MONO_CEE_NOT:
if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
ins->type = src1->type;
else
ins->type = STACK_INV;
ins->opcode += unops_op_map [ins->type];
break;
case MONO_CEE_CONV_I1:
case MONO_CEE_CONV_I2:
case MONO_CEE_CONV_I4:
case MONO_CEE_CONV_U4:
ins->type = STACK_I4;
ins->opcode += unops_op_map [src1->type];
break;
case MONO_CEE_CONV_R_UN:
ins->type = STACK_R8;
switch (src1->type) {
case STACK_I4:
case STACK_PTR:
ins->opcode = OP_ICONV_TO_R_UN;
break;
case STACK_I8:
ins->opcode = OP_LCONV_TO_R_UN;
break;
case STACK_R4:
ins->opcode = OP_RCONV_TO_R8;
break;
case STACK_R8:
ins->opcode = OP_FMOVE;
break;
}
break;
case MONO_CEE_CONV_OVF_I1:
case MONO_CEE_CONV_OVF_U1:
case MONO_CEE_CONV_OVF_I2:
case MONO_CEE_CONV_OVF_U2:
case MONO_CEE_CONV_OVF_I4:
case MONO_CEE_CONV_OVF_U4:
ins->type = STACK_I4;
ins->opcode += ovf3ops_op_map [src1->type];
break;
case MONO_CEE_CONV_OVF_I_UN:
case MONO_CEE_CONV_OVF_U_UN:
ins->type = STACK_PTR;
ins->opcode += ovf2ops_op_map [src1->type];
break;
case MONO_CEE_CONV_OVF_I1_UN:
case MONO_CEE_CONV_OVF_I2_UN:
case MONO_CEE_CONV_OVF_I4_UN:
case MONO_CEE_CONV_OVF_U1_UN:
case MONO_CEE_CONV_OVF_U2_UN:
case MONO_CEE_CONV_OVF_U4_UN:
ins->type = STACK_I4;
ins->opcode += ovf2ops_op_map [src1->type];
break;
case MONO_CEE_CONV_U:
ins->type = STACK_PTR;
switch (src1->type) {
case STACK_I4:
ins->opcode = OP_ICONV_TO_U;
break;
case STACK_PTR:
case STACK_MP:
case STACK_OBJ:
#if TARGET_SIZEOF_VOID_P == 8
ins->opcode = OP_LCONV_TO_U;
#else
ins->opcode = OP_MOVE;
#endif
break;
case STACK_I8:
ins->opcode = OP_LCONV_TO_U;
break;
case STACK_R8:
if (TARGET_SIZEOF_VOID_P == 8)
ins->opcode = OP_FCONV_TO_U8;
else
ins->opcode = OP_FCONV_TO_U4;
break;
case STACK_R4:
if (TARGET_SIZEOF_VOID_P == 8)
ins->opcode = OP_RCONV_TO_U8;
else
ins->opcode = OP_RCONV_TO_U4;
break;
}
break;
case MONO_CEE_CONV_I8:
case MONO_CEE_CONV_U8:
ins->type = STACK_I8;
ins->opcode += unops_op_map [src1->type];
break;
case MONO_CEE_CONV_OVF_I8:
case MONO_CEE_CONV_OVF_U8:
ins->type = STACK_I8;
ins->opcode += ovf3ops_op_map [src1->type];
break;
case MONO_CEE_CONV_OVF_U8_UN:
case MONO_CEE_CONV_OVF_I8_UN:
ins->type = STACK_I8;
ins->opcode += ovf2ops_op_map [src1->type];
break;
case MONO_CEE_CONV_R4:
ins->type = cfg->r4_stack_type;
ins->opcode += unops_op_map [src1->type];
break;
case MONO_CEE_CONV_R8:
ins->type = STACK_R8;
ins->opcode += unops_op_map [src1->type];
break;
case OP_CKFINITE:
ins->type = STACK_R8;
break;
case MONO_CEE_CONV_U2:
case MONO_CEE_CONV_U1:
ins->type = STACK_I4;
ins->opcode += ovfops_op_map [src1->type];
break;
case MONO_CEE_CONV_I:
case MONO_CEE_CONV_OVF_I:
case MONO_CEE_CONV_OVF_U:
ins->type = STACK_PTR;
ins->opcode += ovfops_op_map [src1->type];
break;
case MONO_CEE_ADD_OVF:
case MONO_CEE_ADD_OVF_UN:
case MONO_CEE_MUL_OVF:
case MONO_CEE_MUL_OVF_UN:
case MONO_CEE_SUB_OVF:
case MONO_CEE_SUB_OVF_UN:
ins->type = bin_num_table [src1->type] [src2->type];
ins->opcode += ovfops_op_map [src1->type];
if (ins->type == STACK_R8)
ins->type = STACK_INV;
break;
case OP_LOAD_MEMBASE:
ins->type = STACK_PTR;
break;
case OP_LOADI1_MEMBASE:
case OP_LOADU1_MEMBASE:
case OP_LOADI2_MEMBASE:
case OP_LOADU2_MEMBASE:
case OP_LOADI4_MEMBASE:
case OP_LOADU4_MEMBASE:
ins->type = STACK_PTR;
break;
case OP_LOADI8_MEMBASE:
ins->type = STACK_I8;
break;
case OP_LOADR4_MEMBASE:
ins->type = cfg->r4_stack_type;
break;
case OP_LOADR8_MEMBASE:
ins->type = STACK_R8;
break;
default:
g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
break;
}
if (ins->type == STACK_MP) {
if (src1->type == STACK_MP)
ins->klass = src1->klass;
else
ins->klass = mono_defaults.object_class;
}
}
void
mini_type_from_op (MonoCompile *cfg, MonoInst *ins, MonoInst *src1, MonoInst *src2)
{
type_from_op (cfg, ins, src1, src2);
}
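/* Map an LDIND opcode to the class of the value it loads */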
static MonoClass*
ldind_to_type (int op)
{
switch (op) {
case MONO_CEE_LDIND_I1: return mono_defaults.sbyte_class;
case MONO_CEE_LDIND_U1: return mono_defaults.byte_class;
case MONO_CEE_LDIND_I2: return mono_defaults.int16_class;
case MONO_CEE_LDIND_U2: return mono_defaults.uint16_class;
case MONO_CEE_LDIND_I4: return mono_defaults.int32_class;
case MONO_CEE_LDIND_U4: return mono_defaults.uint32_class;
case MONO_CEE_LDIND_I8: return mono_defaults.int64_class;
case MONO_CEE_LDIND_I: return mono_defaults.int_class;
case MONO_CEE_LDIND_R4: return mono_defaults.single_class;
case MONO_CEE_LDIND_R8: return mono_defaults.double_class;
case MONO_CEE_LDIND_REF:return mono_defaults.object_class; //FIXME we should try to return a more specific type
default: g_error ("Unknown ldind type %d", op);
}
}
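/* Map a STIND opcode to the class of the value it stores */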
static MonoClass*
stind_to_type (int op)
{
switch (op) {
case MONO_CEE_STIND_I1: return mono_defaults.sbyte_class;
case MONO_CEE_STIND_I2: return mono_defaults.int16_class;
case MONO_CEE_STIND_I4: return mono_defaults.int32_class;
case MONO_CEE_STIND_I8: return mono_defaults.int64_class;
case MONO_CEE_STIND_I: return mono_defaults.int_class;
case MONO_CEE_STIND_R4: return mono_defaults.single_class;
case MONO_CEE_STIND_R8: return mono_defaults.double_class;
case MONO_CEE_STIND_REF: return mono_defaults.object_class;
default: g_error ("Unknown stind type %d", op);
}
}
#if 0
static const char
param_table [STACK_MAX] [STACK_MAX] = {
{0},
};
static int
check_values_to_signature (MonoInst *args, MonoType *this_ins, MonoMethodSignature *sig)
{
int i;
if (sig->hasthis) {
switch (args->type) {
case STACK_I4:
case STACK_I8:
case STACK_R8:
case STACK_VTYPE:
case STACK_INV:
return 0;
}
args++;
}
for (i = 0; i < sig->param_count; ++i) {
switch (args [i].type) {
case STACK_INV:
return 0;
case STACK_MP:
			if (!m_type_is_byref (sig->params [i]))
return 0;
continue;
case STACK_OBJ:
if (m_type_is_byref (sig->params [i]))
return 0;
			switch (sig->params [i]->type) {
case MONO_TYPE_CLASS:
case MONO_TYPE_STRING:
case MONO_TYPE_OBJECT:
case MONO_TYPE_SZARRAY:
case MONO_TYPE_ARRAY:
break;
default:
return 0;
}
continue;
case STACK_R8:
if (m_type_is_byref (sig->params [i]))
return 0;
if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
return 0;
continue;
case STACK_PTR:
case STACK_I4:
case STACK_I8:
case STACK_VTYPE:
break;
}
/*if (!param_table [args [i].type] [sig->params [i]->type])
return 0;*/
}
return 1;
}
#endif
/*
* The got_var contains the address of the Global Offset Table when AOT
* compiling.
*/
MonoInst *
mono_get_got_var (MonoCompile *cfg)
{
if (!cfg->compile_aot || !cfg->backend->need_got_var || cfg->llvm_only)
return NULL;
if (!cfg->got_var) {
cfg->got_var = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL);
}
return cfg->got_var;
}
static void
mono_create_rgctx_var (MonoCompile *cfg)
{
if (!cfg->rgctx_var) {
cfg->rgctx_var = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL);
/* force the var to be stack allocated */
if (!cfg->llvm_only)
cfg->rgctx_var->flags |= MONO_INST_VOLATILE;
}
}
static MonoInst *
mono_get_mrgctx_var (MonoCompile *cfg)
{
g_assert (cfg->gshared);
mono_create_rgctx_var (cfg);
return cfg->rgctx_var;
}
static MonoInst *
mono_get_vtable_var (MonoCompile *cfg)
{
g_assert (cfg->gshared);
/* The mrgctx and the vtable are stored in the same var */
mono_create_rgctx_var (cfg);
return cfg->rgctx_var;
}
static MonoType*
type_from_stack_type (MonoInst *ins) {
switch (ins->type) {
case STACK_I4: return mono_get_int32_type ();
case STACK_I8: return m_class_get_byval_arg (mono_defaults.int64_class);
case STACK_PTR: return mono_get_int_type ();
case STACK_R4: return m_class_get_byval_arg (mono_defaults.single_class);
case STACK_R8: return m_class_get_byval_arg (mono_defaults.double_class);
case STACK_MP:
return m_class_get_this_arg (ins->klass);
case STACK_OBJ: return mono_get_object_type ();
case STACK_VTYPE: return m_class_get_byval_arg (ins->klass);
default:
g_error ("stack type %d to monotype not handled\n", ins->type);
}
return NULL;
}
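/*
 * mini_type_to_stack_type:
 *
 * Return the eval stack type corresponding to the MonoType T.
 */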
MonoStackType
mini_type_to_stack_type (MonoCompile *cfg, MonoType *t)
{
t = mini_type_get_underlying_type (t);
switch (t->type) {
case MONO_TYPE_I1:
case MONO_TYPE_U1:
case MONO_TYPE_I2:
case MONO_TYPE_U2:
case MONO_TYPE_I4:
case MONO_TYPE_U4:
return STACK_I4;
case MONO_TYPE_I:
case MONO_TYPE_U:
case MONO_TYPE_PTR:
case MONO_TYPE_FNPTR:
return STACK_PTR;
case MONO_TYPE_CLASS:
case MONO_TYPE_STRING:
case MONO_TYPE_OBJECT:
case MONO_TYPE_SZARRAY:
case MONO_TYPE_ARRAY:
return STACK_OBJ;
case MONO_TYPE_I8:
case MONO_TYPE_U8:
return STACK_I8;
case MONO_TYPE_R4:
return (MonoStackType)cfg->r4_stack_type;
case MONO_TYPE_R8:
return STACK_R8;
case MONO_TYPE_VALUETYPE:
case MONO_TYPE_TYPEDBYREF:
return STACK_VTYPE;
case MONO_TYPE_GENERICINST:
if (mono_type_generic_inst_is_valuetype (t))
return STACK_VTYPE;
else
return STACK_OBJ;
break;
default:
g_assert_not_reached ();
}
return (MonoStackType)-1;
}
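/* Map an LDELEM/STELEM opcode to the element class it accesses */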
static MonoClass*
array_access_to_klass (int opcode)
{
switch (opcode) {
case MONO_CEE_LDELEM_U1:
return mono_defaults.byte_class;
case MONO_CEE_LDELEM_U2:
return mono_defaults.uint16_class;
case MONO_CEE_LDELEM_I:
case MONO_CEE_STELEM_I:
return mono_defaults.int_class;
case MONO_CEE_LDELEM_I1:
case MONO_CEE_STELEM_I1:
return mono_defaults.sbyte_class;
case MONO_CEE_LDELEM_I2:
case MONO_CEE_STELEM_I2:
return mono_defaults.int16_class;
case MONO_CEE_LDELEM_I4:
case MONO_CEE_STELEM_I4:
return mono_defaults.int32_class;
case MONO_CEE_LDELEM_U4:
return mono_defaults.uint32_class;
case MONO_CEE_LDELEM_I8:
case MONO_CEE_STELEM_I8:
return mono_defaults.int64_class;
case MONO_CEE_LDELEM_R4:
case MONO_CEE_STELEM_R4:
return mono_defaults.single_class;
case MONO_CEE_LDELEM_R8:
case MONO_CEE_STELEM_R8:
return mono_defaults.double_class;
case MONO_CEE_LDELEM_REF:
case MONO_CEE_STELEM_REF:
return mono_defaults.object_class;
default:
g_assert_not_reached ();
}
return NULL;
}
/*
* We try to share variables when possible
*/
static MonoInst *
mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
{
MonoInst *res;
int pos, vnum;
MonoType *type;
type = type_from_stack_type (ins);
/* inlining can result in deeper stacks */
if (cfg->inline_depth || slot >= cfg->header->max_stack)
return mono_compile_create_var (cfg, type, OP_LOCAL);
pos = ins->type - 1 + slot * STACK_MAX;
switch (ins->type) {
case STACK_I4:
case STACK_I8:
case STACK_R8:
case STACK_PTR:
case STACK_MP:
case STACK_OBJ:
if ((vnum = cfg->intvars [pos]))
return cfg->varinfo [vnum];
res = mono_compile_create_var (cfg, type, OP_LOCAL);
cfg->intvars [pos] = res->inst_c0;
break;
default:
res = mono_compile_create_var (cfg, type, OP_LOCAL);
}
return res;
}
static void
mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
{
/*
* Don't use this if a generic_context is set, since that means AOT can't
* look up the method using just the image+token.
* table == 0 means this is a reference made from a wrapper.
*/
if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
MonoJumpInfoToken *jump_info_token = (MonoJumpInfoToken *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
jump_info_token->image = image;
jump_info_token->token = token;
g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
}
}
/*
* This function is called to handle items that are left on the evaluation stack
* at basic block boundaries. What happens is that we save the values to local variables
* and we reload them later when first entering the target basic block (with the
* handle_loaded_temps () function).
 * A single join point will use the same variables (stored in the array bb->out_stack or
 * bb->in_stack, depending on whether the basic block is before or after the join point).
*
* This function needs to be called _before_ emitting the last instruction of
* the bb (i.e. before emitting a branch).
* If the stack merge fails at a join point, cfg->unverifiable is set.
*/
static void
handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
{
int i, bindex;
MonoBasicBlock *bb = cfg->cbb;
MonoBasicBlock *outb;
MonoInst *inst, **locals;
gboolean found;
if (!count)
return;
if (cfg->verbose_level > 3)
printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
if (!bb->out_scount) {
bb->out_scount = count;
//printf ("bblock %d has out:", bb->block_num);
found = FALSE;
for (i = 0; i < bb->out_count; ++i) {
outb = bb->out_bb [i];
/* exception handlers are linked, but they should not be considered for stack args */
if (outb->flags & BB_EXCEPTION_HANDLER)
continue;
//printf (" %d", outb->block_num);
if (outb->in_stack) {
found = TRUE;
bb->out_stack = outb->in_stack;
break;
}
}
//printf ("\n");
if (!found) {
bb->out_stack = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
for (i = 0; i < count; ++i) {
/*
				 * try to reuse temps already allocated for this purpose, if they occupy the same
* stack slot and if they are of the same type.
* This won't cause conflicts since if 'local' is used to
* store one of the values in the in_stack of a bblock, then
* the same variable will be used for the same outgoing stack
* slot as well.
* This doesn't work when inlining methods, since the bblocks
* in the inlined methods do not inherit their in_stack from
* the bblock they are inlined to. See bug #58863 for an
* example.
*/
bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
}
}
}
for (i = 0; i < bb->out_count; ++i) {
outb = bb->out_bb [i];
/* exception handlers are linked, but they should not be considered for stack args */
if (outb->flags & BB_EXCEPTION_HANDLER)
continue;
if (outb->in_scount) {
if (outb->in_scount != bb->out_scount) {
cfg->unverifiable = TRUE;
return;
}
continue; /* check they are the same locals */
}
outb->in_scount = count;
outb->in_stack = bb->out_stack;
}
locals = bb->out_stack;
cfg->cbb = bb;
for (i = 0; i < count; ++i) {
sp [i] = convert_value (cfg, locals [i]->inst_vtype, sp [i]);
EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
inst->cil_code = sp [i]->cil_code;
sp [i] = locals [i];
if (cfg->verbose_level > 3)
printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
}
/*
* It is possible that the out bblocks already have in_stack assigned, and
* the in_stacks differ. In this case, we will store to all the different
* in_stacks.
*/
found = TRUE;
bindex = 0;
while (found) {
/* Find a bblock which has a different in_stack */
found = FALSE;
while (bindex < bb->out_count) {
outb = bb->out_bb [bindex];
/* exception handlers are linked, but they should not be considered for stack args */
if (outb->flags & BB_EXCEPTION_HANDLER) {
bindex++;
continue;
}
if (outb->in_stack != locals) {
for (i = 0; i < count; ++i) {
sp [i] = convert_value (cfg, outb->in_stack [i]->inst_vtype, sp [i]);
EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
inst->cil_code = sp [i]->cil_code;
sp [i] = locals [i];
if (cfg->verbose_level > 3)
printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
}
locals = outb->in_stack;
found = TRUE;
break;
}
bindex ++;
}
}
}
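/*
 * mini_emit_runtime_constant:
 *
 * Emit IR to load the runtime constant described by PATCH_TYPE/DATA: an AOT
 * constant when compiling AOT, otherwise the patch is resolved at compile
 * time and loaded as a PCONST.
 */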
MonoInst*
mini_emit_runtime_constant (MonoCompile *cfg, MonoJumpInfoType patch_type, gpointer data)
{
MonoInst *ins;
if (cfg->compile_aot) {
MONO_DISABLE_WARNING (4306) // 'type cast': conversion from 'MonoJumpInfoType' to 'MonoInst *' of greater size
EMIT_NEW_AOTCONST (cfg, ins, patch_type, data);
MONO_RESTORE_WARNING
} else {
MonoJumpInfo ji;
gpointer target;
ERROR_DECL (error);
ji.type = patch_type;
ji.data.target = data;
target = mono_resolve_patch_target_ext (cfg->mem_manager, NULL, NULL, &ji, FALSE, error);
mono_error_assert_ok (error);
EMIT_NEW_PCONST (cfg, ins, target);
}
return ins;
}
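/*
 * mono_create_fast_tls_getter:
 *
 * Create an OP_TLS_GET instruction for KEY, or return NULL if fast TLS is
 * not available.
 */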
static MonoInst*
mono_create_fast_tls_getter (MonoCompile *cfg, MonoTlsKey key)
{
int tls_offset = mono_tls_get_tls_offset (key);
if (cfg->compile_aot)
return NULL;
if (tls_offset != -1 && mono_arch_have_fast_tls ()) {
MonoInst *ins;
MONO_INST_NEW (cfg, ins, OP_TLS_GET);
ins->dreg = mono_alloc_preg (cfg);
ins->inst_offset = tls_offset;
return ins;
}
return NULL;
}
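/*
 * mono_create_tls_get:
 *
 * Emit IR to load the TLS value for KEY, using fast TLS when available and
 * falling back to a jit icall otherwise.
 */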
static MonoInst*
mono_create_tls_get (MonoCompile *cfg, MonoTlsKey key)
{
MonoInst *fast_tls = NULL;
if (!mini_debug_options.use_fallback_tls)
fast_tls = mono_create_fast_tls_getter (cfg, key);
if (fast_tls) {
MONO_ADD_INS (cfg->cbb, fast_tls);
return fast_tls;
}
const MonoJitICallId jit_icall_id = mono_get_tls_key_to_jit_icall_id (key);
if (cfg->compile_aot && !cfg->llvm_only) {
MonoInst *addr;
/*
* tls getters are critical pieces of code and we don't want to resolve them
* through the standard plt/tramp mechanism since we might expose ourselves
* to crashes and infinite recursions.
		 * Hence the NOCALL part of MONO_PATCH_INFO_JIT_ICALL_ADDR_NOCALL, and FALSE for is_plt_patch.
*/
EMIT_NEW_AOTCONST (cfg, addr, MONO_PATCH_INFO_JIT_ICALL_ADDR_NOCALL, GUINT_TO_POINTER (jit_icall_id));
return mini_emit_calli (cfg, mono_icall_sig_ptr, NULL, addr, NULL, NULL);
} else {
return mono_emit_jit_icall_id (cfg, jit_icall_id, NULL);
}
}
/*
* emit_push_lmf:
*
* Emit IR to push the current LMF onto the LMF stack.
*/
static void
emit_push_lmf (MonoCompile *cfg)
{
/*
* Emit IR to push the LMF:
* lmf_addr = <lmf_addr from tls>
* lmf->lmf_addr = lmf_addr
* lmf->prev_lmf = *lmf_addr
* *lmf_addr = lmf
*/
MonoInst *ins, *lmf_ins;
if (!cfg->lmf_ir)
return;
int lmf_reg, prev_lmf_reg;
/*
* Store lmf_addr in a variable, so it can be allocated to a global register.
*/
if (!cfg->lmf_addr_var)
cfg->lmf_addr_var = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL);
if (!cfg->lmf_var) {
MonoInst *lmf_var = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL);
lmf_var->flags |= MONO_INST_VOLATILE;
lmf_var->flags |= MONO_INST_LMF;
cfg->lmf_var = lmf_var;
}
lmf_ins = mono_create_tls_get (cfg, TLS_KEY_LMF_ADDR);
g_assert (lmf_ins);
lmf_ins->dreg = cfg->lmf_addr_var->dreg;
EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
lmf_reg = ins->dreg;
prev_lmf_reg = alloc_preg (cfg);
/* Save previous_lmf */
EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, cfg->lmf_addr_var->dreg, 0);
if (cfg->deopt)
/* Mark this as an LMFExt */
EMIT_NEW_BIALU_IMM (cfg, ins, OP_POR_IMM, prev_lmf_reg, prev_lmf_reg, 2);
EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), prev_lmf_reg);
/* Set new lmf */
EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, cfg->lmf_addr_var->dreg, 0, lmf_reg);
}
/*
* emit_pop_lmf:
*
* Emit IR to pop the current LMF from the LMF stack.
*/
static void
emit_pop_lmf (MonoCompile *cfg)
{
int lmf_reg, lmf_addr_reg;
MonoInst *ins;
if (!cfg->lmf_ir)
return;
EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
lmf_reg = ins->dreg;
int prev_lmf_reg;
/*
* Emit IR to pop the LMF:
* *(lmf->lmf_addr) = lmf->prev_lmf
*/
/* This could be called before emit_push_lmf () */
if (!cfg->lmf_addr_var)
cfg->lmf_addr_var = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL);
lmf_addr_reg = cfg->lmf_addr_var->dreg;
prev_lmf_reg = alloc_preg (cfg);
EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
if (cfg->deopt)
/* Clear out the bit set by push_lmf () to mark this as LMFExt */
EMIT_NEW_BIALU_IMM (cfg, ins, OP_PXOR_IMM, prev_lmf_reg, prev_lmf_reg, 2);
EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_addr_reg, 0, prev_lmf_reg);
}
/*
* target_type_is_incompatible:
* @cfg: MonoCompile context
*
* Check that the item @arg on the evaluation stack can be stored
* in the target type (can be a local, or field, etc).
* The cfg arg can be used to check if we need verification or just
* validity checks.
*
* Returns: non-0 value if arg can't be stored on a target.
*/
static int
target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
{
MonoType *simple_type;
MonoClass *klass;
if (m_type_is_byref (target)) {
/* FIXME: check that the pointed to types match */
if (arg->type == STACK_MP) {
/* This is needed to handle gshared types + ldaddr. We lower the types so we can handle enums and other typedef-like types. */
MonoClass *target_class_lowered = mono_class_from_mono_type_internal (mini_get_underlying_type (m_class_get_byval_arg (mono_class_from_mono_type_internal (target))));
MonoClass *source_class_lowered = mono_class_from_mono_type_internal (mini_get_underlying_type (m_class_get_byval_arg (arg->klass)));
/* if the target is native int& or X* or same type */
if (target->type == MONO_TYPE_I || target->type == MONO_TYPE_PTR || target_class_lowered == source_class_lowered)
return 0;
		/* Both are primitive type byrefs and the source points to a larger type than the destination */
if (MONO_TYPE_IS_PRIMITIVE_SCALAR (m_class_get_byval_arg (target_class_lowered)) && MONO_TYPE_IS_PRIMITIVE_SCALAR (m_class_get_byval_arg (source_class_lowered)) &&
mono_class_instance_size (target_class_lowered) <= mono_class_instance_size (source_class_lowered))
return 0;
return 1;
}
if (arg->type == STACK_PTR)
return 0;
return 1;
}
simple_type = mini_get_underlying_type (target);
switch (simple_type->type) {
case MONO_TYPE_VOID:
return 1;
case MONO_TYPE_I1:
case MONO_TYPE_U1:
case MONO_TYPE_I2:
case MONO_TYPE_U2:
case MONO_TYPE_I4:
case MONO_TYPE_U4:
if (arg->type != STACK_I4 && arg->type != STACK_PTR)
return 1;
return 0;
case MONO_TYPE_PTR:
/* STACK_MP is needed when setting pinned locals */
if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
#if SIZEOF_VOID_P == 8
if (arg->type != STACK_I8)
#endif
return 1;
return 0;
case MONO_TYPE_I:
case MONO_TYPE_U:
case MONO_TYPE_FNPTR:
/*
		 * Some opcodes like ldloca return 'transient pointers' which can be stored
		 * in native int. (#688008).
*/
if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
return 1;
return 0;
case MONO_TYPE_CLASS:
case MONO_TYPE_STRING:
case MONO_TYPE_OBJECT:
case MONO_TYPE_SZARRAY:
case MONO_TYPE_ARRAY:
if (arg->type != STACK_OBJ)
return 1;
/* FIXME: check type compatibility */
return 0;
case MONO_TYPE_I8:
case MONO_TYPE_U8:
if (arg->type != STACK_I8)
#if SIZEOF_VOID_P == 8
if (arg->type != STACK_PTR)
#endif
return 1;
return 0;
case MONO_TYPE_R4:
if (arg->type != cfg->r4_stack_type)
return 1;
return 0;
case MONO_TYPE_R8:
if (arg->type != STACK_R8)
return 1;
return 0;
case MONO_TYPE_VALUETYPE:
if (arg->type != STACK_VTYPE)
return 1;
klass = mono_class_from_mono_type_internal (simple_type);
if (klass != arg->klass)
return 1;
return 0;
case MONO_TYPE_TYPEDBYREF:
if (arg->type != STACK_VTYPE)
return 1;
klass = mono_class_from_mono_type_internal (simple_type);
if (klass != arg->klass)
return 1;
return 0;
case MONO_TYPE_GENERICINST:
if (mono_type_generic_inst_is_valuetype (simple_type)) {
MonoClass *target_class;
if (arg->type != STACK_VTYPE)
return 1;
klass = mono_class_from_mono_type_internal (simple_type);
target_class = mono_class_from_mono_type_internal (target);
			/* The second case is needed when doing partial sharing */
if (klass != arg->klass && target_class != arg->klass && target_class != mono_class_from_mono_type_internal (mini_get_underlying_type (m_class_get_byval_arg (arg->klass))))
return 1;
return 0;
} else {
if (arg->type != STACK_OBJ)
return 1;
/* FIXME: check type compatibility */
return 0;
}
case MONO_TYPE_VAR:
case MONO_TYPE_MVAR:
g_assert (cfg->gshared);
if (mini_type_var_is_vt (simple_type)) {
if (arg->type != STACK_VTYPE)
return 1;
} else {
if (arg->type != STACK_OBJ)
return 1;
}
return 0;
default:
g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
}
return 1;
}
/*
* convert_value:
*
* Emit some implicit conversions which are not part of the .net spec, but are allowed by MS.NET.
*/
static MonoInst*
convert_value (MonoCompile *cfg, MonoType *type, MonoInst *ins)
{
if (!cfg->r4fp)
return ins;
type = mini_get_underlying_type (type);
switch (type->type) {
case MONO_TYPE_R4:
if (ins->type == STACK_R8) {
int dreg = alloc_freg (cfg);
MonoInst *conv;
EMIT_NEW_UNALU (cfg, conv, OP_FCONV_TO_R4, dreg, ins->dreg);
conv->type = STACK_R4;
return conv;
}
break;
case MONO_TYPE_R8:
if (ins->type == STACK_R4) {
int dreg = alloc_freg (cfg);
MonoInst *conv;
EMIT_NEW_UNALU (cfg, conv, OP_RCONV_TO_R8, dreg, ins->dreg);
conv->type = STACK_R8;
return conv;
}
break;
default:
break;
}
return ins;
}
/*
* Prepare arguments for passing to a function call.
* Return a non-zero value if the arguments can't be passed to the given
* signature.
* The type checks are not yet complete and some conversions may need
* casts on 32 or 64 bit architectures.
*
* FIXME: implement this using target_type_is_incompatible ()
*/
static gboolean
check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
{
MonoType *simple_type;
int i;
if (sig->hasthis) {
if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
return TRUE;
args++;
}
for (i = 0; i < sig->param_count; ++i) {
if (m_type_is_byref (sig->params [i])) {
if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
return TRUE;
continue;
}
simple_type = mini_get_underlying_type (sig->params [i]);
handle_enum:
switch (simple_type->type) {
case MONO_TYPE_VOID:
return TRUE;
case MONO_TYPE_I1:
case MONO_TYPE_U1:
case MONO_TYPE_I2:
case MONO_TYPE_U2:
case MONO_TYPE_I4:
case MONO_TYPE_U4:
if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
return TRUE;
continue;
case MONO_TYPE_I:
case MONO_TYPE_U:
if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
return TRUE;
continue;
case MONO_TYPE_PTR:
case MONO_TYPE_FNPTR:
if (args [i]->type != STACK_I4 && !(SIZEOF_VOID_P == 8 && args [i]->type == STACK_I8) &&
args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
return TRUE;
continue;
case MONO_TYPE_CLASS:
case MONO_TYPE_STRING:
case MONO_TYPE_OBJECT:
case MONO_TYPE_SZARRAY:
case MONO_TYPE_ARRAY:
if (args [i]->type != STACK_OBJ)
return TRUE;
continue;
case MONO_TYPE_I8:
case MONO_TYPE_U8:
if (args [i]->type != STACK_I8 &&
!(SIZEOF_VOID_P == 8 && (args [i]->type == STACK_I4 || args [i]->type == STACK_PTR)))
return TRUE;
continue;
case MONO_TYPE_R4:
if (args [i]->type != cfg->r4_stack_type)
return TRUE;
continue;
case MONO_TYPE_R8:
if (args [i]->type != STACK_R8)
return TRUE;
continue;
case MONO_TYPE_VALUETYPE:
if (m_class_is_enumtype (simple_type->data.klass)) {
simple_type = mono_class_enum_basetype_internal (simple_type->data.klass);
goto handle_enum;
}
if (args [i]->type != STACK_VTYPE)
return TRUE;
continue;
case MONO_TYPE_TYPEDBYREF:
if (args [i]->type != STACK_VTYPE)
return TRUE;
continue;
case MONO_TYPE_GENERICINST:
simple_type = m_class_get_byval_arg (simple_type->data.generic_class->container_class);
goto handle_enum;
case MONO_TYPE_VAR:
case MONO_TYPE_MVAR:
/* gsharedvt */
if (args [i]->type != STACK_VTYPE)
return TRUE;
continue;
default:
g_error ("unknown type 0x%02x in check_call_signature",
simple_type->type);
}
}
return FALSE;
}
MonoJumpInfo *
mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
{
MonoJumpInfo *ji = (MonoJumpInfo *)mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
ji->ip.i = ip;
ji->type = type;
ji->data.target = target;
return ji;
}
int
mini_class_check_context_used (MonoCompile *cfg, MonoClass *klass)
{
if (cfg->gshared)
return mono_class_check_context_used (klass);
else
return 0;
}
int
mini_method_check_context_used (MonoCompile *cfg, MonoMethod *method)
{
if (cfg->gshared)
return mono_method_check_context_used (method);
else
return 0;
}
/*
* check_method_sharing:
*
 * Check whether the vtable or an mrgctx needs to be passed when calling CMETHOD.
*/
static void
check_method_sharing (MonoCompile *cfg, MonoMethod *cmethod, gboolean *out_pass_vtable, gboolean *out_pass_mrgctx)
{
gboolean pass_vtable = FALSE;
gboolean pass_mrgctx = FALSE;
if (((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || m_class_is_valuetype (cmethod->klass)) &&
(mono_class_is_ginst (cmethod->klass) || mono_class_is_gtd (cmethod->klass))) {
gboolean sharable = FALSE;
if (mono_method_is_generic_sharable_full (cmethod, TRUE, TRUE, TRUE))
sharable = TRUE;
/*
* Pass vtable iff target method might
* be shared, which means that sharing
* is enabled for its class and its
* context is sharable (and it's not a
* generic method).
*/
if (sharable && !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
pass_vtable = TRUE;
}
if (mini_method_needs_mrgctx (cmethod)) {
if (mini_method_is_default_method (cmethod))
pass_vtable = FALSE;
else
g_assert (!pass_vtable);
if (mono_method_is_generic_sharable_full (cmethod, TRUE, TRUE, TRUE)) {
pass_mrgctx = TRUE;
} else {
if (cfg->gsharedvt && mini_is_gsharedvt_signature (mono_method_signature_internal (cmethod)))
pass_mrgctx = TRUE;
}
}
if (out_pass_vtable)
*out_pass_vtable = pass_vtable;
if (out_pass_mrgctx)
*out_pass_mrgctx = pass_mrgctx;
}
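/*
 * direct_icalls_enabled:
 *
 * Return whether jit icalls can be called directly, bypassing their wrapper.
 */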
static gboolean
direct_icalls_enabled (MonoCompile *cfg, MonoMethod *method)
{
if (cfg->gen_sdb_seq_points || cfg->disable_direct_icalls)
return FALSE;
if (method && cfg->compile_aot && mono_aot_direct_icalls_enabled_for_method (cfg, method))
return TRUE;
/* LLVM on amd64 can't handle calls to non-32 bit addresses */
#ifdef TARGET_AMD64
if (cfg->compile_llvm && !cfg->llvm_only)
return FALSE;
#endif
	return TRUE;
}
MonoInst*
mono_emit_jit_icall_by_info (MonoCompile *cfg, int il_offset, MonoJitICallInfo *info, MonoInst **args)
{
/*
* Call the jit icall without a wrapper if possible.
* The wrapper is needed to be able to do stack walks for asynchronously suspended
* threads when debugging.
*/
if (direct_icalls_enabled (cfg, NULL)) {
int costs;
if (!info->wrapper_method) {
info->wrapper_method = mono_marshal_get_icall_wrapper (info, TRUE);
mono_memory_barrier ();
}
/*
* Inline the wrapper method, which is basically a call to the C icall, and
* an exception check.
*/
costs = inline_method (cfg, info->wrapper_method, NULL,
args, NULL, il_offset, TRUE, NULL);
g_assert (costs > 0);
g_assert (!MONO_TYPE_IS_VOID (info->sig->ret));
return args [0];
}
return mono_emit_jit_icall_id (cfg, mono_jit_icall_info_id (info), args);
}
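/*
 * mono_emit_widen_call_res:
 *
 * Widen the result of the call INS to a register sized integer when needed,
 * since native code might leave the upper bits uninitialized.
 */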
static MonoInst*
mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
{
if (!MONO_TYPE_IS_VOID (fsig->ret)) {
if ((fsig->pinvoke || LLVM_ENABLED) && !m_type_is_byref (fsig->ret)) {
int widen_op = -1;
/*
			 * Native code might return non-register-sized integers
* without initializing the upper bits.
*/
switch (mono_type_to_load_membase (cfg, fsig->ret)) {
case OP_LOADI1_MEMBASE:
widen_op = OP_ICONV_TO_I1;
break;
case OP_LOADU1_MEMBASE:
widen_op = OP_ICONV_TO_U1;
break;
case OP_LOADI2_MEMBASE:
widen_op = OP_ICONV_TO_I2;
break;
case OP_LOADU2_MEMBASE:
widen_op = OP_ICONV_TO_U2;
break;
default:
break;
}
if (widen_op != -1) {
int dreg = alloc_preg (cfg);
MonoInst *widen;
EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
widen->type = ins->type;
ins = widen;
}
}
}
return ins;
}
static MonoInst*
emit_get_rgctx_method (MonoCompile *cfg, int context_used,
MonoMethod *cmethod, MonoRgctxInfoType rgctx_type);
static void
emit_method_access_failure (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
{
MonoInst *args [2];
args [0] = emit_get_rgctx_method (cfg, mono_method_check_context_used (caller), caller, MONO_RGCTX_INFO_METHOD);
args [1] = emit_get_rgctx_method (cfg, mono_method_check_context_used (callee), callee, MONO_RGCTX_INFO_METHOD);
mono_emit_jit_icall (cfg, mono_throw_method_access, args);
}
static void
emit_bad_image_failure (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
{
mono_emit_jit_icall (cfg, mono_throw_bad_image, NULL);
}
static void
emit_not_supported_failure (MonoCompile *cfg)
{
mono_emit_jit_icall (cfg, mono_throw_not_supported, NULL);
}
static void
emit_invalid_program_with_msg (MonoCompile *cfg, MonoError *error_msg, MonoMethod *caller, MonoMethod *callee)
{
g_assert (!is_ok (error_msg));
char *str = mono_mem_manager_strdup (cfg->mem_manager, mono_error_get_message (error_msg));
MonoInst *iargs[1];
if (cfg->compile_aot)
EMIT_NEW_LDSTRLITCONST (cfg, iargs [0], str);
else
EMIT_NEW_PCONST (cfg, iargs [0], str);
mono_emit_jit_icall (cfg, mono_throw_invalid_program, iargs);
}
// FIXME Consolidate the multiple functions named get_method_nofail.
static MonoMethod*
get_method_nofail (MonoClass *klass, const char *method_name, int num_params, int flags)
{
MonoMethod *method;
ERROR_DECL (error);
method = mono_class_get_method_from_name_checked (klass, method_name, num_params, flags, error);
mono_error_assert_ok (error);
g_assertf (method, "Could not lookup method %s in %s", method_name, m_class_get_name (klass));
return method;
}
MonoMethod*
mini_get_memcpy_method (void)
{
static MonoMethod *memcpy_method = NULL;
if (!memcpy_method) {
memcpy_method = get_method_nofail (mono_defaults.string_class, "memcpy", 3, 0);
if (!memcpy_method)
g_error ("Old corlib found. Install a new one");
}
return memcpy_method;
}
MonoInst*
mini_emit_storing_write_barrier (MonoCompile *cfg, MonoInst *ptr, MonoInst *value)
{
MonoInst *store;
/*
* Add a release memory barrier so the object contents are flushed
* to memory before storing the reference into another object.
*/
if (!mini_debug_options.weak_memory_model)
mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
EMIT_NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, ptr->dreg, 0, value->dreg);
mini_emit_write_barrier (cfg, ptr, value);
return store;
}
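/*
 * mini_emit_write_barrier:
 *
 * Emit IR to register the store of VALUE into PTR with the GC, either
 * inline through the card table or by calling the write barrier method.
 */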
void
mini_emit_write_barrier (MonoCompile *cfg, MonoInst *ptr, MonoInst *value)
{
int card_table_shift_bits;
target_mgreg_t card_table_mask;
guint8 *card_table;
MonoInst *dummy_use;
int nursery_shift_bits;
size_t nursery_size;
if (!cfg->gen_write_barriers)
return;
//method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !MONO_INS_IS_PCONST_NULL (sp [1])
card_table = mono_gc_get_target_card_table (&card_table_shift_bits, &card_table_mask);
mono_gc_get_nursery (&nursery_shift_bits, &nursery_size);
if (cfg->backend->have_card_table_wb && !cfg->compile_aot && card_table && nursery_shift_bits > 0 && !COMPILE_LLVM (cfg)) {
MonoInst *wbarrier;
MONO_INST_NEW (cfg, wbarrier, OP_CARD_TABLE_WBARRIER);
wbarrier->sreg1 = ptr->dreg;
wbarrier->sreg2 = value->dreg;
MONO_ADD_INS (cfg->cbb, wbarrier);
} else if (card_table) {
int offset_reg = alloc_preg (cfg);
int card_reg;
MonoInst *ins;
/*
		 * We emit a fast lightweight write barrier. This always marks cards as in the concurrent
* collector case, so, for the serial collector, it might slightly slow down nursery
* collections. We also expect that the host system and the target system have the same card
* table configuration, which is the case if they have the same pointer size.
*/
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_UN_IMM, offset_reg, ptr->dreg, card_table_shift_bits);
if (card_table_mask)
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, offset_reg, offset_reg, card_table_mask);
		/* We can't use PADD_IMM since the card table might end up at a high address and amd64
		 * doesn't support IMMs larger than 32 bits.
		 */
ins = mini_emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR, NULL);
card_reg = ins->dreg;
MONO_EMIT_NEW_BIALU (cfg, OP_PADD, offset_reg, offset_reg, card_reg);
MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, offset_reg, 0, 1);
} else {
MonoMethod *write_barrier = mono_gc_get_write_barrier ();
mono_emit_method_call (cfg, write_barrier, &ptr, NULL);
}
EMIT_NEW_DUMMY_USE (cfg, dummy_use, value);
}
MonoMethod*
mini_get_memset_method (void)
{
static MonoMethod *memset_method = NULL;
if (!memset_method) {
memset_method = get_method_nofail (mono_defaults.string_class, "memset", 3, 0);
if (!memset_method)
g_error ("Old corlib found. Install a new one");
}
return memset_method;
}
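/*
 * mini_emit_initobj:
 *
 * Emit IR to zero-initialize the valuetype KLASS at DEST, either inline or
 * through memset/bzero for large or gsharedvt types.
 */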
void
mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
{
MonoInst *iargs [3];
int n;
guint32 align;
MonoMethod *memset_method;
MonoInst *size_ins = NULL;
MonoInst *bzero_ins = NULL;
static MonoMethod *bzero_method;
/* FIXME: Optimize this for the case when dest is an LDADDR */
mono_class_init_internal (klass);
if (mini_is_gsharedvt_klass (klass)) {
size_ins = mini_emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
bzero_ins = mini_emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_BZERO);
if (!bzero_method)
bzero_method = get_method_nofail (mono_defaults.string_class, "bzero_aligned_1", 2, 0);
g_assert (bzero_method);
iargs [0] = dest;
iargs [1] = size_ins;
mini_emit_calli (cfg, mono_method_signature_internal (bzero_method), iargs, bzero_ins, NULL, NULL);
return;
}
klass = mono_class_from_mono_type_internal (mini_get_underlying_type (m_class_get_byval_arg (klass)));
n = mono_class_value_size (klass, &align);
if (n <= TARGET_SIZEOF_VOID_P * 8) {
mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
}
else {
memset_method = mini_get_memset_method ();
iargs [0] = dest;
EMIT_NEW_ICONST (cfg, iargs [1], 0);
EMIT_NEW_ICONST (cfg, iargs [2], n);
mono_emit_method_call (cfg, memset_method, iargs, NULL);
}
}
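/* Return whether CONTEXT_USED requires an mrgctx rather than a vtable */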
static gboolean
context_used_is_mrgctx (MonoCompile *cfg, int context_used)
{
/* gshared dim methods use an mrgctx */
if (mini_method_is_default_method (cfg->method))
return context_used != 0;
return context_used & MONO_GENERIC_CONTEXT_USED_METHOD;
}
/*
* emit_get_rgctx:
*
* Emit IR to return either the vtable or the mrgctx.
*/
static MonoInst*
emit_get_rgctx (MonoCompile *cfg, int context_used)
{
MonoMethod *method = cfg->method;
g_assert (cfg->gshared);
/* Data whose context contains method type vars is stored in the mrgctx */
if (context_used_is_mrgctx (cfg, context_used)) {
MonoInst *mrgctx_loc, *mrgctx_var;
g_assert (cfg->rgctx_access == MONO_RGCTX_ACCESS_MRGCTX);
if (!mini_method_is_default_method (method))
g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
if (cfg->llvm_only) {
mrgctx_var = mono_get_mrgctx_var (cfg);
} else {
/* Volatile */
mrgctx_loc = mono_get_mrgctx_var (cfg);
g_assert (mrgctx_loc->flags & MONO_INST_VOLATILE);
EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
}
return mrgctx_var;
}
/*
* The rest of the entries are stored in vtable->runtime_generic_context so
* have to return a vtable.
*/
if (cfg->rgctx_access == MONO_RGCTX_ACCESS_MRGCTX) {
MonoInst *mrgctx_loc, *mrgctx_var, *vtable_var;
int vtable_reg;
/* We are passed an mrgctx, return mrgctx->class_vtable */
if (cfg->llvm_only) {
mrgctx_var = mono_get_mrgctx_var (cfg);
} else {
mrgctx_loc = mono_get_mrgctx_var (cfg);
g_assert (mrgctx_loc->flags & MONO_INST_VOLATILE);
EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
}
vtable_reg = alloc_preg (cfg);
EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, MONO_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
vtable_var->type = STACK_PTR;
return vtable_var;
} else if (cfg->rgctx_access == MONO_RGCTX_ACCESS_VTABLE) {
MonoInst *vtable_loc, *vtable_var;
/* We are passed a vtable, return it */
if (cfg->llvm_only) {
vtable_var = mono_get_vtable_var (cfg);
} else {
vtable_loc = mono_get_vtable_var (cfg);
g_assert (vtable_loc->flags & MONO_INST_VOLATILE);
EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
}
vtable_var->type = STACK_PTR;
return vtable_var;
} else {
MonoInst *ins, *this_ins;
int vtable_reg;
/* We are passed a this pointer, return this->vtable */
EMIT_NEW_VARLOAD (cfg, this_ins, cfg->this_arg, mono_get_object_type ());
vtable_reg = alloc_preg (cfg);
EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this_ins->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
return ins;
}
}
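/*
 * mono_patch_info_rgctx_entry_new:
 *
 * Allocate an rgctx entry patch which resolves PATCH_TYPE/PATCH_DATA to
 * INFO_TYPE data, looked up through either the method mrgctx or the class
 * vtable.
 */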
static MonoJumpInfoRgctxEntry *
mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, MonoRgctxInfoType info_type)
{
MonoJumpInfoRgctxEntry *res = (MonoJumpInfoRgctxEntry *)mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
if (in_mrgctx)
res->d.method = method;
else
res->d.klass = method->klass;
res->in_mrgctx = in_mrgctx;
res->data = (MonoJumpInfo *)mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
res->data->type = patch_type;
res->data->data.target = patch_data;
res->info_type = info_type;
return res;
}
static MonoInst*
emit_get_gsharedvt_info (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type);
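/*
 * emit_rgctx_fetch_inline:
 *
 * Emit an inline fast path which looks ENTRY up in the rgctx table, with an
 * out-of-line jit icall to fill the slot on a miss.
 */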
static MonoInst*
emit_rgctx_fetch_inline (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
{
MonoInst *call;
MonoInst *slot_ins;
EMIT_NEW_AOTCONST (cfg, slot_ins, MONO_PATCH_INFO_RGCTX_SLOT_INDEX, entry);
// Can't add basic blocks during interp entry mode
if (cfg->disable_inline_rgctx_fetch || cfg->interp_entry_only) {
MonoInst *args [2] = { rgctx, slot_ins };
if (entry->in_mrgctx)
call = mono_emit_jit_icall (cfg, mono_fill_method_rgctx, args);
else
call = mono_emit_jit_icall (cfg, mono_fill_class_rgctx, args);
return call;
}
MonoBasicBlock *slowpath_bb, *end_bb;
MonoInst *ins, *res;
int rgctx_reg, res_reg;
/*
* rgctx = vtable->runtime_generic_context;
* if (rgctx) {
* val = rgctx [slot + 1];
* if (val)
* return val;
* }
* <slowpath>
*/
NEW_BBLOCK (cfg, end_bb);
NEW_BBLOCK (cfg, slowpath_bb);
if (entry->in_mrgctx) {
rgctx_reg = rgctx->dreg;
} else {
rgctx_reg = alloc_preg (cfg);
MONO_EMIT_NEW_LOAD_MEMBASE (cfg, rgctx_reg, rgctx->dreg, MONO_STRUCT_OFFSET (MonoVTable, runtime_generic_context));
// FIXME: Avoid this check by allocating the table when the vtable is created etc.
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rgctx_reg, 0);
MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, slowpath_bb);
}
int table_size = mono_class_rgctx_get_array_size (0, entry->in_mrgctx);
if (entry->in_mrgctx)
table_size -= MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT / TARGET_SIZEOF_VOID_P;
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, slot_ins->dreg, table_size - 1);
MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBGE, slowpath_bb);
int shifted_slot_reg = alloc_ireg (cfg);
EMIT_NEW_BIALU_IMM (cfg, ins, OP_ISHL_IMM, shifted_slot_reg, slot_ins->dreg, TARGET_SIZEOF_VOID_P == 8 ? 3 : 2);
int addr_reg = alloc_preg (cfg);
EMIT_NEW_UNALU (cfg, ins, OP_MOVE, addr_reg, rgctx_reg);
EMIT_NEW_BIALU (cfg, ins, OP_PADD, addr_reg, addr_reg, shifted_slot_reg);
int val_reg = alloc_preg (cfg);
MONO_EMIT_NEW_LOAD_MEMBASE (cfg, val_reg, addr_reg, TARGET_SIZEOF_VOID_P + (entry->in_mrgctx ? MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT : 0));
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, val_reg, 0);
MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, slowpath_bb);
res_reg = alloc_preg (cfg);
EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, val_reg);
res = ins;
MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
MONO_START_BB (cfg, slowpath_bb);
slowpath_bb->out_of_line = TRUE;
MonoInst *args[2] = { rgctx, slot_ins };
if (entry->in_mrgctx)
call = mono_emit_jit_icall (cfg, mono_fill_method_rgctx, args);
else
call = mono_emit_jit_icall (cfg, mono_fill_class_rgctx, args);
EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, call->dreg);
MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
MONO_START_BB (cfg, end_bb);
return res;
}
/*
* emit_rgctx_fetch:
*
* Emit IR to load the value of the rgctx entry ENTRY from the rgctx.
*/
static MonoInst*
emit_rgctx_fetch (MonoCompile *cfg, int context_used, MonoJumpInfoRgctxEntry *entry)
{
MonoInst *rgctx = emit_get_rgctx (cfg, context_used);
if (cfg->llvm_only)
return emit_rgctx_fetch_inline (cfg, rgctx, entry);
else
return mini_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, mono_icall_sig_ptr_ptr, &rgctx);
}
/*
* mini_emit_get_rgctx_klass:
*
* Emit IR to load the property RGCTX_TYPE of KLASS. If context_used is 0, emit
* normal constants, else emit a load from the rgctx.
*/
MonoInst*
mini_emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
MonoClass *klass, MonoRgctxInfoType rgctx_type)
{
if (!context_used) {
MonoInst *ins;
switch (rgctx_type) {
case MONO_RGCTX_INFO_KLASS:
EMIT_NEW_CLASSCONST (cfg, ins, klass);
return ins;
case MONO_RGCTX_INFO_VTABLE: {
MonoVTable *vtable = mono_class_vtable_checked (klass, cfg->error);
CHECK_CFG_ERROR;
EMIT_NEW_VTABLECONST (cfg, ins, vtable);
return ins;
}
default:
g_assert_not_reached ();
}
}
// It's cheaper to load these from the gsharedvt info struct
if (cfg->llvm_only && cfg->gsharedvt)
return mini_emit_get_gsharedvt_info_klass (cfg, klass, rgctx_type);
MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used_is_mrgctx (cfg, context_used), MONO_PATCH_INFO_CLASS, klass, rgctx_type);
return emit_rgctx_fetch (cfg, context_used, entry);
mono_error_exit:
return NULL;
}
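/*
 * emit_get_rgctx_sig:
 *
 * Emit IR to load the value of type RGCTX_TYPE for the signature SIG from the rgctx.
 */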
static MonoInst*
emit_get_rgctx_sig (MonoCompile *cfg, int context_used,
MonoMethodSignature *sig, MonoRgctxInfoType rgctx_type)
{
MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used_is_mrgctx (cfg, context_used), MONO_PATCH_INFO_SIGNATURE, sig, rgctx_type);
return emit_rgctx_fetch (cfg, context_used, entry);
}
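/*
 * emit_get_rgctx_gsharedvt_call:
 *
 * Emit IR to load the gsharedvt call info for the (SIG, CMETHOD) pair from the rgctx.
 */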
static MonoInst*
emit_get_rgctx_gsharedvt_call (MonoCompile *cfg, int context_used,
MonoMethodSignature *sig, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
{
MonoJumpInfoGSharedVtCall *call_info;
MonoJumpInfoRgctxEntry *entry;
call_info = (MonoJumpInfoGSharedVtCall *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoGSharedVtCall));
call_info->sig = sig;
call_info->method = cmethod;
entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used_is_mrgctx (cfg, context_used), MONO_PATCH_INFO_GSHAREDVT_CALL, call_info, rgctx_type);
return emit_rgctx_fetch (cfg, context_used, entry);
}
/*
* emit_get_rgctx_virt_method:
*
* Return data for method VIRT_METHOD for a receiver of type KLASS.
*/
static MonoInst*
emit_get_rgctx_virt_method (MonoCompile *cfg, int context_used,
MonoClass *klass, MonoMethod *virt_method, MonoRgctxInfoType rgctx_type)
{
MonoJumpInfoVirtMethod *info;
MonoJumpInfoRgctxEntry *entry;
if (context_used == -1)
context_used = mono_class_check_context_used (klass) | mono_method_check_context_used (virt_method);
info = (MonoJumpInfoVirtMethod *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoVirtMethod));
info->klass = klass;
info->method = virt_method;
entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used_is_mrgctx (cfg, context_used), MONO_PATCH_INFO_VIRT_METHOD, info, rgctx_type);
return emit_rgctx_fetch (cfg, context_used, entry);
}
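/*
 * emit_get_rgctx_gsharedvt_method:
 *
 * Emit IR to load the gsharedvt info of CMETHOD from the rgctx.
 */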
static MonoInst*
emit_get_rgctx_gsharedvt_method (MonoCompile *cfg, int context_used,
MonoMethod *cmethod, MonoGSharedVtMethodInfo *info)
{
MonoJumpInfoRgctxEntry *entry;
entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used_is_mrgctx (cfg, context_used), MONO_PATCH_INFO_GSHAREDVT_METHOD, info, MONO_RGCTX_INFO_METHOD_GSHAREDVT_INFO);
return emit_rgctx_fetch (cfg, context_used, entry);
}
/*
* emit_get_rgctx_method:
*
* Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit
* normal constants, else emit a load from the rgctx.
*/
static MonoInst*
emit_get_rgctx_method (MonoCompile *cfg, int context_used,
MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
{
if (context_used == -1)
context_used = mono_method_check_context_used (cmethod);
if (!context_used) {
MonoInst *ins;
switch (rgctx_type) {
case MONO_RGCTX_INFO_METHOD:
EMIT_NEW_METHODCONST (cfg, ins, cmethod);
return ins;
case MONO_RGCTX_INFO_METHOD_RGCTX:
EMIT_NEW_METHOD_RGCTX_CONST (cfg, ins, cmethod);
return ins;
case MONO_RGCTX_INFO_METHOD_FTNDESC:
EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_METHOD_FTNDESC, cmethod);
return ins;
case MONO_RGCTX_INFO_LLVMONLY_INTERP_ENTRY:
EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_LLVMONLY_INTERP_ENTRY, cmethod);
return ins;
default:
g_assert_not_reached ();
}
} else {
// It's cheaper to load these from the gsharedvt info struct
if (cfg->llvm_only && cfg->gsharedvt)
return emit_get_gsharedvt_info (cfg, cmethod, rgctx_type);
MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used_is_mrgctx (cfg, context_used), MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
return emit_rgctx_fetch (cfg, context_used, entry);
}
}
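/*
 * emit_get_rgctx_field:
 *
 * Emit IR to load the value of type RGCTX_TYPE for FIELD from the rgctx.
 */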
static MonoInst*
emit_get_rgctx_field (MonoCompile *cfg, int context_used,
MonoClassField *field, MonoRgctxInfoType rgctx_type)
{
// It's cheaper to load these from the gsharedvt info struct
if (cfg->llvm_only && cfg->gsharedvt)
return emit_get_gsharedvt_info (cfg, field, rgctx_type);
MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used_is_mrgctx (cfg, context_used), MONO_PATCH_INFO_FIELD, field, rgctx_type);
return emit_rgctx_fetch (cfg, context_used, entry);
}
MonoInst*
mini_emit_get_rgctx_method (MonoCompile *cfg, int context_used,
MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
{
return emit_get_rgctx_method (cfg, context_used, cmethod, rgctx_type);
}
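/*
 * get_gsharedvt_info_slot:
 *
 * Return the index of the (DATA, RGCTX_TYPE) pair in the gsharedvt info entry table,
 * growing the table and adding a new entry if it is not found.
 */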
static int
get_gsharedvt_info_slot (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
{
MonoGSharedVtMethodInfo *info = cfg->gsharedvt_info;
MonoRuntimeGenericContextInfoTemplate *template_;
int i, idx;
g_assert (info);
for (i = 0; i < info->num_entries; ++i) {
MonoRuntimeGenericContextInfoTemplate *otemplate = &info->entries [i];
if (otemplate->info_type == rgctx_type && otemplate->data == data && rgctx_type != MONO_RGCTX_INFO_LOCAL_OFFSET)
return i;
}
if (info->num_entries == info->count_entries) {
MonoRuntimeGenericContextInfoTemplate *new_entries;
int new_count_entries = info->count_entries ? info->count_entries * 2 : 16;
new_entries = (MonoRuntimeGenericContextInfoTemplate *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * new_count_entries);
memcpy (new_entries, info->entries, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
info->entries = new_entries;
info->count_entries = new_count_entries;
}
idx = info->num_entries;
template_ = &info->entries [idx];
template_->info_type = rgctx_type;
template_->data = data;
info->num_entries ++;
return idx;
}
/*
* emit_get_gsharedvt_info:
*
* This is similar to the emit_get_rgctx_* functions, but loads the data from the gsharedvt info var instead of calling an rgctx fetch trampoline.
*/
static MonoInst*
emit_get_gsharedvt_info (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
{
MonoInst *ins;
int idx, dreg;
idx = get_gsharedvt_info_slot (cfg, data, rgctx_type);
/* Load info->entries [idx] */
dreg = alloc_preg (cfg);
EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, cfg->gsharedvt_info_var->dreg, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * TARGET_SIZEOF_VOID_P));
return ins;
}
MonoInst*
mini_emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type)
{
return emit_get_gsharedvt_info (cfg, m_class_get_byval_arg (klass), rgctx_type);
}
/*
* On return the caller must check @klass for load errors.
*/
static void
emit_class_init (MonoCompile *cfg, MonoClass *klass)
{
MonoInst *vtable_arg;
int context_used;
context_used = mini_class_check_context_used (cfg, klass);
if (context_used) {
vtable_arg = mini_emit_get_rgctx_klass (cfg, context_used,
klass, MONO_RGCTX_INFO_VTABLE);
} else {
MonoVTable *vtable = mono_class_vtable_checked (klass, cfg->error);
if (!is_ok (cfg->error)) {
mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
return;
}
EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
}
if (!COMPILE_LLVM (cfg) && cfg->backend->have_op_generic_class_init) {
MonoInst *ins;
/*
* Using an opcode instead of emitting IR here allows the hiding of the call inside the opcode,
* so this doesn't have to clobber any regs and it doesn't break basic blocks.
*/
MONO_INST_NEW (cfg, ins, OP_GENERIC_CLASS_INIT);
ins->sreg1 = vtable_arg->dreg;
MONO_ADD_INS (cfg->cbb, ins);
} else {
int inited_reg;
MonoBasicBlock *inited_bb;
inited_reg = alloc_ireg (cfg);
MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, inited_reg, vtable_arg->dreg, MONO_STRUCT_OFFSET (MonoVTable, initialized));
NEW_BBLOCK (cfg, inited_bb);
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, inited_reg, 0);
MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBNE_UN, inited_bb);
cfg->cbb->out_of_line = TRUE;
mono_emit_jit_icall (cfg, mono_generic_class_init, &vtable_arg);
MONO_START_BB (cfg, inited_bb);
}
}
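/*
 * emit_seq_point:
 *
 * Emit a sequence point at the IL offset IP if sequence points are being generated
 * for the current method.
 */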
static void
emit_seq_point (MonoCompile *cfg, MonoMethod *method, guint8* ip, gboolean intr_loc, gboolean nonempty_stack)
{
MonoInst *ins;
if (cfg->gen_seq_points && cfg->method == method) {
NEW_SEQ_POINT (cfg, ins, ip - cfg->header->code, intr_loc);
if (nonempty_stack)
ins->flags |= MONO_INST_NONEMPTY_STACK;
MONO_ADD_INS (cfg->cbb, ins);
cfg->last_seq_point = ins;
}
}
void
mini_save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg, gboolean null_check)
{
if (mini_debug_options.better_cast_details) {
int vtable_reg = alloc_preg (cfg);
int klass_reg = alloc_preg (cfg);
MonoBasicBlock *is_null_bb = NULL;
MonoInst *tls_get;
if (null_check) {
NEW_BBLOCK (cfg, is_null_bb);
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
}
tls_get = mono_create_tls_get (cfg, TLS_KEY_JIT_TLS);
if (!tls_get) {
fprintf (stderr, "error: --debug=casts not supported on this platform.\n");
exit (1);
}
MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
MonoInst *class_ins = mini_emit_get_rgctx_klass (cfg, mini_class_check_context_used (cfg, klass), klass, MONO_RGCTX_INFO_KLASS);
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), class_ins->dreg);
if (null_check)
MONO_START_BB (cfg, is_null_bb);
}
}
void
mini_reset_cast_details (MonoCompile *cfg)
{
/* Reset the variables holding the cast details */
if (mini_debug_options.better_cast_details) {
MonoInst *tls_get = mono_create_tls_get (cfg, TLS_KEY_JIT_TLS);
/* It is enough to reset the from field */
MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
}
}
/*
* On return the caller must check @array_class for load errors
*/
static void
mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
{
int vtable_reg = alloc_preg (cfg);
int context_used;
context_used = mini_class_check_context_used (cfg, array_class);
mini_save_cast_details (cfg, array_class, obj->dreg, FALSE);
MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
if (context_used) {
MonoInst *vtable_ins;
vtable_ins = mini_emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
} else {
if (cfg->compile_aot) {
int vt_reg;
MonoVTable *vtable;
if (!(vtable = mono_class_vtable_checked (array_class, cfg->error))) {
mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
return;
}
vt_reg = alloc_preg (cfg);
MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, vtable);
MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
} else {
MonoVTable *vtable;
if (!(vtable = mono_class_vtable_checked (array_class, cfg->error))) {
mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
return;
}
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, (gssize)vtable);
}
}
MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
mini_reset_cast_details (cfg);
}
/**
* Handles unbox of a Nullable<T>. If context_used is non-zero, shared
* generic code is generated.
*/
static MonoInst*
handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
{
MonoMethod* method;
if (m_class_is_enumtype (mono_class_get_nullable_param_internal (klass)))
method = get_method_nofail (klass, "UnboxExact", 1, 0);
else
method = get_method_nofail (klass, "Unbox", 1, 0);
g_assert (method);
if (context_used) {
MonoInst *rgctx, *addr;
/* FIXME: What if the class is shared? We might not
have to get the address of the method from the
RGCTX. */
if (cfg->llvm_only) {
addr = emit_get_rgctx_method (cfg, context_used, method,
MONO_RGCTX_INFO_METHOD_FTNDESC);
cfg->signatures = g_slist_prepend_mempool (cfg->mempool, cfg->signatures, mono_method_signature_internal (method));
return mini_emit_llvmonly_calli (cfg, mono_method_signature_internal (method), &val, addr);
} else {
addr = emit_get_rgctx_method (cfg, context_used, method,
MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
rgctx = emit_get_rgctx (cfg, context_used);
return mini_emit_calli (cfg, mono_method_signature_internal (method), &val, addr, NULL, rgctx);
}
} else {
gboolean pass_vtable, pass_mrgctx;
MonoInst *rgctx_arg = NULL;
check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
g_assert (!pass_mrgctx);
if (pass_vtable) {
MonoVTable *vtable = mono_class_vtable_checked (method->klass, cfg->error);
mono_error_assert_ok (cfg->error);
EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
}
return mini_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
}
}
MonoInst*
mini_handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst *val, int context_used)
{
MonoInst *add;
int obj_reg;
int vtable_reg = alloc_dreg (cfg, STACK_PTR);
int klass_reg = alloc_dreg (cfg, STACK_PTR);
int eclass_reg = alloc_dreg (cfg, STACK_PTR);
int rank_reg = alloc_dreg (cfg, STACK_I4);
obj_reg = val->dreg;
MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
/* FIXME: generics */
g_assert (m_class_get_rank (klass) == 0);
// Check rank == 0
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, m_class_offsetof_element_class ());
if (context_used) {
MonoInst *element_class;
/* This assertion is from the unboxcast insn */
g_assert (m_class_get_rank (klass) == 0);
element_class = mini_emit_get_rgctx_klass (cfg, context_used,
klass, MONO_RGCTX_INFO_ELEMENT_KLASS);
MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
} else {
mini_save_cast_details (cfg, m_class_get_element_class (klass), obj_reg, FALSE);
mini_emit_class_check (cfg, eclass_reg, m_class_get_element_class (klass));
mini_reset_cast_details (cfg);
}
NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), obj_reg, MONO_ABI_SIZEOF (MonoObject));
MONO_ADD_INS (cfg->cbb, add);
add->type = STACK_MP;
add->klass = klass;
return add;
}
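/*
 * handle_unbox_gsharedvt:
 *
 * Unbox OBJ to the gsharedvt type KLASS, branching at runtime on whether KLASS is
 * instantiated as a vtype, a reference type or a Nullable.
 */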
static MonoInst*
handle_unbox_gsharedvt (MonoCompile *cfg, MonoClass *klass, MonoInst *obj)
{
MonoInst *addr, *klass_inst, *is_ref, *args[16];
MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
MonoInst *ins;
int dreg, addr_reg;
klass_inst = mini_emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_KLASS);
/* obj */
args [0] = obj;
/* klass */
args [1] = klass_inst;
/* CASTCLASS */
obj = mono_emit_jit_icall (cfg, mono_object_castclass_unbox, args);
NEW_BBLOCK (cfg, is_ref_bb);
NEW_BBLOCK (cfg, is_nullable_bb);
NEW_BBLOCK (cfg, end_bb);
is_ref = mini_emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_REF);
MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_NULLABLE);
MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
/* This will contain either the address of the unboxed vtype, or an address of the temporary where the ref is stored */
addr_reg = alloc_dreg (cfg, STACK_MP);
/* Non-ref case */
/* UNBOX */
NEW_BIALU_IMM (cfg, addr, OP_ADD_IMM, addr_reg, obj->dreg, MONO_ABI_SIZEOF (MonoObject));
MONO_ADD_INS (cfg->cbb, addr);
MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Ref case */
MONO_START_BB (cfg, is_ref_bb);
/* Save the ref to a temporary */
dreg = alloc_ireg (cfg);
EMIT_NEW_VARLOADA_VREG (cfg, addr, dreg, m_class_get_byval_arg (klass));
addr->dreg = addr_reg;
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, obj->dreg);
MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Nullable case */
MONO_START_BB (cfg, is_nullable_bb);
{
MonoInst *addr = mini_emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_NULLABLE_CLASS_UNBOX);
MonoInst *unbox_call;
MonoMethodSignature *unbox_sig;
unbox_sig = (MonoMethodSignature *)mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
unbox_sig->ret = m_class_get_byval_arg (klass);
unbox_sig->param_count = 1;
unbox_sig->params [0] = mono_get_object_type ();
if (cfg->llvm_only)
unbox_call = mini_emit_llvmonly_calli (cfg, unbox_sig, &obj, addr);
else
unbox_call = mini_emit_calli (cfg, unbox_sig, &obj, addr, NULL, NULL);
EMIT_NEW_VARLOADA_VREG (cfg, addr, unbox_call->dreg, m_class_get_byval_arg (klass));
addr->dreg = addr_reg;
}
MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* End */
MONO_START_BB (cfg, end_bb);
/* LDOBJ */
EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, m_class_get_byval_arg (klass), addr_reg, 0);
return ins;
}
/*
* Returns NULL and sets the cfg exception on error.
*/
static MonoInst*
handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box, int context_used)
{
MonoInst *iargs [2];
MonoJitICallId alloc_ftn;
if (mono_class_get_flags (klass) & TYPE_ATTRIBUTE_ABSTRACT) {
char* full_name = mono_type_get_full_name (klass);
mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
mono_error_set_member_access (cfg->error, "Cannot create an abstract class: %s", full_name);
g_free (full_name);
return NULL;
}
if (context_used) {
gboolean known_instance_size = !mini_is_gsharedvt_klass (klass);
MonoMethod *managed_alloc = mono_gc_get_managed_allocator (klass, for_box, known_instance_size);
iargs [0] = mini_emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_VTABLE);
alloc_ftn = MONO_JIT_ICALL_ves_icall_object_new_specific;
if (managed_alloc) {
if (known_instance_size) {
int size = mono_class_instance_size (klass);
if (size < MONO_ABI_SIZEOF (MonoObject))
g_error ("Invalid size %d for class %s", size, mono_type_get_full_name (klass));
EMIT_NEW_ICONST (cfg, iargs [1], size);
}
return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
}
return mono_emit_jit_icall_id (cfg, alloc_ftn, iargs);
}
if (cfg->compile_aot && cfg->cbb->out_of_line && m_class_get_type_token (klass) && m_class_get_image (klass) == mono_defaults.corlib && !mono_class_is_ginst (klass)) {
/* This happens often in argument checking code, eg. throw new FooException... */
/* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (m_class_get_type_token (klass)));
alloc_ftn = MONO_JIT_ICALL_mono_helper_newobj_mscorlib;
} else {
MonoVTable *vtable = mono_class_vtable_checked (klass, cfg->error);
if (!is_ok (cfg->error)) {
mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
return NULL;
}
MonoMethod *managed_alloc = mono_gc_get_managed_allocator (klass, for_box, TRUE);
if (managed_alloc) {
int size = mono_class_instance_size (klass);
if (size < MONO_ABI_SIZEOF (MonoObject))
g_error ("Invalid size %d for class %s", size, mono_type_get_full_name (klass));
EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
EMIT_NEW_ICONST (cfg, iargs [1], size);
return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
}
alloc_ftn = MONO_JIT_ICALL_ves_icall_object_new_specific;
EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
}
return mono_emit_jit_icall_id (cfg, alloc_ftn, iargs);
}
/*
* Returns NULL and sets the cfg exception on error.
*/
MonoInst*
mini_emit_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used)
{
MonoInst *alloc, *ins;
if (G_UNLIKELY (m_class_is_byreflike (klass))) {
mono_error_set_bad_image (cfg->error, m_class_get_image (cfg->method->klass), "Cannot box IsByRefLike type '%s.%s'", m_class_get_name_space (klass), m_class_get_name (klass));
mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
return NULL;
}
if (mono_class_is_nullable (klass)) {
MonoMethod* method = get_method_nofail (klass, "Box", 1, 0);
if (context_used) {
if (cfg->llvm_only) {
MonoMethodSignature *sig = mono_method_signature_internal (method);
MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
MONO_RGCTX_INFO_METHOD_FTNDESC);
cfg->interp_in_signatures = g_slist_prepend_mempool (cfg->mempool, cfg->interp_in_signatures, sig);
return mini_emit_llvmonly_calli (cfg, sig, &val, addr);
} else {
/* FIXME: What if the class is shared? We might not
have to get the method address from the RGCTX. */
MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
MonoInst *rgctx = emit_get_rgctx (cfg, context_used);
return mini_emit_calli (cfg, mono_method_signature_internal (method), &val, addr, NULL, rgctx);
}
} else {
gboolean pass_vtable, pass_mrgctx;
MonoInst *rgctx_arg = NULL;
check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
g_assert (!pass_mrgctx);
if (pass_vtable) {
MonoVTable *vtable = mono_class_vtable_checked (method->klass, cfg->error);
mono_error_assert_ok (cfg->error);
EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
}
return mini_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
}
}
if (mini_is_gsharedvt_klass (klass)) {
MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
MonoInst *res, *is_ref, *src_var, *addr;
int dreg;
dreg = alloc_ireg (cfg);
NEW_BBLOCK (cfg, is_ref_bb);
NEW_BBLOCK (cfg, is_nullable_bb);
NEW_BBLOCK (cfg, end_bb);
is_ref = mini_emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_REF);
MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_NULLABLE);
MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
/* Non-ref case */
alloc = handle_alloc (cfg, klass, TRUE, context_used);
if (!alloc)
return NULL;
EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, m_class_get_byval_arg (klass), alloc->dreg, MONO_ABI_SIZEOF (MonoObject), val->dreg);
ins->opcode = OP_STOREV_MEMBASE;
EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, alloc->dreg);
res->type = STACK_OBJ;
res->klass = klass;
MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Ref case */
MONO_START_BB (cfg, is_ref_bb);
/* val is a vtype, so we have to load the value manually */
src_var = get_vreg_to_inst (cfg, val->dreg);
if (!src_var)
src_var = mono_compile_create_var_for_vreg (cfg, m_class_get_byval_arg (klass), OP_LOCAL, val->dreg);
EMIT_NEW_VARLOADA (cfg, addr, src_var, src_var->inst_vtype);
MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, addr->dreg, 0);
MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Nullable case */
MONO_START_BB (cfg, is_nullable_bb);
{
MonoInst *addr = mini_emit_get_gsharedvt_info_klass (cfg, klass,
MONO_RGCTX_INFO_NULLABLE_CLASS_BOX);
MonoInst *box_call;
MonoMethodSignature *box_sig;
/*
* klass is Nullable<T>, so we need to call Nullable<T>.Box () using a gsharedvt signature, but we cannot
* construct that method at JIT time, so we have to do things by hand.
*/
box_sig = (MonoMethodSignature *)mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
box_sig->ret = mono_get_object_type ();
box_sig->param_count = 1;
box_sig->params [0] = m_class_get_byval_arg (klass);
if (cfg->llvm_only)
box_call = mini_emit_llvmonly_calli (cfg, box_sig, &val, addr);
else
box_call = mini_emit_calli (cfg, box_sig, &val, addr, NULL, NULL);
EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, box_call->dreg);
res->type = STACK_OBJ;
res->klass = klass;
}
MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
MONO_START_BB (cfg, end_bb);
return res;
}
alloc = handle_alloc (cfg, klass, TRUE, context_used);
if (!alloc)
return NULL;
EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, m_class_get_byval_arg (klass), alloc->dreg, MONO_ABI_SIZEOF (MonoObject), val->dreg);
return alloc;
}
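/*
 * method_needs_stack_walk:
 *
 * Return whether CMETHOD needs to be able to walk the stack of its caller at runtime.
 */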
static gboolean
method_needs_stack_walk (MonoCompile *cfg, MonoMethod *cmethod)
{
if (cmethod->klass == mono_defaults.systemtype_class) {
if (!strcmp (cmethod->name, "GetType"))
return TRUE;
}
/*
* In corelib code, methods which need to do a stack walk declare a StackCrawlMark local and pass it as an
* argument until it reaches an icall. It's hard to detect which methods do that, especially with
* StackCrawlMark.LookForMyCallersCaller, so for now, just hardcode the classes which contain the public
* methods whose caller is needed.
*/
if (mono_is_corlib_image (m_class_get_image (cmethod->klass))) {
const char *cname = m_class_get_name (cmethod->klass);
if (!strcmp (cname, "Assembly") ||
!strcmp (cname, "AssemblyLoadContext") ||
(!strcmp (cname, "Activator"))) {
if (!strcmp (cmethod->name, "op_Equality"))
return FALSE;
return TRUE;
}
}
return FALSE;
}
G_GNUC_UNUSED MonoInst*
mini_handle_enum_has_flag (MonoCompile *cfg, MonoClass *klass, MonoInst *enum_this, int enum_val_reg, MonoInst *enum_flag)
{
MonoType *enum_type = mono_type_get_underlying_type (m_class_get_byval_arg (klass));
guint32 load_opc = mono_type_to_load_membase (cfg, enum_type);
gboolean is_i4;
switch (enum_type->type) {
case MONO_TYPE_I8:
case MONO_TYPE_U8:
#if SIZEOF_REGISTER == 8
case MONO_TYPE_I:
case MONO_TYPE_U:
#endif
is_i4 = FALSE;
break;
default:
is_i4 = TRUE;
break;
}
{
MonoInst *load = NULL, *and_, *cmp, *ceq;
int enum_reg = is_i4 ? alloc_ireg (cfg) : alloc_lreg (cfg);
int and_reg = is_i4 ? alloc_ireg (cfg) : alloc_lreg (cfg);
int dest_reg = alloc_ireg (cfg);
if (enum_this) {
EMIT_NEW_LOAD_MEMBASE (cfg, load, load_opc, enum_reg, enum_this->dreg, 0);
} else {
g_assert (enum_val_reg != -1);
enum_reg = enum_val_reg;
}
EMIT_NEW_BIALU (cfg, and_, is_i4 ? OP_IAND : OP_LAND, and_reg, enum_reg, enum_flag->dreg);
EMIT_NEW_BIALU (cfg, cmp, is_i4 ? OP_ICOMPARE : OP_LCOMPARE, -1, and_reg, enum_flag->dreg);
EMIT_NEW_UNALU (cfg, ceq, is_i4 ? OP_ICEQ : OP_LCEQ, dest_reg, -1);
ceq->type = STACK_I4;
if (!is_i4) {
load = load ? mono_decompose_opcode (cfg, load) : NULL;
and_ = mono_decompose_opcode (cfg, and_);
cmp = mono_decompose_opcode (cfg, cmp);
ceq = mono_decompose_opcode (cfg, ceq);
}
return ceq;
}
}
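/*
 * emit_set_deopt_il_offset:
 *
 * Store OFFSET into the il_offset field of the IL state variable used for deoptimization.
 */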
static void
emit_set_deopt_il_offset (MonoCompile *cfg, int offset)
{
MonoInst *ins;
if (!(cfg->deopt && cfg->method == cfg->current_method))
return;
EMIT_NEW_VARLOADA (cfg, ins, cfg->il_state_var, NULL);
MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, ins->dreg, MONO_STRUCT_OFFSET (MonoMethodILState, il_offset), offset);
}
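/*
 * emit_get_rgctx_dele_tramp:
 *
 * Emit IR to load the delegate trampoline info for the (KLASS, VIRT_METHOD) pair from the rgctx.
 */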
static MonoInst*
emit_get_rgctx_dele_tramp (MonoCompile *cfg, int context_used,
MonoClass *klass, MonoMethod *virt_method, gboolean _virtual, MonoRgctxInfoType rgctx_type)
{
MonoDelegateClassMethodPair *info;
MonoJumpInfoRgctxEntry *entry;
info = (MonoDelegateClassMethodPair *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoDelegateClassMethodPair));
info->klass = klass;
info->method = virt_method;
info->is_virtual = _virtual;
entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used_is_mrgctx (cfg, context_used), MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, info, rgctx_type);
return emit_rgctx_fetch (cfg, context_used, entry);
}
/*
* Returns NULL and sets the cfg exception on error.
*/
static G_GNUC_UNUSED MonoInst*
handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method, int target_method_context_used, int invoke_context_used, gboolean virtual_)
{
MonoInst *ptr;
int dreg;
gpointer trampoline;
MonoInst *obj, *tramp_ins;
guint8 **code_slot;
if (virtual_ && !cfg->llvm_only) {
MonoMethod *invoke = mono_get_delegate_invoke_internal (klass);
g_assert (invoke);
// FIXME: verify & fix any issue with removing invoke_context_used restriction
if (invoke_context_used || !mono_get_delegate_virtual_invoke_impl (mono_method_signature_internal (invoke), target_method_context_used ? NULL : method))
return NULL;
}
obj = handle_alloc (cfg, klass, FALSE, invoke_context_used);
if (!obj)
return NULL;
/* Inline the contents of mono_delegate_ctor */
/* Set target field */
/* Optimize away setting of NULL target */
if (!MONO_INS_IS_PCONST_NULL (target)) {
if (!(method->flags & METHOD_ATTRIBUTE_STATIC)) {
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, target->dreg, 0);
MONO_EMIT_NEW_COND_EXC (cfg, EQ, "NullReferenceException");
}
if (!mini_debug_options.weak_memory_model)
mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
if (cfg->gen_write_barriers) {
dreg = alloc_preg (cfg);
EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, target));
mini_emit_write_barrier (cfg, ptr, target);
}
}
/* Set method field */
if (!(target_method_context_used || invoke_context_used) && !cfg->llvm_only) {
// If compiling with gsharing enabled, it's faster to load the method from the delegate trampoline info than to use an rgctx slot
MonoInst *method_ins = emit_get_rgctx_method (cfg, target_method_context_used, method, MONO_RGCTX_INFO_METHOD);
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
}
if (cfg->llvm_only) {
if (virtual_) {
MonoInst *args [ ] = {
obj,
target,
emit_get_rgctx_method (cfg, target_method_context_used, method, MONO_RGCTX_INFO_METHOD)
};
mono_emit_jit_icall (cfg, mini_llvmonly_init_delegate_virtual, args);
return obj;
}
}
/*
* To avoid looking up the compiled code belonging to the target method
* in mono_delegate_trampoline (), we allocate a per-domain memory slot to
* store it, and we fill it after the method has been compiled.
*/
if (!method->dynamic && !cfg->llvm_only) {
MonoInst *code_slot_ins;
if (target_method_context_used) {
code_slot_ins = emit_get_rgctx_method (cfg, target_method_context_used, method, MONO_RGCTX_INFO_METHOD_DELEGATE_CODE);
} else {
MonoJitMemoryManager *jit_mm = (MonoJitMemoryManager*)cfg->jit_mm;
jit_mm_lock (jit_mm);
if (!jit_mm->method_code_hash)
jit_mm->method_code_hash = g_hash_table_new (NULL, NULL);
code_slot = (guint8 **)g_hash_table_lookup (jit_mm->method_code_hash, method);
if (!code_slot) {
code_slot = (guint8 **)mono_mem_manager_alloc0 (jit_mm->mem_manager, sizeof (gpointer));
g_hash_table_insert (jit_mm->method_code_hash, method, code_slot);
}
jit_mm_unlock (jit_mm);
code_slot_ins = mini_emit_runtime_constant (cfg, MONO_PATCH_INFO_METHOD_CODE_SLOT, method);
}
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
}
if (target_method_context_used || invoke_context_used) {
tramp_ins = emit_get_rgctx_dele_tramp (cfg, target_method_context_used | invoke_context_used, klass, method, virtual_, MONO_RGCTX_INFO_DELEGATE_TRAMP_INFO);
// This is emitted as a constant store for the non-shared case.
// We copy from the delegate trampoline info as it's faster than an rgctx fetch.
dreg = alloc_preg (cfg);
if (!cfg->llvm_only) {
MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, tramp_ins->dreg, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo, method));
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method), dreg);
}
} else if (cfg->compile_aot) {
MonoDelegateClassMethodPair *del_tramp;
del_tramp = (MonoDelegateClassMethodPair *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoDelegateClassMethodPair));
del_tramp->klass = klass;
del_tramp->method = method;
del_tramp->is_virtual = virtual_;
EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, del_tramp);
} else {
if (virtual_)
trampoline = mono_create_delegate_virtual_trampoline (klass, method);
else
trampoline = mono_create_delegate_trampoline_info (klass, method);
EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
}
if (cfg->llvm_only) {
MonoInst *args [ ] = {
obj,
tramp_ins
};
mono_emit_jit_icall (cfg, mini_llvmonly_init_delegate, args);
return obj;
}
/* Set invoke_impl field */
if (virtual_) {
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
} else {
dreg = alloc_preg (cfg);
MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, tramp_ins->dreg, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo, invoke_impl));
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl), dreg);
dreg = alloc_preg (cfg);
MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, tramp_ins->dreg, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo, method_ptr));
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr), dreg);
}
dreg = alloc_preg (cfg);
MONO_EMIT_NEW_ICONST (cfg, dreg, virtual_ ? 1 : 0);
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_is_virtual), dreg);
/* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
return obj;
}
/*
* handle_constrained_gsharedvt_call:
*
* Handle constrained calls where the receiver is a gsharedvt type.
* Return the instruction representing the call. Set the cfg exception on failure.
*/
static MonoInst*
handle_constrained_gsharedvt_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp, MonoClass *constrained_class,
gboolean *ref_emit_widen)
{
MonoInst *ins = NULL;
gboolean emit_widen = *ref_emit_widen;
gboolean supported;
/*
* Constrained calls need to behave differently at runtime depending on whether the receiver is instantiated as a ref type or as a vtype.
* This is hard to do with the current call code, since we would have to emit a branch and two different calls. So instead, we
* pack the arguments into an array, and do the rest of the work in an icall.
*/
supported = ((cmethod->klass == mono_defaults.object_class) || mono_class_is_interface (cmethod->klass) || (!m_class_is_valuetype (cmethod->klass) && m_class_get_image (cmethod->klass) != mono_defaults.corlib));
if (supported)
supported = (MONO_TYPE_IS_VOID (fsig->ret) || MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_IS_REFERENCE (fsig->ret) || MONO_TYPE_ISSTRUCT (fsig->ret) || m_class_is_enumtype (mono_class_from_mono_type_internal (fsig->ret)) || mini_is_gsharedvt_type (fsig->ret));
if (supported) {
if (fsig->param_count == 0 || (!fsig->hasthis && fsig->param_count == 1)) {
supported = TRUE;
} else {
supported = TRUE;
for (int i = 0; i < fsig->param_count; ++i) {
if (!(m_type_is_byref (fsig->params [i]) || MONO_TYPE_IS_PRIMITIVE (fsig->params [i]) || MONO_TYPE_IS_REFERENCE (fsig->params [i]) || MONO_TYPE_ISSTRUCT (fsig->params [i]) || mini_is_gsharedvt_type (fsig->params [i])))
supported = FALSE;
}
}
}
if (supported) {
MonoInst *args [5];
/*
* This case handles calls to
* - object:ToString()/Equals()/GetHashCode(),
* - System.IComparable<T>:CompareTo()
* - System.IEquatable<T>:Equals ()
* plus some simple interface calls enough to support AsyncTaskMethodBuilder.
*/
if (fsig->hasthis)
args [0] = sp [0];
else
EMIT_NEW_PCONST (cfg, args [0], NULL);
args [1] = emit_get_rgctx_method (cfg, mono_method_check_context_used (cmethod), cmethod, MONO_RGCTX_INFO_METHOD);
args [2] = mini_emit_get_rgctx_klass (cfg, mono_class_check_context_used (constrained_class), constrained_class, MONO_RGCTX_INFO_KLASS);
/* !fsig->hasthis is for the wrapper for the Object.GetType () icall or static virtual methods */
if ((fsig->hasthis || m_method_is_static (cmethod)) && fsig->param_count) {
/* Call mono_gsharedvt_constrained_call (gpointer mp, MonoMethod *cmethod, MonoClass *klass, gboolean *deref_args, gpointer *args) */
gboolean has_gsharedvt = FALSE;
for (int i = 0; i < fsig->param_count; ++i) {
if (mini_is_gsharedvt_type (fsig->params [i]))
has_gsharedvt = TRUE;
}
/* Pass an array of bools which signal whether the corresponding argument is a gsharedvt ref type */
if (has_gsharedvt) {
MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
ins->dreg = alloc_preg (cfg);
ins->inst_imm = fsig->param_count;
MONO_ADD_INS (cfg->cbb, ins);
args [3] = ins;
} else {
EMIT_NEW_PCONST (cfg, args [3], 0);
}
/* Pass the arguments using a localloc-ed array using the format expected by runtime_invoke () */
MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
ins->dreg = alloc_preg (cfg);
ins->inst_imm = fsig->param_count * sizeof (target_mgreg_t);
MONO_ADD_INS (cfg->cbb, ins);
args [4] = ins;
for (int i = 0; i < fsig->param_count; ++i) {
int addr_reg;
if (mini_is_gsharedvt_type (fsig->params [i])) {
MonoInst *is_deref;
int deref_arg_reg;
ins = mini_emit_get_gsharedvt_info_klass (cfg, mono_class_from_mono_type_internal (fsig->params [i]), MONO_RGCTX_INFO_CLASS_BOX_TYPE);
deref_arg_reg = alloc_preg (cfg);
/* deref_arg = BOX_TYPE != MONO_GSHAREDVT_BOX_TYPE_VTYPE */
EMIT_NEW_BIALU_IMM (cfg, is_deref, OP_ISUB_IMM, deref_arg_reg, ins->dreg, 1);
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, args [3]->dreg, i, is_deref->dreg);
} else if (has_gsharedvt) {
MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, args [3]->dreg, i, 0);
}
MonoInst *arg = sp [i + fsig->hasthis];
if (mini_is_gsharedvt_type (fsig->params [i]) || MONO_TYPE_IS_PRIMITIVE (fsig->params [i]) || MONO_TYPE_ISSTRUCT (fsig->params [i])) {
EMIT_NEW_VARLOADA_VREG (cfg, ins, arg->dreg, fsig->params [i]);
addr_reg = ins->dreg;
EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, i * sizeof (target_mgreg_t), addr_reg);
} else {
EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, i * sizeof (target_mgreg_t), arg->dreg);
}
}
} else {
EMIT_NEW_ICONST (cfg, args [3], 0);
EMIT_NEW_ICONST (cfg, args [4], 0);
}
ins = mono_emit_jit_icall (cfg, mono_gsharedvt_constrained_call, args);
emit_widen = FALSE;
if (mini_is_gsharedvt_type (fsig->ret)) {
ins = handle_unbox_gsharedvt (cfg, mono_class_from_mono_type_internal (fsig->ret), ins);
} else if (MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_ISSTRUCT (fsig->ret) || m_class_is_enumtype (mono_class_from_mono_type_internal (fsig->ret))) {
MonoInst *add;
/* Unbox */
NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), ins->dreg, MONO_ABI_SIZEOF (MonoObject));
MONO_ADD_INS (cfg->cbb, add);
/* Load value */
NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, add->dreg, 0);
MONO_ADD_INS (cfg->cbb, ins);
/* ins represents the call result */
}
} else {
GSHAREDVT_FAILURE (CEE_CALLVIRT);
}
*ref_emit_widen = emit_widen;
return ins;
exception_exit:
return NULL;
}
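/*
 * mono_emit_load_got_addr:
 *
 * Emit an OP_LOAD_GOTADDR instruction at the start of the method to initialize the
 * got var, and a dummy use at the end to keep it alive.
 */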
static void
mono_emit_load_got_addr (MonoCompile *cfg)
{
MonoInst *getaddr, *dummy_use;
if (!cfg->got_var || cfg->got_var_allocated)
return;
MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
getaddr->cil_code = cfg->header->code;
getaddr->dreg = cfg->got_var->dreg;
/* Add it to the start of the first bblock */
if (cfg->bb_entry->code) {
getaddr->next = cfg->bb_entry->code;
cfg->bb_entry->code = getaddr;
}
else
MONO_ADD_INS (cfg->bb_entry, getaddr);
cfg->got_var_allocated = TRUE;
/*
* Add a dummy use to keep the got_var alive, since real uses might
* only be generated by the back ends.
* Add it to end_bblock, so the variable's lifetime covers the whole
* method.
* It would be better to make the usage of the got var explicit in all
* cases when the backend needs it (i.e. calls, throw etc.), so this
* wouldn't be needed.
*/
NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
MONO_ADD_INS (cfg->bb_exit, dummy_use);
}
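/*
 * get_constrained_method:
 *
 * Return the method which is actually called when making the constrained call
 * CONSTRAINED_CLASS::CIL_METHOD. Sets the cfg error and returns NULL on failure.
 */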
static MonoMethod*
get_constrained_method (MonoCompile *cfg, MonoImage *image, guint32 token,
MonoMethod *cil_method, MonoClass *constrained_class,
MonoGenericContext *generic_context)
{
MonoMethod *cmethod = cil_method;
gboolean constrained_is_generic_param =
m_class_get_byval_arg (constrained_class)->type == MONO_TYPE_VAR ||
m_class_get_byval_arg (constrained_class)->type == MONO_TYPE_MVAR;
if (cfg->current_method->wrapper_type != MONO_WRAPPER_NONE) {
if (cfg->verbose_level > 2)
printf ("DM Constrained call to %s\n", mono_type_get_full_name (constrained_class));
if (!(constrained_is_generic_param &&
cfg->gshared)) {
cmethod = mono_get_method_constrained_with_method (image, cil_method, constrained_class, generic_context, cfg->error);
CHECK_CFG_ERROR;
}
} else {
if (cfg->verbose_level > 2)
printf ("Constrained call to %s\n", mono_type_get_full_name (constrained_class));
if (constrained_is_generic_param && cfg->gshared) {
/*
* This is needed since get_method_constrained can't find
* the method in klass representing a type var.
* The type var is guaranteed to be a reference type in this
* case.
*/
if (!mini_is_gsharedvt_klass (constrained_class))
g_assert (!m_class_is_valuetype (cmethod->klass));
} else {
cmethod = mono_get_method_constrained_checked (image, token, constrained_class, generic_context, &cil_method, cfg->error);
CHECK_CFG_ERROR;
}
}
return cmethod;
mono_error_exit:
return NULL;
}
static gboolean
method_does_not_return (MonoMethod *method)
{
// FIXME: Under netcore, these are decorated with the [DoesNotReturn] attribute
return m_class_get_image (method->klass) == mono_defaults.corlib &&
!strcmp (m_class_get_name (method->klass), "ThrowHelper") &&
strstr (method->name, "Throw") == method->name &&
!method->is_inflated;
}
static int inline_limit, llvm_jit_inline_limit, llvm_aot_inline_limit;
static gboolean inline_limit_inited;
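/*
 * mono_method_check_inlining:
 *
 * Return whether METHOD is a candidate for inlining into the method being compiled.
 */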
static gboolean
mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
{
MonoMethodHeaderSummary header;
MonoVTable *vtable;
int limit;
#ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
MonoMethodSignature *sig = mono_method_signature_internal (method);
int i;
#endif
if (cfg->disable_inline)
return FALSE;
if (cfg->gsharedvt)
return FALSE;
if (cfg->inline_depth > 10)
return FALSE;
if (!mono_method_get_header_summary (method, &header))
return FALSE;
/* runtime, icall and pinvoke are checked by the summary call */
if ((method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
(method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
header.has_clauses)
return FALSE;
if (method->flags & METHOD_ATTRIBUTE_REQSECOBJ)
/* Used to mark methods containing StackCrawlMark locals */
return FALSE;
/* also consider num_locals? */
/* Do the size check early to avoid creating vtables */
if (!inline_limit_inited) {
char *inlinelimit;
if ((inlinelimit = g_getenv ("MONO_INLINELIMIT"))) {
inline_limit = atoi (inlinelimit);
llvm_jit_inline_limit = inline_limit;
llvm_aot_inline_limit = inline_limit;
g_free (inlinelimit);
} else {
inline_limit = INLINE_LENGTH_LIMIT;
llvm_jit_inline_limit = LLVM_JIT_INLINE_LENGTH_LIMIT;
llvm_aot_inline_limit = LLVM_AOT_INLINE_LENGTH_LIMIT;
}
inline_limit_inited = TRUE;
}
if (COMPILE_LLVM (cfg)) {
if (cfg->compile_aot)
limit = llvm_aot_inline_limit;
else
limit = llvm_jit_inline_limit;
} else {
limit = inline_limit;
}
if (header.code_size >= limit && !(method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))
return FALSE;
/*
* if we can initialize the class of the method right away, we do,
* otherwise we don't allow inlining if the class needs initialization,
* since it would mean inserting a call to mono_runtime_class_init()
* inside the inlined code
*/
if (cfg->gshared && m_class_has_cctor (method->klass) && mini_class_check_context_used (cfg, method->klass))
return FALSE;
{
/* The AggressiveInlining hint is a good excuse to force that cctor to run. */
if ((cfg->opt & MONO_OPT_AGGRESSIVE_INLINING) || method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING) {
if (m_class_has_cctor (method->klass)) {
ERROR_DECL (error);
vtable = mono_class_vtable_checked (method->klass, error);
if (!is_ok (error)) {
mono_error_cleanup (error);
return FALSE;
}
if (!cfg->compile_aot) {
if (!mono_runtime_class_init_full (vtable, error)) {
mono_error_cleanup (error);
return FALSE;
}
}
}
} else if (mono_class_is_before_field_init (method->klass)) {
if (cfg->run_cctors && m_class_has_cctor (method->klass)) {
ERROR_DECL (error);
/* FIXME: it would be easier and lazier to just use mono_class_try_get_vtable */
if (!m_class_get_runtime_vtable (method->klass))
/* No vtable created yet */
return FALSE;
vtable = mono_class_vtable_checked (method->klass, error);
if (!is_ok (error)) {
mono_error_cleanup (error);
return FALSE;
}
/* This ensures that inlining cannot trigger */
/* .cctors: too many apps depend on them */
/* running in a specific order... */
if (! vtable->initialized)
return FALSE;
if (!mono_runtime_class_init_full (vtable, error)) {
mono_error_cleanup (error);
return FALSE;
}
}
} else if (mono_class_needs_cctor_run (method->klass, NULL)) {
ERROR_DECL (error);
if (!m_class_get_runtime_vtable (method->klass))
/* No vtable created yet */
return FALSE;
vtable = mono_class_vtable_checked (method->klass, error);
if (!is_ok (error)) {
mono_error_cleanup (error);
return FALSE;
}
if (!vtable->initialized)
return FALSE;
}
}
#ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
if (mono_arch_is_soft_float ()) {
/* FIXME: */
if (sig->ret && sig->ret->type == MONO_TYPE_R4)
return FALSE;
for (i = 0; i < sig->param_count; ++i)
if (!m_type_is_byref (sig->params [i]) && sig->params [i]->type == MONO_TYPE_R4)
return FALSE;
}
#endif
if (g_list_find (cfg->dont_inline, method))
return FALSE;
if (mono_profiler_get_call_instrumentation_flags (method))
return FALSE;
if (mono_profiler_coverage_instrumentation_enabled (method))
return FALSE;
if (method_does_not_return (method))
return FALSE;
return TRUE;
}
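/*
 * mini_field_access_needs_cctor_run:
 *
 * Return whether a class init call has to be emitted before accessing a field of
 * KLASS from METHOD.
 */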
static gboolean
mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoClass *klass, MonoVTable *vtable)
{
if (!cfg->compile_aot) {
g_assert (vtable);
if (vtable->initialized)
return FALSE;
}
if (mono_class_is_before_field_init (klass)) {
if (cfg->method == method)
return FALSE;
}
if (!mono_class_needs_cctor_run (klass, method))
return FALSE;
if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (klass == method->klass))
/* The initialization is already done before the method is called */
return FALSE;
return TRUE;
}
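/*
 * mini_emit_sext_index_reg:
 *
 * Emit IR to convert the array index INDEX to the native pointer width if needed,
 * and return the vreg holding the result.
 */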
int
mini_emit_sext_index_reg (MonoCompile *cfg, MonoInst *index)
{
int index_reg = index->dreg;
int index2_reg;
#if SIZEOF_REGISTER == 8
/* The array reg is 64 bits but the index reg is only 32 */
if (COMPILE_LLVM (cfg)) {
/*
* abcrem can't handle the OP_SEXT_I4, so add this after abcrem,
* during OP_BOUNDS_CHECK decomposition, and in the implementation
* of OP_X86_LEA for llvm.
*/
index2_reg = index_reg;
} else {
index2_reg = alloc_preg (cfg);
MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
}
#else
if (index->type == STACK_I8) {
index2_reg = alloc_preg (cfg);
MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
} else {
index2_reg = index_reg;
}
#endif
return index2_reg;
}
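/*
 * mini_emit_ldelema_1_ins:
 *
 * Emit IR to compute the address of the element at index INDEX of the one
 * dimensional array ARR, optionally emitting a bounds check, and handling a
 * non-zero lower bound if BOUNDED is set.
 */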
MonoInst*
mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index, gboolean bcheck, gboolean bounded)
{
MonoInst *ins;
guint32 size;
int mult_reg, add_reg, array_reg, index2_reg, bounds_reg, lower_bound_reg, realidx2_reg;
int context_used;
if (mini_is_gsharedvt_variable_klass (klass)) {
size = -1;
} else {
mono_class_init_internal (klass);
size = mono_class_array_element_size (klass);
}
mult_reg = alloc_preg (cfg);
array_reg = arr->dreg;
realidx2_reg = index2_reg = mini_emit_sext_index_reg (cfg, index);
if (bounded) {
bounds_reg = alloc_preg (cfg);
lower_bound_reg = alloc_preg (cfg);
realidx2_reg = alloc_preg (cfg);
MonoBasicBlock *is_null_bb = NULL;
NEW_BBLOCK (cfg, is_null_bb);
// gint32 lower_bound = 0;
// if (arr->bounds)
// lower_bound = arr->bounds.lower_bound;
// realidx2 = index2 - lower_bound;
MONO_EMIT_NEW_PCONST (cfg, lower_bound_reg, NULL);
MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, arr->dreg, MONO_STRUCT_OFFSET (MonoArray, bounds));
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, lower_bound_reg, bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
MONO_START_BB (cfg, is_null_bb);
MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2_reg, lower_bound_reg);
}
if (bcheck)
MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, realidx2_reg);
#if defined(TARGET_X86) || defined(TARGET_AMD64)
if (size == 1 || size == 2 || size == 4 || size == 8) {
static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
EMIT_NEW_X86_LEA (cfg, ins, array_reg, realidx2_reg, fast_log2 [size], MONO_STRUCT_OFFSET (MonoArray, vector));
ins->klass = klass;
ins->type = STACK_MP;
return ins;
}
#endif
add_reg = alloc_ireg_mp (cfg);
if (size == -1) {
MonoInst *rgctx_ins;
/* gsharedvt */
g_assert (cfg->gshared);
context_used = mini_class_check_context_used (cfg, klass);
g_assert (context_used);
rgctx_ins = mini_emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_ARRAY_ELEMENT_SIZE);
MONO_EMIT_NEW_BIALU (cfg, OP_IMUL, mult_reg, realidx2_reg, rgctx_ins->dreg);
} else {
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, realidx2_reg, size);
}
MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, MONO_STRUCT_OFFSET (MonoArray, vector));
ins->klass = klass;
ins->type = STACK_MP;
MONO_ADD_INS (cfg->cbb, ins);
return ins;
}
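/*
 * mini_emit_ldelema_2_ins:
 *
 * Emit IR to compute the address of an element of the two dimensional array ARR,
 * including the range checks for both dimensions.
 */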
static MonoInst*
mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
{
int bounds_reg = alloc_preg (cfg);
int add_reg = alloc_ireg_mp (cfg);
int mult_reg = alloc_preg (cfg);
int mult2_reg = alloc_preg (cfg);
int low1_reg = alloc_preg (cfg);
int low2_reg = alloc_preg (cfg);
int high1_reg = alloc_preg (cfg);
int high2_reg = alloc_preg (cfg);
int realidx1_reg = alloc_preg (cfg);
int realidx2_reg = alloc_preg (cfg);
int sum_reg = alloc_preg (cfg);
int index1, index2;
MonoInst *ins;
guint32 size;
mono_class_init_internal (klass);
size = mono_class_array_element_size (klass);
index1 = index_ins1->dreg;
index2 = index_ins2->dreg;
#if SIZEOF_REGISTER == 8
/* The array reg is 64 bits but the index reg is only 32 */
if (COMPILE_LLVM (cfg)) {
/* Not needed */
} else {
int tmpreg = alloc_preg (cfg);
MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index1);
index1 = tmpreg;
tmpreg = alloc_preg (cfg);
MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index2);
index2 = tmpreg;
}
#else
// FIXME: Do we need to do something here for i8 indexes, like in ldelema_1_ins?
#endif
/* range checking */
MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
arr->dreg, MONO_STRUCT_OFFSET (MonoArray, bounds));
MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, length));
MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
bounds_reg, sizeof (MonoArrayBounds) + MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
bounds_reg, sizeof (MonoArrayBounds) + MONO_STRUCT_OFFSET (MonoArrayBounds, length));
MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, MONO_STRUCT_OFFSET (MonoArray, vector));
ins->type = STACK_MP;
ins->klass = klass;
MONO_ADD_INS (cfg->cbb, ins);
return ins;
}
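/*
 * mini_emit_ldelema_ins:
 *
 * Emit IR to compute the address of the array element accessed by the array method
 * CMETHOD, using fast paths for rank 1 and rank 2 arrays and falling back to a
 * marshalling helper otherwise.
 */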
static MonoInst*
mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, guchar *ip, gboolean is_set)
{
int rank;
MonoInst *addr;
MonoMethod *addr_method;
int element_size;
MonoClass *eclass = m_class_get_element_class (cmethod->klass);
gboolean bounded = m_class_get_byval_arg (cmethod->klass) ? m_class_get_byval_arg (cmethod->klass)->type == MONO_TYPE_ARRAY : FALSE;
rank = mono_method_signature_internal (cmethod)->param_count - (is_set? 1: 0);
if (rank == 1)
return mini_emit_ldelema_1_ins (cfg, eclass, sp [0], sp [1], TRUE, bounded);
/* emit_ldelema_2 depends on OP_LMUL */
if (!cfg->backend->emulate_mul_div && rank == 2 && (cfg->opt & MONO_OPT_INTRINS) && !mini_is_gsharedvt_variable_klass (eclass)) {
return mini_emit_ldelema_2_ins (cfg, eclass, sp [0], sp [1], sp [2]);
}
if (mini_is_gsharedvt_variable_klass (eclass))
element_size = 0;
else
element_size = mono_class_array_element_size (eclass);
addr_method = mono_marshal_get_array_address (rank, element_size);
addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
return addr;
}
static gboolean
mini_class_is_reference (MonoClass *klass)
{
return mini_type_is_reference (m_class_get_byval_arg (klass));
}
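/*
 * mini_emit_array_store:
 *
 * Emit IR to store sp [2] into the array sp [0] at the index sp [1], going through
 * the virtual stelemref helper when a runtime type check is needed.
 */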
MonoInst*
mini_emit_array_store (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, gboolean safety_checks)
{
if (safety_checks && mini_class_is_reference (klass) &&
!(MONO_INS_IS_PCONST_NULL (sp [2]))) {
MonoClass *obj_array = mono_array_class_get_cached (mono_defaults.object_class);
MonoMethod *helper;
MonoInst *iargs [3];
if (sp [0]->type != STACK_OBJ)
return NULL;
if (sp [2]->type != STACK_OBJ)
return NULL;
iargs [2] = sp [2];
iargs [1] = sp [1];
iargs [0] = sp [0];
MonoClass *array_class = sp [0]->klass;
if (array_class && m_class_get_rank (array_class) == 1) {
MonoClass *eclass = m_class_get_element_class (array_class);
if (m_class_is_sealed (eclass)) {
helper = mono_marshal_get_virtual_stelemref (array_class);
/* Make a non-virtual call if possible */
return mono_emit_method_call (cfg, helper, iargs, NULL);
}
}
helper = mono_marshal_get_virtual_stelemref (obj_array);
if (!helper->slot)
mono_class_setup_vtable (obj_array);
g_assert (helper->slot);
return mono_emit_method_call (cfg, helper, iargs, sp [0]);
} else {
MonoInst *ins;
if (mini_is_gsharedvt_variable_klass (klass)) {
MonoInst *addr;
// FIXME-VT: OP_ICONST optimization
addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE, FALSE);
EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, m_class_get_byval_arg (klass), addr->dreg, 0, sp [2]->dreg);
ins->opcode = OP_STOREV_MEMBASE;
} else if (sp [1]->opcode == OP_ICONST) {
int array_reg = sp [0]->dreg;
int index_reg = sp [1]->dreg;
int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + MONO_STRUCT_OFFSET (MonoArray, vector);
if (SIZEOF_REGISTER == 8 && COMPILE_LLVM (cfg) && sp [1]->inst_c0 < 0)
MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, index_reg, index_reg);
if (safety_checks)
MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, m_class_get_byval_arg (klass), array_reg, offset, sp [2]->dreg);
} else {
MonoInst *addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], safety_checks, FALSE);
if (!mini_debug_options.weak_memory_model && mini_class_is_reference (klass))
mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, m_class_get_byval_arg (klass), addr->dreg, 0, sp [2]->dreg);
if (mini_class_is_reference (klass))
mini_emit_write_barrier (cfg, addr, sp [2]);
}
return ins;
}
}
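/*
 * mini_emit_memory_barrier:
 *
 * Emit a memory barrier instruction of kind KIND.
 */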
MonoInst*
mini_emit_memory_barrier (MonoCompile *cfg, int kind)
{
MonoInst *ins = NULL;
MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
MONO_ADD_INS (cfg->cbb, ins);
ins->backend.memory_barrier_kind = kind;
return ins;
}
/*
* This entry point could be used later for arbitrary method
* redirection.
*/
inline static MonoInst*
mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
MonoMethodSignature *signature, MonoInst **args, MonoInst *this_ins)
{
if (method->klass == mono_defaults.string_class) {
/* managed string allocation support */
if (strcmp (method->name, "FastAllocateString") == 0) {
MonoInst *iargs [2];
MonoVTable *vtable = mono_class_vtable_checked (method->klass, cfg->error);
MonoMethod *managed_alloc = NULL;
mono_error_assert_ok (cfg->error); /* Should not fail since it is System.String */
#ifndef MONO_CROSS_COMPILE
managed_alloc = mono_gc_get_managed_allocator (method->klass, FALSE, FALSE);
#endif
if (!managed_alloc)
return NULL;
EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
iargs [1] = args [0];
return mono_emit_method_call (cfg, managed_alloc, iargs, this_ins);
}
}
return NULL;
}
static void
mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
{
MonoInst *store, *temp;
int i;
for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
/*
* FIXME: We should use *args++ = sp [0], but that would mean the arg
* would be different from the MonoInsts used to represent arguments, and
* the ldelema implementation can't deal with that.
* Solution: When ldelema is used on an inline argument, create a var for
* it, emit ldelema on that var, and emit the saving code below in
* inline_method () if needed.
*/
temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
cfg->args [i] = temp;
/* This uses cfg->args [i] which is set by the preceding line */
EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
store->cil_code = sp [0]->cil_code;
sp++;
}
}
#define MONO_INLINE_CALLED_LIMITED_METHODS 1
#define MONO_INLINE_CALLER_LIMITED_METHODS 1
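/*
 * When enabled, the MONO_INLINE_CALLED_METHOD_NAME_LIMIT and
 * MONO_INLINE_CALLER_METHOD_NAME_LIMIT env vars restrict inlining to
 * methods whose full name starts with the given prefix, which can be
 * used to narrow down inliner problems.
 */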
#if (MONO_INLINE_CALLED_LIMITED_METHODS)
static gboolean
check_inline_called_method_name_limit (MonoMethod *called_method)
{
int strncmp_result;
static const char *limit = NULL;
if (limit == NULL) {
const char *limit_string = g_getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
if (limit_string != NULL)
limit = limit_string;
else
limit = "";
}
if (limit [0] != '\0') {
char *called_method_name = mono_method_full_name (called_method, TRUE);
strncmp_result = strncmp (called_method_name, limit, strlen (limit));
g_free (called_method_name);
//return (strncmp_result <= 0);
return (strncmp_result == 0);
} else {
return TRUE;
}
}
#endif
#if (MONO_INLINE_CALLER_LIMITED_METHODS)
static gboolean
check_inline_caller_method_name_limit (MonoMethod *caller_method)
{
int strncmp_result;
static const char *limit = NULL;
if (limit == NULL) {
const char *limit_string = g_getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
if (limit_string != NULL) {
limit = limit_string;
} else {
limit = "";
}
}
if (limit [0] != '\0') {
char *caller_method_name = mono_method_full_name (caller_method, TRUE);
strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
g_free (caller_method_name);
//return (strncmp_result <= 0);
return (strncmp_result == 0);
} else {
return TRUE;
}
}
#endif
void
mini_emit_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
{
static double r8_0 = 0.0;
static float r4_0 = 0.0;
MonoInst *ins;
int t;
rtype = mini_get_underlying_type (rtype);
t = rtype->type;
if (m_type_is_byref (rtype)) {
MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
} else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
} else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
MONO_EMIT_NEW_I8CONST (cfg, dreg, 0);
} else if (cfg->r4fp && t == MONO_TYPE_R4) {
MONO_INST_NEW (cfg, ins, OP_R4CONST);
ins->type = STACK_R4;
ins->inst_p0 = (void*)&r4_0;
ins->dreg = dreg;
MONO_ADD_INS (cfg->cbb, ins);
} else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
MONO_INST_NEW (cfg, ins, OP_R8CONST);
ins->type = STACK_R8;
ins->inst_p0 = (void*)&r8_0;
ins->dreg = dreg;
MONO_ADD_INS (cfg->cbb, ins);
} else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type_internal (rtype));
} else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (rtype)) {
MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type_internal (rtype));
} else {
MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
}
}
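/*
 * Emit OP_DUMMY_* initializations which keep the IR consistent without
 * actually writing to DREG; falls back to a real initialization for types
 * the dummy opcodes cannot represent.
 */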
static void
emit_dummy_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
{
int t;
rtype = mini_get_underlying_type (rtype);
t = rtype->type;
if (m_type_is_byref (rtype)) {
MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_PCONST);
} else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_ICONST);
} else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_I8CONST);
} else if (cfg->r4fp && t == MONO_TYPE_R4) {
MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_R4CONST);
} else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_R8CONST);
} else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
} else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (rtype)) {
MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
} else {
mini_emit_init_rvar (cfg, dreg, rtype);
}
}
/* If INIT is FALSE, emit dummy initialization statements to keep the IR valid */
static void
emit_init_local (MonoCompile *cfg, int local, MonoType *type, gboolean init)
{
MonoInst *var = cfg->locals [local];
if (COMPILE_SOFT_FLOAT (cfg)) {
MonoInst *store;
int reg = alloc_dreg (cfg, (MonoStackType)var->type);
mini_emit_init_rvar (cfg, reg, type);
EMIT_NEW_LOCSTORE (cfg, store, local, cfg->cbb->last_ins);
} else {
if (init)
mini_emit_init_rvar (cfg, var->dreg, type);
else
emit_dummy_init_rvar (cfg, var->dreg, type);
}
}
int
mini_inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp, guchar *ip, guint real_offset, gboolean inline_always)
{
return inline_method (cfg, cmethod, fsig, sp, ip, real_offset, inline_always, NULL);
}
/*
* inline_method:
*
* Return the cost of inlining CMETHOD, or zero if it should not be inlined.
*/
static int
inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
guchar *ip, guint real_offset, gboolean inline_always, gboolean *is_empty)
{
ERROR_DECL (error);
MonoInst *ins, *rvar = NULL;
MonoMethodHeader *cheader;
MonoBasicBlock *ebblock, *sbblock;
int i, costs;
MonoInst **prev_locals, **prev_args;
MonoType **prev_arg_types;
guint prev_real_offset;
GHashTable *prev_cbb_hash;
MonoBasicBlock **prev_cil_offset_to_bb;
MonoBasicBlock *prev_cbb;
const guchar *prev_ip;
guchar *prev_cil_start;
guint32 prev_cil_offset_to_bb_len;
MonoMethod *prev_current_method;
MonoGenericContext *prev_generic_context;
gboolean ret_var_set, prev_ret_var_set, prev_disable_inline, virtual_ = FALSE;
g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
#if (MONO_INLINE_CALLED_LIMITED_METHODS)
if ((! inline_always) && ! check_inline_called_method_name_limit (cmethod))
return 0;
#endif
#if (MONO_INLINE_CALLER_LIMITED_METHODS)
if ((! inline_always) && ! check_inline_caller_method_name_limit (cfg->method))
return 0;
#endif
if (!fsig)
fsig = mono_method_signature_internal (cmethod);
if (cfg->verbose_level > 2)
printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
if (!cmethod->inline_info) {
cfg->stat_inlineable_methods++;
cmethod->inline_info = 1;
}
if (is_empty)
*is_empty = FALSE;
/* allocate local variables */
cheader = mono_method_get_header_checked (cmethod, error);
if (!cheader) {
if (inline_always) {
mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
mono_error_move (cfg->error, error);
} else {
mono_error_cleanup (error);
}
return 0;
}
if (is_empty && cheader->code_size == 1 && cheader->code [0] == CEE_RET)
*is_empty = TRUE;
/* allocate space to store the return value */
if (!MONO_TYPE_IS_VOID (fsig->ret)) {
rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
}
prev_locals = cfg->locals;
cfg->locals = (MonoInst **)mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
for (i = 0; i < cheader->num_locals; ++i)
cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
/* allocate start and end blocks */
/* This is needed so that, if the inline is aborted, we can clean up */
NEW_BBLOCK (cfg, sbblock);
sbblock->real_offset = real_offset;
NEW_BBLOCK (cfg, ebblock);
ebblock->block_num = cfg->num_bblocks++;
ebblock->real_offset = real_offset;
prev_args = cfg->args;
prev_arg_types = cfg->arg_types;
prev_ret_var_set = cfg->ret_var_set;
prev_real_offset = cfg->real_offset;
prev_cbb_hash = cfg->cbb_hash;
prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
prev_cil_start = cfg->cil_start;
prev_ip = cfg->ip;
prev_cbb = cfg->cbb;
prev_current_method = cfg->current_method;
prev_generic_context = cfg->generic_context;
prev_disable_inline = cfg->disable_inline;
cfg->ret_var_set = FALSE;
cfg->inline_depth ++;
if (ip && *ip == CEE_CALLVIRT && !(cmethod->flags & METHOD_ATTRIBUTE_STATIC))
virtual_ = TRUE;
costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, sp, real_offset, virtual_);
ret_var_set = cfg->ret_var_set;
cfg->real_offset = prev_real_offset;
cfg->cbb_hash = prev_cbb_hash;
cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
cfg->cil_start = prev_cil_start;
cfg->ip = prev_ip;
cfg->locals = prev_locals;
cfg->args = prev_args;
cfg->arg_types = prev_arg_types;
cfg->current_method = prev_current_method;
cfg->generic_context = prev_generic_context;
cfg->ret_var_set = prev_ret_var_set;
cfg->disable_inline = prev_disable_inline;
cfg->inline_depth --;
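/* Accept the inline if it is cheap (cost < 60), forced, or the callee is marked AggressiveInlining */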
if ((costs >= 0 && costs < 60) || inline_always || (costs >= 0 && (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))) {
if (cfg->verbose_level > 2)
printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
mono_error_assert_ok (cfg->error);
cfg->stat_inlined_methods++;
/* always add some code to avoid block split failures */
MONO_INST_NEW (cfg, ins, OP_NOP);
MONO_ADD_INS (prev_cbb, ins);
prev_cbb->next_bb = sbblock;
link_bblock (cfg, prev_cbb, sbblock);
/*
* Get rid of the begin and end bblocks if possible to aid local
* optimizations.
*/
if (prev_cbb->out_count == 1)
mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
MonoBasicBlock *prev = ebblock->in_bb [0];
if (prev->next_bb == ebblock) {
mono_merge_basic_blocks (cfg, prev, ebblock);
cfg->cbb = prev;
if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
mono_merge_basic_blocks (cfg, prev_cbb, prev);
cfg->cbb = prev_cbb;
}
} else {
/* There could be a bblock after 'prev', and making 'prev' the current bb could cause problems */
cfg->cbb = ebblock;
}
} else {
/*
* It's possible that the rvar is set in some predecessor bblocks, but not in others.
* (#1835).
*/
if (rvar) {
MonoBasicBlock *bb;
for (i = 0; i < ebblock->in_count; ++i) {
bb = ebblock->in_bb [i];
if (bb->last_ins && bb->last_ins->opcode == OP_NOT_REACHED) {
cfg->cbb = bb;
mini_emit_init_rvar (cfg, rvar->dreg, fsig->ret);
}
}
}
cfg->cbb = ebblock;
}
if (rvar) {
/*
* If the inlined method contains only a throw, then the ret var is not
* set, so set it to a dummy value.
*/
if (!ret_var_set)
mini_emit_init_rvar (cfg, rvar->dreg, fsig->ret);
EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
*sp++ = ins;
}
cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
return costs + 1;
} else {
if (cfg->verbose_level > 2) {
const char *msg = mono_error_get_message (cfg->error);
printf ("INLINE ABORTED %s (cost %d) %s\n", mono_method_full_name (cmethod, TRUE), costs, msg ? msg : "");
}
cfg->exception_type = MONO_EXCEPTION_NONE;
clear_cfg_error (cfg);
/* This gets rid of the newly added bblocks */
cfg->cbb = prev_cbb;
}
cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
return 0;
}
/*
* Some of these comments may well be out-of-date.
* Design decisions: we do a single pass over the IL code (and we do bblock
* splitting/merging in the few cases when it's required: a back jump to an IL
* address that was not already seen as a bblock starting point).
* Code is validated as we go (full verification is still better left to metadata/verify.c).
* Complex operations are decomposed into simpler ones right away. We need to let the
* arch-specific code peek and poke inside this process somehow (except when the
* optimizations can take advantage of the full semantic info of coarse opcodes).
* All the opcodes of the form opcode.s are 'normalized' to opcode.
* MonoInst->opcode initially is the IL opcode or some simplification of that
* (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
* opcode with value bigger than OP_LAST.
* At this point the IR can be handed over to an interpreter, a dumb code generator
* or to the optimizing code generator that will translate it to SSA form.
*
* Profile-directed optimizations.
* We may compile by default with few or no optimizations and instrument the code
* or the user may indicate what methods to optimize the most either in a config file
* or through repeated runs where the compiler applies offline the optimizations to
* each method and then decides if it was worth it.
*/
#define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
#define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
#define CHECK_STACK_OVF() if (((sp - stack_start) + 1) > header->max_stack) UNVERIFIED
#define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
#define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
#define CHECK_OPSIZE(size) if ((size) < 1 || ip + (size) > end) UNVERIFIED
#define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
#define CHECK_TYPELOAD(klass) if (!(klass) || mono_class_has_failure (klass)) TYPE_LOAD_ERROR ((klass))
/* opcode value offset from the short form (br.s) to the long form (br) of branch opcodes */
#define BIG_BRANCH_OFFSET 13
static gboolean
ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
{
MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
return b == NULL || b == bb;
}
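/*
 * get_basic_blocks:
 *
 *   Pre-pass over the IL between START and END which creates a basic block
 * at every branch target (and after every branch), and marks bblocks ending
 * in a throw as out-of-line. Returns 0 on success, 1 on invalid IL, storing
 * the failing ip into *POS.
 */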
static int
get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, guchar *start, guchar *end, guchar **pos)
{
guchar *ip = start;
guchar *target;
int i;
guint cli_addr;
MonoBasicBlock *bblock;
const MonoOpcode *opcode;
while (ip < end) {
cli_addr = ip - start;
i = mono_opcode_value ((const guint8 **)&ip, end);
if (i < 0)
UNVERIFIED;
opcode = &mono_opcodes [i];
switch (opcode->argument) {
case MonoInlineNone:
ip++;
break;
case MonoInlineString:
case MonoInlineType:
case MonoInlineField:
case MonoInlineMethod:
case MonoInlineTok:
case MonoInlineSig:
case MonoShortInlineR:
case MonoInlineI:
ip += 5;
break;
case MonoInlineVar:
ip += 3;
break;
case MonoShortInlineVar:
case MonoShortInlineI:
ip += 2;
break;
case MonoShortInlineBrTarget:
target = start + cli_addr + 2 + (signed char)ip [1];
GET_BBLOCK (cfg, bblock, target);
ip += 2;
if (ip < end)
GET_BBLOCK (cfg, bblock, ip);
break;
case MonoInlineBrTarget:
target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
GET_BBLOCK (cfg, bblock, target);
ip += 5;
if (ip < end)
GET_BBLOCK (cfg, bblock, ip);
break;
case MonoInlineSwitch: {
guint32 n = read32 (ip + 1);
guint32 j;
ip += 5;
cli_addr += 5 + 4 * n;
target = start + cli_addr;
GET_BBLOCK (cfg, bblock, target);
for (j = 0; j < n; ++j) {
target = start + cli_addr + (gint32)read32 (ip);
GET_BBLOCK (cfg, bblock, target);
ip += 4;
}
break;
}
case MonoInlineR:
case MonoInlineI8:
ip += 9;
break;
default:
g_assert_not_reached ();
}
if (i == CEE_THROW) {
guchar *bb_start = ip - 1;
/* Find the start of the bblock containing the throw */
bblock = NULL;
while ((bb_start >= start) && !bblock) {
bblock = cfg->cil_offset_to_bb [(bb_start) - start];
bb_start --;
}
if (bblock)
bblock->out_of_line = 1;
}
}
return 0;
unverified:
exception_exit:
*pos = ip;
return 1;
}
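/*
 * Resolve the method referenced by TOKEN, looking through the wrapper data if M
 * is a wrapper. Open constructed types are allowed in the result here;
 * mini_get_method () below additionally rejects them outside of gshared compilation.
 */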
static MonoMethod *
mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context, MonoError *error)
{
MonoMethod *method;
error_init (error);
if (m->wrapper_type != MONO_WRAPPER_NONE) {
method = (MonoMethod *)mono_method_get_wrapper_data (m, token);
if (context) {
method = mono_class_inflate_generic_method_checked (method, context, error);
}
} else {
method = mono_get_method_checked (m_class_get_image (m->klass), token, klass, context, error);
}
return method;
}
static MonoMethod *
mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
{
ERROR_DECL (error);
MonoMethod *method = mini_get_method_allow_open (m, token, klass, context, cfg ? cfg->error : error);
if (method && cfg && !cfg->gshared && mono_class_is_open_constructed_type (m_class_get_byval_arg (method->klass))) {
mono_error_set_bad_image (cfg->error, m_class_get_image (cfg->method->klass), "Method with open type while not compiling gshared");
method = NULL;
}
if (!method && !cfg)
mono_error_cleanup (error); /* FIXME don't swallow the error */
return method;
}
static MonoMethodSignature*
mini_get_signature (MonoMethod *method, guint32 token, MonoGenericContext *context, MonoError *error)
{
MonoMethodSignature *fsig;
error_init (error);
if (method->wrapper_type != MONO_WRAPPER_NONE) {
fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
} else {
fsig = mono_metadata_parse_signature_checked (m_class_get_image (method->klass), token, error);
return_val_if_nok (error, NULL);
}
if (context) {
fsig = mono_inflate_generic_signature(fsig, context, error);
}
return fsig;
}
/*
* Return the original method if a wrapper is specified. We can only access
* the custom attributes from the original method.
*/
static MonoMethod*
get_original_method (MonoMethod *method)
{
if (method->wrapper_type == MONO_WRAPPER_NONE)
return method;
/* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
return NULL;
/* in other cases we need to find the original method */
return mono_marshal_method_from_wrapper (method);
}
static guchar*
il_read_op (guchar *ip, guchar *end, guchar first_byte, MonoOpcodeEnum desired_il_op)
// If ip is desired_il_op, return the next ip, else NULL.
{
if (G_LIKELY (ip < end) && G_UNLIKELY (*ip == first_byte)) {
MonoOpcodeEnum il_op = MonoOpcodeEnum_Invalid;
// mono_opcode_value_and_size updates ip, but not in the expected way.
const guchar *temp_ip = ip;
const int size = mono_opcode_value_and_size (&temp_ip, end, &il_op);
return (G_LIKELY (size > 0) && G_UNLIKELY (il_op == desired_il_op)) ? (ip + size) : NULL;
}
return NULL;
}
static guchar*
il_read_op_and_token (guchar *ip, guchar *end, guchar first_byte, MonoOpcodeEnum desired_il_op, guint32 *token)
{
ip = il_read_op (ip, end, first_byte, desired_il_op);
if (ip)
*token = read32 (ip - 4); // could be +1 or +2 from start
return ip;
}
static guchar*
il_read_branch_and_target (guchar *ip, guchar *end, guchar first_byte, MonoOpcodeEnum desired_il_op, int size, guchar **target)
{
ip = il_read_op (ip, end, first_byte, desired_il_op);
if (ip) {
gint32 delta = 0;
switch (size) {
case 1:
delta = (signed char)ip [-1];
break;
case 4:
delta = (gint32)read32 (ip - 4);
break;
}
// FIXME verify it is within the function and start of an instruction.
*target = ip + delta;
return ip;
}
return NULL;
}
#define il_read_brtrue(ip, end, target) (il_read_branch_and_target (ip, end, CEE_BRTRUE, MONO_CEE_BRTRUE, 4, target))
#define il_read_brtrue_s(ip, end, target) (il_read_branch_and_target (ip, end, CEE_BRTRUE_S, MONO_CEE_BRTRUE_S, 1, target))
#define il_read_brfalse(ip, end, target) (il_read_branch_and_target (ip, end, CEE_BRFALSE, MONO_CEE_BRFALSE, 4, target))
#define il_read_brfalse_s(ip, end, target) (il_read_branch_and_target (ip, end, CEE_BRFALSE_S, MONO_CEE_BRFALSE_S, 1, target))
#define il_read_dup(ip, end) (il_read_op (ip, end, CEE_DUP, MONO_CEE_DUP))
#define il_read_newobj(ip, end, token) (il_read_op_and_token (ip, end, CEE_NEW_OBJ, MONO_CEE_NEWOBJ, token))
#define il_read_ldtoken(ip, end, token) (il_read_op_and_token (ip, end, CEE_LDTOKEN, MONO_CEE_LDTOKEN, token))
#define il_read_call(ip, end, token) (il_read_op_and_token (ip, end, CEE_CALL, MONO_CEE_CALL, token))
#define il_read_callvirt(ip, end, token) (il_read_op_and_token (ip, end, CEE_CALLVIRT, MONO_CEE_CALLVIRT, token))
#define il_read_initobj(ip, end, token) (il_read_op_and_token (ip, end, CEE_PREFIX1, MONO_CEE_INITOBJ, token))
#define il_read_constrained(ip, end, token) (il_read_op_and_token (ip, end, CEE_PREFIX1, MONO_CEE_CONSTRAINED_, token))
#define il_read_unbox_any(ip, end, token) (il_read_op_and_token (ip, end, CEE_UNBOX_ANY, MONO_CEE_UNBOX_ANY, token))
/*
* Check that the IL instructions at ip are the array initialization
* sequence and return the pointer to the data and the size.
*/
static const char*
initialize_array_data (MonoCompile *cfg, MonoMethod *method, gboolean aot, guchar *ip,
guchar *end, MonoClass *klass, guint32 len, int *out_size,
guint32 *out_field_token, MonoOpcodeEnum *il_op, guchar **next_ip)
{
/*
* newarr[System.Int32]
* dup
* ldtoken field valuetype ...
* call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
*/
guint32 token;
guint32 field_token;
if ((ip = il_read_dup (ip, end))
&& ip_in_bb (cfg, cfg->cbb, ip)
&& (ip = il_read_ldtoken (ip, end, &field_token))
&& IS_FIELD_DEF (field_token)
&& ip_in_bb (cfg, cfg->cbb, ip)
&& (ip = il_read_call (ip, end, &token))) {
ERROR_DECL (error);
guint32 rva;
const char *data_ptr;
int size = 0;
MonoMethod *cmethod;
MonoClass *dummy_class;
MonoClassField *field = mono_field_from_token_checked (m_class_get_image (method->klass), field_token, &dummy_class, NULL, error);
int dummy_align;
if (!field) {
mono_error_cleanup (error); /* FIXME don't swallow the error */
return NULL;
}
*out_field_token = field_token;
cmethod = mini_get_method (NULL, method, token, NULL, NULL);
if (!cmethod)
return NULL;
if (strcmp (cmethod->name, "InitializeArray") || strcmp (m_class_get_name (cmethod->klass), "RuntimeHelpers") || m_class_get_image (cmethod->klass) != mono_defaults.corlib)
return NULL;
switch (mini_get_underlying_type (m_class_get_byval_arg (klass))->type) {
case MONO_TYPE_I1:
case MONO_TYPE_U1:
size = 1; break;
/* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
#if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
case MONO_TYPE_I2:
case MONO_TYPE_U2:
size = 2; break;
case MONO_TYPE_I4:
case MONO_TYPE_U4:
case MONO_TYPE_R4:
size = 4; break;
case MONO_TYPE_R8:
case MONO_TYPE_I8:
case MONO_TYPE_U8:
size = 8; break;
#endif
default:
return NULL;
}
size *= len;
if (size > mono_type_size (field->type, &dummy_align))
return NULL;
*out_size = size;
/*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
MonoImage *method_klass_image = m_class_get_image (method->klass);
if (!image_is_dynamic (method_klass_image)) {
guint32 field_index = mono_metadata_token_index (field_token);
mono_metadata_field_info (method_klass_image, field_index - 1, NULL, &rva, NULL);
data_ptr = mono_image_rva_map (method_klass_image, rva);
/*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
/* for aot code we do the lookup on load */
if (aot && data_ptr)
data_ptr = (const char *)GUINT_TO_POINTER (rva);
} else {
/*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
g_assert (!aot);
data_ptr = mono_field_get_data (field);
}
if (!data_ptr)
return NULL;
*il_op = MONO_CEE_CALL;
*next_ip = ip;
return data_ptr;
}
return NULL;
}
static void
set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, guchar *ip)
{
ERROR_DECL (error);
char *method_fname = mono_method_full_name (method, TRUE);
char *method_code;
MonoMethodHeader *header = mono_method_get_header_checked (method, error);
if (!header) {
method_code = g_strdup_printf ("could not parse method body due to %s", mono_error_get_message (error));
mono_error_cleanup (error);
} else if (header->code_size == 0)
method_code = g_strdup ("method body is empty.");
else
method_code = mono_disasm_code_one (NULL, method, ip, NULL);
mono_cfg_set_exception_invalid_program (cfg, g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code));
g_free (method_fname);
g_free (method_code);
cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
}
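/*
 * Return the conversion opcode which has to be applied before storing a value of
 * type TYPE into a local/argument, or 0 if no coercion is needed. Small integer
 * types need an explicit truncation since values on the evaluation stack are at
 * least 32 bits wide.
 */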
guint32
mono_type_to_stloc_coerce (MonoType *type)
{
if (m_type_is_byref (type))
return 0;
type = mini_get_underlying_type (type);
handle_enum:
switch (type->type) {
case MONO_TYPE_I1:
return OP_ICONV_TO_I1;
case MONO_TYPE_U1:
return OP_ICONV_TO_U1;
case MONO_TYPE_I2:
return OP_ICONV_TO_I2;
case MONO_TYPE_U2:
return OP_ICONV_TO_U2;
case MONO_TYPE_I4:
case MONO_TYPE_U4:
case MONO_TYPE_I:
case MONO_TYPE_U:
case MONO_TYPE_PTR:
case MONO_TYPE_FNPTR:
case MONO_TYPE_CLASS:
case MONO_TYPE_STRING:
case MONO_TYPE_OBJECT:
case MONO_TYPE_SZARRAY:
case MONO_TYPE_ARRAY:
case MONO_TYPE_I8:
case MONO_TYPE_U8:
case MONO_TYPE_R4:
case MONO_TYPE_R8:
case MONO_TYPE_TYPEDBYREF:
case MONO_TYPE_GENERICINST:
return 0;
case MONO_TYPE_VALUETYPE:
if (m_class_is_enumtype (type->data.klass)) {
type = mono_class_enum_basetype_internal (type->data.klass);
goto handle_enum;
}
return 0;
case MONO_TYPE_VAR:
case MONO_TYPE_MVAR: //TODO I believe we don't need to handle gsharedvt as there won't be a match and, for example, u1 is not covariant to u32
return 0;
default:
g_error ("unknown type 0x%02x in mono_type_to_stloc_coerce", type->type);
}
return -1;
}
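/* Store *sp into local N, first emitting the coercion returned by mono_type_to_stloc_coerce () if one is needed. */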
static void
emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
{
MonoInst *ins;
guint32 coerce_op = mono_type_to_stloc_coerce (header->locals [n]);
if (coerce_op) {
if (cfg->cbb->last_ins == sp [0] && sp [0]->opcode == coerce_op) {
if (cfg->verbose_level > 2)
printf ("Found existing coercing is enough for stloc\n");
} else {
MONO_INST_NEW (cfg, ins, coerce_op);
ins->dreg = alloc_ireg (cfg);
ins->sreg1 = sp [0]->dreg;
ins->type = STACK_I4;
ins->klass = mono_class_from_mono_type_internal (header->locals [n]);
MONO_ADD_INS (cfg->cbb, ins);
*sp = mono_decompose_opcode (cfg, ins);
}
}
guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
if (!cfg->deopt && (opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
/* Optimize reg-reg moves away */
/*
* Can't optimize other opcodes, since sp[0] might point to
* the last ins of a decomposed opcode.
*/
sp [0]->dreg = (cfg)->locals [n]->dreg;
} else {
EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
}
}
static void
emit_starg_ir (MonoCompile *cfg, MonoInst **sp, int n)
{
MonoInst *ins;
guint32 coerce_op = mono_type_to_stloc_coerce (cfg->arg_types [n]);
if (coerce_op) {
if (cfg->cbb->last_ins == sp [0] && sp [0]->opcode == coerce_op) {
if (cfg->verbose_level > 2)
printf ("Found existing coercing is enough for starg\n");
} else {
MONO_INST_NEW (cfg, ins, coerce_op);
ins->dreg = alloc_ireg (cfg);
ins->sreg1 = sp [0]->dreg;
ins->type = STACK_I4;
ins->klass = mono_class_from_mono_type_internal (cfg->arg_types [n]);
MONO_ADD_INS (cfg->cbb, ins);
*sp = mono_decompose_opcode (cfg, ins);
}
}
EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
}
/*
* ldloca inhibits many optimizations, so try to get rid of it in common
* cases.
*/
static guchar *
emit_optimized_ldloca_ir (MonoCompile *cfg, guchar *ip, guchar *end, int local)
{
guint32 token;
MonoClass *klass;
MonoType *type;
guchar *start = ip;
if ((ip = il_read_initobj (ip, end, &token)) && ip_in_bb (cfg, cfg->cbb, start + 1)) {
/* From the INITOBJ case */
klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
CHECK_TYPELOAD (klass);
type = mini_get_underlying_type (m_class_get_byval_arg (klass));
emit_init_local (cfg, local, type, TRUE);
return ip;
}
exception_exit:
return NULL;
}
static MonoInst*
handle_call_res_devirt (MonoCompile *cfg, MonoMethod *cmethod, MonoInst *call_res)
{
/*
* Devirt EqualityComparer.Default.Equals () calls for some types.
* The corefx code expects these calls to be devirtualized.
* This depends on the implementation of EqualityComparer.Default, which is
* in mcs/class/referencesource/mscorlib/system/collections/generic/equalitycomparer.cs
*/
if (m_class_get_image (cmethod->klass) == mono_defaults.corlib &&
!strcmp (m_class_get_name (cmethod->klass), "EqualityComparer`1") &&
!strcmp (cmethod->name, "get_Default")) {
MonoType *param_type = mono_class_get_generic_class (cmethod->klass)->context.class_inst->type_argv [0];
MonoClass *inst;
MonoGenericContext ctx;
ERROR_DECL (error);
memset (&ctx, 0, sizeof (ctx));
MonoType *args [ ] = { param_type };
ctx.class_inst = mono_metadata_get_generic_inst (1, args);
inst = mono_class_inflate_generic_class_checked (mono_class_get_iequatable_class (), &ctx, error);
mono_error_assert_ok (error);
/* EqualityComparer<T>.Default returns specific types depending on T */
// FIXME: Add more
/* 1. Implements IEquatable<T> */
/*
* Can't use this for string/byte as it might use a different comparer:
*
* // Specialize type byte for performance reasons
* if (t == typeof(byte)) {
* return (EqualityComparer<T>)(object)(new ByteEqualityComparer());
* }
* #if MOBILE
* // Breaks .net serialization compatibility
* if (t == typeof (string))
* return (EqualityComparer<T>)(object)new InternalStringComparer ();
* #endif
*/
if (mono_class_is_assignable_from_internal (inst, mono_class_from_mono_type_internal (param_type)) && param_type->type != MONO_TYPE_U1 && param_type->type != MONO_TYPE_STRING) {
MonoInst *typed_objref;
MonoClass *gcomparer_inst;
memset (&ctx, 0, sizeof (ctx));
args [0] = param_type;
ctx.class_inst = mono_metadata_get_generic_inst (1, args);
MonoClass *gcomparer = mono_class_get_geqcomparer_class ();
g_assert (gcomparer);
gcomparer_inst = mono_class_inflate_generic_class_checked (gcomparer, &ctx, error);
if (is_ok (error)) {
MONO_INST_NEW (cfg, typed_objref, OP_TYPED_OBJREF);
typed_objref->type = STACK_OBJ;
typed_objref->dreg = alloc_ireg_ref (cfg);
typed_objref->sreg1 = call_res->dreg;
typed_objref->klass = gcomparer_inst;
MONO_ADD_INS (cfg->cbb, typed_objref);
call_res = typed_objref;
/* Force decompose */
cfg->flags |= MONO_CFG_NEEDS_DECOMPOSE;
cfg->cbb->needs_decompose = TRUE;
}
}
}
return call_res;
}
static gboolean
is_exception_class (MonoClass *klass)
{
if (G_LIKELY (m_class_get_supertypes (klass)))
return mono_class_has_parent_fast (klass, mono_defaults.exception_class);
while (klass) {
if (klass == mono_defaults.exception_class)
return TRUE;
klass = m_class_get_parent (klass);
}
return FALSE;
}
/*
* is_jit_optimizer_disabled:
*
* Determine whether M's assembly has a DebuggableAttribute with the
* IsJITOptimizerDisabled flag set.
*/
static gboolean
is_jit_optimizer_disabled (MonoMethod *m)
{
MonoAssembly *ass = m_class_get_image (m->klass)->assembly;
g_assert (ass);
if (ass->jit_optimizer_disabled_inited)
return ass->jit_optimizer_disabled;
return mono_assembly_is_jit_optimizer_disabled (ass);
}
gboolean
mono_is_supported_tailcall_helper (gboolean value, const char *svalue)
{
if (!value)
mono_tailcall_print ("%s %s\n", __func__, svalue);
return value;
}
static gboolean
mono_is_not_supported_tailcall_helper (gboolean value, const char *svalue, MonoMethod *method, MonoMethod *cmethod)
{
// Return value, printing if it inhibits tailcall.
if (value && mono_tailcall_print_enabled ()) {
const char *lparen = strchr (svalue, ' ') ? "(" : "";
const char *rparen = *lparen ? ")" : "";
mono_tailcall_print ("%s %s -> %s %s%s%s:%d\n", __func__, method->name, cmethod->name, lparen, svalue, rparen, value);
}
return value;
}
#define IS_NOT_SUPPORTED_TAILCALL(x) (mono_is_not_supported_tailcall_helper((x), #x, method, cmethod))
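/*
 * is_supported_tailcall:
 *
 *   Decide whether an explicit tail. call/callvirt (regular form) and the calli
 * form can be compiled as real tailcalls. Returns the decision for the regular
 * form and stores the calli decision into *PTAILCALL_CALLI; each failed check
 * is traced via mono_tailcall_print ().
 */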
static gboolean
is_supported_tailcall (MonoCompile *cfg, const guint8 *ip, MonoMethod *method, MonoMethod *cmethod, MonoMethodSignature *fsig,
gboolean virtual_, gboolean extra_arg, gboolean *ptailcall_calli)
{
// Some checks apply to "regular", some to "calli", some to both.
// To ease burden on caller, always compute regular and calli.
gboolean tailcall = TRUE;
gboolean tailcall_calli = TRUE;
if (IS_NOT_SUPPORTED_TAILCALL (virtual_ && !cfg->backend->have_op_tailcall_membase))
tailcall = FALSE;
if (IS_NOT_SUPPORTED_TAILCALL (!cfg->backend->have_op_tailcall_reg))
tailcall_calli = FALSE;
if (!tailcall && !tailcall_calli)
goto exit;
// FIXME in calli, there is no type for the this parameter,
// so we assume it might be valuetype; in future we should issue a range
// check, so rule out pointing to frame (for other reference parameters also)
if ( IS_NOT_SUPPORTED_TAILCALL (cmethod && fsig->hasthis && m_class_is_valuetype (cmethod->klass)) // This might point to the current method's stack. Emit range check?
|| IS_NOT_SUPPORTED_TAILCALL (cmethod && (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL))
|| IS_NOT_SUPPORTED_TAILCALL (fsig->pinvoke) // i.e. if !cmethod (calli)
|| IS_NOT_SUPPORTED_TAILCALL (cfg->method->save_lmf)
|| IS_NOT_SUPPORTED_TAILCALL (!cmethod && fsig->hasthis) // FIXME could be valuetype to current frame; range check
|| IS_NOT_SUPPORTED_TAILCALL (cmethod && cmethod->wrapper_type && cmethod->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD)
// http://www.mono-project.com/docs/advanced/runtime/docs/generic-sharing/
//
// 1. Non-generic non-static methods of reference types have access to the
// RGCTX via the "this" argument (this->vtable->rgctx).
// 2. a. Non-generic static methods of reference types and b. non-generic methods
// of value types need to be passed a pointer to the caller's class's VTable in the MONO_ARCH_RGCTX_REG register.
// 3. Generic methods need to be passed a pointer to the MRGCTX in the MONO_ARCH_RGCTX_REG register
//
// That is what vtable_arg is here (always?).
//
// Passing vtable_arg uses (requires?) a volatile non-parameter register,
// such as AMD64 rax, r10, r11, or the return register on many architectures.
// ARM32 does not always clearly have such a register. ARM32's return register
// is a parameter register.
// iPhone could use r9 except on old systems. iPhone/ARM32 is not particularly
// important. Linux/arm32 is less clear.
// ARM32's scratch r12 might work but only with much collateral change.
//
// Imagine F1 calls F2, and F2 tailcalls F3.
// F2 and F3 are managed. F1 is native.
// Without a tailcall, F2 can save and restore everything needed for F1.
// However if the extra parameter were in a non-volatile, such as ARM32 V5/R8,
// F3 cannot easily restore it for F1, in the current scheme, where the
// extra parameter is not merely an extra parameter, but is passed
// "outside of the ABI".
//
// If all native to managed transitions are intercepted and wrapped (w/o tailcall),
// then they can preserve this register and the rest of the managed callgraph
// treat it as volatile.
//
// Interface method dispatch has the same problem (imt_arg).
|| IS_NOT_SUPPORTED_TAILCALL (extra_arg && !cfg->backend->have_volatile_non_param_register)
|| IS_NOT_SUPPORTED_TAILCALL (cfg->gsharedvt)
) {
tailcall_calli = FALSE;
tailcall = FALSE;
goto exit;
}
for (int i = 0; i < fsig->param_count; ++i) {
if (IS_NOT_SUPPORTED_TAILCALL (m_type_is_byref (fsig->params [i]) || fsig->params [i]->type == MONO_TYPE_PTR || fsig->params [i]->type == MONO_TYPE_FNPTR)) {
tailcall_calli = FALSE;
tailcall = FALSE; // These can point to the current method's stack. Emit range check?
goto exit;
}
}
MonoMethodSignature *caller_signature;
MonoMethodSignature *callee_signature;
caller_signature = mono_method_signature_internal (method);
callee_signature = cmethod ? mono_method_signature_internal (cmethod) : fsig;
g_assert (caller_signature);
g_assert (callee_signature);
// Require an exact match on return type due to various conversions in emit_move_return_value that would be skipped.
// The main troublesome conversions are double <=> float.
// CoreCLR allows some conversions here, such as integer truncation.
// As well I <=> I[48] and U <=> U[48] would be ok, for matching size.
if (IS_NOT_SUPPORTED_TAILCALL (mini_get_underlying_type (caller_signature->ret)->type != mini_get_underlying_type (callee_signature->ret)->type)
|| IS_NOT_SUPPORTED_TAILCALL (!mono_arch_tailcall_supported (cfg, caller_signature, callee_signature, virtual_))) {
tailcall_calli = FALSE;
tailcall = FALSE;
goto exit;
}
/* Debugging support */
#if 0
if (!mono_debug_count ()) {
tailcall_calli = FALSE;
tailcall = FALSE;
goto exit;
}
#endif
// See check_sp in mini_emit_calli_full.
if (tailcall_calli && IS_NOT_SUPPORTED_TAILCALL (mini_should_check_stack_pointer (cfg)))
tailcall_calli = FALSE;
exit:
mono_tailcall_print ("tail.%s %s -> %s tailcall:%d tailcall_calli:%d gshared:%d extra_arg:%d virtual_:%d\n",
mono_opcode_name (*ip), method->name, cmethod ? cmethod->name : "calli", tailcall, tailcall_calli,
cfg->gshared, extra_arg, virtual_);
*ptailcall_calli = tailcall_calli;
return tailcall;
}
/*
* is_addressable_valuetype_load
*
* Returns TRUE if a previous load can be done without an extra copy, given the new instruction IP and the type of the object being loaded, LDTYPE
*/
static gboolean
is_addressable_valuetype_load (MonoCompile* cfg, guint8* ip, MonoType* ldtype)
{
/* Avoid loading a struct just to load one of its fields */
gboolean is_load_instruction = (*ip == CEE_LDFLD);
gboolean is_in_previous_bb = ip_in_bb(cfg, cfg->cbb, ip);
gboolean is_struct = MONO_TYPE_ISSTRUCT(ldtype);
return is_load_instruction && is_in_previous_bb && is_struct;
}
/*
* handle_ctor_call:
*
* Handle calls made to ctors from NEWOBJ opcodes.
*/
static void
handle_ctor_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, int context_used,
MonoInst **sp, guint8 *ip, int *inline_costs)
{
MonoInst *vtable_arg = NULL, *callvirt_this_arg = NULL, *ins;
if (cmethod && (ins = mini_emit_inst_for_ctor (cfg, cmethod, fsig, sp))) {
g_assert (MONO_TYPE_IS_VOID (fsig->ret));
CHECK_CFG_EXCEPTION;
return;
}
if (mono_class_generic_sharing_enabled (cmethod->klass) && mono_method_is_generic_sharable (cmethod, TRUE)) {
MonoRgctxAccess access = mini_get_rgctx_access_for_method (cmethod);
if (access == MONO_RGCTX_ACCESS_MRGCTX) {
mono_class_vtable_checked (cmethod->klass, cfg->error);
CHECK_CFG_ERROR;
CHECK_TYPELOAD (cmethod->klass);
vtable_arg = emit_get_rgctx_method (cfg, context_used,
cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
} else if (access == MONO_RGCTX_ACCESS_VTABLE) {
vtable_arg = mini_emit_get_rgctx_klass (cfg, context_used,
cmethod->klass, MONO_RGCTX_INFO_VTABLE);
CHECK_CFG_ERROR;
CHECK_TYPELOAD (cmethod->klass);
} else {
g_assert (access == MONO_RGCTX_ACCESS_THIS);
}
}
/* Avoid virtual calls to ctors if possible */
if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
mono_method_check_inlining (cfg, cmethod) &&
!mono_class_is_subclass_of_internal (cmethod->klass, mono_defaults.exception_class, FALSE)) {
int costs;
if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, FALSE, NULL))) {
cfg->real_offset += 5;
*inline_costs += costs - 5;
} else {
INLINE_FAILURE ("inline failure");
// FIXME-VT: Clean this up
if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig))
GSHAREDVT_FAILURE(*ip);
mini_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, callvirt_this_arg, NULL, NULL);
}
} else if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) {
MonoInst *addr;
addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE);
if (cfg->llvm_only) {
// FIXME: Avoid initializing vtable_arg
mini_emit_llvmonly_calli (cfg, fsig, sp, addr);
} else {
mini_emit_calli (cfg, fsig, sp, addr, NULL, vtable_arg);
}
} else if (context_used &&
((!mono_method_is_generic_sharable_full (cmethod, TRUE, FALSE, FALSE) ||
!mono_class_generic_sharing_enabled (cmethod->klass)) || cfg->gsharedvt)) {
MonoInst *cmethod_addr;
/* Generic calls made out of gsharedvt methods cannot be patched, so use an indirect call */
if (cfg->llvm_only) {
MonoInst *addr = emit_get_rgctx_method (cfg, context_used, cmethod,
MONO_RGCTX_INFO_METHOD_FTNDESC);
mini_emit_llvmonly_calli (cfg, fsig, sp, addr);
} else {
cmethod_addr = emit_get_rgctx_method (cfg, context_used,
cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
mini_emit_calli (cfg, fsig, sp, cmethod_addr, NULL, vtable_arg);
}
} else {
INLINE_FAILURE ("ctor call");
ins = mini_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp,
callvirt_this_arg, NULL, vtable_arg);
}
exception_exit:
mono_error_exit:
return;
}
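/* State shared by the call-handling helpers below. */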
typedef struct {
MonoMethod *method;
gboolean inst_tailcall;
} HandleCallData;
/*
* handle_constrained_call:
*
* Handle constrained calls. Return a MonoInst* representing the call or NULL.
* May overwrite sp [0] and modify the ref_... parameters.
*/
static MonoInst*
handle_constrained_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoClass *constrained_class, MonoInst **sp,
HandleCallData *cdata, MonoMethod **ref_cmethod, gboolean *ref_virtual, gboolean *ref_emit_widen)
{
MonoInst *ins, *addr;
MonoMethod *method = cdata->method;
gboolean constrained_partial_call = FALSE;
gboolean constrained_is_generic_param =
m_class_get_byval_arg (constrained_class)->type == MONO_TYPE_VAR ||
m_class_get_byval_arg (constrained_class)->type == MONO_TYPE_MVAR;
MonoType *gshared_constraint = NULL;
if (constrained_is_generic_param && cfg->gshared) {
if (!mini_is_gsharedvt_klass (constrained_class)) {
g_assert (!m_class_is_valuetype (cmethod->klass));
if (!mini_type_is_reference (m_class_get_byval_arg (constrained_class)))
constrained_partial_call = TRUE;
MonoType *t = m_class_get_byval_arg (constrained_class);
MonoGenericParam *gparam = t->data.generic_param;
gshared_constraint = gparam->gshared_constraint;
}
}
if (mini_is_gsharedvt_klass (constrained_class)) {
if ((cmethod->klass != mono_defaults.object_class) && m_class_is_valuetype (constrained_class) && m_class_is_valuetype (cmethod->klass)) {
/* The 'Own method' case below */
} else if (m_class_get_image (cmethod->klass) != mono_defaults.corlib && !mono_class_is_interface (cmethod->klass) && !m_class_is_valuetype (cmethod->klass)) {
/* 'The type parameter is instantiated as a reference type' case below. */
} else {
ins = handle_constrained_gsharedvt_call (cfg, cmethod, fsig, sp, constrained_class, ref_emit_widen);
CHECK_CFG_EXCEPTION;
g_assert (ins);
if (cdata->inst_tailcall) // FIXME
mono_tailcall_print ("missed tailcall constrained_class %s -> %s\n", method->name, cmethod->name);
return ins;
}
}
if (m_method_is_static (cmethod)) {
/* Call to an abstract static method, handled normally */
return NULL;
} else if (constrained_partial_call) {
gboolean need_box = TRUE;
/*
* The receiver is a valuetype, but the exact type is not known at compile time. This means the
* called method is not known at compile time either. The called method could end up being
* one of the methods on the parent classes (object/valuetype/enum), in which case we need
* to box the receiver.
* A simple solution would be to box always and make a normal virtual call, but that would
* be bad performance-wise.
*/
if (mono_class_is_interface (cmethod->klass) && mono_class_is_ginst (cmethod->klass) &&
(cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT)) {
/*
* The parent classes implement no generic interfaces, so the called method will be a vtype method and no boxing is necessary.
*/
/* If the method is not abstract, it's a default interface method, and we need to box */
need_box = FALSE;
}
if (gshared_constraint && MONO_TYPE_IS_PRIMITIVE (gshared_constraint) && cmethod->klass == mono_defaults.object_class &&
!strcmp (cmethod->name, "GetHashCode")) {
/*
* The receiver is constrained to a primitive type or an enum with the same basetype.
* Enum.GetHashCode () returns the hash code of the underlying type (see comments in Enum.cs),
* so the constrained call can be replaced with a normal call to the basetype GetHashCode ()
* method.
*/
MonoClass *gshared_constraint_class = mono_class_from_mono_type_internal (gshared_constraint);
cmethod = get_method_nofail (gshared_constraint_class, cmethod->name, 0, 0);
g_assert (cmethod);
*ref_cmethod = cmethod;
*ref_virtual = FALSE;
if (cfg->verbose_level)
printf (" -> %s\n", mono_method_get_full_name (cmethod));
return NULL;
}
if (!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) && (cmethod->klass == mono_defaults.object_class || cmethod->klass == m_class_get_parent (mono_defaults.enum_class) || cmethod->klass == mono_defaults.enum_class)) {
/* The called method is not virtual, i.e. Object:GetType (), the receiver is a vtype, has to box */
EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, m_class_get_byval_arg (constrained_class), sp [0]->dreg, 0);
ins->klass = constrained_class;
sp [0] = mini_emit_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
CHECK_CFG_EXCEPTION;
} else if (need_box) {
MonoInst *box_type;
MonoBasicBlock *is_ref_bb, *end_bb;
MonoInst *nonbox_call, *addr;
/*
* Determine at runtime whether the called method is defined on object/valuetype/enum, and emit a boxing call
* if needed.
* FIXME: It is possible to inline the called method in a lot of cases, i.e. for T_INT,
* the no-box case goes to a method in Int32, while the box case goes to a method in Enum.
*/
addr = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_CODE);
NEW_BBLOCK (cfg, is_ref_bb);
NEW_BBLOCK (cfg, end_bb);
box_type = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_BOX_TYPE);
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, box_type->dreg, MONO_GSHAREDVT_BOX_TYPE_REF);
MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
/* Non-ref case */
if (cfg->llvm_only)
/* addr is an ftndesc in this case */
nonbox_call = mini_emit_llvmonly_calli (cfg, fsig, sp, addr);
else
nonbox_call = (MonoInst*)mini_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Ref case */
MONO_START_BB (cfg, is_ref_bb);
EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, m_class_get_byval_arg (constrained_class), sp [0]->dreg, 0);
ins->klass = constrained_class;
sp [0] = mini_emit_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
CHECK_CFG_EXCEPTION;
if (cfg->llvm_only)
ins = mini_emit_llvmonly_calli (cfg, fsig, sp, addr);
else
ins = (MonoInst*)mini_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
MONO_START_BB (cfg, end_bb);
cfg->cbb = end_bb;
nonbox_call->dreg = ins->dreg;
if (cdata->inst_tailcall) // FIXME
mono_tailcall_print ("missed tailcall constrained_partial_need_box %s -> %s\n", method->name, cmethod->name);
return ins;
} else {
g_assert (mono_class_is_interface (cmethod->klass));
addr = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_CODE);
if (cfg->llvm_only)
ins = mini_emit_llvmonly_calli (cfg, fsig, sp, addr);
else
ins = (MonoInst*)mini_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
if (cdata->inst_tailcall) // FIXME
mono_tailcall_print ("missed tailcall constrained_partial %s -> %s\n", method->name, cmethod->name);
return ins;
}
} else if (!m_class_is_valuetype (constrained_class)) {
int dreg = alloc_ireg_ref (cfg);
/*
* The type parameter is instantiated as a reference
* type. We have a managed pointer on the stack, so
* we need to dereference it here.
*/
EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
ins->type = STACK_OBJ;
sp [0] = ins;
} else if (cmethod->klass == mono_defaults.object_class || cmethod->klass == m_class_get_parent (mono_defaults.enum_class) || cmethod->klass == mono_defaults.enum_class) {
/*
* The type parameter is instantiated as a valuetype,
* but that type doesn't override the method we're
* calling, so we need to box `this'.
*/
EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, m_class_get_byval_arg (constrained_class), sp [0]->dreg, 0);
ins->klass = constrained_class;
sp [0] = mini_emit_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
CHECK_CFG_EXCEPTION;
} else {
if (cmethod->klass != constrained_class) {
/* Enums/default interface methods */
EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, m_class_get_byval_arg (constrained_class), sp [0]->dreg, 0);
ins->klass = constrained_class;
sp [0] = mini_emit_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
CHECK_CFG_EXCEPTION;
}
*ref_virtual = FALSE;
}
exception_exit:
return NULL;
}
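/*
 * Emit the store of VAL into the return location: either into cfg->ret
 * (possibly through cfg->vret_addr for valuetype returns) or via the
 * arch-specific mono_arch_emit_setret ().
 */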
static void
emit_setret (MonoCompile *cfg, MonoInst *val)
{
MonoType *ret_type = mini_get_underlying_type (mono_method_signature_internal (cfg->method)->ret);
MonoInst *ins;
if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
MonoInst *ret_addr;
if (!cfg->vret_addr) {
EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, val);
} else {
EMIT_NEW_RETLOADA (cfg, ret_addr);
MonoClass *ret_class = mono_class_from_mono_type_internal (ret_type);
if (MONO_CLASS_IS_SIMD (cfg, ret_class))
EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREX_MEMBASE, ret_addr->dreg, 0, val->dreg);
else
EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, val->dreg);
ins->klass = ret_class;
}
} else {
#ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
if (COMPILE_SOFT_FLOAT (cfg) && !m_type_is_byref (ret_type) && ret_type->type == MONO_TYPE_R4) {
MonoInst *conv;
MonoInst *iargs [ ] = { val };
conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
mono_arch_emit_setret (cfg, cfg->method, conv);
} else {
mono_arch_emit_setret (cfg, cfg->method, val);
}
#else
mono_arch_emit_setret (cfg, cfg->method, val);
#endif
}
}
/*
* Emit a call to enter the interpreter for methods with filter clauses.
*/
static void
emit_llvmonly_interp_entry (MonoCompile *cfg, MonoMethodHeader *header)
{
MonoInst *ins;
MonoInst **iargs;
MonoMethodSignature *sig = mono_method_signature_internal (cfg->method);
MonoInst *ftndesc;
cfg->interp_in_signatures = g_slist_prepend_mempool (cfg->mempool, cfg->interp_in_signatures, sig);
/*
* Emit a call to the interp entry function. We emit it here instead of the llvm backend since
* calling conventions etc. are easier to handle here. The LLVM backend will only emit the
* entry/exit bblocks.
*/
g_assert (cfg->cbb == cfg->bb_init);
if (cfg->gsharedvt && mini_is_gsharedvt_variable_signature (sig)) {
/*
* Would have to generate a gsharedvt out wrapper which calls the interp entry wrapper, but
* the gsharedvt out wrapper might not exist if the caller is also a gsharedvt method since
* the concrete signature of the call might not exist in the program.
* So transition directly to the interpreter without the wrappers.
*/
MonoInst *args_ins;
MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
ins->dreg = alloc_preg (cfg);
ins->inst_imm = sig->param_count * sizeof (target_mgreg_t);
MONO_ADD_INS (cfg->cbb, ins);
args_ins = ins;
for (int i = 0; i < sig->hasthis + sig->param_count; ++i) {
MonoInst *arg_addr_ins;
EMIT_NEW_VARLOADA ((cfg), arg_addr_ins, cfg->args [i], cfg->arg_types [i]);
EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args_ins->dreg, i * sizeof (target_mgreg_t), arg_addr_ins->dreg);
}
MonoInst *ret_var = NULL;
MonoInst *ret_arg_ins;
if (!MONO_TYPE_IS_VOID (sig->ret)) {
ret_var = mono_compile_create_var (cfg, sig->ret, OP_LOCAL);
EMIT_NEW_VARLOADA (cfg, ret_arg_ins, ret_var, sig->ret);
} else {
EMIT_NEW_PCONST (cfg, ret_arg_ins, NULL);
}
iargs = g_newa (MonoInst*, 3);
iargs [0] = emit_get_rgctx_method (cfg, -1, cfg->method, MONO_RGCTX_INFO_INTERP_METHOD);
iargs [1] = ret_arg_ins;
iargs [2] = args_ins;
mono_emit_jit_icall_id (cfg, MONO_JIT_ICALL_mini_llvmonly_interp_entry_gsharedvt, iargs);
if (!MONO_TYPE_IS_VOID (sig->ret))
EMIT_NEW_VARLOAD (cfg, ins, ret_var, sig->ret);
else
ins = NULL;
} else {
/* Obtain the interp entry function */
ftndesc = emit_get_rgctx_method (cfg, -1, cfg->method, MONO_RGCTX_INFO_LLVMONLY_INTERP_ENTRY);
/* Call it */
iargs = g_newa (MonoInst*, sig->param_count + 1);
for (int i = 0; i < sig->param_count + sig->hasthis; ++i)
EMIT_NEW_ARGLOAD (cfg, iargs [i], i);
ins = mini_emit_llvmonly_calli (cfg, sig, iargs, ftndesc);
}
/* Do a normal return */
if (cfg->ret) {
emit_setret (cfg, ins);
/*
* Since only bb_entry/bb_exit is emitted if interp_entry_only is set,
* it's possible that the return value becomes an OP_PHI node whose inputs
* are not emitted. Make it volatile to prevent that.
*/
cfg->ret->flags |= MONO_INST_VOLATILE;
}
MONO_INST_NEW (cfg, ins, OP_BR);
ins->inst_target_bb = cfg->bb_exit;
MONO_ADD_INS (cfg->cbb, ins);
link_bblock (cfg, cfg->cbb, cfg->bb_exit);
}
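/*
 * Decoded IL opcode info used by mono_opcode_decode () below: the immediate
 * operand (if any) and the net stack effect, taken from opcode.def.
 */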
typedef union _MonoOpcodeParameter {
gint32 i32;
gint64 i64;
float f;
double d;
guchar *branch_target;
} MonoOpcodeParameter;
typedef struct _MonoOpcodeInfo {
guint constant : 4; // private
gint pops : 3; // public -1 means variable
gint pushes : 3; // public -1 means variable
} MonoOpcodeInfo;
static const MonoOpcodeInfo*
mono_opcode_decode (guchar *ip, guint op_size, MonoOpcodeEnum il_op, MonoOpcodeParameter *parameter)
{
#define Push0 (0)
#define Pop0 (0)
#define Push1 (1)
#define Pop1 (1)
#define PushI (1)
#define PopI (1)
#define PushI8 (1)
#define PopI8 (1)
#define PushRef (1)
#define PopRef (1)
#define PushR4 (1)
#define PopR4 (1)
#define PushR8 (1)
#define PopR8 (1)
#define VarPush (-1)
#define VarPop (-1)
static const MonoOpcodeInfo mono_opcode_info [ ] = {
#define OPDEF(name, str, pops, pushes, param, param_constant, a, b, c, flow) {param_constant + 1, pops, pushes },
#include "mono/cil/opcode.def"
#undef OPDEF
};
#undef Push0
#undef Pop0
#undef Push1
#undef Pop1
#undef PushI
#undef PopI
#undef PushI8
#undef PopI8
#undef PushRef
#undef PopRef
#undef PushR4
#undef PopR4
#undef PushR8
#undef PopR8
#undef VarPush
#undef VarPop
gint32 delta;
guchar *next_ip = ip + op_size;
const MonoOpcodeInfo *info = &mono_opcode_info [il_op];
switch (mono_opcodes [il_op].argument) {
case MonoInlineNone:
parameter->i32 = (int)info->constant - 1;
break;
case MonoInlineString:
case MonoInlineType:
case MonoInlineField:
case MonoInlineMethod:
case MonoInlineTok:
case MonoInlineSig:
case MonoShortInlineR:
case MonoInlineI:
parameter->i32 = read32 (next_ip - 4);
// FIXME check token type?
break;
case MonoShortInlineI:
parameter->i32 = (signed char)next_ip [-1];
break;
case MonoInlineVar:
parameter->i32 = read16 (next_ip - 2);
break;
case MonoShortInlineVar:
parameter->i32 = next_ip [-1];
break;
case MonoInlineR:
case MonoInlineI8:
parameter->i64 = read64 (next_ip - 8);
break;
case MonoShortInlineBrTarget:
delta = (signed char)next_ip [-1];
goto branch_target;
case MonoInlineBrTarget:
delta = (gint32)read32 (next_ip - 4);
branch_target:
parameter->branch_target = delta + next_ip;
break;
case MonoInlineSwitch: // complicated
break;
default:
g_error ("%s %d %d\n", __func__, il_op, mono_opcodes [il_op].argument);
}
return info;
}
/*
* mono_method_to_ir:
*
* Translate the .NET IL into linear IR.
*
* @start_bblock: if not NULL, the starting basic block, used during inlining.
* @end_bblock: if not NULL, the ending basic block, used during inlining.
* @return_var: if not NULL, the place where the return value is stored, used during inlining.
* @inline_args: if not NULL, contains the arguments to the inline call
* @inline_offset: if not zero, the real offset from the inline call, or zero otherwise.
* @is_virtual_call: whether this method is being called as a result of a call to callvirt
*
* This method is used to turn ECMA IL into Mono's internal Linear IR
* representation. It is used both for entire methods and for
* inlining existing methods. In the former case, the @start_bblock,
* @end_bblock, @return_var, @inline_args are all set to NULL, and the
* inline_offset is set to zero.
*
* Returns: the inline cost, or -1 if there was an error processing this method.
*/
int
mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
MonoInst *return_var, MonoInst **inline_args,
guint inline_offset, gboolean is_virtual_call)
{
ERROR_DECL (error);
// Buffer to hold parameters to mono_new_array, instead of varargs.
MonoInst *array_new_localalloc_ins = NULL;
MonoInst *ins, **sp, **stack_start;
MonoBasicBlock *tblock = NULL;
MonoBasicBlock *init_localsbb = NULL, *init_localsbb2 = NULL;
MonoSimpleBasicBlock *bb = NULL, *original_bb = NULL;
MonoMethod *method_definition;
MonoInst **arg_array;
MonoMethodHeader *header;
MonoImage *image;
guint32 token, ins_flag;
MonoClass *klass;
MonoClass *constrained_class = NULL;
gboolean save_last_error = FALSE;
guchar *ip, *end, *target, *err_pos;
MonoMethodSignature *sig;
MonoGenericContext *generic_context = NULL;
MonoGenericContainer *generic_container = NULL;
MonoType **param_types;
int i, n, start_new_bblock, dreg;
int num_calls = 0, inline_costs = 0;
guint num_args;
GSList *class_inits = NULL;
gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
int context_used;
gboolean init_locals, seq_points, skip_dead_blocks;
gboolean sym_seq_points = FALSE;
MonoDebugMethodInfo *minfo;
MonoBitSet *seq_point_locs = NULL;
MonoBitSet *seq_point_set_locs = NULL;
const char *ovf_exc = NULL;
gboolean emitted_funccall_seq_point = FALSE;
gboolean detached_before_ret = FALSE;
gboolean ins_has_side_effect;
if (!cfg->disable_inline)
cfg->disable_inline = (method->iflags & METHOD_IMPL_ATTRIBUTE_NOOPTIMIZATION) || is_jit_optimizer_disabled (method);
cfg->current_method = method;
image = m_class_get_image (method->klass);
/* serialization and xdomain stuff may need access to private fields and methods */
dont_verify = FALSE;
dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
/* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_OTHER;
dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_STELEMREF;
header = mono_method_get_header_checked (method, cfg->error);
if (!header) {
mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
goto exception_exit;
} else {
cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
}
generic_container = mono_method_get_generic_container (method);
sig = mono_method_signature_internal (method);
num_args = sig->hasthis + sig->param_count;
ip = (guchar*)header->code;
cfg->cil_start = ip;
end = ip + header->code_size;
cfg->stat_cil_code_size += header->code_size;
seq_points = cfg->gen_seq_points && cfg->method == method;
if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED) {
/* We could hit a seq point before attaching to the JIT (#8338) */
seq_points = FALSE;
}
if (method->wrapper_type == MONO_WRAPPER_OTHER) {
WrapperInfo *info = mono_marshal_get_wrapper_info (method);
if (info->subtype == WRAPPER_SUBTYPE_INTERP_IN) {
/* We could hit a seq point before attaching to the JIT (#8338) */
seq_points = FALSE;
}
}
if (cfg->prof_coverage) {
if (cfg->compile_aot)
g_error ("Coverage profiling is not supported with AOT.");
INLINE_FAILURE ("coverage profiling");
cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
}
if ((cfg->gen_sdb_seq_points && cfg->method == method) || cfg->prof_coverage) {
minfo = mono_debug_lookup_method (method);
if (minfo) {
MonoSymSeqPoint *sps;
int i, n_il_offsets;
mono_debug_get_seq_points (minfo, NULL, NULL, NULL, &sps, &n_il_offsets);
seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
sym_seq_points = TRUE;
for (i = 0; i < n_il_offsets; ++i) {
if (sps [i].il_offset < header->code_size)
mono_bitset_set_fast (seq_point_locs, sps [i].il_offset);
}
g_free (sps);
MonoDebugMethodAsyncInfo* asyncMethod = mono_debug_lookup_method_async_debug_info (method);
if (asyncMethod) {
for (i = 0; asyncMethod != NULL && i < asyncMethod->num_awaits; i++)
{
mono_bitset_set_fast (seq_point_locs, asyncMethod->resume_offsets[i]);
mono_bitset_set_fast (seq_point_locs, asyncMethod->yield_offsets[i]);
}
mono_debug_free_method_async_debug_info (asyncMethod);
}
} else if (!method->wrapper_type && !method->dynamic && mono_debug_image_has_debug_info (m_class_get_image (method->klass))) {
/* Methods without line number info like auto-generated property accessors */
seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
sym_seq_points = TRUE;
}
}
/*
* Methods without init_locals set could cause asserts in various passes
* (#497220). To work around this, we emit dummy initialization opcodes
* (OP_DUMMY_ICONST etc.) which generate no code. These are only supported
* on some platforms.
*/
if (cfg->opt & MONO_OPT_UNSAFE)
init_locals = header->init_locals;
else
init_locals = TRUE;
method_definition = method;
while (method_definition->is_inflated) {
MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
method_definition = imethod->declaring;
}
/* SkipVerification is not allowed if core-clr is enabled */
if (!dont_verify && mini_assembly_can_skip_verification (method)) {
dont_verify = TRUE;
dont_verify_stloc = TRUE;
}
if (sig->is_inflated)
generic_context = mono_method_get_context (method);
else if (generic_container)
generic_context = &generic_container->context;
cfg->generic_context = generic_context;
if (!cfg->gshared)
g_assert (!sig->has_type_parameters);
if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
g_assert (method->is_inflated);
g_assert (mono_method_get_context (method)->method_inst);
}
if (method->is_inflated && mono_method_get_context (method)->method_inst)
g_assert (sig->generic_param_count);
if (cfg->method == method) {
cfg->real_offset = 0;
} else {
cfg->real_offset = inline_offset;
}
cfg->cil_offset_to_bb = (MonoBasicBlock **)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
cfg->cil_offset_to_bb_len = header->code_size;
if (cfg->verbose_level > 2)
printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
param_types = (MonoType **)mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
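	/* Slot 0 holds the implicit 'this' type for instance methods; the explicit parameters follow it. */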
if (sig->hasthis)
param_types [0] = m_class_is_valuetype (method->klass) ? m_class_get_this_arg (method->klass) : m_class_get_byval_arg (method->klass);
for (n = 0; n < sig->param_count; ++n)
param_types [n + sig->hasthis] = sig->params [n];
cfg->arg_types = param_types;
cfg->dont_inline = g_list_prepend (cfg->dont_inline, method);
if (cfg->method == method) {
/* ENTRY BLOCK */
NEW_BBLOCK (cfg, start_bblock);
cfg->bb_entry = start_bblock;
start_bblock->cil_code = NULL;
start_bblock->cil_length = 0;
/* EXIT BLOCK */
NEW_BBLOCK (cfg, end_bblock);
cfg->bb_exit = end_bblock;
end_bblock->cil_code = NULL;
end_bblock->cil_length = 0;
end_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
g_assert (cfg->num_bblocks == 2);
arg_array = cfg->args;
if (header->num_clauses) {
cfg->spvars = g_hash_table_new (NULL, NULL);
cfg->exvars = g_hash_table_new (NULL, NULL);
}
cfg->clause_is_dead = mono_mempool_alloc0 (cfg->mempool, sizeof (gboolean) * header->num_clauses);
/* handle exception clauses */
for (i = 0; i < header->num_clauses; ++i) {
MonoBasicBlock *try_bb;
MonoExceptionClause *clause = &header->clauses [i];
GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
try_bb->real_offset = clause->try_offset;
try_bb->try_start = TRUE;
GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
tblock->real_offset = clause->handler_offset;
tblock->flags |= BB_EXCEPTION_HANDLER;
if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
mono_create_exvar_for_offset (cfg, clause->handler_offset);
/*
* Linking the try block with the EH block hinders inlining as we won't be able to
* merge the bblocks from inlining and produce an artificial hole for no good reason.
*/
if (COMPILE_LLVM (cfg))
link_bblock (cfg, try_bb, tblock);
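			/* A handler that starts by popping the exception object never uses it,
			 * so the object does not have to be kept alive for it. */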
if (*(ip + clause->handler_offset) == CEE_POP)
tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
MONO_ADD_INS (tblock, ins);
if (seq_points && clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY && clause->flags != MONO_EXCEPTION_CLAUSE_FILTER) {
/* finally clauses already have a seq point */
/* seq points for filter clauses are emitted below */
NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
MONO_ADD_INS (tblock, ins);
}
/* todo: is a fault block unsafe to optimize? */
if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
tblock->flags |= BB_EXCEPTION_UNSAFE;
}
/*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
while (p < end) {
printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
}*/
/* catch and filter blocks get the exception object on the stack */
if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
/* mostly like handle_stack_args (), but just sets the input args */
/* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
tblock->in_scount = 1;
tblock->in_stack = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
cfg->cbb = tblock;
#ifdef MONO_CONTEXT_SET_LLVM_EXC_REG
/* The EH code passes in the exception in a register to both JITted and LLVM compiled code */
if (!cfg->compile_llvm) {
MONO_INST_NEW (cfg, ins, OP_GET_EX_OBJ);
ins->dreg = tblock->in_stack [0]->dreg;
MONO_ADD_INS (tblock, ins);
}
#else
MonoInst *dummy_use;
/*
* Add a dummy use for the exvar so its liveness info will be
* correct.
*/
EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
#endif
if (seq_points && clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
MONO_ADD_INS (tblock, ins);
}
if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
tblock->flags |= BB_EXCEPTION_HANDLER;
tblock->real_offset = clause->data.filter_offset;
tblock->in_scount = 1;
tblock->in_stack = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
/* The filter block shares the exvar with the handler block */
tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
MONO_ADD_INS (tblock, ins);
}
}
if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
clause->data.catch_class &&
cfg->gshared &&
mono_class_check_context_used (clause->data.catch_class)) {
/*
* In shared generic code with catch
* clauses containing type variables
* the exception handling code has to
* be able to get to the rgctx.
* Therefore we have to make sure that
* the vtable/mrgctx argument (for
* static or generic methods) or the
* "this" argument (for non-static
* methods) are live.
*/
if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
mini_method_get_context (method)->method_inst ||
m_class_is_valuetype (method->klass)) {
mono_get_vtable_var (cfg);
} else {
MonoInst *dummy_use;
EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
}
}
}
} else {
arg_array = g_newa (MonoInst*, num_args);
cfg->cbb = start_bblock;
cfg->args = arg_array;
mono_save_args (cfg, sig, inline_args);
}
if (cfg->method == method && cfg->self_init && cfg->compile_aot && !COMPILE_LLVM (cfg)) {
MonoMethod *wrapper;
MonoInst *args [2];
int idx;
/*
* Emit code to initialize this method by calling the init wrapper emitted by LLVM.
		 * This is not efficient right now, but it's only used for the methods which fail
* LLVM compilation.
* FIXME: Optimize this
*/
g_assert (!cfg->gshared);
wrapper = mono_marshal_get_aot_init_wrapper (AOT_INIT_METHOD);
/* Emit this into the entry bb so it comes before the GC safe point which depends on an inited GOT */
cfg->cbb = cfg->bb_entry;
idx = mono_aot_get_method_index (cfg->method);
EMIT_NEW_ICONST (cfg, args [0], idx);
/* Dummy */
EMIT_NEW_ICONST (cfg, args [1], 0);
mono_emit_method_call (cfg, wrapper, args, NULL);
}
if (cfg->llvm_only && cfg->interp && cfg->method == method && !cfg->deopt) {
if (header->num_clauses) {
for (int i = 0; i < header->num_clauses; ++i) {
MonoExceptionClause *clause = &header->clauses [i];
/* Finally clauses are checked after the remove_finally pass */
if (clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY)
cfg->interp_entry_only = TRUE;
}
}
}
/* we use a separate basic block for the initialization code */
NEW_BBLOCK (cfg, init_localsbb);
if (cfg->method == method)
cfg->bb_init = init_localsbb;
init_localsbb->real_offset = cfg->real_offset;
start_bblock->next_bb = init_localsbb;
link_bblock (cfg, start_bblock, init_localsbb);
init_localsbb2 = init_localsbb;
cfg->cbb = init_localsbb;
if (cfg->gsharedvt && cfg->method == method) {
MonoGSharedVtMethodInfo *info;
MonoInst *var, *locals_var;
int dreg;
info = (MonoGSharedVtMethodInfo *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoGSharedVtMethodInfo));
info->method = cfg->method;
info->count_entries = 16;
info->entries = (MonoRuntimeGenericContextInfoTemplate *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
cfg->gsharedvt_info = info;
var = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL);
/* prevent it from being register allocated */
//var->flags |= MONO_INST_VOLATILE;
cfg->gsharedvt_info_var = var;
ins = emit_get_rgctx_gsharedvt_method (cfg, mini_method_check_context_used (cfg, method), method, info);
MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, var->dreg, ins->dreg);
/* Allocate locals */
locals_var = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL);
/* prevent it from being register allocated */
//locals_var->flags |= MONO_INST_VOLATILE;
cfg->gsharedvt_locals_var = locals_var;
dreg = alloc_ireg (cfg);
MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, dreg, var->dreg, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, locals_size));
MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
ins->dreg = locals_var->dreg;
ins->sreg1 = dreg;
MONO_ADD_INS (cfg->cbb, ins);
cfg->gsharedvt_locals_var_ins = ins;
cfg->flags |= MONO_CFG_HAS_ALLOCA;
/*
if (init_locals)
ins->flags |= MONO_INST_INIT;
*/
if (cfg->llvm_only) {
init_localsbb = cfg->cbb;
init_localsbb2 = cfg->cbb;
}
}
if (cfg->deopt) {
/*
* Push an LMFExt frame which points to a MonoMethodILState structure.
*/
emit_push_lmf (cfg);
/* The type doesn't matter, the llvm backend will use the correct type */
MonoInst *il_state_var = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL);
il_state_var->flags |= MONO_INST_VOLATILE;
cfg->il_state_var = il_state_var;
EMIT_NEW_VARLOADA (cfg, ins, cfg->il_state_var, NULL);
int il_state_addr_reg = ins->dreg;
/* il_state->method = method */
MonoInst *method_ins = emit_get_rgctx_method (cfg, -1, cfg->method, MONO_RGCTX_INFO_METHOD);
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, il_state_addr_reg, MONO_STRUCT_OFFSET (MonoMethodILState, method), method_ins->dreg);
EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
int lmf_reg = ins->dreg;
/* lmf->kind = MONO_LMFEXT_IL_STATE */
MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, lmf_reg, MONO_STRUCT_OFFSET (MonoLMFExt, kind), MONO_LMFEXT_IL_STATE);
/* lmf->il_state = il_state */
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, lmf_reg, MONO_STRUCT_OFFSET (MonoLMFExt, il_state), il_state_addr_reg);
/* emit_get_rgctx_method () might create new bblocks */
if (cfg->llvm_only) {
init_localsbb = cfg->cbb;
init_localsbb2 = cfg->cbb;
}
}
if (cfg->llvm_only && cfg->interp && cfg->method == method) {
if (cfg->interp_entry_only)
emit_llvmonly_interp_entry (cfg, header);
}
/* FIRST CODE BLOCK */
NEW_BBLOCK (cfg, tblock);
tblock->cil_code = ip;
cfg->cbb = tblock;
cfg->ip = ip;
init_localsbb->next_bb = cfg->cbb;
link_bblock (cfg, init_localsbb, cfg->cbb);
ADD_BBLOCK (cfg, tblock);
CHECK_CFG_EXCEPTION;
if (header->code_size == 0)
UNVERIFIED;
if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
ip = err_pos;
UNVERIFIED;
}
if (cfg->method == method) {
int breakpoint_id = mono_debugger_method_has_breakpoint (method);
if (breakpoint_id) {
MONO_INST_NEW (cfg, ins, OP_BREAK);
MONO_ADD_INS (cfg->cbb, ins);
}
mono_debug_init_method (cfg, cfg->cbb, breakpoint_id);
}
for (n = 0; n < header->num_locals; ++n) {
if (header->locals [n]->type == MONO_TYPE_VOID && !m_type_is_byref (header->locals [n]))
UNVERIFIED;
}
class_inits = NULL;
/* We force the vtable variable here for all shared methods
for the possibility that they might show up in a stack
trace where their exact instantiation is needed. */
if (cfg->gshared && method == cfg->method) {
if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
mini_method_get_context (method)->method_inst ||
m_class_is_valuetype (method->klass)) {
mono_get_vtable_var (cfg);
} else {
/* FIXME: Is there a better way to do this?
We need the variable live for the duration
of the whole method. */
cfg->args [0]->flags |= MONO_INST_VOLATILE;
}
}
/* add a check for this != NULL to inlined methods */
if (is_virtual_call) {
MonoInst *arg_ins;
//
		// This is just a hack to avoid 'this' checks in empty methods which could get inlined
		// into finally clauses, preventing the removal of empty finally clauses, since all
		// variables in finally clauses are marked volatile and so the check can't be removed.
//
if (!(cfg->llvm_only && m_class_is_valuetype (method->klass) && header->code_size == 1 && header->code [0] == CEE_RET)) {
NEW_ARGLOAD (cfg, arg_ins, 0);
MONO_ADD_INS (cfg->cbb, arg_ins);
MONO_EMIT_NEW_CHECK_THIS (cfg, arg_ins->dreg);
}
}
skip_dead_blocks = !dont_verify;
if (skip_dead_blocks) {
original_bb = bb = mono_basic_block_split (method, cfg->error, header);
CHECK_CFG_ERROR;
g_assert (bb);
}
/* we use a spare stack slot in SWITCH and NEWOBJ and others */
stack_start = sp = (MonoInst **)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
ins_flag = 0;
start_new_bblock = 0;
MonoOpcodeEnum il_op; il_op = MonoOpcodeEnum_Invalid;
emit_set_deopt_il_offset (cfg, ip - cfg->cil_start);
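	/*
	 * Main decode loop: each iteration decodes the IL opcode at 'ip' and
	 * advances 'next_ip' past it, emitting IR into the current bblock.
	 */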
for (guchar *next_ip = ip; ip < end; ip = next_ip) {
MonoOpcodeEnum previous_il_op = il_op;
const guchar *tmp_ip = ip;
const int op_size = mono_opcode_value_and_size (&tmp_ip, end, &il_op);
CHECK_OPSIZE (op_size);
next_ip += op_size;
if (cfg->method == method)
cfg->real_offset = ip - header->code;
else
cfg->real_offset = inline_offset;
cfg->ip = ip;
context_used = 0;
if (start_new_bblock) {
cfg->cbb->cil_length = ip - cfg->cbb->cil_code;
if (start_new_bblock == 2) {
g_assert (ip == tblock->cil_code);
} else {
GET_BBLOCK (cfg, tblock, ip);
}
cfg->cbb->next_bb = tblock;
cfg->cbb = tblock;
start_new_bblock = 0;
for (i = 0; i < cfg->cbb->in_scount; ++i) {
if (cfg->verbose_level > 3)
printf ("loading %d from temp %d\n", i, (int)cfg->cbb->in_stack [i]->inst_c0);
EMIT_NEW_TEMPLOAD (cfg, ins, cfg->cbb->in_stack [i]->inst_c0);
*sp++ = ins;
}
if (class_inits)
g_slist_free (class_inits);
class_inits = NULL;
emit_set_deopt_il_offset (cfg, ip - cfg->cil_start);
} else {
if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != cfg->cbb)) {
link_bblock (cfg, cfg->cbb, tblock);
if (sp != stack_start) {
handle_stack_args (cfg, stack_start, sp - stack_start);
sp = stack_start;
CHECK_UNVERIFIABLE (cfg);
}
cfg->cbb->next_bb = tblock;
cfg->cbb = tblock;
for (i = 0; i < cfg->cbb->in_scount; ++i) {
if (cfg->verbose_level > 3)
printf ("loading %d from temp %d\n", i, (int)cfg->cbb->in_stack [i]->inst_c0);
EMIT_NEW_TEMPLOAD (cfg, ins, cfg->cbb->in_stack [i]->inst_c0);
*sp++ = ins;
}
g_slist_free (class_inits);
class_inits = NULL;
emit_set_deopt_il_offset (cfg, ip - cfg->cil_start);
}
}
/*
* Methods with AggressiveInline flag could be inlined even if the class has a cctor.
		 * This might create a branch, so emit it in the first code bblock instead of into init_localsbb.
*/
if (ip - header->code == 0 && cfg->method != method && cfg->compile_aot && (method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING) && mono_class_needs_cctor_run (method->klass, method)) {
emit_class_init (cfg, method->klass);
}
if (skip_dead_blocks) {
int ip_offset = ip - header->code;
if (ip_offset == bb->end)
bb = bb->next;
if (bb->dead) {
g_assert (op_size > 0); /*The BB formation pass must catch all bad ops*/
if (cfg->verbose_level > 3) printf ("SKIPPING DEAD OP at %x\n", ip_offset);
if (ip_offset + op_size == bb->end) {
MONO_INST_NEW (cfg, ins, OP_NOP);
MONO_ADD_INS (cfg->cbb, ins);
start_new_bblock = 1;
}
continue;
}
}
/*
* Sequence points are points where the debugger can place a breakpoint.
* Currently, we generate these automatically at points where the IL
* stack is empty.
*/
if (seq_points && ((!sym_seq_points && (sp == stack_start)) || (sym_seq_points && mono_bitset_test_fast (seq_point_locs, ip - header->code)))) {
/*
* Make methods interruptable at the beginning, and at the targets of
* backward branches.
* Also, do this at the start of every bblock in methods with clauses too,
			 * to be able to handle instructions with imprecise control flow like
* throw/endfinally.
* Backward branches are handled at the end of method-to-ir ().
*/
gboolean intr_loc = ip == header->code || (!cfg->cbb->last_ins && cfg->header->num_clauses);
gboolean sym_seq_point = sym_seq_points && mono_bitset_test_fast (seq_point_locs, ip - header->code);
/* Avoid sequence points on empty IL like .volatile */
// FIXME: Enable this
//if (!(cfg->cbb->last_ins && cfg->cbb->last_ins->opcode == OP_SEQ_POINT)) {
NEW_SEQ_POINT (cfg, ins, ip - header->code, intr_loc);
if ((sp != stack_start) && !sym_seq_point)
ins->flags |= MONO_INST_NONEMPTY_STACK;
MONO_ADD_INS (cfg->cbb, ins);
if (sym_seq_points)
mono_bitset_set_fast (seq_point_set_locs, ip - header->code);
if (cfg->prof_coverage) {
guint32 cil_offset = ip - header->code;
gpointer counter = &cfg->coverage_info->data [cil_offset].count;
cfg->coverage_info->data [cil_offset].cil_code = ip;
if (mono_arch_opcode_supported (OP_ATOMIC_ADD_I4)) {
MonoInst *one_ins, *load_ins;
EMIT_NEW_PCONST (cfg, load_ins, counter);
EMIT_NEW_ICONST (cfg, one_ins, 1);
MONO_INST_NEW (cfg, ins, OP_ATOMIC_ADD_I4);
ins->dreg = mono_alloc_ireg (cfg);
ins->inst_basereg = load_ins->dreg;
ins->inst_offset = 0;
ins->sreg2 = one_ins->dreg;
ins->type = STACK_I4;
MONO_ADD_INS (cfg->cbb, ins);
} else {
EMIT_NEW_PCONST (cfg, ins, counter);
MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
}
}
}
cfg->cbb->real_offset = cfg->real_offset;
if (cfg->verbose_level > 3)
printf ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
/*
* This is used to compute BB_HAS_SIDE_EFFECTS, which is used for the elimination of
		 * finally clauses (such as those generated by foreach), so only IL opcodes which occur in such clauses
* need to set this.
*/
ins_has_side_effect = TRUE;
// Variables shared by CEE_CALLI CEE_CALL CEE_CALLVIRT CEE_JMP.
// Initialize to either what they all need or zero.
gboolean emit_widen = TRUE;
gboolean tailcall = FALSE;
gboolean common_call = FALSE;
MonoInst *keep_this_alive = NULL;
MonoMethod *cmethod = NULL;
MonoMethodSignature *fsig = NULL;
// These are used only in CALL/CALLVIRT but must be initialized also for CALLI,
// since it jumps into CALL/CALLVIRT.
gboolean need_seq_point = FALSE;
gboolean push_res = TRUE;
gboolean skip_ret = FALSE;
gboolean tailcall_remove_ret = FALSE;
// FIXME split 500 lines load/store field into separate file/function.
MonoOpcodeParameter parameter;
const MonoOpcodeInfo* info = mono_opcode_decode (ip, op_size, il_op, ¶meter);
g_assert (info);
n = parameter.i32;
token = parameter.i32;
target = parameter.branch_target;
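		// MonoOpcodeParameter is a union: n, token and target are alternative views
		// of the single decoded operand; only the one matching the opcode's operand
		// kind is meaningful.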
		// Check stack size for push/pop, except for the variable cases (-1, e.g. call/ret/newobj).
const int pushes = info->pushes;
const int pops = info->pops;
if (pushes >= 0 && pops >= 0) {
g_assert (pushes - pops <= 1);
if (pushes - pops == 1)
CHECK_STACK_OVF ();
}
if (pops >= 0)
CHECK_STACK (pops);
switch (il_op) {
case MONO_CEE_NOP:
if (seq_points && !sym_seq_points && sp != stack_start) {
/*
* The C# compiler uses these nops to notify the JIT that it should
* insert seq points.
*/
NEW_SEQ_POINT (cfg, ins, ip - header->code, FALSE);
MONO_ADD_INS (cfg->cbb, ins);
}
if (cfg->keep_cil_nops)
MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
else
MONO_INST_NEW (cfg, ins, OP_NOP);
MONO_ADD_INS (cfg->cbb, ins);
emitted_funccall_seq_point = FALSE;
ins_has_side_effect = FALSE;
break;
case MONO_CEE_BREAK:
if (mini_should_insert_breakpoint (cfg->method)) {
ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
} else {
MONO_INST_NEW (cfg, ins, OP_NOP);
MONO_ADD_INS (cfg->cbb, ins);
}
break;
case MONO_CEE_LDARG_0:
case MONO_CEE_LDARG_1:
case MONO_CEE_LDARG_2:
case MONO_CEE_LDARG_3:
case MONO_CEE_LDARG_S:
case MONO_CEE_LDARG:
CHECK_ARG (n);
if (next_ip < end && is_addressable_valuetype_load (cfg, next_ip, cfg->arg_types[n])) {
EMIT_NEW_ARGLOADA (cfg, ins, n);
} else {
EMIT_NEW_ARGLOAD (cfg, ins, n);
}
*sp++ = ins;
break;
case MONO_CEE_LDLOC_0:
case MONO_CEE_LDLOC_1:
case MONO_CEE_LDLOC_2:
case MONO_CEE_LDLOC_3:
case MONO_CEE_LDLOC_S:
case MONO_CEE_LDLOC:
CHECK_LOCAL (n);
if (next_ip < end && is_addressable_valuetype_load (cfg, next_ip, header->locals[n])) {
EMIT_NEW_LOCLOADA (cfg, ins, n);
} else {
EMIT_NEW_LOCLOAD (cfg, ins, n);
}
*sp++ = ins;
break;
case MONO_CEE_STLOC_0:
case MONO_CEE_STLOC_1:
case MONO_CEE_STLOC_2:
case MONO_CEE_STLOC_3:
case MONO_CEE_STLOC_S:
case MONO_CEE_STLOC:
CHECK_LOCAL (n);
--sp;
*sp = convert_value (cfg, header->locals [n], *sp);
if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
UNVERIFIED;
emit_stloc_ir (cfg, sp, header, n);
inline_costs += 1;
break;
case MONO_CEE_LDARGA_S:
case MONO_CEE_LDARGA:
CHECK_ARG (n);
NEW_ARGLOADA (cfg, ins, n);
MONO_ADD_INS (cfg->cbb, ins);
*sp++ = ins;
break;
case MONO_CEE_STARG_S:
case MONO_CEE_STARG:
--sp;
CHECK_ARG (n);
*sp = convert_value (cfg, param_types [n], *sp);
if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
UNVERIFIED;
emit_starg_ir (cfg, sp, n);
break;
case MONO_CEE_LDLOCA:
case MONO_CEE_LDLOCA_S: {
guchar *tmp_ip;
CHECK_LOCAL (n);
if ((tmp_ip = emit_optimized_ldloca_ir (cfg, next_ip, end, n))) {
next_ip = tmp_ip;
il_op = MONO_CEE_INITOBJ;
inline_costs += 1;
break;
}
ins_has_side_effect = FALSE;
EMIT_NEW_LOCLOADA (cfg, ins, n);
*sp++ = ins;
break;
}
case MONO_CEE_LDNULL:
EMIT_NEW_PCONST (cfg, ins, NULL);
ins->type = STACK_OBJ;
*sp++ = ins;
break;
case MONO_CEE_LDC_I4_M1:
case MONO_CEE_LDC_I4_0:
case MONO_CEE_LDC_I4_1:
case MONO_CEE_LDC_I4_2:
case MONO_CEE_LDC_I4_3:
case MONO_CEE_LDC_I4_4:
case MONO_CEE_LDC_I4_5:
case MONO_CEE_LDC_I4_6:
case MONO_CEE_LDC_I4_7:
case MONO_CEE_LDC_I4_8:
case MONO_CEE_LDC_I4_S:
case MONO_CEE_LDC_I4:
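			// For the short forms, n already holds the immediate: it comes from the
			// biased 'constant' field decoded by mono_opcode_decode () above.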
EMIT_NEW_ICONST (cfg, ins, n);
*sp++ = ins;
break;
case MONO_CEE_LDC_I8:
MONO_INST_NEW (cfg, ins, OP_I8CONST);
ins->type = STACK_I8;
ins->dreg = alloc_dreg (cfg, STACK_I8);
ins->inst_l = parameter.i64;
MONO_ADD_INS (cfg->cbb, ins);
*sp++ = ins;
break;
case MONO_CEE_LDC_R4: {
float *f;
gboolean use_aotconst = FALSE;
#ifdef TARGET_POWERPC
/* FIXME: Clean this up */
if (cfg->compile_aot)
use_aotconst = TRUE;
#endif
/* FIXME: we should really allocate this only late in the compilation process */
f = (float *)mono_mem_manager_alloc (cfg->mem_manager, sizeof (float));
if (use_aotconst) {
MonoInst *cons;
int dreg;
EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);
dreg = alloc_freg (cfg);
EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
ins->type = cfg->r4_stack_type;
} else {
MONO_INST_NEW (cfg, ins, OP_R4CONST);
ins->type = cfg->r4_stack_type;
ins->dreg = alloc_dreg (cfg, STACK_R8);
ins->inst_p0 = f;
MONO_ADD_INS (cfg->cbb, ins);
}
*f = parameter.f;
*sp++ = ins;
break;
}
case MONO_CEE_LDC_R8: {
double *d;
gboolean use_aotconst = FALSE;
#ifdef TARGET_POWERPC
/* FIXME: Clean this up */
if (cfg->compile_aot)
use_aotconst = TRUE;
#endif
/* FIXME: we should really allocate this only late in the compilation process */
d = (double *)mono_mem_manager_alloc (cfg->mem_manager, sizeof (double));
if (use_aotconst) {
MonoInst *cons;
int dreg;
EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);
dreg = alloc_freg (cfg);
EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
ins->type = STACK_R8;
} else {
MONO_INST_NEW (cfg, ins, OP_R8CONST);
ins->type = STACK_R8;
ins->dreg = alloc_dreg (cfg, STACK_R8);
ins->inst_p0 = d;
MONO_ADD_INS (cfg->cbb, ins);
}
*d = parameter.d;
*sp++ = ins;
break;
}
case MONO_CEE_DUP: {
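			/* 'dup' is lowered to a store into a fresh temp followed by two loads of it,
			 * so both stack slots refer to the same spilled value. */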
MonoInst *temp, *store;
MonoClass *klass;
sp--;
ins = *sp;
klass = ins->klass;
temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
ins->klass = klass;
*sp++ = ins;
EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
ins->klass = klass;
*sp++ = ins;
inline_costs += 2;
break;
}
case MONO_CEE_POP:
--sp;
#ifdef TARGET_X86
if (sp [0]->type == STACK_R8)
/* we need to pop the value from the x86 FP stack */
MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
#endif
break;
case MONO_CEE_JMP: {
MonoCallInst *call;
int i, n;
INLINE_FAILURE ("jmp");
GSHAREDVT_FAILURE (il_op);
if (stack_start != sp)
UNVERIFIED;
/* FIXME: check the signature matches */
cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
CHECK_CFG_ERROR;
if (cfg->gshared && mono_method_check_context_used (cmethod))
GENERIC_SHARING_FAILURE (CEE_JMP);
mini_profiler_emit_tail_call (cfg, cmethod);
fsig = mono_method_signature_internal (cmethod);
n = fsig->param_count + fsig->hasthis;
if (cfg->llvm_only) {
MonoInst **args;
args = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
for (i = 0; i < n; ++i)
EMIT_NEW_ARGLOAD (cfg, args [i], i);
ins = mini_emit_method_call_full (cfg, cmethod, fsig, TRUE, args, NULL, NULL, NULL);
/*
* The code in mono-basic-block.c treats the rest of the code as dead, but we
* have to emit a normal return since llvm expects it.
*/
if (cfg->ret)
emit_setret (cfg, ins);
MONO_INST_NEW (cfg, ins, OP_BR);
ins->inst_target_bb = end_bblock;
MONO_ADD_INS (cfg->cbb, ins);
link_bblock (cfg, cfg->cbb, end_bblock);
break;
} else {
/* Handle tailcalls similarly to calls */
DISABLE_AOT (cfg);
mini_emit_tailcall_parameters (cfg, fsig);
MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
call->method = cmethod;
// FIXME Other initialization of the tailcall field occurs after
// it is used. So this is the only "real" use and needs more attention.
call->tailcall = TRUE;
call->signature = fsig;
call->args = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
call->inst.inst_p0 = cmethod;
for (i = 0; i < n; ++i)
EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
if (mini_type_is_vtype (mini_get_underlying_type (call->signature->ret)))
call->vret_var = cfg->vret_addr;
mono_arch_emit_call (cfg, call);
cfg->param_area = MAX(cfg->param_area, call->stack_usage);
MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
}
start_new_bblock = 1;
break;
}
case MONO_CEE_CALLI: {
			// FIXME tail.calli is problematic because the this pointer's type
// is not in the signature, and we cannot check for a byref valuetype.
MonoInst *addr;
MonoInst *callee = NULL;
// Variables shared by CEE_CALLI and CEE_CALL/CEE_CALLVIRT.
common_call = TRUE; // i.e. skip_ret/push_res/seq_point logic
cmethod = NULL;
gboolean const inst_tailcall = G_UNLIKELY (debug_tailcall_try_all
? (next_ip < end && next_ip [0] == CEE_RET)
: ((ins_flag & MONO_INST_TAILCALL) != 0));
ins = NULL;
//GSHAREDVT_FAILURE (il_op);
CHECK_STACK (1);
--sp;
addr = *sp;
g_assert (addr);
fsig = mini_get_signature (method, token, generic_context, cfg->error);
CHECK_CFG_ERROR;
if (method->dynamic && fsig->pinvoke) {
MonoInst *args [3];
/*
* This is a call through a function pointer using a pinvoke
* signature. Have to create a wrapper and call that instead.
* FIXME: This is very slow, need to create a wrapper at JIT time
* instead based on the signature.
*/
EMIT_NEW_IMAGECONST (cfg, args [0], ((MonoDynamicMethod*)method)->assembly->image);
EMIT_NEW_PCONST (cfg, args [1], fsig);
args [2] = addr;
// FIXME tailcall?
addr = mono_emit_jit_icall (cfg, mono_get_native_calli_wrapper, args);
}
if (!method->dynamic && fsig->pinvoke &&
!method->wrapper_type) {
/* MONO_WRAPPER_DYNAMIC_METHOD dynamic method handled above in the
method->dynamic case; for other wrapper types assume the code knows
			   what it's doing and has added its own GC transitions */
gboolean skip_gc_trans = fsig->suppress_gc_transition;
if (!skip_gc_trans) {
#if 0
fprintf (stderr, "generating wrapper for calli in method %s with wrapper type %s\n", method->name, mono_wrapper_type_to_str (method->wrapper_type));
#endif
/* Call the wrapper that will do the GC transition instead */
MonoMethod *wrapper = mono_marshal_get_native_func_wrapper_indirect (method->klass, fsig, cfg->compile_aot);
fsig = mono_method_signature_internal (wrapper);
n = fsig->param_count - 1; /* wrapper has extra fnptr param */
CHECK_STACK (n);
/* move the args to allow room for 'this' in the first position */
while (n--) {
--sp;
sp [1] = sp [0];
}
sp[0] = addr; /* n+1 args, first arg is the address of the indirect method to call */
g_assert (!fsig->hasthis && !fsig->pinvoke);
ins = mono_emit_method_call (cfg, wrapper, /*args*/sp, NULL);
goto calli_end;
}
}
n = fsig->param_count + fsig->hasthis;
CHECK_STACK (n);
//g_assert (!virtual_ || fsig->hasthis);
sp -= n;
if (!(cfg->method->wrapper_type && cfg->method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD) && check_call_signature (cfg, fsig, sp)) {
if (break_on_unverified ())
check_call_signature (cfg, fsig, sp); // Again, step through it.
UNVERIFIED;
}
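			/* Later call sites weigh progressively more against inlining this method,
			 * capped at 10 * CALL_COST per call. */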
inline_costs += CALL_COST * MIN(10, num_calls++);
/*
* Making generic calls out of gsharedvt methods.
* This needs to be used for all generic calls, not just ones with a gsharedvt signature, to avoid
* patching gshared method addresses into a gsharedvt method.
*/
if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) {
/*
* We pass the address to the gsharedvt trampoline in the rgctx reg
*/
callee = addr;
g_assert (addr); // Doubles as boolean after tailcall check.
}
inst_tailcall && is_supported_tailcall (cfg, ip, method, NULL, fsig,
FALSE/*virtual irrelevant*/, addr != NULL, &tailcall);
if (save_last_error)
mono_emit_jit_icall (cfg, mono_marshal_clear_last_error, NULL);
if (callee) {
if (method->wrapper_type != MONO_WRAPPER_DELEGATE_INVOKE)
/* Not tested */
GSHAREDVT_FAILURE (il_op);
if (cfg->llvm_only)
// FIXME:
GSHAREDVT_FAILURE (il_op);
addr = emit_get_rgctx_sig (cfg, context_used, fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
ins = (MonoInst*)mini_emit_calli_full (cfg, fsig, sp, addr, NULL, callee, tailcall);
goto calli_end;
}
/* Prevent inlining of methods with indirect calls */
INLINE_FAILURE ("indirect call");
if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST || addr->opcode == OP_GOT_ENTRY) {
MonoJumpInfoType info_type;
gpointer info_data;
/*
* Instead of emitting an indirect call, emit a direct call
* with the contents of the aotconst as the patch info.
*/
if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST) {
info_type = (MonoJumpInfoType)addr->inst_c1;
info_data = addr->inst_p0;
} else {
info_type = (MonoJumpInfoType)addr->inst_right->inst_c1;
info_data = addr->inst_right->inst_left;
}
if (info_type == MONO_PATCH_INFO_ICALL_ADDR) {
// non-JIT icall, mostly builtin, but also user-extensible
tailcall = FALSE;
ins = (MonoInst*)mini_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR_CALL, info_data, fsig, sp);
NULLIFY_INS (addr);
goto calli_end;
} else if (info_type == MONO_PATCH_INFO_JIT_ICALL_ADDR
|| info_type == MONO_PATCH_INFO_SPECIFIC_TRAMPOLINE_LAZY_FETCH_ADDR) {
tailcall = FALSE;
ins = (MonoInst*)mini_emit_abs_call (cfg, info_type, info_data, fsig, sp);
NULLIFY_INS (addr);
goto calli_end;
}
}
if (cfg->llvm_only && !(cfg->method->wrapper_type && cfg->method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD))
ins = mini_emit_llvmonly_calli (cfg, fsig, sp, addr);
else
ins = (MonoInst*)mini_emit_calli_full (cfg, fsig, sp, addr, NULL, NULL, tailcall);
goto calli_end;
}
case MONO_CEE_CALL:
case MONO_CEE_CALLVIRT: {
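			// Note: the locals below are declared and assigned separately, presumably so
			// the forward gotos in this handler (e.g. goto call_end) never jump over an
			// initializer.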
MonoInst *addr; addr = NULL;
int array_rank; array_rank = 0;
gboolean virtual_; virtual_ = il_op == MONO_CEE_CALLVIRT;
gboolean pass_imt_from_rgctx; pass_imt_from_rgctx = FALSE;
MonoInst *imt_arg; imt_arg = NULL;
gboolean pass_vtable; pass_vtable = FALSE;
gboolean pass_mrgctx; pass_mrgctx = FALSE;
MonoInst *vtable_arg; vtable_arg = NULL;
gboolean check_this; check_this = FALSE;
gboolean delegate_invoke; delegate_invoke = FALSE;
gboolean direct_icall; direct_icall = FALSE;
gboolean tailcall_calli; tailcall_calli = FALSE;
gboolean noreturn; noreturn = FALSE;
gboolean gshared_static_virtual; gshared_static_virtual = FALSE;
#ifdef TARGET_WASM
gboolean needs_stack_walk; needs_stack_walk = FALSE;
#endif
// Variables shared by CEE_CALLI and CEE_CALL/CEE_CALLVIRT.
common_call = FALSE;
// variables to help in assertions
gboolean called_is_supported_tailcall; called_is_supported_tailcall = FALSE;
MonoMethod *tailcall_method; tailcall_method = NULL;
MonoMethod *tailcall_cmethod; tailcall_cmethod = NULL;
MonoMethodSignature *tailcall_fsig; tailcall_fsig = NULL;
gboolean tailcall_virtual; tailcall_virtual = FALSE;
gboolean tailcall_extra_arg; tailcall_extra_arg = FALSE;
gboolean inst_tailcall; inst_tailcall = G_UNLIKELY (debug_tailcall_try_all
? (next_ip < end && next_ip [0] == CEE_RET)
: ((ins_flag & MONO_INST_TAILCALL) != 0));
ins = NULL;
/* Used to pass arguments to called functions */
HandleCallData cdata;
memset (&cdata, 0, sizeof (HandleCallData));
cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
CHECK_CFG_ERROR;
if (cfg->verbose_level > 3)
printf ("cmethod = %s\n", mono_method_get_full_name (cmethod));
MonoMethod *cil_method; cil_method = cmethod;
if (constrained_class) {
if (m_method_is_static (cil_method) && mini_class_check_context_used (cfg, constrained_class)) {
/* get_constrained_method () doesn't work on the gparams used by generic sharing */
// FIXME: Other configurations
//if (!cfg->gsharedvt)
// GENERIC_SHARING_FAILURE (CEE_CALL);
gshared_static_virtual = TRUE;
} else {
cmethod = get_constrained_method (cfg, image, token, cil_method, constrained_class, generic_context);
CHECK_CFG_ERROR;
if (m_class_is_enumtype (constrained_class) && !strcmp (cmethod->name, "GetHashCode")) {
/* Use the corresponding method from the base type to avoid boxing */
MonoType *base_type = mono_class_enum_basetype_internal (constrained_class);
g_assert (base_type);
constrained_class = mono_class_from_mono_type_internal (base_type);
cmethod = get_method_nofail (constrained_class, cmethod->name, 0, 0);
g_assert (cmethod);
}
}
}
if (!dont_verify && !cfg->skip_visibility) {
MonoMethod *target_method = cil_method;
if (method->is_inflated) {
MonoGenericContainer *container = mono_method_get_generic_container(method_definition);
MonoGenericContext *context = (container != NULL ? &container->context : NULL);
target_method = mini_get_method_allow_open (method, token, NULL, context, cfg->error);
CHECK_CFG_ERROR;
}
if (!mono_method_can_access_method (method_definition, target_method) &&
!mono_method_can_access_method (method, cil_method))
emit_method_access_failure (cfg, method, cil_method);
}
if (cfg->llvm_only && cmethod && method_needs_stack_walk (cfg, cmethod)) {
if (cfg->interp && !cfg->interp_entry_only) {
/* Use the interpreter instead */
cfg->exception_message = g_strdup ("stack walk");
cfg->disable_llvm = TRUE;
}
#ifdef TARGET_WASM
else {
needs_stack_walk = TRUE;
}
#endif
}
if (!virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT) && !gshared_static_virtual) {
if (!mono_class_is_interface (method->klass))
emit_bad_image_failure (cfg, method, cil_method);
else
virtual_ = TRUE;
}
if (!m_class_is_inited (cmethod->klass))
if (!mono_class_init_internal (cmethod->klass))
TYPE_LOAD_ERROR (cmethod->klass);
fsig = mono_method_signature_internal (cmethod);
if (!fsig)
LOAD_ERROR;
if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
mini_class_is_system_array (cmethod->klass)) {
array_rank = m_class_get_rank (cmethod->klass);
} else if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) && direct_icalls_enabled (cfg, cmethod)) {
direct_icall = TRUE;
} else if (fsig->pinvoke) {
if (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL) {
/*
* Avoid calling mono_marshal_get_native_wrapper () too early, it might call managed
* callbacks on netcore.
*/
fsig = mono_metadata_signature_dup_mempool (cfg->mempool, fsig);
fsig->pinvoke = FALSE;
} else {
MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod, TRUE, cfg->compile_aot);
fsig = mono_method_signature_internal (wrapper);
}
} else if (constrained_class) {
} else {
fsig = mono_method_get_signature_checked (cmethod, image, token, generic_context, cfg->error);
CHECK_CFG_ERROR;
}
if (cfg->llvm_only && !cfg->method->wrapper_type && (!cmethod || cmethod->is_inflated))
cfg->signatures = g_slist_prepend_mempool (cfg->mempool, cfg->signatures, fsig);
/* See code below */
if (cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature_internal (cmethod)->param_count == 1) {
MonoBasicBlock *tbb;
GET_BBLOCK (cfg, tbb, next_ip);
if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
/*
* We want to extend the try block to cover the call, but we can't do it if the
					 * call is made directly, since it's followed by an exception check.
*/
direct_icall = FALSE;
}
}
mono_save_token_info (cfg, image, token, cil_method);
if (!(seq_point_locs && mono_bitset_test_fast (seq_point_locs, next_ip - header->code)))
need_seq_point = TRUE;
/* Don't support calls made using type arguments for now */
/*
if (cfg->gsharedvt) {
if (mini_is_gsharedvt_signature (fsig))
GSHAREDVT_FAILURE (il_op);
}
*/
if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
g_assert_not_reached ();
n = fsig->param_count + fsig->hasthis;
if (!cfg->gshared && mono_class_is_gtd (cmethod->klass))
UNVERIFIED;
if (!cfg->gshared)
g_assert (!mono_method_check_context_used (cmethod));
CHECK_STACK (n);
//g_assert (!virtual_ || fsig->hasthis);
sp -= n;
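			/* Devirtualize if the receiver's exact type is known (OP_TYPED_OBJREF):
			 * resolve the virtual slot now and emit a direct call. */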
if (virtual_ && cmethod && sp [0] && sp [0]->opcode == OP_TYPED_OBJREF) {
ERROR_DECL (error);
MonoMethod *new_cmethod = mono_class_get_virtual_method (sp [0]->klass, cmethod, error);
if (is_ok (error)) {
cmethod = new_cmethod;
virtual_ = FALSE;
} else {
mono_error_cleanup (error);
}
}
if (cmethod && method_does_not_return (cmethod)) {
cfg->cbb->out_of_line = TRUE;
noreturn = TRUE;
}
cdata.method = method;
cdata.inst_tailcall = inst_tailcall;
/*
* We have the `constrained.' prefix opcode.
*/
if (constrained_class) {
ins = handle_constrained_call (cfg, cmethod, fsig, constrained_class, sp, &cdata, &cmethod, &virtual_, &emit_widen);
CHECK_CFG_EXCEPTION;
if (!gshared_static_virtual)
constrained_class = NULL;
if (ins)
goto call_end;
}
for (int i = 0; i < fsig->param_count; ++i)
sp [i + fsig->hasthis] = convert_value (cfg, fsig->params [i], sp [i + fsig->hasthis]);
if (check_call_signature (cfg, fsig, sp)) {
if (break_on_unverified ())
check_call_signature (cfg, fsig, sp); // Again, step through it.
UNVERIFIED;
}
if ((m_class_get_parent (cmethod->klass) == mono_defaults.multicastdelegate_class) && !strcmp (cmethod->name, "Invoke"))
delegate_invoke = TRUE;
/*
* Implement a workaround for the inherent races involved in locking:
* Monitor.Enter ()
* try {
* } finally {
* Monitor.Exit ()
* }
* If a thread abort happens between the call to Monitor.Enter () and the start of the
* try block, the Exit () won't be executed, see:
* http://www.bluebytesoftware.com/blog/2007/01/30/MonitorEnterThreadAbortsAndOrphanedLocks.aspx
* To work around this, we extend such try blocks to include the last x bytes
* of the Monitor.Enter () call.
*/
if (cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature_internal (cmethod)->param_count == 1) {
MonoBasicBlock *tbb;
GET_BBLOCK (cfg, tbb, next_ip);
/*
* Only extend try blocks with a finally, to avoid catching exceptions thrown
* from Monitor.Enter like ArgumentNullException.
*/
if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
/* Mark this bblock as needing to be extended */
tbb->extend_try_block = TRUE;
}
}
/* Conversion to a JIT intrinsic */
gboolean ins_type_initialized;
if ((ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp, &ins_type_initialized))) {
if (!MONO_TYPE_IS_VOID (fsig->ret)) {
if (!ins_type_initialized)
mini_type_to_eval_stack_type ((cfg), fsig->ret, ins);
emit_widen = FALSE;
}
// FIXME This is only missed if in fact the intrinsic involves a call.
if (inst_tailcall) // FIXME
mono_tailcall_print ("missed tailcall intrins %s -> %s\n", method->name, cmethod->name);
goto call_end;
}
CHECK_CFG_ERROR;
/*
* If the callee is a shared method, then its static cctor
* might not get called after the call was patched.
*/
if (cfg->gshared && cmethod->klass != method->klass && mono_class_is_ginst (cmethod->klass) && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
emit_class_init (cfg, cmethod->klass);
CHECK_TYPELOAD (cmethod->klass);
}
/* Inlining */
if ((cfg->opt & MONO_OPT_INLINE) && !inst_tailcall &&
(!virtual_ || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
mono_method_check_inlining (cfg, cmethod)) {
int costs;
gboolean always = FALSE;
gboolean is_empty = FALSE;
if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) {
/* Prevent inlining of methods that call wrappers */
INLINE_FAILURE ("wrapper call");
// FIXME? Does this write to cmethod impact tailcall_supported? Probably not.
// Neither pinvoke or icall are likely to be tailcalled.
cmethod = mono_marshal_get_native_wrapper (cmethod, TRUE, FALSE);
always = TRUE;
}
costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, always, &is_empty);
if (costs) {
cfg->real_offset += 5;
if (!MONO_TYPE_IS_VOID (fsig->ret))
/* *sp is already set by inline_method */
ins = *sp;
inline_costs += costs;
// FIXME This is missed if the inlinee contains tail calls that
// would work, but not once inlined into caller.
				// This match/mismatch could be a factor in the inlining decision,
				// i.e. do not inline if it hurts tailcalls; do inline
				// if it helps and/or is neutral, and helps performance
// using usual heuristics.
// Note that inlining will expose multiple tailcall opportunities
// so the tradeoff is not obvious. If we can tailcall anything
// like desktop, then this factor mostly falls away, except
// that inlining can affect tailcall performance due to
// signature match/mismatch.
if (inst_tailcall) // FIXME
mono_tailcall_print ("missed tailcall inline %s -> %s\n", method->name, cmethod->name);
if (is_empty)
ins_has_side_effect = FALSE;
goto call_end;
}
}
check_method_sharing (cfg, cmethod, &pass_vtable, &pass_mrgctx);
if (cfg->gshared) {
MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
context_used = mini_method_check_context_used (cfg, cmethod);
if (!context_used && gshared_static_virtual)
context_used = mini_class_check_context_used (cfg, constrained_class);
if (context_used && mono_class_is_interface (cmethod->klass) && !m_method_is_static (cmethod)) {
/* Generic method interface
calls are resolved via a
helper function and don't
need an imt. */
if (!cmethod_context || !cmethod_context->method_inst)
pass_imt_from_rgctx = TRUE;
}
/*
* If a shared method calls another
* shared method then the caller must
* have a generic sharing context
* because the magic trampoline
* requires it. FIXME: We shouldn't
* have to force the vtable/mrgctx
* variable here. Instead there
* should be a flag in the cfg to
* request a generic sharing context.
*/
if (context_used &&
((cfg->method->flags & METHOD_ATTRIBUTE_STATIC) || m_class_is_valuetype (cfg->method->klass)))
mono_get_vtable_var (cfg);
}
if (pass_vtable) {
if (context_used) {
vtable_arg = mini_emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
} else {
MonoVTable *vtable = mono_class_vtable_checked (cmethod->klass, cfg->error);
CHECK_CFG_ERROR;
CHECK_TYPELOAD (cmethod->klass);
EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
}
}
if (pass_mrgctx) {
g_assert (!vtable_arg);
if (!cfg->compile_aot) {
/*
* emit_get_rgctx_method () calls mono_class_vtable () so check
* for type load errors before.
*/
mono_class_setup_vtable (cmethod->klass);
CHECK_TYPELOAD (cmethod->klass);
}
vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
if ((!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod))) {
if (virtual_)
check_this = TRUE;
virtual_ = FALSE;
}
}
if (pass_imt_from_rgctx) {
g_assert (!pass_vtable);
imt_arg = emit_get_rgctx_method (cfg, context_used,
cmethod, MONO_RGCTX_INFO_METHOD);
g_assert (imt_arg);
}
if (check_this)
MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
/* Calling virtual generic methods */
			// These temporaries help disentangle the "pure" computation of
// inputs to is_supported_tailcall from side effects, so that
// is_supported_tailcall can be computed just once.
gboolean virtual_generic; virtual_generic = FALSE;
gboolean virtual_generic_imt; virtual_generic_imt = FALSE;
if (virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
!MONO_METHOD_IS_FINAL (cmethod) &&
fsig->generic_param_count &&
!(cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) &&
!cfg->llvm_only) {
g_assert (fsig->is_inflated);
virtual_generic = TRUE;
/* Prevent inlining of methods that contain indirect calls */
INLINE_FAILURE ("virtual generic call");
if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig))
GSHAREDVT_FAILURE (il_op);
if (cfg->backend->have_generalized_imt_trampoline && cfg->backend->gshared_supported && cmethod->wrapper_type == MONO_WRAPPER_NONE) {
virtual_generic_imt = TRUE;
g_assert (!imt_arg);
if (!context_used)
g_assert (cmethod->is_inflated);
imt_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
g_assert (imt_arg);
virtual_ = TRUE;
vtable_arg = NULL;
}
}
// Capture some intent before computing tailcall.
gboolean make_generic_call_out_of_gsharedvt_method;
gboolean will_have_imt_arg;
make_generic_call_out_of_gsharedvt_method = FALSE;
will_have_imt_arg = FALSE;
/*
* Making generic calls out of gsharedvt methods.
* This needs to be used for all generic calls, not just ones with a gsharedvt signature, to avoid
* patching gshared method addresses into a gsharedvt method.
*/
if (cfg->gsharedvt && (mini_is_gsharedvt_signature (fsig) || cmethod->is_inflated || mono_class_is_ginst (cmethod->klass)) &&
!(m_class_get_rank (cmethod->klass) && m_class_get_byval_arg (cmethod->klass)->type != MONO_TYPE_SZARRAY) &&
(!(cfg->llvm_only && virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL)))) {
make_generic_call_out_of_gsharedvt_method = TRUE;
if (virtual_) {
if (fsig->generic_param_count) {
will_have_imt_arg = TRUE;
} else if (mono_class_is_interface (cmethod->klass) && !imt_arg) {
will_have_imt_arg = TRUE;
}
}
}
/* Tail prefix / tailcall optimization */
/* FIXME: Enabling TAILC breaks some inlining/stack trace/etc tests.
Inlining and stack traces are not guaranteed however. */
/* FIXME: runtime generic context pointer for jumps? */
/* FIXME: handle this for generic sharing eventually */
// tailcall means "the backend can and will handle it".
// inst_tailcall means the tail. prefix is present.
tailcall_extra_arg = vtable_arg || imt_arg || will_have_imt_arg || mono_class_is_interface (cmethod->klass);
tailcall = inst_tailcall && is_supported_tailcall (cfg, ip, method, cmethod, fsig,
virtual_, tailcall_extra_arg, &tailcall_calli);
// Writes to imt_arg, vtable_arg, virtual_, cmethod, must not occur from here (inputs to is_supported_tailcall).
// Capture values to later assert they don't change.
called_is_supported_tailcall = TRUE;
tailcall_method = method;
tailcall_cmethod = cmethod;
tailcall_fsig = fsig;
tailcall_virtual = virtual_;
if (virtual_generic) {
if (virtual_generic_imt) {
if (tailcall) {
/* Prevent inlining of methods with tailcalls (the call stack would be altered) */
INLINE_FAILURE ("tailcall");
}
common_call = TRUE;
goto call_end;
}
MonoInst *this_temp, *this_arg_temp, *store;
MonoInst *iargs [4];
this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
MONO_ADD_INS (cfg->cbb, store);
/* FIXME: This should be a managed pointer */
this_arg_temp = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL);
EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
iargs [1] = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
addr = mono_emit_jit_icall (cfg, mono_helper_compile_generic_method, iargs);
EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
ins = (MonoInst*)mini_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
if (inst_tailcall) // FIXME
mono_tailcall_print ("missed tailcall virtual generic %s -> %s\n", method->name, cmethod->name);
goto call_end;
}
CHECK_CFG_ERROR;
/* Tail recursion elimination */
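			/* A call to the current method immediately followed by 'ret' is rewritten into
			 * argument stores plus a branch back to the method entry, turning the recursion
			 * into a loop. Only done when no argument is a struct (see has_vtargs below). */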
if (((cfg->opt & MONO_OPT_TAILCALL) || inst_tailcall) && il_op == MONO_CEE_CALL && cmethod == method && next_ip < end && next_ip [0] == CEE_RET && !vtable_arg) {
gboolean has_vtargs = FALSE;
int i;
/* Prevent inlining of methods with tailcalls (the call stack would be altered) */
INLINE_FAILURE ("tailcall");
/* keep it simple */
for (i = fsig->param_count - 1; !has_vtargs && i >= 0; i--)
has_vtargs = MONO_TYPE_ISSTRUCT (mono_method_signature_internal (cmethod)->params [i]);
if (!has_vtargs) {
if (need_seq_point) {
emit_seq_point (cfg, method, ip, FALSE, TRUE);
need_seq_point = FALSE;
}
for (i = 0; i < n; ++i)
EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
mini_profiler_emit_tail_call (cfg, cmethod);
MONO_INST_NEW (cfg, ins, OP_BR);
MONO_ADD_INS (cfg->cbb, ins);
tblock = start_bblock->out_bb [0];
link_bblock (cfg, cfg->cbb, tblock);
ins->inst_target_bb = tblock;
start_new_bblock = 1;
/* skip the CEE_RET, too */
if (ip_in_bb (cfg, cfg->cbb, next_ip))
skip_ret = TRUE;
push_res = FALSE;
need_seq_point = FALSE;
goto call_end;
}
}
inline_costs += CALL_COST * MIN(10, num_calls++);
/*
* Synchronized wrappers.
			 * It's hard to determine where to replace a method with its synchronized
* wrapper without causing an infinite recursion. The current solution is
* to add the synchronized wrapper in the trampolines, and to
* change the called method to a dummy wrapper, and resolve that wrapper
* to the real method in mono_jit_compile_method ().
*/
if (cfg->method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
MonoMethod *orig = mono_marshal_method_from_wrapper (cfg->method);
if (cmethod == orig || (cmethod->is_inflated && mono_method_get_declaring_generic_method (cmethod) == orig)) {
// FIXME? Does this write to cmethod impact tailcall_supported? Probably not.
cmethod = mono_marshal_get_synchronized_inner_wrapper (cmethod);
}
}
/*
* Making generic calls out of gsharedvt methods.
* This needs to be used for all generic calls, not just ones with a gsharedvt signature, to avoid
* patching gshared method addresses into a gsharedvt method.
*/
if (make_generic_call_out_of_gsharedvt_method) {
if (virtual_) {
//if (mono_class_is_interface (cmethod->klass))
//GSHAREDVT_FAILURE (il_op);
// disable for possible remoting calls
if (fsig->hasthis && method->klass == mono_defaults.object_class)
GSHAREDVT_FAILURE (il_op);
if (fsig->generic_param_count) {
/* virtual generic call */
g_assert (!imt_arg);
g_assert (will_have_imt_arg);
/* Same as the virtual generic case above */
imt_arg = emit_get_rgctx_method (cfg, context_used,
cmethod, MONO_RGCTX_INFO_METHOD);
g_assert (imt_arg);
} else if (mono_class_is_interface (cmethod->klass) && !imt_arg) {
/* This can happen when we call a fully instantiated iface method */
g_assert (will_have_imt_arg);
imt_arg = emit_get_rgctx_method (cfg, context_used,
cmethod, MONO_RGCTX_INFO_METHOD);
g_assert (imt_arg);
}
/* This is not needed, as the trampoline code will pass one, and it might be passed in the same reg as the imt arg */
vtable_arg = NULL;
}
if ((m_class_get_parent (cmethod->klass) == mono_defaults.multicastdelegate_class) && (!strcmp (cmethod->name, "Invoke")))
keep_this_alive = sp [0];
MonoRgctxInfoType info_type;
if (virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))
info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE_VIRT;
else
info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE;
addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, info_type);
if (cfg->llvm_only) {
// FIXME: Avoid initializing vtable_arg
ins = mini_emit_llvmonly_calli (cfg, fsig, sp, addr);
if (inst_tailcall) // FIXME
mono_tailcall_print ("missed tailcall llvmonly gsharedvt %s -> %s\n", method->name, cmethod->name);
} else {
tailcall = tailcall_calli;
ins = (MonoInst*)mini_emit_calli_full (cfg, fsig, sp, addr, imt_arg, vtable_arg, tailcall);
tailcall_remove_ret |= tailcall;
}
goto call_end;
}
/* Generic sharing */
/*
* Calls to generic methods from shared code cannot go through the trampoline infrastructure
* in some cases, because the called method might end up being different on every call.
* Load the called method address from the rgctx and do an indirect call in these cases.
* Use this if the callee is gsharedvt sharable too, since
* at runtime we might find an instantiation for which the call cannot
* be patched (the 'no_patch' code path in mini-trampolines.c).
*/
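/* The check below is built up in stages: context must be used with no imt/array/delegate fast path available, the callee must not be shareable through the trampoline path (or sharing is disabled, or it is a static virtual), and the call must not be a real virtual dispatch. */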
gboolean gshared_indirect;
gshared_indirect = context_used && !imt_arg && !array_rank && !delegate_invoke;
if (gshared_indirect)
gshared_indirect = (!mono_method_is_generic_sharable_full (cmethod, TRUE, FALSE, FALSE) ||
!mono_class_generic_sharing_enabled (cmethod->klass) ||
gshared_static_virtual);
if (gshared_indirect)
gshared_indirect = (!virtual_ || MONO_METHOD_IS_FINAL (cmethod) ||
!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL));
if (gshared_indirect) {
INLINE_FAILURE ("gshared");
g_assert (cfg->gshared && cmethod);
g_assert (!addr);
if (fsig->hasthis)
MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
if (cfg->llvm_only) {
if (cfg->gsharedvt && mini_is_gsharedvt_variable_signature (fsig)) {
/* Handled in handle_constrained_gsharedvt_call () */
g_assert (!gshared_static_virtual);
addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GSHAREDVT_OUT_WRAPPER);
} else {
if (gshared_static_virtual)
addr = emit_get_rgctx_virt_method (cfg, -1, constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_CODE);
else
addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_FTNDESC);
}
// FIXME: Avoid initializing imt_arg/vtable_arg
ins = mini_emit_llvmonly_calli (cfg, fsig, sp, addr);
if (inst_tailcall) // FIXME
mono_tailcall_print ("missed tailcall context_used_llvmonly %s -> %s\n", method->name, cmethod->name);
} else {
if (gshared_static_virtual) {
/*
* cmethod is a static interface method, the actual called method at runtime
* needs to be computed using constrained_class and cmethod.
*/
addr = emit_get_rgctx_virt_method (cfg, -1, constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_CODE);
} else {
addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
}
if (inst_tailcall)
mono_tailcall_print ("%s tailcall_calli#2 %s -> %s\n", tailcall_calli ? "making" : "missed", method->name, cmethod->name);
tailcall = tailcall_calli;
ins = (MonoInst*)mini_emit_calli_full (cfg, fsig, sp, addr, imt_arg, vtable_arg, tailcall);
tailcall_remove_ret |= tailcall;
}
goto call_end;
}
/* Direct calls to icalls */
if (direct_icall) {
MonoMethod *wrapper;
int costs;
/* Inline the wrapper */
wrapper = mono_marshal_get_native_wrapper (cmethod, TRUE, cfg->compile_aot);
costs = inline_method (cfg, wrapper, fsig, sp, ip, cfg->real_offset, TRUE, NULL);
g_assert (costs > 0);
cfg->real_offset += 5;
if (!MONO_TYPE_IS_VOID (fsig->ret))
/* *sp is already set by inline_method */
ins = *sp;
inline_costs += costs;
if (inst_tailcall) // FIXME
mono_tailcall_print ("missed tailcall direct_icall %s -> %s\n", method->name, cmethod->name);
goto call_end;
}
/* Array methods */
if (array_rank) {
MonoInst *addr;
if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
MonoInst *val = sp [fsig->param_count];
if (val->type == STACK_OBJ) {
MonoInst *iargs [ ] = { sp [0], val };
mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
}
addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
if (!mini_debug_options.weak_memory_model && val->type == STACK_OBJ)
mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, val->dreg);
if (cfg->gen_write_barriers && val->type == STACK_OBJ && !MONO_INS_IS_PCONST_NULL (val))
mini_emit_write_barrier (cfg, addr, val);
if (cfg->gen_write_barriers && mini_is_gsharedvt_klass (cmethod->klass))
GSHAREDVT_FAILURE (il_op);
} else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
} else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
if (!m_class_is_valuetype (m_class_get_element_class (cmethod->klass)) && !readonly)
mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
CHECK_TYPELOAD (cmethod->klass);
readonly = FALSE;
addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
ins = addr;
} else {
g_assert_not_reached ();
}
emit_widen = FALSE;
if (inst_tailcall) // FIXME
mono_tailcall_print ("missed tailcall array_rank %s -> %s\n", method->name, cmethod->name);
goto call_end;
}
ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual_ ? sp [0] : NULL);
if (ins) {
if (inst_tailcall) // FIXME
mono_tailcall_print ("missed tailcall redirect %s -> %s\n", method->name, cmethod->name);
goto call_end;
}
/* Tail prefix / tailcall optimization */
if (tailcall) {
/* Prevent inlining of methods with tailcalls (the call stack would be altered) */
INLINE_FAILURE ("tailcall");
}
/*
* Virtual calls in llvm-only mode.
*/
if (cfg->llvm_only && virtual_ && cmethod && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL)) {
ins = mini_emit_llvmonly_virtual_call (cfg, cmethod, fsig, context_used, sp);
goto call_end;
}
/* Common call */
if (!(cfg->opt & MONO_OPT_AGGRESSIVE_INLINING) && !(method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING) && !(cmethod->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING) && !method_does_not_return (cmethod))
INLINE_FAILURE ("call");
common_call = TRUE;
#ifdef TARGET_WASM
/* Push an LMF so these frames can be enumerated during stack walks by mono_arch_unwind_frame () */
if (needs_stack_walk && !cfg->deopt) {
MonoInst *method_ins;
int lmf_reg;
emit_push_lmf (cfg);
EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
lmf_reg = ins->dreg;
/* The lmf->method field will be used to look up the MonoJitInfo for this method */
method_ins = emit_get_rgctx_method (cfg, mono_method_check_context_used (cfg->method), cfg->method, MONO_RGCTX_INFO_METHOD);
EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, method), method_ins->dreg);
}
#endif
call_end:
// Check that the decision to tailcall would not have changed.
g_assert (!called_is_supported_tailcall || tailcall_method == method);
// FIXME? cmethod does change, weaken the assert if we weren't tailcalling anyway.
// If this still fails, restructure the code, or call tailcall_supported again and assert no change.
g_assert (!called_is_supported_tailcall || !tailcall || tailcall_cmethod == cmethod);
g_assert (!called_is_supported_tailcall || tailcall_fsig == fsig);
g_assert (!called_is_supported_tailcall || tailcall_virtual == virtual_);
g_assert (!called_is_supported_tailcall || tailcall_extra_arg == (vtable_arg || imt_arg || will_have_imt_arg || mono_class_is_interface (cmethod->klass)));
if (common_call) // FIXME goto call_end && !common_call often skips tailcall processing.
ins = mini_emit_method_call_full (cfg, cmethod, fsig, tailcall, sp, virtual_ ? sp [0] : NULL,
imt_arg, vtable_arg);
/*
* Handle devirt of some A.B.C calls by replacing the result of A.B with an OP_TYPED_OBJREF instruction, so the .C
* call can be devirtualized above.
*/
if (cmethod)
ins = handle_call_res_devirt (cfg, cmethod, ins);
#ifdef TARGET_WASM
if (common_call && needs_stack_walk && !cfg->deopt)
/* If an exception is thrown, the LMF is popped by a call to mini_llvmonly_pop_lmf () */
emit_pop_lmf (cfg);
#endif
if (noreturn) {
MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
MONO_ADD_INS (cfg->cbb, ins);
}
calli_end:
if ((tailcall_remove_ret || (common_call && tailcall)) && !cfg->llvm_only) {
link_bblock (cfg, cfg->cbb, end_bblock);
start_new_bblock = 1;
// FIXME: Eliminate unreachable epilogs
/*
* OP_TAILCALL has no return value, so skip the CEE_RET if it is
* only reachable from this call.
*/
GET_BBLOCK (cfg, tblock, next_ip);
if (tblock == cfg->cbb || tblock->in_count == 0)
skip_ret = TRUE;
push_res = FALSE;
need_seq_point = FALSE;
}
if (ins_flag & MONO_INST_TAILCALL)
mini_test_tailcall (cfg, tailcall);
/* End of call, INS should contain the result of the call, if any */
if (push_res && !MONO_TYPE_IS_VOID (fsig->ret)) {
g_assert (ins);
if (emit_widen)
*sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
else
*sp++ = ins;
}
if (save_last_error) {
save_last_error = FALSE;
#ifdef TARGET_WIN32
// Making icalls etc could clobber the value so emit inline code
// to read last error on Windows.
MONO_INST_NEW (cfg, ins, OP_GET_LAST_ERROR);
ins->dreg = alloc_dreg (cfg, STACK_I4);
ins->type = STACK_I4;
MONO_ADD_INS (cfg->cbb, ins);
mono_emit_jit_icall (cfg, mono_marshal_set_last_error_windows, &ins);
#else
mono_emit_jit_icall (cfg, mono_marshal_set_last_error, NULL);
#endif
}
if (keep_this_alive) {
MonoInst *dummy_use;
/* See mini_emit_method_call_full () */
EMIT_NEW_DUMMY_USE (cfg, dummy_use, keep_this_alive);
}
if (cfg->llvm_only && cmethod && method_needs_stack_walk (cfg, cmethod)) {
/*
* Clang can convert these calls to tailcalls which screw up the stack
* walk. This happens even when the -fno-optimize-sibling-calls
* option is passed to clang.
* Work around this by emitting a dummy call.
*/
mono_emit_jit_icall (cfg, mono_dummy_jit_icall, NULL);
}
CHECK_CFG_EXCEPTION;
if (skip_ret) {
// FIXME When not followed by CEE_RET, correct behavior is to raise an exception.
g_assert (next_ip [0] == CEE_RET);
next_ip += 1;
il_op = MonoOpcodeEnum_Invalid; // Call or ret? Unclear.
}
ins_flag = 0;
constrained_class = NULL;
if (need_seq_point) {
//check if this is a nested call and flag the last seq point as a nested call, only for non-native methods
if (!(method->flags & METHOD_IMPL_ATTRIBUTE_NATIVE)) {
if (emitted_funccall_seq_point) {
if (cfg->last_seq_point)
cfg->last_seq_point->flags |= MONO_INST_NESTED_CALL;
}
else
emitted_funccall_seq_point = TRUE;
}
emit_seq_point (cfg, method, next_ip, FALSE, TRUE);
}
break;
}
case MONO_CEE_RET:
if (!detached_before_ret)
mini_profiler_emit_leave (cfg, sig->ret->type != MONO_TYPE_VOID ? sp [-1] : NULL);
g_assert (!method_does_not_return (method));
if (cfg->method != method) {
/* return from inlined method */
/*
* If in_count == 0, that means the ret is unreachable due to
* being preceded by a throw. In that case, inline_method () will
* handle setting the return value
* (test case: test_0_inline_throw ()).
*/
if (return_var && cfg->cbb->in_count) {
MonoType *ret_type = mono_method_signature_internal (method)->ret;
MonoInst *store;
CHECK_STACK (1);
--sp;
*sp = convert_value (cfg, ret_type, *sp);
if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
UNVERIFIED;
//g_assert (returnvar != -1);
EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
cfg->ret_var_set = TRUE;
}
} else {
if (cfg->lmf_var && cfg->cbb->in_count && (!cfg->llvm_only || cfg->deopt))
emit_pop_lmf (cfg);
if (cfg->ret) {
MonoType *ret_type = mini_get_underlying_type (mono_method_signature_internal (method)->ret);
if (seq_points && !sym_seq_points) {
/*
* Place a seq point here too even though the IL stack is not
* empty, so a step over on
* call <FOO>
* ret
* will work correctly.
*/
NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
MONO_ADD_INS (cfg->cbb, ins);
}
g_assert (!return_var);
CHECK_STACK (1);
--sp;
*sp = convert_value (cfg, ret_type, *sp);
if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
UNVERIFIED;
emit_setret (cfg, *sp);
}
}
if (sp != stack_start)
UNVERIFIED;
MONO_INST_NEW (cfg, ins, OP_BR);
ins->inst_target_bb = end_bblock;
MONO_ADD_INS (cfg->cbb, ins);
link_bblock (cfg, cfg->cbb, end_bblock);
start_new_bblock = 1;
break;
case MONO_CEE_BR_S:
MONO_INST_NEW (cfg, ins, OP_BR);
GET_BBLOCK (cfg, tblock, target);
link_bblock (cfg, cfg->cbb, tblock);
ins->inst_target_bb = tblock;
if (sp != stack_start) {
handle_stack_args (cfg, stack_start, sp - stack_start);
sp = stack_start;
CHECK_UNVERIFIABLE (cfg);
}
MONO_ADD_INS (cfg->cbb, ins);
start_new_bblock = 1;
inline_costs += BRANCH_COST;
break;
case MONO_CEE_BEQ_S:
case MONO_CEE_BGE_S:
case MONO_CEE_BGT_S:
case MONO_CEE_BLE_S:
case MONO_CEE_BLT_S:
case MONO_CEE_BNE_UN_S:
case MONO_CEE_BGE_UN_S:
case MONO_CEE_BGT_UN_S:
case MONO_CEE_BLE_UN_S:
case MONO_CEE_BLT_UN_S:
MONO_INST_NEW (cfg, ins, il_op + BIG_BRANCH_OFFSET);
ADD_BINCOND (NULL);
sp = stack_start;
inline_costs += BRANCH_COST;
break;
case MONO_CEE_BR:
MONO_INST_NEW (cfg, ins, OP_BR);
GET_BBLOCK (cfg, tblock, target);
link_bblock (cfg, cfg->cbb, tblock);
ins->inst_target_bb = tblock;
if (sp != stack_start) {
handle_stack_args (cfg, stack_start, sp - stack_start);
sp = stack_start;
CHECK_UNVERIFIABLE (cfg);
}
MONO_ADD_INS (cfg->cbb, ins);
start_new_bblock = 1;
inline_costs += BRANCH_COST;
break;
case MONO_CEE_BRFALSE_S:
case MONO_CEE_BRTRUE_S:
case MONO_CEE_BRFALSE:
case MONO_CEE_BRTRUE: {
MonoInst *cmp;
gboolean is_true = il_op == MONO_CEE_BRTRUE_S || il_op == MONO_CEE_BRTRUE;
if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
UNVERIFIED;
sp--;
GET_BBLOCK (cfg, tblock, target);
link_bblock (cfg, cfg->cbb, tblock);
GET_BBLOCK (cfg, tblock, next_ip);
link_bblock (cfg, cfg->cbb, tblock);
if (sp != stack_start) {
handle_stack_args (cfg, stack_start, sp - stack_start);
CHECK_UNVERIFIABLE (cfg);
}
MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
cmp->sreg1 = sp [0]->dreg;
type_from_op (cfg, cmp, sp [0], NULL);
CHECK_TYPE (cmp);
#if SIZEOF_REGISTER == 4
if (cmp->opcode == OP_LCOMPARE_IMM) {
/* Convert it to OP_LCOMPARE */
MONO_INST_NEW (cfg, ins, OP_I8CONST);
ins->type = STACK_I8;
ins->dreg = alloc_dreg (cfg, STACK_I8);
ins->inst_l = 0;
MONO_ADD_INS (cfg->cbb, ins);
cmp->opcode = OP_LCOMPARE;
cmp->sreg2 = ins->dreg;
}
#endif
MONO_ADD_INS (cfg->cbb, cmp);
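/* brtrue branches when the value compares not-equal to 0, brfalse when it compares equal */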
MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
type_from_op (cfg, ins, sp [0], NULL);
MONO_ADD_INS (cfg->cbb, ins);
ins->inst_many_bb = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * 2);
GET_BBLOCK (cfg, tblock, target);
ins->inst_true_bb = tblock;
GET_BBLOCK (cfg, tblock, next_ip);
ins->inst_false_bb = tblock;
start_new_bblock = 2;
sp = stack_start;
inline_costs += BRANCH_COST;
break;
}
case MONO_CEE_BEQ:
case MONO_CEE_BGE:
case MONO_CEE_BGT:
case MONO_CEE_BLE:
case MONO_CEE_BLT:
case MONO_CEE_BNE_UN:
case MONO_CEE_BGE_UN:
case MONO_CEE_BGT_UN:
case MONO_CEE_BLE_UN:
case MONO_CEE_BLT_UN:
MONO_INST_NEW (cfg, ins, il_op);
ADD_BINCOND (NULL);
sp = stack_start;
inline_costs += BRANCH_COST;
break;
case MONO_CEE_SWITCH: {
MonoInst *src1;
MonoBasicBlock **targets;
MonoBasicBlock *default_bblock;
MonoJumpInfoBBTable *table;
int offset_reg = alloc_preg (cfg);
int target_reg = alloc_preg (cfg);
int table_reg = alloc_preg (cfg);
int sum_reg = alloc_preg (cfg);
gboolean use_op_switch;
n = read32 (ip + 1);
--sp;
src1 = sp [0];
if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
UNVERIFIED;
ip += 5;
GET_BBLOCK (cfg, default_bblock, next_ip);
default_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
targets = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
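/* The switch targets are encoded as signed offsets relative to the end of the whole switch instruction (next_ip) */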
for (i = 0; i < n; ++i) {
GET_BBLOCK (cfg, tblock, next_ip + (gint32)read32 (ip));
targets [i] = tblock;
targets [i]->flags |= BB_INDIRECT_JUMP_TARGET;
ip += 4;
}
if (sp != stack_start) {
/*
* Link the current bb with the targets as well, so handle_stack_args
* will set their in_stack correctly.
*/
link_bblock (cfg, cfg->cbb, default_bblock);
for (i = 0; i < n; ++i)
link_bblock (cfg, cfg->cbb, targets [i]);
handle_stack_args (cfg, stack_start, sp - stack_start);
sp = stack_start;
CHECK_UNVERIFIABLE (cfg);
/* Undo the links */
mono_unlink_bblock (cfg, cfg->cbb, default_bblock);
for (i = 0; i < n; ++i)
mono_unlink_bblock (cfg, cfg->cbb, targets [i]);
}
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
for (i = 0; i < n; ++i)
link_bblock (cfg, cfg->cbb, targets [i]);
table = (MonoJumpInfoBBTable *)mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
table->table = targets;
table->table_size = n;
use_op_switch = FALSE;
#ifdef TARGET_ARM
/* ARM implements SWITCH statements differently */
/* FIXME: Make it use the generic implementation */
if (!cfg->compile_aot)
use_op_switch = TRUE;
#endif
if (COMPILE_LLVM (cfg))
use_op_switch = TRUE;
cfg->cbb->has_jump_table = 1;
if (use_op_switch) {
MONO_INST_NEW (cfg, ins, OP_SWITCH);
ins->sreg1 = src1->dreg;
ins->inst_p0 = table;
ins->inst_many_bb = targets;
ins->klass = (MonoClass *)GUINT_TO_POINTER (n);
MONO_ADD_INS (cfg->cbb, ins);
} else {
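/* No OP_SWITCH: index the jump table by hand, i.e. load *(table + src1 * sizeof (gpointer)) and branch to it */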
if (TARGET_SIZEOF_VOID_P == 8)
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
else
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
#if SIZEOF_REGISTER == 8
/* The upper word might not be zero, and we add it to a 64 bit address later */
MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
#endif
if (cfg->compile_aot) {
MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
} else {
MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
ins->inst_p0 = table;
ins->dreg = table_reg;
MONO_ADD_INS (cfg->cbb, ins);
}
/* FIXME: Use load_memindex */
MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
}
start_new_bblock = 1;
inline_costs += BRANCH_COST * 2;
break;
}
case MONO_CEE_LDIND_I1:
case MONO_CEE_LDIND_U1:
case MONO_CEE_LDIND_I2:
case MONO_CEE_LDIND_U2:
case MONO_CEE_LDIND_I4:
case MONO_CEE_LDIND_U4:
case MONO_CEE_LDIND_I8:
case MONO_CEE_LDIND_I:
case MONO_CEE_LDIND_R4:
case MONO_CEE_LDIND_R8:
case MONO_CEE_LDIND_REF:
--sp;
if (!(ins_flag & MONO_INST_NONULLCHECK))
MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg, FALSE);
ins = mini_emit_memory_load (cfg, m_class_get_byval_arg (ldind_to_type (il_op)), sp [0], 0, ins_flag);
*sp++ = ins;
ins_flag = 0;
break;
case MONO_CEE_STIND_REF:
case MONO_CEE_STIND_I1:
case MONO_CEE_STIND_I2:
case MONO_CEE_STIND_I4:
case MONO_CEE_STIND_I8:
case MONO_CEE_STIND_R4:
case MONO_CEE_STIND_R8:
case MONO_CEE_STIND_I: {
sp -= 2;
if (il_op == MONO_CEE_STIND_REF && sp [1]->type != STACK_OBJ) {
/* stind.ref must only be used with object references. */
UNVERIFIED;
}
if (il_op == MONO_CEE_STIND_R4 && sp [1]->type == STACK_R8)
sp [1] = convert_value (cfg, m_class_get_byval_arg (mono_defaults.single_class), sp [1]);
mini_emit_memory_store (cfg, m_class_get_byval_arg (stind_to_type (il_op)), sp [0], sp [1], ins_flag);
ins_flag = 0;
inline_costs += 1;
break;
}
case MONO_CEE_MUL:
MONO_INST_NEW (cfg, ins, il_op);
sp -= 2;
ins->sreg1 = sp [0]->dreg;
ins->sreg2 = sp [1]->dreg;
type_from_op (cfg, ins, sp [0], sp [1]);
CHECK_TYPE (ins);
ins->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type);
/* Use the immediate opcodes if possible */
int imm_opcode; imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (ins->opcode, imm_opcode, sp [1]->inst_c0)) {
if (imm_opcode != -1) {
ins->opcode = imm_opcode;
ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
ins->sreg2 = -1;
NULLIFY_INS (sp [1]);
}
}
MONO_ADD_INS ((cfg)->cbb, (ins));
*sp++ = mono_decompose_opcode (cfg, ins);
break;
case MONO_CEE_ADD:
case MONO_CEE_SUB:
case MONO_CEE_DIV:
case MONO_CEE_DIV_UN:
case MONO_CEE_REM:
case MONO_CEE_REM_UN:
case MONO_CEE_AND:
case MONO_CEE_OR:
case MONO_CEE_XOR:
case MONO_CEE_SHL:
case MONO_CEE_SHR:
case MONO_CEE_SHR_UN: {
MONO_INST_NEW (cfg, ins, il_op);
sp -= 2;
ins->sreg1 = sp [0]->dreg;
ins->sreg2 = sp [1]->dreg;
type_from_op (cfg, ins, sp [0], sp [1]);
CHECK_TYPE (ins);
add_widen_op (cfg, ins, &sp [0], &sp [1]);
ins->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type);
/* Use the immediate opcodes if possible */
int imm_opcode; imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) &&
mono_arch_is_inst_imm (ins->opcode, imm_opcode, sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
if (imm_opcode != -1) {
ins->opcode = imm_opcode;
if (sp [1]->opcode == OP_I8CONST) {
#if SIZEOF_REGISTER == 8
ins->inst_imm = sp [1]->inst_l;
#else
ins->inst_l = sp [1]->inst_l;
#endif
} else {
ins->inst_imm = (gssize)(sp [1]->inst_c0);
}
ins->sreg2 = -1;
/* Might be followed by an instruction added by add_widen_op */
if (sp [1]->next == NULL)
NULLIFY_INS (sp [1]);
}
}
MONO_ADD_INS ((cfg)->cbb, (ins));
*sp++ = mono_decompose_opcode (cfg, ins);
break;
}
case MONO_CEE_NEG:
case MONO_CEE_NOT:
case MONO_CEE_CONV_I1:
case MONO_CEE_CONV_I2:
case MONO_CEE_CONV_I4:
case MONO_CEE_CONV_R4:
case MONO_CEE_CONV_R8:
case MONO_CEE_CONV_U4:
case MONO_CEE_CONV_I8:
case MONO_CEE_CONV_U8:
case MONO_CEE_CONV_OVF_I8:
case MONO_CEE_CONV_OVF_U8:
case MONO_CEE_CONV_R_UN:
/* Special case this earlier so we have long constants in the IR */
if ((il_op == MONO_CEE_CONV_I8 || il_op == MONO_CEE_CONV_U8) && (sp [-1]->opcode == OP_ICONST)) {
int data = sp [-1]->inst_c0;
sp [-1]->opcode = OP_I8CONST;
sp [-1]->type = STACK_I8;
#if SIZEOF_REGISTER == 8
if (il_op == MONO_CEE_CONV_U8)
sp [-1]->inst_c0 = (guint32)data;
else
sp [-1]->inst_c0 = data;
#else
if (il_op == MONO_CEE_CONV_U8)
sp [-1]->inst_l = (guint32)data;
else
sp [-1]->inst_l = data;
#endif
sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
}
else {
ADD_UNOP (il_op);
}
break;
case MONO_CEE_CONV_OVF_I4:
case MONO_CEE_CONV_OVF_I1:
case MONO_CEE_CONV_OVF_I2:
case MONO_CEE_CONV_OVF_I:
case MONO_CEE_CONV_OVF_I1_UN:
case MONO_CEE_CONV_OVF_I2_UN:
case MONO_CEE_CONV_OVF_I4_UN:
case MONO_CEE_CONV_OVF_I8_UN:
case MONO_CEE_CONV_OVF_I_UN:
if (sp [-1]->type == STACK_R8 || sp [-1]->type == STACK_R4) {
/* floats are always signed, _UN has no effect */
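/* Convert to I8 with an overflow check first, then narrow to the requested type with a second checked conversion */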
ADD_UNOP (CEE_CONV_OVF_I8);
if (il_op == MONO_CEE_CONV_OVF_I1_UN)
ADD_UNOP (MONO_CEE_CONV_OVF_I1);
else if (il_op == MONO_CEE_CONV_OVF_I2_UN)
ADD_UNOP (MONO_CEE_CONV_OVF_I2);
else if (il_op == MONO_CEE_CONV_OVF_I4_UN)
ADD_UNOP (MONO_CEE_CONV_OVF_I4);
else if (il_op == MONO_CEE_CONV_OVF_I8_UN)
;
else
ADD_UNOP (il_op);
} else {
ADD_UNOP (il_op);
}
break;
case MONO_CEE_CONV_OVF_U1:
case MONO_CEE_CONV_OVF_U2:
case MONO_CEE_CONV_OVF_U4:
case MONO_CEE_CONV_OVF_U:
case MONO_CEE_CONV_OVF_U1_UN:
case MONO_CEE_CONV_OVF_U2_UN:
case MONO_CEE_CONV_OVF_U4_UN:
case MONO_CEE_CONV_OVF_U8_UN:
case MONO_CEE_CONV_OVF_U_UN:
if (sp [-1]->type == STACK_R8 || sp [-1]->type == STACK_R4) {
/* floats are always signed, _UN has no effect */
ADD_UNOP (CEE_CONV_OVF_U8);
ADD_UNOP (il_op);
} else {
ADD_UNOP (il_op);
}
break;
case MONO_CEE_CONV_U2:
case MONO_CEE_CONV_U1:
case MONO_CEE_CONV_I:
case MONO_CEE_CONV_U:
ADD_UNOP (il_op);
CHECK_CFG_EXCEPTION;
break;
case MONO_CEE_ADD_OVF:
case MONO_CEE_ADD_OVF_UN:
case MONO_CEE_MUL_OVF:
case MONO_CEE_MUL_OVF_UN:
case MONO_CEE_SUB_OVF:
case MONO_CEE_SUB_OVF_UN:
MONO_INST_NEW (cfg, ins, il_op);
sp -= 2;
ins->sreg1 = sp [0]->dreg;
ins->sreg2 = sp [1]->dreg;
type_from_op (cfg, ins, sp [0], sp [1]);
CHECK_TYPE (ins);
if (ovf_exc)
ins->inst_exc_name = ovf_exc;
else
ins->inst_exc_name = "OverflowException";
/* Have to insert a widening op */
add_widen_op (cfg, ins, &sp [0], &sp [1]);
ins->dreg = alloc_dreg (cfg, (MonoStackType)(ins)->type);
MONO_ADD_INS ((cfg)->cbb, ins);
/* The opcode might be emulated, so need to special case this */
if (ovf_exc && mono_find_jit_opcode_emulation (ins->opcode)) {
switch (ins->opcode) {
case OP_IMUL_OVF_UN:
/* This opcode is just a placeholder, it will be emulated also */
ins->opcode = OP_IMUL_OVF_UN_OOM;
break;
case OP_LMUL_OVF_UN:
/* This opcode is just a placeholder, it will be emulated also */
ins->opcode = OP_LMUL_OVF_UN_OOM;
break;
default:
g_assert_not_reached ();
}
}
ovf_exc = NULL;
*sp++ = mono_decompose_opcode (cfg, ins);
break;
case MONO_CEE_CPOBJ:
GSHAREDVT_FAILURE (il_op);
GSHAREDVT_FAILURE (*ip);
klass = mini_get_class (method, token, generic_context);
CHECK_TYPELOAD (klass);
sp -= 2;
mini_emit_memory_copy (cfg, sp [0], sp [1], klass, FALSE, ins_flag);
ins_flag = 0;
break;
case MONO_CEE_LDOBJ: {
int loc_index = -1;
int stloc_len = 0;
--sp;
klass = mini_get_class (method, token, generic_context);
CHECK_TYPELOAD (klass);
/* Optimize the common ldobj+stloc combination */
if (next_ip < end) {
switch (next_ip [0]) {
case MONO_CEE_STLOC_S:
CHECK_OPSIZE (7);
loc_index = next_ip [1];
stloc_len = 2;
break;
case MONO_CEE_STLOC_0:
case MONO_CEE_STLOC_1:
case MONO_CEE_STLOC_2:
case MONO_CEE_STLOC_3:
loc_index = next_ip [0] - CEE_STLOC_0;
stloc_len = 1;
break;
default:
break;
}
}
if ((loc_index != -1) && ip_in_bb (cfg, cfg->cbb, next_ip)) {
CHECK_LOCAL (loc_index);
EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, m_class_get_byval_arg (klass), sp [0]->dreg, 0);
ins->dreg = cfg->locals [loc_index]->dreg;
ins->flags |= ins_flag;
il_op = (MonoOpcodeEnum)next_ip [0];
next_ip += stloc_len;
if (ins_flag & MONO_INST_VOLATILE) {
/* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
}
ins_flag = 0;
break;
}
/* Optimize the ldobj+stobj combination */
if (next_ip + 4 < end && next_ip [0] == CEE_STOBJ && ip_in_bb (cfg, cfg->cbb, next_ip) && read32 (next_ip + 1) == token) {
CHECK_STACK (1);
sp --;
mini_emit_memory_copy (cfg, sp [0], sp [1], klass, FALSE, ins_flag);
il_op = (MonoOpcodeEnum)next_ip [0];
next_ip += 5;
ins_flag = 0;
break;
}
ins = mini_emit_memory_load (cfg, m_class_get_byval_arg (klass), sp [0], 0, ins_flag);
*sp++ = ins;
ins_flag = 0;
inline_costs += 1;
break;
}
case MONO_CEE_LDSTR:
if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
ins->type = STACK_OBJ;
*sp = ins;
}
else if (method->wrapper_type != MONO_WRAPPER_NONE) {
MonoInst *iargs [1];
char *str = (char *)mono_method_get_wrapper_data (method, n);
if (cfg->compile_aot)
EMIT_NEW_LDSTRLITCONST (cfg, iargs [0], str);
else
EMIT_NEW_PCONST (cfg, iargs [0], str);
*sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper_internal, iargs);
} else {
{
if (cfg->cbb->out_of_line) {
MonoInst *iargs [2];
if (image == mono_defaults.corlib) {
/*
* Avoid relocations in AOT and save some space by using a
* version of helper_ldstr specialized to mscorlib.
*/
EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
*sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
} else {
/* Avoid creating the string object */
EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
*sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
}
}
else
if (cfg->compile_aot) {
NEW_LDSTRCONST (cfg, ins, image, n);
*sp = ins;
MONO_ADD_INS (cfg->cbb, ins);
}
else {
NEW_PCONST (cfg, ins, NULL);
ins->type = STACK_OBJ;
ins->inst_p0 = mono_ldstr_checked (image, mono_metadata_token_index (n), cfg->error);
CHECK_CFG_ERROR;
if (!ins->inst_p0)
OUT_OF_MEMORY_FAILURE;
*sp = ins;
MONO_ADD_INS (cfg->cbb, ins);
}
}
}
sp++;
break;
case MONO_CEE_NEWOBJ: {
MonoInst *iargs [2];
MonoMethodSignature *fsig;
MonoInst this_ins;
MonoInst *alloc;
MonoInst *vtable_arg = NULL;
cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
CHECK_CFG_ERROR;
fsig = mono_method_get_signature_checked (cmethod, image, token, generic_context, cfg->error);
CHECK_CFG_ERROR;
mono_save_token_info (cfg, image, token, cmethod);
if (!mono_class_init_internal (cmethod->klass))
TYPE_LOAD_ERROR (cmethod->klass);
context_used = mini_method_check_context_used (cfg, cmethod);
if (!dont_verify && !cfg->skip_visibility) {
MonoMethod *cil_method = cmethod;
MonoMethod *target_method = cil_method;
if (method->is_inflated) {
MonoGenericContainer *container = mono_method_get_generic_container(method_definition);
MonoGenericContext *context = (container != NULL ? &container->context : NULL);
target_method = mini_get_method_allow_open (method, token, NULL, context, cfg->error);
CHECK_CFG_ERROR;
}
if (!mono_method_can_access_method (method_definition, target_method) &&
!mono_method_can_access_method (method, cil_method))
emit_method_access_failure (cfg, method, cil_method);
}
if (cfg->gshared && cmethod && cmethod->klass != method->klass && mono_class_is_ginst (cmethod->klass) && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
emit_class_init (cfg, cmethod->klass);
CHECK_TYPELOAD (cmethod->klass);
}
/*
if (cfg->gsharedvt) {
if (mini_is_gsharedvt_variable_signature (sig))
GSHAREDVT_FAILURE (il_op);
}
*/
n = fsig->param_count;
CHECK_STACK (n);
/*
* Generate smaller code for the common newobj <exception> instruction in
* argument checking code.
*/
if (cfg->cbb->out_of_line && m_class_get_image (cmethod->klass) == mono_defaults.corlib &&
is_exception_class (cmethod->klass) && n <= 2 &&
((n < 1) || (!m_type_is_byref (fsig->params [0]) && fsig->params [0]->type == MONO_TYPE_STRING)) &&
((n < 2) || (!m_type_is_byref (fsig->params [1]) && fsig->params [1]->type == MONO_TYPE_STRING))) {
MonoInst *iargs [3];
sp -= n;
EMIT_NEW_ICONST (cfg, iargs [0], m_class_get_type_token (cmethod->klass));
switch (n) {
case 0:
*sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
break;
case 1:
iargs [1] = sp [0];
*sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
break;
case 2:
iargs [1] = sp [0];
iargs [2] = sp [1];
*sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
break;
default:
g_assert_not_reached ();
}
inline_costs += 5;
break;
}
/* move the args to allow room for 'this' in the first position */
while (n--) {
--sp;
sp [1] = sp [0];
}
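/* e.g. n == 2: [a, b] becomes [<hole>, a, b], leaving sp [0] free for 'this' */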
for (int i = 0; i < fsig->param_count; ++i)
sp [i + fsig->hasthis] = convert_value (cfg, fsig->params [i], sp [i + fsig->hasthis]);
/* check_call_signature () requires sp[0] to be set */
this_ins.type = STACK_OBJ;
sp [0] = &this_ins;
if (check_call_signature (cfg, fsig, sp))
UNVERIFIED;
iargs [0] = NULL;
if (mini_class_is_system_array (cmethod->klass)) {
*sp = emit_get_rgctx_method (cfg, context_used,
cmethod, MONO_RGCTX_INFO_METHOD);
MonoJitICallId function = MONO_JIT_ICALL_ZeroIsReserved;
int rank = m_class_get_rank (cmethod->klass);
int n = fsig->param_count;
/* Optimize the common cases: a ctor taking one length per rank (no lbound). */
if (n == rank) {
switch (n) {
case 1: function = MONO_JIT_ICALL_mono_array_new_1;
break;
case 2: function = MONO_JIT_ICALL_mono_array_new_2;
break;
case 3: function = MONO_JIT_ICALL_mono_array_new_3;
break;
case 4: function = MONO_JIT_ICALL_mono_array_new_4;
break;
default:
break;
}
}
/* Regular case: rank > 4, or length and lbound specified per rank. */
if (function == MONO_JIT_ICALL_ZeroIsReserved) {
// FIXME Maximum value of param_count? Realistically 64. Fits in imm?
if (!array_new_localalloc_ins) {
MONO_INST_NEW (cfg, array_new_localalloc_ins, OP_LOCALLOC_IMM);
array_new_localalloc_ins->dreg = alloc_preg (cfg);
cfg->flags |= MONO_CFG_HAS_ALLOCA;
MONO_ADD_INS (init_localsbb, array_new_localalloc_ins);
}
array_new_localalloc_ins->inst_imm = MAX (array_new_localalloc_ins->inst_imm, n * sizeof (target_mgreg_t));
int dreg = array_new_localalloc_ins->dreg;
if (2 * rank == n) {
/* [lbound, length, lbound, length, ...]
* mono_array_new_n_icall expects a non-interleaved list of
* lbounds and lengths, so deinterleave here.
*/
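/* e.g. rank == 2: [lb0, len0, lb1, len1] becomes [lb0, lb1, len0, len1] */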
for (int l = 0; l < 2; ++l) {
int src = l;
int dst = l * rank;
for (int r = 0; r < rank; ++r, src += 2, ++dst) {
NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, dreg, dst * sizeof (target_mgreg_t), sp [src + 1]->dreg);
MONO_ADD_INS (cfg->cbb, ins);
}
}
} else {
/* [length, length, length, ...] */
for (int i = 0; i < n; ++i) {
NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, dreg, i * sizeof (target_mgreg_t), sp [i + 1]->dreg);
MONO_ADD_INS (cfg->cbb, ins);
}
}
EMIT_NEW_ICONST (cfg, ins, n);
sp [1] = ins;
EMIT_NEW_UNALU (cfg, ins, OP_MOVE, alloc_preg (cfg), dreg);
ins->type = STACK_PTR;
sp [2] = ins;
// FIXME Adjust sp by n - 3? Attempts failed.
function = MONO_JIT_ICALL_mono_array_new_n_icall;
}
alloc = mono_emit_jit_icall_id (cfg, function, sp);
} else if (cmethod->string_ctor) {
g_assert (!context_used);
g_assert (!vtable_arg);
/* we simply pass a null pointer */
EMIT_NEW_PCONST (cfg, *sp, NULL);
/* now call the string ctor */
alloc = mini_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, NULL, NULL, NULL);
} else {
if (m_class_is_valuetype (cmethod->klass)) {
iargs [0] = mono_compile_create_var (cfg, m_class_get_byval_arg (cmethod->klass), OP_LOCAL);
mini_emit_init_rvar (cfg, iargs [0]->dreg, m_class_get_byval_arg (cmethod->klass));
EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
alloc = NULL;
/*
* The code generated by mini_emit_virtual_call () expects
* iargs [0] to be a boxed instance, but luckily the vcall
* will be transformed into a normal call there.
*/
} else if (context_used) {
alloc = handle_alloc (cfg, cmethod->klass, FALSE, context_used);
*sp = alloc;
} else {
MonoVTable *vtable = NULL;
if (!cfg->compile_aot)
vtable = mono_class_vtable_checked (cmethod->klass, cfg->error);
CHECK_CFG_ERROR;
CHECK_TYPELOAD (cmethod->klass);
/*
* TypeInitializationExceptions thrown from the mono_runtime_class_init
* call in mono_jit_runtime_invoke () can abort the finalizer thread.
* As a workaround, we call class cctors before allocating objects.
*/
if (mini_field_access_needs_cctor_run (cfg, method, cmethod->klass, vtable) && !(g_slist_find (class_inits, cmethod->klass))) {
emit_class_init (cfg, cmethod->klass);
if (cfg->verbose_level > 2)
printf ("class %s.%s needs init call for ctor\n", m_class_get_name_space (cmethod->klass), m_class_get_name (cmethod->klass));
class_inits = g_slist_prepend (class_inits, cmethod->klass);
}
alloc = handle_alloc (cfg, cmethod->klass, FALSE, 0);
*sp = alloc;
}
CHECK_CFG_EXCEPTION; /*for handle_alloc*/
if (alloc)
MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
/* Now call the actual ctor */
int ctor_inline_costs = 0;
handle_ctor_call (cfg, cmethod, fsig, context_used, sp, ip, &ctor_inline_costs);
// don't contribute to inline_costs if the ctor has [MethodImpl(MethodImplOptions.AggressiveInlining)]
if (!COMPILE_LLVM(cfg) || !(cmethod->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))
inline_costs += ctor_inline_costs;
CHECK_CFG_EXCEPTION;
}
if (alloc == NULL) {
/* Valuetype */
EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
mini_type_to_eval_stack_type (cfg, m_class_get_byval_arg (ins->klass), ins);
*sp++= ins;
} else {
*sp++ = alloc;
}
inline_costs += 5;
if (!(seq_point_locs && mono_bitset_test_fast (seq_point_locs, next_ip - header->code)))
emit_seq_point (cfg, method, next_ip, FALSE, TRUE);
break;
}
case MONO_CEE_CASTCLASS:
case MONO_CEE_ISINST: {
--sp;
klass = mini_get_class (method, token, generic_context);
CHECK_TYPELOAD (klass);
if (sp [0]->type != STACK_OBJ)
UNVERIFIED;
MONO_INST_NEW (cfg, ins, (il_op == MONO_CEE_ISINST) ? OP_ISINST : OP_CASTCLASS);
ins->dreg = alloc_preg (cfg);
ins->sreg1 = (*sp)->dreg;
ins->klass = klass;
ins->type = STACK_OBJ;
MONO_ADD_INS (cfg->cbb, ins);
CHECK_CFG_EXCEPTION;
*sp++ = ins;
cfg->flags |= MONO_CFG_HAS_TYPE_CHECK;
break;
}
case MONO_CEE_UNBOX_ANY: {
MonoInst *res, *addr;
--sp;
klass = mini_get_class (method, token, generic_context);
CHECK_TYPELOAD (klass);
mono_save_token_info (cfg, image, token, klass);
context_used = mini_class_check_context_used (cfg, klass);
if (mini_is_gsharedvt_klass (klass)) {
res = handle_unbox_gsharedvt (cfg, klass, *sp);
inline_costs += 2;
} else if (mini_class_is_reference (klass)) {
if (MONO_INS_IS_PCONST_NULL (*sp)) {
EMIT_NEW_PCONST (cfg, res, NULL);
res->type = STACK_OBJ;
} else {
MONO_INST_NEW (cfg, res, OP_CASTCLASS);
res->dreg = alloc_preg (cfg);
res->sreg1 = (*sp)->dreg;
res->klass = klass;
res->type = STACK_OBJ;
MONO_ADD_INS (cfg->cbb, res);
cfg->flags |= MONO_CFG_HAS_TYPE_CHECK;
}
} else if (mono_class_is_nullable (klass)) {
res = handle_unbox_nullable (cfg, *sp, klass, context_used);
} else {
addr = mini_handle_unbox (cfg, klass, *sp, context_used);
/* LDOBJ */
EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, m_class_get_byval_arg (klass), addr->dreg, 0);
res = ins;
inline_costs += 2;
}
*sp ++ = res;
break;
}
case MONO_CEE_BOX: {
MonoInst *val;
MonoClass *enum_class;
MonoMethod *has_flag;
MonoMethodSignature *has_flag_sig;
--sp;
val = *sp;
klass = mini_get_class (method, token, generic_context);
CHECK_TYPELOAD (klass);
mono_save_token_info (cfg, image, token, klass);
context_used = mini_class_check_context_used (cfg, klass);
if (mini_class_is_reference (klass)) {
*sp++ = val;
break;
}
val = convert_value (cfg, m_class_get_byval_arg (klass), val);
if (klass == mono_defaults.void_class)
UNVERIFIED;
if (target_type_is_incompatible (cfg, m_class_get_byval_arg (klass), val))
UNVERIFIED;
/* frequent check in generic code: box (struct), brtrue */
/*
* Look for:
*
* <push int/long ptr>
* <push int/long>
* box MyFlags
* constrained. MyFlags
* callvirt instance bool class [mscorlib] System.Enum::HasFlag (class [mscorlib] System.Enum)
*
* If we find this sequence and the operand types on box and constrained
* are equal, we can emit a specialized instruction sequence instead of
* the very slow HasFlag () call.
* This code sequence is generated by older mcs/csc, the newer one is handled in
* emit_inst_for_method ().
*/
guint32 constrained_token;
guint32 callvirt_token;
if ((cfg->opt & MONO_OPT_INTRINS) &&
// FIXME ip_in_bb as we go?
next_ip < end && ip_in_bb (cfg, cfg->cbb, next_ip) &&
(ip = il_read_constrained (next_ip, end, &constrained_token)) &&
ip_in_bb (cfg, cfg->cbb, ip) &&
(ip = il_read_callvirt (ip, end, &callvirt_token)) &&
ip_in_bb (cfg, cfg->cbb, ip) &&
m_class_is_enumtype (klass) &&
(enum_class = mini_get_class (method, constrained_token, generic_context)) &&
(has_flag = mini_get_method (cfg, method, callvirt_token, NULL, generic_context)) &&
has_flag->klass == mono_defaults.enum_class &&
!strcmp (has_flag->name, "HasFlag") &&
(has_flag_sig = mono_method_signature_internal (has_flag)) &&
has_flag_sig->hasthis &&
has_flag_sig->param_count == 1) {
CHECK_TYPELOAD (enum_class);
if (enum_class == klass) {
MonoInst *enum_this, *enum_flag;
next_ip = ip;
il_op = MONO_CEE_CALLVIRT;
--sp;
enum_this = sp [0];
enum_flag = sp [1];
*sp++ = mini_handle_enum_has_flag (cfg, klass, enum_this, -1, enum_flag);
break;
}
}
guint32 unbox_any_token;
/*
* Common in generic code:
* box T1, unbox.any T2.
*/
if ((cfg->opt & MONO_OPT_INTRINS) &&
next_ip < end && ip_in_bb (cfg, cfg->cbb, next_ip) &&
(ip = il_read_unbox_any (next_ip, end, &unbox_any_token))) {
MonoClass *unbox_klass = mini_get_class (method, unbox_any_token, generic_context);
CHECK_TYPELOAD (unbox_klass);
if (klass == unbox_klass) {
next_ip = ip;
*sp++ = val;
break;
}
}
// Optimize
//
// box
// call object::GetType()
//
guint32 gettype_token;
if ((ip = il_read_call(next_ip, end, &gettype_token)) && ip_in_bb (cfg, cfg->cbb, ip)) {
MonoMethod* gettype_method = mini_get_method (cfg, method, gettype_token, NULL, generic_context);
if (!strcmp (gettype_method->name, "GetType") && gettype_method->klass == mono_defaults.object_class) {
mono_class_init_internal(klass);
if (mono_class_get_checked (m_class_get_image (klass), m_class_get_type_token (klass), error) == klass) {
if (cfg->compile_aot) {
EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, m_class_get_image (klass), m_class_get_type_token (klass), generic_context);
} else {
MonoType *klass_type = m_class_get_byval_arg (klass);
MonoReflectionType* reflection_type = mono_type_get_object_checked (klass_type, cfg->error);
EMIT_NEW_PCONST (cfg, ins, reflection_type);
}
ins->type = STACK_OBJ;
ins->klass = mono_defaults.systemtype_class;
*sp++ = ins;
next_ip = ip;
break;
}
}
}
// Optimize
//
// box
// ldnull
// ceq (or cgt.un)
//
// to just
//
// ldc.i4.0 (or 1)
guchar* ldnull_ip;
if ((ldnull_ip = il_read_op (next_ip, end, CEE_LDNULL, MONO_CEE_LDNULL)) && ip_in_bb (cfg, cfg->cbb, ldnull_ip)) {
gboolean is_eq = FALSE, is_neq = FALSE;
if ((ip = il_read_op (ldnull_ip, end, CEE_PREFIX1, MONO_CEE_CEQ)))
is_eq = TRUE;
else if ((ip = il_read_op (ldnull_ip, end, CEE_PREFIX1, MONO_CEE_CGT_UN)))
is_neq = TRUE;
if ((is_eq || is_neq) && ip_in_bb (cfg, cfg->cbb, ip) &&
!mono_class_is_nullable (klass) && !mini_is_gsharedvt_klass (klass)) {
next_ip = ip;
il_op = (MonoOpcodeEnum) (is_eq ? CEE_LDC_I4_0 : CEE_LDC_I4_1);
EMIT_NEW_ICONST (cfg, ins, is_eq ? 0 : 1);
ins->type = STACK_I4;
*sp++ = ins;
break;
}
}
guint32 isinst_tk = 0;
if ((ip = il_read_op_and_token (next_ip, end, CEE_ISINST, MONO_CEE_ISINST, &isinst_tk)) &&
ip_in_bb (cfg, cfg->cbb, ip)) {
MonoClass *isinst_class = mini_get_class (method, isinst_tk, generic_context);
if (!mono_class_is_nullable (klass) && !mono_class_is_nullable (isinst_class) &&
!mini_is_gsharedvt_variable_klass (klass) && !mini_is_gsharedvt_variable_klass (isinst_class) &&
!mono_class_is_open_constructed_type (m_class_get_byval_arg (klass)) &&
!mono_class_is_open_constructed_type (m_class_get_byval_arg (isinst_class))) {
// Optimize
//
// box
// isinst [Type]
// brfalse/brtrue
//
// to
//
// ldc.i4.0 (or 1)
// brfalse/brtrue
//
guchar* br_ip = NULL;
if ((br_ip = il_read_brtrue (ip, end, &target)) || (br_ip = il_read_brtrue_s (ip, end, &target)) ||
(br_ip = il_read_brfalse (ip, end, &target)) || (br_ip = il_read_brfalse_s (ip, end, &target))) {
gboolean isinst = mono_class_is_assignable_from_internal (isinst_class, klass);
next_ip = ip;
il_op = (MonoOpcodeEnum) (isinst ? CEE_LDC_I4_1 : CEE_LDC_I4_0);
EMIT_NEW_ICONST (cfg, ins, isinst ? 1 : 0);
ins->type = STACK_I4;
*sp++ = ins;
break;
}
// Optimize
//
// box
// isinst [Type]
// ldnull
// ceq/cgt.un
//
// to
//
// ldc.i4.0 (or 1)
//
guchar* ldnull_ip = NULL;
if ((ldnull_ip = il_read_op (ip, end, CEE_LDNULL, MONO_CEE_LDNULL)) && ip_in_bb (cfg, cfg->cbb, ldnull_ip)) {
gboolean is_eq = FALSE, is_neq = FALSE;
if ((ip = il_read_op (ldnull_ip, end, CEE_PREFIX1, MONO_CEE_CEQ)))
is_eq = TRUE;
else if ((ip = il_read_op (ldnull_ip, end, CEE_PREFIX1, MONO_CEE_CGT_UN)))
is_neq = TRUE;
if ((is_eq || is_neq) && ip_in_bb (cfg, cfg->cbb, ip) &&
!mono_class_is_nullable (klass) && !mini_is_gsharedvt_klass (klass)) {
gboolean isinst = mono_class_is_assignable_from_internal (isinst_class, klass);
next_ip = ip;
if (is_eq)
isinst = !isinst;
il_op = (MonoOpcodeEnum) (isinst ? CEE_LDC_I4_1 : CEE_LDC_I4_0);
EMIT_NEW_ICONST (cfg, ins, isinst ? 1 : 0);
ins->type = STACK_I4;
*sp++ = ins;
break;
}
}
// Optimize
//
// box
// isinst [Type]
// unbox.any
//
// to
//
// nop
//
guchar* unbox_ip = NULL;
guint32 unbox_token = 0;
if ((unbox_ip = il_read_unbox_any (ip, end, &unbox_token)) && ip_in_bb (cfg, cfg->cbb, unbox_ip)) {
MonoClass *unbox_klass = mini_get_class (method, unbox_token, generic_context);
CHECK_TYPELOAD (unbox_klass);
if (!mono_class_is_nullable (unbox_klass) &&
!mini_is_gsharedvt_klass (unbox_klass) &&
klass == isinst_class &&
klass == unbox_klass)
{
*sp++ = val;
next_ip = unbox_ip;
break;
}
}
}
}
gboolean is_true;
// FIXME: LLVM can't handle the inconsistent bb linking
if (!mono_class_is_nullable (klass) &&
!mini_is_gsharedvt_klass (klass) &&
next_ip < end && ip_in_bb (cfg, cfg->cbb, next_ip) &&
( (is_true = !!(ip = il_read_brtrue (next_ip, end, &target))) ||
(is_true = !!(ip = il_read_brtrue_s (next_ip, end, &target))) ||
(ip = il_read_brfalse (next_ip, end, &target)) ||
(ip = il_read_brfalse_s (next_ip, end, &target)))) {
int dreg;
MonoBasicBlock *true_bb, *false_bb;
il_op = (MonoOpcodeEnum)next_ip [0];
next_ip = ip;
if (cfg->verbose_level > 3) {
printf ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
printf ("<box+brtrue opt>\n");
}
/*
* We need to link both bblocks, since it is needed for handling stack
* arguments correctly (See test_0_box_brtrue_opt_regress_81102).
* Branching to only one of them would lead to inconsistencies, so
* generate an ICONST+BRTRUE, the branch opts will get rid of them.
*/
GET_BBLOCK (cfg, true_bb, target);
GET_BBLOCK (cfg, false_bb, next_ip);
mono_link_bblock (cfg, cfg->cbb, true_bb);
mono_link_bblock (cfg, cfg->cbb, false_bb);
if (sp != stack_start) {
handle_stack_args (cfg, stack_start, sp - stack_start);
sp = stack_start;
CHECK_UNVERIFIABLE (cfg);
}
if (COMPILE_LLVM (cfg)) {
dreg = alloc_ireg (cfg);
MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, dreg, is_true ? 0 : 1);
MONO_EMIT_NEW_BRANCH_BLOCK2 (cfg, OP_IBEQ, true_bb, false_bb);
} else {
/* The JIT can't eliminate the iconst+compare */
MONO_INST_NEW (cfg, ins, OP_BR);
ins->inst_target_bb = is_true ? true_bb : false_bb;
MONO_ADD_INS (cfg->cbb, ins);
}
start_new_bblock = 1;
break;
}
if (m_class_is_enumtype (klass) && !mini_is_gsharedvt_klass (klass) && !(val->type == STACK_I8 && TARGET_SIZEOF_VOID_P == 4)) {
/* Can't do this with 64 bit enums on 32 bit since the vtype decomp pass is run after the long decomp pass */
if (val->opcode == OP_ICONST) {
MONO_INST_NEW (cfg, ins, OP_BOX_ICONST);
ins->type = STACK_OBJ;
ins->klass = klass;
ins->inst_c0 = val->inst_c0;
ins->dreg = alloc_dreg (cfg, (MonoStackType)val->type);
} else {
MONO_INST_NEW (cfg, ins, OP_BOX);
ins->type = STACK_OBJ;
ins->klass = klass;
ins->sreg1 = val->dreg;
ins->dreg = alloc_dreg (cfg, (MonoStackType)val->type);
}
MONO_ADD_INS (cfg->cbb, ins);
*sp++ = ins;
} else {
*sp++ = mini_emit_box (cfg, val, klass, context_used);
}
CHECK_CFG_EXCEPTION;
inline_costs += 1;
break;
}
case MONO_CEE_UNBOX: {
--sp;
klass = mini_get_class (method, token, generic_context);
CHECK_TYPELOAD (klass);
mono_save_token_info (cfg, image, token, klass);
context_used = mini_class_check_context_used (cfg, klass);
if (mono_class_is_nullable (klass)) {
MonoInst *val;
val = handle_unbox_nullable (cfg, *sp, klass, context_used);
EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), m_class_get_byval_arg (val->klass));
*sp++= ins;
} else {
ins = mini_handle_unbox (cfg, klass, *sp, context_used);
*sp++ = ins;
}
inline_costs += 2;
break;
}
case MONO_CEE_LDFLD:
case MONO_CEE_LDFLDA:
case MONO_CEE_STFLD:
case MONO_CEE_LDSFLD:
case MONO_CEE_LDSFLDA:
case MONO_CEE_STSFLD: {
MonoClassField *field;
guint foffset;
gboolean is_instance;
gpointer addr = NULL;
gboolean is_special_static;
MonoType *ftype;
MonoInst *store_val = NULL;
MonoInst *thread_ins;
is_instance = (il_op == MONO_CEE_LDFLD || il_op == MONO_CEE_LDFLDA || il_op == MONO_CEE_STFLD);
if (is_instance) {
if (il_op == MONO_CEE_STFLD) {
sp -= 2;
store_val = sp [1];
} else {
--sp;
}
if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
UNVERIFIED;
if (il_op != MONO_CEE_LDFLD && sp [0]->type == STACK_VTYPE)
UNVERIFIED;
} else {
if (il_op == MONO_CEE_STSFLD) {
sp--;
store_val = sp [0];
}
}
if (method->wrapper_type != MONO_WRAPPER_NONE) {
field = (MonoClassField *)mono_method_get_wrapper_data (method, token);
klass = m_field_get_parent (field);
}
else {
klass = NULL;
field = mono_field_from_token_checked (image, token, &klass, generic_context, cfg->error);
if (!field)
CHECK_TYPELOAD (klass);
CHECK_CFG_ERROR;
}
if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
FIELD_ACCESS_FAILURE (method, field);
mono_class_init_internal (klass);
mono_class_setup_fields (klass);
ftype = mono_field_get_type_internal (field);
/*
* LDFLD etc. is usable on static fields as well, so convert those cases to
* the static case.
*/
if (is_instance && ftype->attrs & FIELD_ATTRIBUTE_STATIC) {
switch (il_op) {
case MONO_CEE_LDFLD:
il_op = MONO_CEE_LDSFLD;
break;
case MONO_CEE_STFLD:
il_op = MONO_CEE_STSFLD;
break;
case MONO_CEE_LDFLDA:
il_op = MONO_CEE_LDSFLDA;
break;
default:
g_assert_not_reached ();
}
is_instance = FALSE;
}
context_used = mini_class_check_context_used (cfg, klass);
if (il_op == MONO_CEE_LDSFLD) {
ins = mini_emit_inst_for_field_load (cfg, field);
if (ins) {
*sp++ = ins;
goto field_access_end;
}
}
/* INSTANCE CASE */
if (is_instance)
g_assert (field->offset);
foffset = m_class_is_valuetype (klass) ? field->offset - MONO_ABI_SIZEOF (MonoObject): field->offset;
if (il_op == MONO_CEE_STFLD) {
sp [1] = convert_value (cfg, field->type, sp [1]);
if (target_type_is_incompatible (cfg, field->type, sp [1]))
UNVERIFIED;
{
MonoInst *store;
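/* An explicit check is only forced for offsets that could reach past the guard page, where the implicit fault on access can no longer be relied on */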
MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg, foffset > mono_target_pagesize ());
if (ins_flag & MONO_INST_VOLATILE) {
/* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
}
if (mini_is_gsharedvt_klass (klass)) {
MonoInst *offset_ins;
context_used = mini_class_check_context_used (cfg, klass);
offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
/* The value is offset by 1 */
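/* (presumably biased by 1 so that 0 can be reserved for an uninitialized rgctx slot) */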
EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
dreg = alloc_ireg_mp (cfg);
EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
if (cfg->gen_write_barriers && mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !MONO_INS_IS_PCONST_NULL (sp [1])) {
store = mini_emit_storing_write_barrier (cfg, ins, sp [1]);
} else {
/* The decomposition will call mini_emit_memory_copy () which will emit a wbarrier if needed */
EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, dreg, 0, sp [1]->dreg);
}
} else {
if (cfg->gen_write_barriers && mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !MONO_INS_IS_PCONST_NULL (sp [1])) {
/* insert call to write barrier */
MonoInst *ptr;
int dreg;
dreg = alloc_ireg_mp (cfg);
EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
store = mini_emit_storing_write_barrier (cfg, ptr, sp [1]);
} else {
EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
}
}
if (sp [0]->opcode != OP_LDADDR)
store->flags |= MONO_INST_FAULT;
store->flags |= ins_flag;
}
goto field_access_end;
}
if (is_instance) {
if (sp [0]->type == STACK_VTYPE) {
MonoInst *var;
/* Have to compute the address of the variable */
var = get_vreg_to_inst (cfg, sp [0]->dreg);
if (!var)
var = mono_compile_create_var_for_vreg (cfg, m_class_get_byval_arg (klass), OP_LOCAL, sp [0]->dreg);
else
g_assert (var->klass == klass);
EMIT_NEW_VARLOADA (cfg, ins, var, m_class_get_byval_arg (var->klass));
sp [0] = ins;
}
if (il_op == MONO_CEE_LDFLDA) {
if (sp [0]->type == STACK_OBJ) {
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
MONO_EMIT_NEW_COND_EXC (cfg, EQ, "NullReferenceException");
}
dreg = alloc_ireg_mp (cfg);
if (mini_is_gsharedvt_klass (klass)) {
MonoInst *offset_ins;
offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
/* The value is offset by 1 */
EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
} else {
EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
}
ins->klass = mono_class_from_mono_type_internal (field->type);
ins->type = STACK_MP;
*sp++ = ins;
} else {
MonoInst *load;
MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg, foffset > mono_target_pagesize ());
#ifdef MONO_ARCH_SIMD_INTRINSICS
if (sp [0]->opcode == OP_LDADDR && m_class_is_simd_type (klass) && cfg->opt & MONO_OPT_SIMD) {
ins = mono_emit_simd_field_load (cfg, field, sp [0]);
if (ins) {
*sp++ = ins;
goto field_access_end;
}
}
#endif
MonoInst *field_add_inst = sp [0];
if (mini_is_gsharedvt_klass (klass)) {
MonoInst *offset_ins;
offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
/* The value is offset by 1 */
EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
EMIT_NEW_BIALU (cfg, field_add_inst, OP_PADD, alloc_ireg_mp (cfg), sp [0]->dreg, offset_ins->dreg);
foffset = 0;
}
load = mini_emit_memory_load (cfg, field->type, field_add_inst, foffset, ins_flag);
if (sp [0]->opcode != OP_LDADDR)
load->flags |= MONO_INST_FAULT;
*sp++ = load;
}
}
if (is_instance)
goto field_access_end;
/* STATIC CASE */
context_used = mini_class_check_context_used (cfg, klass);
if (ftype->attrs & FIELD_ATTRIBUTE_LITERAL) {
mono_error_set_field_missing (cfg->error, m_field_get_parent (field), field->name, NULL, "Using static instructions with literal field");
CHECK_CFG_ERROR;
}
/* The special_static_fields field is init'd in mono_class_vtable, so that function needs
* to be called here.
*/
if (!context_used) {
mono_class_vtable_checked (klass, cfg->error);
CHECK_CFG_ERROR;
CHECK_TYPELOAD (klass);
}
addr = mono_special_static_field_get_offset (field, cfg->error);
CHECK_CFG_ERROR;
CHECK_TYPELOAD (klass);
is_special_static = mono_class_field_is_special_static (field);
if (is_special_static && ((gsize)addr & 0x80000000) == 0)
thread_ins = mono_create_tls_get (cfg, TLS_KEY_THREAD);
else
thread_ins = NULL;
/* Generate IR to compute the field address */
if (is_special_static && ((gsize)addr & 0x80000000) == 0 && thread_ins &&
!(context_used && cfg->gsharedvt && mini_is_gsharedvt_klass (klass))) {
/*
* Fast access to TLS data
* Inline version of get_thread_static_data () in
* threads.c.
*/
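/* Judging by the masks below, addr packs the slot info: bit 31 marks a field not handled by this fast path, bits 0-5 index the thread's static_data chunk array, and bits 6-30 give the offset within that chunk */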
guint32 offset;
int idx, static_data_reg, array_reg, dreg;
static_data_reg = alloc_ireg (cfg);
MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, MONO_STRUCT_OFFSET (MonoInternalThread, static_data));
if (cfg->compile_aot || context_used) {
int offset_reg, offset2_reg, idx_reg;
/* For TLS variables, this will return the TLS offset */
if (context_used) {
MonoInst *addr_ins = emit_get_rgctx_field (cfg, context_used, field, MONO_RGCTX_INFO_FIELD_OFFSET);
/* The value is offset by 1 */
EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, addr_ins->dreg, addr_ins->dreg, 1);
} else {
EMIT_NEW_SFLDACONST (cfg, ins, field);
}
offset_reg = ins->dreg;
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset_reg, offset_reg, 0x7fffffff);
idx_reg = alloc_ireg (cfg);
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, idx_reg, offset_reg, 0x3f);
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, TARGET_SIZEOF_VOID_P == 8 ? 3 : 2);
MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
array_reg = alloc_ireg (cfg);
MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
offset2_reg = alloc_ireg (cfg);
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_UN_IMM, offset2_reg, offset_reg, 6);
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset2_reg, 0x1ffffff);
dreg = alloc_ireg (cfg);
EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, array_reg, offset2_reg);
} else {
offset = (gsize)addr & 0x7fffffff;
idx = offset & 0x3f;
array_reg = alloc_ireg (cfg);
MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, idx * TARGET_SIZEOF_VOID_P);
dreg = alloc_ireg (cfg);
EMIT_NEW_BIALU_IMM (cfg, ins, OP_ADD_IMM, dreg, array_reg, ((offset >> 6) & 0x1ffffff));
}
} else if ((cfg->compile_aot && is_special_static) ||
(context_used && is_special_static)) {
MonoInst *iargs [1];
g_assert (m_field_get_parent (field));
if (context_used) {
iargs [0] = emit_get_rgctx_field (cfg, context_used,
field, MONO_RGCTX_INFO_CLASS_FIELD);
} else {
EMIT_NEW_FIELDCONST (cfg, iargs [0], field);
}
ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
} else if (context_used) {
MonoInst *static_data;
/*
g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
method->klass->name_space, method->klass->name, method->name,
depth, field->offset);
*/
if (mono_class_needs_cctor_run (klass, method))
emit_class_init (cfg, klass);
/*
* The pointer we're computing here is
*
* super_info.static_data + field->offset
*/
static_data = mini_emit_get_rgctx_klass (cfg, context_used,
klass, MONO_RGCTX_INFO_STATIC_DATA);
if (mini_is_gsharedvt_klass (klass)) {
MonoInst *offset_ins;
offset_ins = emit_get_rgctx_field (cfg, context_used, field, MONO_RGCTX_INFO_FIELD_OFFSET);
/* The value is offset by 1 */
EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
dreg = alloc_ireg_mp (cfg);
EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, static_data->dreg, offset_ins->dreg);
} else if (field->offset == 0) {
ins = static_data;
} else {
int addr_reg = mono_alloc_preg (cfg);
EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
}
} else if (cfg->compile_aot && addr) {
MonoInst *iargs [1];
g_assert (m_field_get_parent (field));
EMIT_NEW_FIELDCONST (cfg, iargs [0], field);
ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
} else {
MonoVTable *vtable = NULL;
if (!cfg->compile_aot)
vtable = mono_class_vtable_checked (klass, cfg->error);
CHECK_CFG_ERROR;
CHECK_TYPELOAD (klass);
if (!addr) {
if (mini_field_access_needs_cctor_run (cfg, method, klass, vtable)) {
if (!(g_slist_find (class_inits, klass))) {
emit_class_init (cfg, klass);
if (cfg->verbose_level > 2)
printf ("class %s.%s needs init call for %s\n", m_class_get_name_space (klass), m_class_get_name (klass), mono_field_get_name (field));
class_inits = g_slist_prepend (class_inits, klass);
}
} else {
if (cfg->run_cctors) {
/* This makes it so that inlining cannot trigger: */
/* too many apps depend on .cctors */
/* running in a specific order... */
g_assert (vtable);
if (!vtable->initialized && m_class_has_cctor (vtable->klass))
INLINE_FAILURE ("class init");
if (!mono_runtime_class_init_full (vtable, cfg->error)) {
mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
goto exception_exit;
}
}
}
if (cfg->compile_aot)
EMIT_NEW_SFLDACONST (cfg, ins, field);
else {
g_assert (vtable);
addr = mono_static_field_get_addr (vtable, field);
g_assert (addr);
EMIT_NEW_PCONST (cfg, ins, addr);
}
} else {
MonoInst *iargs [1];
EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
}
}
/* Generate IR to do the actual load/store operation */
if (il_op == MONO_CEE_STFLD || il_op == MONO_CEE_STSFLD) {
if (ins_flag & MONO_INST_VOLATILE) {
/* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
} else if (!mini_debug_options.weak_memory_model && mini_type_is_reference (ftype)) {
mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
}
}
if (il_op == MONO_CEE_LDSFLDA) {
ins->klass = mono_class_from_mono_type_internal (ftype);
ins->type = STACK_PTR;
*sp++ = ins;
} else if (il_op == MONO_CEE_STSFLD) {
MonoInst *store;
EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, ftype, ins->dreg, 0, store_val->dreg);
store->flags |= ins_flag;
} else {
gboolean is_const = FALSE;
MonoVTable *vtable = NULL;
gpointer addr = NULL;
if (!context_used) {
vtable = mono_class_vtable_checked (klass, cfg->error);
CHECK_CFG_ERROR;
CHECK_TYPELOAD (klass);
}
if ((ftype->attrs & FIELD_ATTRIBUTE_INIT_ONLY) && (((addr = mono_aot_readonly_field_override (field)) != NULL) ||
(!context_used && !cfg->compile_aot && vtable->initialized))) {
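/* Init-only static field of an already initialized class: try to fold the load into a compile time constant. */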
int ro_type = ftype->type;
if (!addr)
addr = mono_static_field_get_addr (vtable, field);
if (ro_type == MONO_TYPE_VALUETYPE && m_class_is_enumtype (ftype->data.klass)) {
ro_type = mono_class_enum_basetype_internal (ftype->data.klass)->type;
}
GSHAREDVT_FAILURE (il_op);
/* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
is_const = TRUE;
switch (ro_type) {
case MONO_TYPE_BOOLEAN:
case MONO_TYPE_U1:
EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
sp++;
break;
case MONO_TYPE_I1:
EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
sp++;
break;
case MONO_TYPE_CHAR:
case MONO_TYPE_U2:
EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
sp++;
break;
case MONO_TYPE_I2:
EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
sp++;
break;
case MONO_TYPE_I4:
EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
sp++;
break;
case MONO_TYPE_U4:
EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
sp++;
break;
case MONO_TYPE_I:
case MONO_TYPE_U:
case MONO_TYPE_PTR:
case MONO_TYPE_FNPTR:
EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
mini_type_to_eval_stack_type ((cfg), field->type, *sp);
sp++;
break;
case MONO_TYPE_STRING:
case MONO_TYPE_OBJECT:
case MONO_TYPE_CLASS:
case MONO_TYPE_SZARRAY:
case MONO_TYPE_ARRAY:
if (!mono_gc_is_moving ()) {
EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
mini_type_to_eval_stack_type ((cfg), field->type, *sp);
sp++;
} else {
is_const = FALSE;
}
break;
case MONO_TYPE_I8:
case MONO_TYPE_U8:
EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
sp++;
break;
case MONO_TYPE_R4:
case MONO_TYPE_R8:
case MONO_TYPE_VALUETYPE:
default:
is_const = FALSE;
break;
}
}
if (!is_const) {
MonoInst *load;
EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
load->flags |= ins_flag;
*sp++ = load;
}
}
field_access_end:
if ((il_op == MONO_CEE_LDFLD || il_op == MONO_CEE_LDSFLD) && (ins_flag & MONO_INST_VOLATILE)) {
/* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
}
ins_flag = 0;
break;
}
case MONO_CEE_STOBJ:
sp -= 2;
klass = mini_get_class (method, token, generic_context);
CHECK_TYPELOAD (klass);
/* FIXME: should check item at sp [1] is compatible with the type of the store. */
mini_emit_memory_store (cfg, m_class_get_byval_arg (klass), sp [0], sp [1], ins_flag);
ins_flag = 0;
inline_costs += 1;
break;
/*
* Array opcodes
*/
case MONO_CEE_NEWARR: {
MonoInst *len_ins;
const char *data_ptr;
int data_size = 0;
guint32 field_token;
--sp;
klass = mini_get_class (method, token, generic_context);
CHECK_TYPELOAD (klass);
if (m_class_get_byval_arg (klass)->type == MONO_TYPE_VOID)
UNVERIFIED;
context_used = mini_class_check_context_used (cfg, klass);
#ifndef TARGET_S390X
if (sp [0]->type == STACK_I8 && TARGET_SIZEOF_VOID_P == 4) {
MONO_INST_NEW (cfg, ins, OP_LCONV_TO_OVF_U4);
ins->sreg1 = sp [0]->dreg;
ins->type = STACK_I4;
ins->dreg = alloc_ireg (cfg);
MONO_ADD_INS (cfg->cbb, ins);
*sp = mono_decompose_opcode (cfg, ins);
}
#else
/* The array allocator expects a 64-bit input, and we cannot rely
on the high bits of a 32-bit result, so we have to extend. */
if (sp [0]->type == STACK_I4 && TARGET_SIZEOF_VOID_P == 8) {
MONO_INST_NEW (cfg, ins, OP_ICONV_TO_I8);
ins->sreg1 = sp [0]->dreg;
ins->type = STACK_I8;
ins->dreg = alloc_ireg (cfg);
MONO_ADD_INS (cfg->cbb, ins);
*sp = mono_decompose_opcode (cfg, ins);
}
#endif
if (context_used) {
MonoInst *args [3];
MonoClass *array_class = mono_class_create_array (klass, 1);
MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class);
/* FIXME: Use OP_NEWARR and decompose later to help abcrem */
/* vtable */
args [0] = mini_emit_get_rgctx_klass (cfg, context_used,
array_class, MONO_RGCTX_INFO_VTABLE);
/* array len */
args [1] = sp [0];
if (managed_alloc)
ins = mono_emit_method_call (cfg, managed_alloc, args, NULL);
else
ins = mono_emit_jit_icall (cfg, ves_icall_array_new_specific, args);
} else {
/* Decompose later since it is needed by abcrem */
MonoClass *array_type = mono_class_create_array (klass, 1);
mono_class_vtable_checked (array_type, cfg->error);
CHECK_CFG_ERROR;
CHECK_TYPELOAD (array_type);
MONO_INST_NEW (cfg, ins, OP_NEWARR);
ins->dreg = alloc_ireg_ref (cfg);
ins->sreg1 = sp [0]->dreg;
ins->inst_newa_class = klass;
ins->type = STACK_OBJ;
ins->klass = array_type;
MONO_ADD_INS (cfg->cbb, ins);
cfg->flags |= MONO_CFG_NEEDS_DECOMPOSE;
cfg->cbb->needs_decompose = TRUE;
/* Needed so mono_emit_load_get_addr () gets called */
mono_get_got_var (cfg);
}
len_ins = sp [0];
ip += 5;
*sp++ = ins;
inline_costs += 1;
/*
* We inline/optimize the initialization sequence if possible.
* We should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing.
* For small sizes, open code the memcpy.
* Ensure the rva field is big enough.
*/
if ((cfg->opt & MONO_OPT_INTRINS) && next_ip < end
&& ip_in_bb (cfg, cfg->cbb, next_ip)
&& (len_ins->opcode == OP_ICONST)
&& (data_ptr = initialize_array_data (cfg, method,
cfg->compile_aot, next_ip, end, klass,
len_ins->inst_c0, &data_size, &field_token,
&il_op, &next_ip))) {
MonoMethod *memcpy_method = mini_get_memcpy_method ();
MonoInst *iargs [3];
int add_reg = alloc_ireg_mp (cfg);
EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, MONO_STRUCT_OFFSET (MonoArray, vector));
if (cfg->compile_aot) {
EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, m_class_get_image (method->klass), GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
} else {
EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
}
EMIT_NEW_ICONST (cfg, iargs [2], data_size);
mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
}
break;
}
case MONO_CEE_LDLEN:
--sp;
if (sp [0]->type != STACK_OBJ)
UNVERIFIED;
MONO_INST_NEW (cfg, ins, OP_LDLEN);
ins->dreg = alloc_preg (cfg);
ins->sreg1 = sp [0]->dreg;
ins->inst_imm = MONO_STRUCT_OFFSET (MonoArray, max_length);
ins->type = STACK_I4;
/* This flag will be inherited by the decomposition */
ins->flags |= MONO_INST_FAULT | MONO_INST_INVARIANT_LOAD;
MONO_ADD_INS (cfg->cbb, ins);
cfg->flags |= MONO_CFG_NEEDS_DECOMPOSE;
cfg->cbb->needs_decompose = TRUE;
MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, sp [0]->dreg);
*sp++ = ins;
break;
case MONO_CEE_LDELEMA:
sp -= 2;
if (sp [0]->type != STACK_OBJ)
UNVERIFIED;
cfg->flags |= MONO_CFG_HAS_LDELEMA;
klass = mini_get_class (method, token, generic_context);
CHECK_TYPELOAD (klass);
/* We need to make sure that this array is exactly the type it needs
* to be for correctness. The wrappers are lax with their usage,
* so we need to ignore them here.
*/
if (!m_class_is_valuetype (klass) && method->wrapper_type == MONO_WRAPPER_NONE && !readonly) {
MonoClass *array_class = mono_class_create_array (klass, 1);
mini_emit_check_array_type (cfg, sp [0], array_class);
CHECK_TYPELOAD (array_class);
}
readonly = FALSE;
ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE, FALSE);
*sp++ = ins;
break;
case MONO_CEE_LDELEM:
case MONO_CEE_LDELEM_I1:
case MONO_CEE_LDELEM_U1:
case MONO_CEE_LDELEM_I2:
case MONO_CEE_LDELEM_U2:
case MONO_CEE_LDELEM_I4:
case MONO_CEE_LDELEM_U4:
case MONO_CEE_LDELEM_I8:
case MONO_CEE_LDELEM_I:
case MONO_CEE_LDELEM_R4:
case MONO_CEE_LDELEM_R8:
case MONO_CEE_LDELEM_REF: {
MonoInst *addr;
sp -= 2;
if (il_op == MONO_CEE_LDELEM) {
klass = mini_get_class (method, token, generic_context);
CHECK_TYPELOAD (klass);
mono_class_init_internal (klass);
}
else
klass = array_access_to_klass (il_op);
if (sp [0]->type != STACK_OBJ)
UNVERIFIED;
cfg->flags |= MONO_CFG_HAS_LDELEMA;
if (mini_is_gsharedvt_variable_klass (klass)) {
// FIXME-VT: OP_ICONST optimization
addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE, FALSE);
EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, m_class_get_byval_arg (klass), addr->dreg, 0);
ins->opcode = OP_LOADV_MEMBASE;
} else if (sp [1]->opcode == OP_ICONST) {
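/* Constant index: compute the element offset at compile time and emit an explicit bounds check. */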
int array_reg = sp [0]->dreg;
int index_reg = sp [1]->dreg;
int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + MONO_STRUCT_OFFSET (MonoArray, vector);
if (SIZEOF_REGISTER == 8 && COMPILE_LLVM (cfg))
MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, index_reg, index_reg);
MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, m_class_get_byval_arg (klass), array_reg, offset);
} else {
addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE, FALSE);
EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, m_class_get_byval_arg (klass), addr->dreg, 0);
}
*sp++ = ins;
break;
}
case MONO_CEE_STELEM_I:
case MONO_CEE_STELEM_I1:
case MONO_CEE_STELEM_I2:
case MONO_CEE_STELEM_I4:
case MONO_CEE_STELEM_I8:
case MONO_CEE_STELEM_R4:
case MONO_CEE_STELEM_R8:
case MONO_CEE_STELEM_REF:
case MONO_CEE_STELEM: {
sp -= 3;
cfg->flags |= MONO_CFG_HAS_LDELEMA;
if (il_op == MONO_CEE_STELEM) {
klass = mini_get_class (method, token, generic_context);
CHECK_TYPELOAD (klass);
mono_class_init_internal (klass);
}
else
klass = array_access_to_klass (il_op);
if (sp [0]->type != STACK_OBJ)
UNVERIFIED;
sp [2] = convert_value (cfg, m_class_get_byval_arg (klass), sp [2]);
mini_emit_array_store (cfg, klass, sp, TRUE);
inline_costs += 1;
break;
}
case MONO_CEE_CKFINITE: {
--sp;
if (cfg->llvm_only) {
MonoInst *iargs [1];
iargs [0] = sp [0];
*sp++ = mono_emit_jit_icall (cfg, mono_ckfinite, iargs);
} else {
sp [0] = convert_value (cfg, m_class_get_byval_arg (mono_defaults.double_class), sp [0]);
MONO_INST_NEW (cfg, ins, OP_CKFINITE);
ins->sreg1 = sp [0]->dreg;
ins->dreg = alloc_freg (cfg);
ins->type = STACK_R8;
MONO_ADD_INS (cfg->cbb, ins);
*sp++ = mono_decompose_opcode (cfg, ins);
}
break;
}
case MONO_CEE_REFANYVAL: {
MonoInst *src_var, *src;
int klass_reg = alloc_preg (cfg);
int dreg = alloc_preg (cfg);
GSHAREDVT_FAILURE (il_op);
MONO_INST_NEW (cfg, ins, il_op);
--sp;
klass = mini_get_class (method, token, generic_context);
CHECK_TYPELOAD (klass);
context_used = mini_class_check_context_used (cfg, klass);
// FIXME:
src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
if (!src_var)
src_var = mono_compile_create_var_for_vreg (cfg, m_class_get_byval_arg (mono_defaults.typed_reference_class), OP_LOCAL, sp [0]->dreg);
EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass));
if (context_used) {
MonoInst *klass_ins;
klass_ins = mini_emit_get_rgctx_klass (cfg, context_used,
klass, MONO_RGCTX_INFO_KLASS);
// FIXME:
MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
} else {
mini_emit_class_check (cfg, klass_reg, klass);
}
EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, value));
ins->type = STACK_MP;
ins->klass = klass;
*sp++ = ins;
break;
}
case MONO_CEE_MKREFANY: {
MonoInst *loc, *addr;
GSHAREDVT_FAILURE (il_op);
MONO_INST_NEW (cfg, ins, il_op);
--sp;
klass = mini_get_class (method, token, generic_context);
CHECK_TYPELOAD (klass);
context_used = mini_class_check_context_used (cfg, klass);
loc = mono_compile_create_var (cfg, m_class_get_byval_arg (mono_defaults.typed_reference_class), OP_LOCAL);
EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
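/* Fill in the klass, type and value fields of the MonoTypedRef stored in LOC. */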
MonoInst *const_ins = mini_emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
int type_reg = alloc_preg (cfg);
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, m_class_offsetof_byval_arg ());
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
ins->type = STACK_VTYPE;
ins->klass = mono_defaults.typed_reference_class;
*sp++ = ins;
break;
}
case MONO_CEE_LDTOKEN: {
gpointer handle;
MonoClass *handle_class;
if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
handle = mono_method_get_wrapper_data (method, n);
handle_class = (MonoClass *)mono_method_get_wrapper_data (method, n + 1);
if (handle_class == mono_defaults.typehandle_class)
handle = m_class_get_byval_arg ((MonoClass*)handle);
}
else {
handle = mono_ldtoken_checked (image, n, &handle_class, generic_context, cfg->error);
CHECK_CFG_ERROR;
}
if (!handle)
LOAD_ERROR;
mono_class_init_internal (handle_class);
if (cfg->gshared) {
if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
/* This case handles ldtoken
of an open type, like for
typeof(Gen<>). */
context_used = 0;
} else if (handle_class == mono_defaults.typehandle_class) {
context_used = mini_class_check_context_used (cfg, mono_class_from_mono_type_internal ((MonoType *)handle));
} else if (handle_class == mono_defaults.fieldhandle_class)
context_used = mini_class_check_context_used (cfg, m_field_get_parent (((MonoClassField*)handle)));
else if (handle_class == mono_defaults.methodhandle_class)
context_used = mini_method_check_context_used (cfg, (MonoMethod *)handle);
else
g_assert_not_reached ();
}
{
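/* If the next instruction is a call to Type::GetTypeFromHandle, fuse the two into a single runtime type object load. */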
if ((next_ip + 4 < end) && ip_in_bb (cfg, cfg->cbb, next_ip) &&
((next_ip [0] == CEE_CALL) || (next_ip [0] == CEE_CALLVIRT)) &&
(cmethod = mini_get_method (cfg, method, read32 (next_ip + 1), NULL, generic_context)) &&
(cmethod->klass == mono_defaults.systemtype_class) &&
(strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
MonoClass *tclass = mono_class_from_mono_type_internal ((MonoType *)handle);
mono_class_init_internal (tclass);
// Optimize to true/false if next instruction is `call instance bool Type::get_IsValueType()`
guchar *is_vt_ip;
guint32 is_vt_token;
if ((is_vt_ip = il_read_call (next_ip + 5, end, &is_vt_token)) && ip_in_bb (cfg, cfg->cbb, is_vt_ip)) {
MonoMethod *is_vt_method = mini_get_method (cfg, method, is_vt_token, NULL, generic_context);
if (is_vt_method->klass == mono_defaults.systemtype_class &&
!mini_is_gsharedvt_variable_klass (tclass) &&
!mono_class_is_open_constructed_type (m_class_get_byval_arg (tclass)) &&
!strcmp ("get_IsValueType", is_vt_method->name)) {
next_ip = is_vt_ip;
EMIT_NEW_ICONST (cfg, ins, m_class_is_valuetype (tclass) ? 1 : 0);
ins->type = STACK_I4;
*sp++ = ins;
break;
}
}
if (context_used) {
MONO_INST_NEW (cfg, ins, OP_RTTYPE);
ins->dreg = alloc_ireg_ref (cfg);
ins->inst_p0 = tclass;
ins->type = STACK_OBJ;
MONO_ADD_INS (cfg->cbb, ins);
cfg->flags |= MONO_CFG_NEEDS_DECOMPOSE;
cfg->cbb->needs_decompose = TRUE;
} else if (cfg->compile_aot) {
if (method->wrapper_type) {
error_init (error); // have to do this since there are multiple conditionals below
if (mono_class_get_checked (m_class_get_image (tclass), m_class_get_type_token (tclass), error) == tclass && !generic_context) {
/* Special case for static synchronized wrappers */
EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, m_class_get_image (tclass), m_class_get_type_token (tclass), generic_context);
} else {
mono_error_cleanup (error); /* FIXME don't swallow the error */
/* FIXME: n is not a normal token */
DISABLE_AOT (cfg);
EMIT_NEW_PCONST (cfg, ins, NULL);
}
} else {
EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
}
} else {
MonoReflectionType *rt = mono_type_get_object_checked ((MonoType *)handle, cfg->error);
CHECK_CFG_ERROR;
EMIT_NEW_PCONST (cfg, ins, rt);
}
ins->type = STACK_OBJ;
ins->klass = mono_defaults.runtimetype_class;
il_op = (MonoOpcodeEnum)next_ip [0];
next_ip += 5;
} else {
MonoInst *addr, *vtvar;
vtvar = mono_compile_create_var (cfg, m_class_get_byval_arg (handle_class), OP_LOCAL);
if (context_used) {
if (handle_class == mono_defaults.typehandle_class) {
ins = mini_emit_get_rgctx_klass (cfg, context_used,
mono_class_from_mono_type_internal ((MonoType *)handle),
MONO_RGCTX_INFO_TYPE);
} else if (handle_class == mono_defaults.methodhandle_class) {
ins = emit_get_rgctx_method (cfg, context_used,
(MonoMethod *)handle, MONO_RGCTX_INFO_METHOD);
} else if (handle_class == mono_defaults.fieldhandle_class) {
ins = emit_get_rgctx_field (cfg, context_used,
(MonoClassField *)handle, MONO_RGCTX_INFO_CLASS_FIELD);
} else {
g_assert_not_reached ();
}
} else if (cfg->compile_aot) {
EMIT_NEW_LDTOKENCONST (cfg, ins, image, n, generic_context);
} else {
EMIT_NEW_PCONST (cfg, ins, handle);
}
EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
}
}
*sp++ = ins;
break;
}
case MONO_CEE_THROW:
if (sp [-1]->type != STACK_OBJ)
UNVERIFIED;
MONO_INST_NEW (cfg, ins, OP_THROW);
--sp;
ins->sreg1 = sp [0]->dreg;
cfg->cbb->out_of_line = TRUE;
MONO_ADD_INS (cfg->cbb, ins);
MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
MONO_ADD_INS (cfg->cbb, ins);
sp = stack_start;
link_bblock (cfg, cfg->cbb, end_bblock);
start_new_bblock = 1;
/* This can complicate code generation for llvm since the return value might not be defined */
if (COMPILE_LLVM (cfg))
INLINE_FAILURE ("throw");
break;
case MONO_CEE_ENDFINALLY:
if (!ip_in_finally_clause (cfg, ip - header->code))
UNVERIFIED;
/* mono_save_seq_point_info () depends on this */
if (sp != stack_start)
emit_seq_point (cfg, method, ip, FALSE, FALSE);
MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
MONO_ADD_INS (cfg->cbb, ins);
start_new_bblock = 1;
ins_has_side_effect = FALSE;
/*
* Control will leave the method so empty the stack, otherwise
* the next basic block will start with a nonempty stack.
*/
while (sp != stack_start) {
sp--;
}
break;
case MONO_CEE_LEAVE:
case MONO_CEE_LEAVE_S: {
GList *handlers;
/* empty the stack */
g_assert (sp >= stack_start);
sp = stack_start;
/*
* If this leave statement is in a catch block, check for a
* pending exception, and rethrow it if necessary.
* We avoid doing this in runtime invoke wrappers, since those are called
* by native code which expects the wrapper to catch all exceptions.
*/
for (i = 0; i < header->num_clauses; ++i) {
MonoExceptionClause *clause = &header->clauses [i];
/*
* Use <= in the final comparison to handle clauses with multiple
* leave statements, like in bug #78024.
* The ordering of the exception clauses guarantees that we find the
* innermost clause.
*/
if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((il_op == MONO_CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len) && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE) {
MonoInst *exc_ins;
MonoBasicBlock *dont_throw;
/*
MonoInst *load;
NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
*/
exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
NEW_BBLOCK (cfg, dont_throw);
/*
* Currently, we always rethrow the abort exception, despite the
* fact that this is not correct. See thread6.cs for an example.
* But propagating the abort exception is more important than
* getting the semantics right.
*/
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
MONO_START_BB (cfg, dont_throw);
}
}
#ifdef ENABLE_LLVM
cfg->cbb->try_end = (intptr_t)(ip - header->code);
#endif
if ((handlers = mono_find_leave_clauses (cfg, ip, target))) {
GList *tmp;
/*
* For each finally clause that we exit we need to invoke the finally block.
* After each invocation we need to add try holes for all the clauses that
* we already exited.
*/
for (tmp = handlers; tmp; tmp = tmp->next) {
MonoLeaveClause *leave = (MonoLeaveClause *) tmp->data;
MonoExceptionClause *clause = leave->clause;
if (clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY)
continue;
MonoInst *abort_exc = (MonoInst *)mono_find_exvar_for_offset (cfg, clause->handler_offset);
MonoBasicBlock *dont_throw;
/*
* Emit instrumentation code before linking the basic blocks below as this
* will alter cfg->cbb.
*/
mini_profiler_emit_call_finally (cfg, header, ip, leave->index, clause);
tblock = cfg->cil_offset_to_bb [clause->handler_offset];
g_assert (tblock);
link_bblock (cfg, cfg->cbb, tblock);
MONO_EMIT_NEW_PCONST (cfg, abort_exc->dreg, 0);
MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
ins->inst_target_bb = tblock;
ins->inst_eh_blocks = tmp;
MONO_ADD_INS (cfg->cbb, ins);
cfg->cbb->has_call_handler = 1;
/* Throw exception if exvar is set */
/* FIXME Do we need this for calls from catch/filter ? */
NEW_BBLOCK (cfg, dont_throw);
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, abort_exc->dreg, 0);
MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
mono_emit_jit_icall (cfg, ves_icall_thread_finish_async_abort, NULL);
cfg->cbb->clause_holes = tmp;
MONO_START_BB (cfg, dont_throw);
cfg->cbb->clause_holes = tmp;
if (COMPILE_LLVM (cfg)) {
MonoBasicBlock *target_bb;
/*
* Link the finally bblock with the target, since it will
* conceptually branch there.
*/
GET_BBLOCK (cfg, tblock, cfg->cil_start + clause->handler_offset + clause->handler_len - 1);
GET_BBLOCK (cfg, target_bb, target);
link_bblock (cfg, tblock, target_bb);
}
}
}
MONO_INST_NEW (cfg, ins, OP_BR);
MONO_ADD_INS (cfg->cbb, ins);
GET_BBLOCK (cfg, tblock, target);
link_bblock (cfg, cfg->cbb, tblock);
ins->inst_target_bb = tblock;
start_new_bblock = 1;
break;
}
/*
* Mono specific opcodes
*/
case MONO_CEE_MONO_ICALL: {
g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
const MonoJitICallId jit_icall_id = (MonoJitICallId)token;
MonoJitICallInfo * const info = mono_find_jit_icall_info (jit_icall_id);
CHECK_STACK (info->sig->param_count);
sp -= info->sig->param_count;
if (token == MONO_JIT_ICALL_mono_threads_attach_coop) {
MonoInst *addr;
MonoBasicBlock *next_bb;
if (cfg->compile_aot) {
/*
* This is called on unattached threads, so it cannot go through the trampoline
* infrastructure. Use an indirect call through a got slot initialized at load time
* instead.
*/
EMIT_NEW_AOTCONST (cfg, addr, MONO_PATCH_INFO_JIT_ICALL_ADDR_NOCALL, GUINT_TO_POINTER (jit_icall_id));
ins = mini_emit_calli (cfg, info->sig, sp, addr, NULL, NULL);
} else {
ins = mono_emit_jit_icall_id (cfg, jit_icall_id, sp);
}
/*
* Parts of the initlocals code need to come after this, since it might call methods like memset.
* Also profiling needs to be after attach.
*/
init_localsbb2 = cfg->cbb;
NEW_BBLOCK (cfg, next_bb);
MONO_START_BB (cfg, next_bb);
} else {
if (token == MONO_JIT_ICALL_mono_threads_detach_coop) {
/* can't emit profiling code after a detach, so emit it now */
mini_profiler_emit_leave (cfg, NULL);
detached_before_ret = TRUE;
}
ins = mono_emit_jit_icall_id (cfg, jit_icall_id, sp);
}
if (!MONO_TYPE_IS_VOID (info->sig->ret))
*sp++ = ins;
inline_costs += CALL_COST * MIN(10, num_calls++);
break;
}
MonoJumpInfoType ldptr_type;
case MONO_CEE_MONO_LDPTR_CARD_TABLE:
ldptr_type = MONO_PATCH_INFO_GC_CARD_TABLE_ADDR;
goto mono_ldptr;
case MONO_CEE_MONO_LDPTR_NURSERY_START:
ldptr_type = MONO_PATCH_INFO_GC_NURSERY_START;
goto mono_ldptr;
case MONO_CEE_MONO_LDPTR_NURSERY_BITS:
ldptr_type = MONO_PATCH_INFO_GC_NURSERY_BITS;
goto mono_ldptr;
case MONO_CEE_MONO_LDPTR_INT_REQ_FLAG:
ldptr_type = MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG;
goto mono_ldptr;
case MONO_CEE_MONO_LDPTR_PROFILER_ALLOCATION_COUNT:
ldptr_type = MONO_PATCH_INFO_PROFILER_ALLOCATION_COUNT;
mono_ldptr:
g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
ins = mini_emit_runtime_constant (cfg, ldptr_type, NULL);
*sp++ = ins;
inline_costs += CALL_COST * MIN(10, num_calls++);
break;
case MONO_CEE_MONO_LDPTR: {
gpointer ptr;
g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
ptr = mono_method_get_wrapper_data (method, token);
EMIT_NEW_PCONST (cfg, ins, ptr);
*sp++ = ins;
inline_costs += CALL_COST * MIN(10, num_calls++);
/* Can't embed random pointers into AOT code */
DISABLE_AOT (cfg);
break;
}
case MONO_CEE_MONO_JIT_ICALL_ADDR:
g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
EMIT_NEW_JIT_ICALL_ADDRCONST (cfg, ins, GUINT_TO_POINTER (token));
*sp++ = ins;
inline_costs += CALL_COST * MIN(10, num_calls++);
break;
case MONO_CEE_MONO_ICALL_ADDR: {
MonoMethod *cmethod;
gpointer ptr;
g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
cmethod = (MonoMethod *)mono_method_get_wrapper_data (method, token);
if (cfg->compile_aot) {
if (cfg->direct_pinvoke && ip + 6 < end && (ip [6] == CEE_POP)) {
/*
* This is generated by emit_native_wrapper () to resolve the pinvoke address
* before the call; it's not needed when using direct pinvoke.
* This is not an optimization, but it's used to avoid looking up pinvokes
* on platforms which don't support dlopen ().
*/
EMIT_NEW_PCONST (cfg, ins, NULL);
} else {
EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
}
} else {
ptr = mono_lookup_internal_call (cmethod);
g_assert (ptr);
EMIT_NEW_PCONST (cfg, ins, ptr);
}
*sp++ = ins;
break;
}
case MONO_CEE_MONO_VTADDR: {
g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
MonoInst *src_var, *src;
--sp;
// FIXME:
src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
*sp++ = src;
break;
}
case MONO_CEE_MONO_NEWOBJ: {
g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
MonoInst *iargs [2];
klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
mono_class_init_internal (klass);
NEW_CLASSCONST (cfg, iargs [0], klass);
MONO_ADD_INS (cfg->cbb, iargs [0]);
*sp++ = mono_emit_jit_icall (cfg, ves_icall_object_new, iargs);
inline_costs += CALL_COST * MIN(10, num_calls++);
break;
}
case MONO_CEE_MONO_OBJADDR:
g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
--sp;
MONO_INST_NEW (cfg, ins, OP_MOVE);
ins->dreg = alloc_ireg_mp (cfg);
ins->sreg1 = sp [0]->dreg;
ins->type = STACK_MP;
MONO_ADD_INS (cfg->cbb, ins);
*sp++ = ins;
break;
case MONO_CEE_MONO_LDNATIVEOBJ:
/*
* Similar to LDOBJ, but instead load the unmanaged
* representation of the vtype to the stack.
*/
g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
--sp;
klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
g_assert (m_class_is_valuetype (klass));
mono_class_init_internal (klass);
{
MonoInst *src, *dest, *temp;
src = sp [0];
temp = mono_compile_create_var (cfg, m_class_get_byval_arg (klass), OP_LOCAL);
temp->backend.is_pinvoke = 1;
EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
mini_emit_memory_copy (cfg, dest, src, klass, TRUE, 0);
EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
dest->type = STACK_VTYPE;
dest->klass = klass;
*sp ++ = dest;
}
break;
case MONO_CEE_MONO_RETOBJ: {
/*
* Same as RET, but return the native representation of a vtype
* to the caller.
*/
g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
g_assert (cfg->ret);
g_assert (mono_method_signature_internal (method)->pinvoke);
--sp;
klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
if (!cfg->vret_addr) {
g_assert (cfg->ret_var_is_local);
EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
} else {
EMIT_NEW_RETLOADA (cfg, ins);
}
mini_emit_memory_copy (cfg, ins, sp [0], klass, TRUE, 0);
if (sp != stack_start)
UNVERIFIED;
if (!detached_before_ret)
mini_profiler_emit_leave (cfg, sp [0]);
MONO_INST_NEW (cfg, ins, OP_BR);
ins->inst_target_bb = end_bblock;
MONO_ADD_INS (cfg->cbb, ins);
link_bblock (cfg, cfg->cbb, end_bblock);
start_new_bblock = 1;
break;
}
case MONO_CEE_MONO_SAVE_LMF:
case MONO_CEE_MONO_RESTORE_LMF:
g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
break;
case MONO_CEE_MONO_CLASSCONST:
g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
*sp++ = ins;
inline_costs += CALL_COST * MIN(10, num_calls++);
break;
case MONO_CEE_MONO_METHODCONST:
g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
EMIT_NEW_METHODCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
*sp++ = ins;
break;
case MONO_CEE_MONO_PINVOKE_ADDR_CACHE: {
g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
MonoMethod *pinvoke_method = (MonoMethod*)mono_method_get_wrapper_data (method, token);
/* This is a memory slot used by the wrapper */
if (cfg->compile_aot) {
EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_METHOD_PINVOKE_ADDR_CACHE, pinvoke_method);
} else {
gpointer addr = mono_mem_manager_alloc0 (cfg->mem_manager, sizeof (gpointer));
EMIT_NEW_PCONST (cfg, ins, addr);
}
*sp++ = ins;
break;
}
case MONO_CEE_MONO_NOT_TAKEN:
g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
cfg->cbb->out_of_line = TRUE;
break;
case MONO_CEE_MONO_TLS: {
MonoTlsKey key;
g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
key = (MonoTlsKey)n;
g_assert (key < TLS_KEY_NUM);
ins = mono_create_tls_get (cfg, key);
g_assert (ins);
ins->type = STACK_PTR;
*sp++ = ins;
break;
}
case MONO_CEE_MONO_DYN_CALL: {
MonoCallInst *call;
/* It would be easier to call a trampoline, but that would put an
* extra frame on the stack, confusing exception handling. So
* implement it inline using an opcode for now.
*/
g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
if (!cfg->dyn_call_var) {
cfg->dyn_call_var = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL);
/* prevent it from being register allocated */
cfg->dyn_call_var->flags |= MONO_INST_VOLATILE;
}
/* Has to use a call inst since local regalloc expects it */
MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL);
ins = (MonoInst*)call;
sp -= 2;
ins->sreg1 = sp [0]->dreg;
ins->sreg2 = sp [1]->dreg;
MONO_ADD_INS (cfg->cbb, ins);
cfg->param_area = MAX (cfg->param_area, cfg->backend->dyn_call_param_area);
/* OP_DYN_CALL might need to allocate a dynamically sized param area */
cfg->flags |= MONO_CFG_HAS_ALLOCA;
inline_costs += CALL_COST * MIN(10, num_calls++);
break;
}
case MONO_CEE_MONO_MEMORY_BARRIER: {
g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
mini_emit_memory_barrier (cfg, (int)n);
break;
}
case MONO_CEE_MONO_ATOMIC_STORE_I4: {
g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
g_assert (mono_arch_opcode_supported (OP_ATOMIC_STORE_I4));
sp -= 2;
MONO_INST_NEW (cfg, ins, OP_ATOMIC_STORE_I4);
ins->dreg = sp [0]->dreg;
ins->sreg1 = sp [1]->dreg;
ins->backend.memory_barrier_kind = (int)n;
MONO_ADD_INS (cfg->cbb, ins);
break;
}
case MONO_CEE_MONO_LD_DELEGATE_METHOD_PTR: {
CHECK_STACK (1);
--sp;
dreg = alloc_preg (cfg);
EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr));
*sp++ = ins;
break;
}
case MONO_CEE_MONO_CALLI_EXTRA_ARG: {
MonoInst *addr;
MonoMethodSignature *fsig;
MonoInst *arg;
/*
* This is the same as CEE_CALLI, but passes an additional argument
* to the called method in llvmonly mode.
* This is only used by delegate invoke wrappers to call the
* actual delegate method.
*/
g_assert (method->wrapper_type == MONO_WRAPPER_DELEGATE_INVOKE);
ins = NULL;
cmethod = NULL;
CHECK_STACK (1);
--sp;
addr = *sp;
fsig = mini_get_signature (method, token, generic_context, cfg->error);
CHECK_CFG_ERROR;
if (cfg->llvm_only)
cfg->signatures = g_slist_prepend_mempool (cfg->mempool, cfg->signatures, fsig);
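/* param_count + this + the additional argument described above */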
n = fsig->param_count + fsig->hasthis + 1;
CHECK_STACK (n);
sp -= n;
arg = sp [n - 1];
if (cfg->llvm_only) {
/*
* The lowest bit of 'arg' determines whether the callee uses the gsharedvt
* cconv. This is set by mono_init_delegate ().
*/
if (cfg->gsharedvt && mini_is_gsharedvt_variable_signature (fsig)) {
MonoInst *callee = addr;
MonoInst *call, *localloc_ins;
MonoBasicBlock *is_gsharedvt_bb, *end_bb;
int low_bit_reg = alloc_preg (cfg);
NEW_BBLOCK (cfg, is_gsharedvt_bb);
NEW_BBLOCK (cfg, end_bb);
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, low_bit_reg, arg->dreg, 1);
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, low_bit_reg, 0);
MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, is_gsharedvt_bb);
/* Normal case: callee uses a normal cconv, have to add an out wrapper */
addr = emit_get_rgctx_sig (cfg, context_used,
fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
/*
* ADDR points to a gsharedvt-out wrapper, have to pass <callee, arg> as an extra arg.
*/
MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
ins->dreg = alloc_preg (cfg);
ins->inst_imm = 2 * TARGET_SIZEOF_VOID_P;
MONO_ADD_INS (cfg->cbb, ins);
localloc_ins = ins;
cfg->flags |= MONO_CFG_HAS_ALLOCA;
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, 0, callee->dreg);
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, TARGET_SIZEOF_VOID_P, arg->dreg);
call = mini_emit_extra_arg_calli (cfg, fsig, sp, localloc_ins->dreg, addr);
MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Gsharedvt case: callee uses a gsharedvt cconv, no conversion is needed */
MONO_START_BB (cfg, is_gsharedvt_bb);
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PXOR_IMM, arg->dreg, arg->dreg, 1);
ins = mini_emit_extra_arg_calli (cfg, fsig, sp, arg->dreg, callee);
ins->dreg = call->dreg;
MONO_START_BB (cfg, end_bb);
} else {
/* Caller uses a normal calling conv */
MonoInst *callee = addr;
MonoInst *call, *localloc_ins;
MonoBasicBlock *is_gsharedvt_bb, *end_bb;
int low_bit_reg = alloc_preg (cfg);
NEW_BBLOCK (cfg, is_gsharedvt_bb);
NEW_BBLOCK (cfg, end_bb);
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, low_bit_reg, arg->dreg, 1);
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, low_bit_reg, 0);
MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, is_gsharedvt_bb);
/* Normal case: callee uses a normal cconv, no conversion is needed */
call = mini_emit_extra_arg_calli (cfg, fsig, sp, arg->dreg, callee);
MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Gsharedvt case: callee uses a gsharedvt cconv, have to add an in wrapper */
MONO_START_BB (cfg, is_gsharedvt_bb);
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PXOR_IMM, arg->dreg, arg->dreg, 1);
NEW_AOTCONST (cfg, addr, MONO_PATCH_INFO_GSHAREDVT_IN_WRAPPER, fsig);
MONO_ADD_INS (cfg->cbb, addr);
/*
* ADDR points to a gsharedvt-in wrapper, have to pass <callee, arg> as an extra arg.
*/
MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
ins->dreg = alloc_preg (cfg);
ins->inst_imm = 2 * TARGET_SIZEOF_VOID_P;
MONO_ADD_INS (cfg->cbb, ins);
localloc_ins = ins;
cfg->flags |= MONO_CFG_HAS_ALLOCA;
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, 0, callee->dreg);
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, TARGET_SIZEOF_VOID_P, arg->dreg);
ins = mini_emit_extra_arg_calli (cfg, fsig, sp, localloc_ins->dreg, addr);
ins->dreg = call->dreg;
MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
MONO_START_BB (cfg, end_bb);
}
} else {
/* Same as CEE_CALLI */
if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) {
/*
* We pass the address to the gsharedvt trampoline in the rgctx reg
*/
MonoInst *callee = addr;
addr = emit_get_rgctx_sig (cfg, context_used,
fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
ins = (MonoInst*)mini_emit_calli (cfg, fsig, sp, addr, NULL, callee);
} else {
ins = (MonoInst*)mini_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
}
}
if (!MONO_TYPE_IS_VOID (fsig->ret))
*sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
CHECK_CFG_EXCEPTION;
ins_flag = 0;
constrained_class = NULL;
break;
}
case MONO_CEE_MONO_LDDOMAIN: {
MonoDomain *domain = mono_get_root_domain ();
g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
EMIT_NEW_PCONST (cfg, ins, cfg->compile_aot ? NULL : domain);
*sp++ = ins;
break;
}
case MONO_CEE_MONO_SAVE_LAST_ERROR:
g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
// Just an IL prefix, setting this flag, picked up by call instructions.
save_last_error = TRUE;
break;
case MONO_CEE_MONO_GET_RGCTX_ARG:
g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
mono_create_rgctx_var (cfg);
MONO_INST_NEW (cfg, ins, OP_MOVE);
ins->dreg = alloc_dreg (cfg, STACK_PTR);
ins->sreg1 = cfg->rgctx_var->dreg;
ins->type = STACK_PTR;
MONO_ADD_INS (cfg->cbb, ins);
*sp++ = ins;
break;
case MONO_CEE_MONO_GET_SP: {
/* Used by COOP only, so this is good enough */
MonoInst *var = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL);
EMIT_NEW_VARLOADA (cfg, ins, var, NULL);
*sp++ = ins;
break;
}
case MONO_CEE_MONO_REMAP_OVF_EXC:
/* Remap the exception thrown by the next _OVF opcode */
g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
ovf_exc = (const char*)mono_method_get_wrapper_data (method, token);
break;
case MONO_CEE_ARGLIST: {
/* somewhat similar to LDTOKEN */
MonoInst *addr, *vtvar;
vtvar = mono_compile_create_var (cfg, m_class_get_byval_arg (mono_defaults.argumenthandle_class), OP_LOCAL);
EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
ins->type = STACK_VTYPE;
ins->klass = mono_defaults.argumenthandle_class;
*sp++ = ins;
break;
}
case MONO_CEE_CEQ:
case MONO_CEE_CGT:
case MONO_CEE_CGT_UN:
case MONO_CEE_CLT:
case MONO_CEE_CLT_UN: {
MonoInst *cmp, *arg1, *arg2;
sp -= 2;
arg1 = sp [0];
arg2 = sp [1];
/*
* The following transforms:
* CEE_CEQ into OP_CEQ
* CEE_CGT into OP_CGT
* CEE_CGT_UN into OP_CGT_UN
* CEE_CLT into OP_CLT
* CEE_CLT_UN into OP_CLT_UN
*/
MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
MONO_INST_NEW (cfg, ins, cmp->opcode);
cmp->sreg1 = arg1->dreg;
cmp->sreg2 = arg2->dreg;
type_from_op (cfg, cmp, arg1, arg2);
CHECK_TYPE (cmp);
add_widen_op (cfg, cmp, &arg1, &arg2);
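/* Pick the compare opcode variant matching the stack type of the operands. */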
if ((arg1->type == STACK_I8) || ((TARGET_SIZEOF_VOID_P == 8) && ((arg1->type == STACK_PTR) || (arg1->type == STACK_OBJ) || (arg1->type == STACK_MP))))
cmp->opcode = OP_LCOMPARE;
else if (arg1->type == STACK_R4)
cmp->opcode = OP_RCOMPARE;
else if (arg1->type == STACK_R8)
cmp->opcode = OP_FCOMPARE;
else
cmp->opcode = OP_ICOMPARE;
MONO_ADD_INS (cfg->cbb, cmp);
ins->type = STACK_I4;
ins->dreg = alloc_dreg (cfg, (MonoStackType)ins->type);
type_from_op (cfg, ins, arg1, arg2);
if (cmp->opcode == OP_FCOMPARE || cmp->opcode == OP_RCOMPARE) {
/*
* The backends expect the fceq opcodes to do the
* comparison too.
*/
ins->sreg1 = cmp->sreg1;
ins->sreg2 = cmp->sreg2;
NULLIFY_INS (cmp);
}
MONO_ADD_INS (cfg->cbb, ins);
*sp++ = ins;
break;
}
case MONO_CEE_LDFTN: {
MonoInst *argconst;
MonoMethod *cil_method;
cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
CHECK_CFG_ERROR;
if (constrained_class) {
if (m_method_is_static (cmethod) && mini_class_check_context_used (cfg, constrained_class))
// FIXME:
GENERIC_SHARING_FAILURE (CEE_LDFTN);
cmethod = get_constrained_method (cfg, image, n, cmethod, constrained_class, generic_context);
constrained_class = NULL;
CHECK_CFG_ERROR;
}
mono_class_init_internal (cmethod->klass);
mono_save_token_info (cfg, image, n, cmethod);
context_used = mini_method_check_context_used (cfg, cmethod);
cil_method = cmethod;
if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
emit_method_access_failure (cfg, method, cil_method);
const gboolean has_unmanaged_callers_only =
cmethod->wrapper_type == MONO_WRAPPER_NONE &&
mono_method_has_unmanaged_callers_only_attribute (cmethod);
/*
* Optimize the common case of ldftn+delegate creation
*/
if ((sp > stack_start) && (next_ip + 4 < end) && ip_in_bb (cfg, cfg->cbb, next_ip) && (next_ip [0] == CEE_NEWOBJ)) {
MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (next_ip + 1), NULL, generic_context);
if (ctor_method && (m_class_get_parent (ctor_method->klass) == mono_defaults.multicastdelegate_class)) {
MonoInst *target_ins, *handle_ins;
MonoMethod *invoke;
int invoke_context_used;
if (G_UNLIKELY (has_unmanaged_callers_only)) {
mono_error_set_not_supported (cfg->error, "Cannot create delegate from method with UnmanagedCallersOnlyAttribute");
CHECK_CFG_ERROR;
}
invoke = mono_get_delegate_invoke_internal (ctor_method->klass);
if (!invoke || !mono_method_signature_internal (invoke))
LOAD_ERROR;
invoke_context_used = mini_method_check_context_used (cfg, invoke);
target_ins = sp [-1];
if (!(cmethod->flags & METHOD_ATTRIBUTE_STATIC)) {
/*BAD IMPL: We must not add a null check for virtual invoke delegates.*/
if (mono_method_signature_internal (invoke)->param_count == mono_method_signature_internal (cmethod)->param_count) {
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, target_ins->dreg, 0);
MONO_EMIT_NEW_COND_EXC (cfg, EQ, "ArgumentException");
}
}
if ((invoke_context_used == 0 || !cfg->gsharedvt) || cfg->llvm_only) {
if (cfg->verbose_level > 3)
g_print ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip + 6, NULL));
if ((handle_ins = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used, invoke_context_used, FALSE))) {
sp --;
*sp = handle_ins;
CHECK_CFG_EXCEPTION;
sp ++;
next_ip += 5;
il_op = MONO_CEE_NEWOBJ;
break;
} else {
CHECK_CFG_ERROR;
}
}
}
}
/* UnmanagedCallersOnlyAttribute means ldftn should return a method callable from native */
if (G_UNLIKELY (has_unmanaged_callers_only)) {
if (G_UNLIKELY (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
// Follow CoreCLR, disallow [UnmanagedCallersOnly] and [DllImport] to be used
// together
emit_not_supported_failure (cfg);
EMIT_NEW_PCONST (cfg, ins, NULL);
*sp++ = ins;
inline_costs += CALL_COST * MIN(10, num_calls++);
break;
}
MonoClass *delegate_klass = NULL;
MonoGCHandle target_handle = 0;
ERROR_DECL (wrapper_error);
MonoMethod *wrapped_cmethod;
wrapped_cmethod = mono_marshal_get_managed_wrapper (cmethod, delegate_klass, target_handle, wrapper_error);
if (!is_ok (wrapper_error)) {
/* if we couldn't create a wrapper because cmethod isn't supposed to have an
UnmanagedCallersOnly attribute, follow CoreCLR behavior and throw when the
method with the ldftn is executing, not when it is being compiled. */
emit_invalid_program_with_msg (cfg, wrapper_error, method, cmethod);
mono_error_cleanup (wrapper_error);
EMIT_NEW_PCONST (cfg, ins, NULL);
*sp++ = ins;
inline_costs += CALL_COST * MIN(10, num_calls++);
break;
} else {
cmethod = wrapped_cmethod;
}
}
argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
*sp++ = ins;
inline_costs += CALL_COST * MIN(10, num_calls++);
break;
}
case MONO_CEE_LDVIRTFTN: {
MonoInst *args [2];
cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
CHECK_CFG_ERROR;
mono_class_init_internal (cmethod->klass);
context_used = mini_method_check_context_used (cfg, cmethod);
/*
* Optimize the common case of ldvirtftn+delegate creation
*/
if (previous_il_op == MONO_CEE_DUP && (sp > stack_start) && (next_ip + 4 < end) && ip_in_bb (cfg, cfg->cbb, next_ip) && (next_ip [0] == CEE_NEWOBJ)) {
MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (next_ip + 1), NULL, generic_context);
if (ctor_method && (m_class_get_parent (ctor_method->klass) == mono_defaults.multicastdelegate_class)) {
MonoInst *target_ins, *handle_ins;
MonoMethod *invoke;
int invoke_context_used;
const gboolean is_virtual = (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) != 0;
invoke = mono_get_delegate_invoke_internal (ctor_method->klass);
if (!invoke || !mono_method_signature_internal (invoke))
LOAD_ERROR;
invoke_context_used = mini_method_check_context_used (cfg, invoke);
target_ins = sp [-1];
if (invoke_context_used == 0 || !cfg->gsharedvt || cfg->llvm_only) {
if (cfg->verbose_level > 3)
g_print ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip + 6, NULL));
if ((handle_ins = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used, invoke_context_used, is_virtual))) {
sp -= 2;
*sp = handle_ins;
CHECK_CFG_EXCEPTION;
next_ip += 5;
previous_il_op = MONO_CEE_NEWOBJ;
sp ++;
break;
} else {
CHECK_CFG_ERROR;
}
}
}
}
--sp;
args [0] = *sp;
args [1] = emit_get_rgctx_method (cfg, context_used,
cmethod, MONO_RGCTX_INFO_METHOD);
if (context_used)
*sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
else
*sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
inline_costs += CALL_COST * MIN(10, num_calls++);
break;
}
case MONO_CEE_LOCALLOC: {
MonoBasicBlock *non_zero_bb, *end_bb;
int alloc_ptr = alloc_preg (cfg);
--sp;
if (sp != stack_start)
UNVERIFIED;
if (cfg->method != method)
/*
* Inlining this into a loop in a parent could lead to
* stack overflows, which is different behavior from the
* non-inlined case, thus disable inlining in this case.
*/
INLINE_FAILURE ("localloc");
NEW_BBLOCK (cfg, non_zero_bb);
NEW_BBLOCK (cfg, end_bb);
/* if size != zero */
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, non_zero_bb);
// size is zero, so the result is NULL
MONO_EMIT_NEW_PCONST (cfg, alloc_ptr, NULL);
MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
MONO_START_BB (cfg, non_zero_bb);
MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
ins->dreg = alloc_ptr;
ins->sreg1 = sp [0]->dreg;
ins->type = STACK_PTR;
MONO_ADD_INS (cfg->cbb, ins);
cfg->flags |= MONO_CFG_HAS_ALLOCA;
if (header->init_locals)
ins->flags |= MONO_INST_INIT;
MONO_START_BB (cfg, end_bb);
EMIT_NEW_UNALU (cfg, ins, OP_MOVE, alloc_preg (cfg), alloc_ptr);
ins->type = STACK_PTR;
*sp++ = ins;
break;
}
case MONO_CEE_ENDFILTER: {
MonoExceptionClause *clause, *nearest;
int cc;
--sp;
if ((sp != stack_start) || (sp [0]->type != STACK_I4))
UNVERIFIED;
MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
ins->sreg1 = (*sp)->dreg;
MONO_ADD_INS (cfg->cbb, ins);
start_new_bblock = 1;
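/* Find the innermost filter clause whose filter block contains this endfilter. */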
nearest = NULL;
for (cc = 0; cc < header->num_clauses; ++cc) {
clause = &header->clauses [cc];
if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
((next_ip - header->code) > clause->data.filter_offset && (next_ip - header->code) <= clause->handler_offset) &&
(!nearest || (clause->data.filter_offset < nearest->data.filter_offset)))
nearest = clause;
}
g_assert (nearest);
if ((next_ip - header->code) != nearest->handler_offset)
UNVERIFIED;
break;
}
case MONO_CEE_UNALIGNED_:
ins_flag |= MONO_INST_UNALIGNED;
/* FIXME: record alignment? we can assume 1 for now */
break;
case MONO_CEE_VOLATILE_:
ins_flag |= MONO_INST_VOLATILE;
break;
case MONO_CEE_TAIL_:
ins_flag |= MONO_INST_TAILCALL;
cfg->flags |= MONO_CFG_HAS_TAILCALL;
/* Can't inline tailcalls at this time */
inline_costs += 100000;
break;
case MONO_CEE_INITOBJ:
--sp;
klass = mini_get_class (method, token, generic_context);
CHECK_TYPELOAD (klass);
if (mini_class_is_reference (klass))
MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
else
mini_emit_initobj (cfg, *sp, NULL, klass);
inline_costs += 1;
break;
case MONO_CEE_CONSTRAINED_:
constrained_class = mini_get_class (method, token, generic_context);
CHECK_TYPELOAD (constrained_class);
ins_has_side_effect = FALSE;
break;
case MONO_CEE_CPBLK:
sp -= 3;
mini_emit_memory_copy_bytes (cfg, sp [0], sp [1], sp [2], ins_flag);
ins_flag = 0;
inline_costs += 1;
break;
case MONO_CEE_INITBLK:
sp -= 3;
mini_emit_memory_init_bytes (cfg, sp [0], sp [1], sp [2], ins_flag);
ins_flag = 0;
inline_costs += 1;
break;
case MONO_CEE_NO_:
if (ip [2] & CEE_NO_TYPECHECK)
ins_flag |= MONO_INST_NOTYPECHECK;
if (ip [2] & CEE_NO_RANGECHECK)
ins_flag |= MONO_INST_NORANGECHECK;
if (ip [2] & CEE_NO_NULLCHECK)
ins_flag |= MONO_INST_NONULLCHECK;
break;
case MONO_CEE_RETHROW: {
MonoInst *load;
int handler_offset = -1;
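/* Find the innermost non-finally handler containing this rethrow. */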
for (i = 0; i < header->num_clauses; ++i) {
MonoExceptionClause *clause = &header->clauses [i];
if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
handler_offset = clause->handler_offset;
break;
}
}
cfg->cbb->flags |= BB_EXCEPTION_UNSAFE;
if (handler_offset == -1)
UNVERIFIED;
EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
MONO_INST_NEW (cfg, ins, OP_RETHROW);
ins->sreg1 = load->dreg;
MONO_ADD_INS (cfg->cbb, ins);
MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
MONO_ADD_INS (cfg->cbb, ins);
sp = stack_start;
link_bblock (cfg, cfg->cbb, end_bblock);
start_new_bblock = 1;
break;
}
case MONO_CEE_MONO_RETHROW: {
if (sp [-1]->type != STACK_OBJ)
UNVERIFIED;
MONO_INST_NEW (cfg, ins, OP_RETHROW);
--sp;
ins->sreg1 = sp [0]->dreg;
cfg->cbb->out_of_line = TRUE;
MONO_ADD_INS (cfg->cbb, ins);
MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
MONO_ADD_INS (cfg->cbb, ins);
sp = stack_start;
link_bblock (cfg, cfg->cbb, end_bblock);
start_new_bblock = 1;
/* This can complicate code generation for llvm since the return value might not be defined */
if (COMPILE_LLVM (cfg))
INLINE_FAILURE ("mono_rethrow");
break;
}
case MONO_CEE_SIZEOF: {
guint32 val;
int ialign;
if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC && !image_is_dynamic (m_class_get_image (method->klass)) && !generic_context) {
MonoType *type = mono_type_create_from_typespec_checked (image, token, cfg->error);
CHECK_CFG_ERROR;
val = mono_type_size (type, &ialign);
EMIT_NEW_ICONST (cfg, ins, val);
} else {
MonoClass *klass = mini_get_class (method, token, generic_context);
CHECK_TYPELOAD (klass);
if (mini_is_gsharedvt_klass (klass)) {
ins = mini_emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_SIZEOF);
ins->type = STACK_I4;
} else {
val = mono_type_size (m_class_get_byval_arg (klass), &ialign);
EMIT_NEW_ICONST (cfg, ins, val);
}
}
*sp++ = ins;
break;
}
case MONO_CEE_REFANYTYPE: {
MonoInst *src_var, *src;
GSHAREDVT_FAILURE (il_op);
--sp;
// FIXME:
src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
if (!src_var)
src_var = mono_compile_create_var_for_vreg (cfg, m_class_get_byval_arg (mono_defaults.typed_reference_class), OP_LOCAL, sp [0]->dreg);
EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, m_class_get_byval_arg (mono_defaults.typehandle_class), src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type));
*sp++ = ins;
break;
}
case MONO_CEE_READONLY_:
readonly = TRUE;
break;
case MONO_CEE_UNUSED56:
case MONO_CEE_UNUSED57:
case MONO_CEE_UNUSED70:
case MONO_CEE_UNUSED:
case MONO_CEE_UNUSED99:
case MONO_CEE_UNUSED58:
case MONO_CEE_UNUSED1:
UNVERIFIED;
default:
g_warning ("opcode 0x%02x not handled", il_op);
UNVERIFIED;
}
if (ins_has_side_effect)
cfg->cbb->flags |= BB_HAS_SIDE_EFFECTS;
}
if (start_new_bblock != 1)
UNVERIFIED;
cfg->cbb->cil_length = ip - cfg->cbb->cil_code;
if (cfg->cbb->next_bb) {
/* This could already be set because of inlining, #693905 */
MonoBasicBlock *bb = cfg->cbb;
while (bb->next_bb)
bb = bb->next_bb;
bb->next_bb = end_bblock;
} else {
cfg->cbb->next_bb = end_bblock;
}
#if defined(TARGET_POWERPC) || defined(TARGET_X86)
if (cfg->compile_aot)
/* FIXME: The plt slots require a GOT var even if the method doesn't use it */
mono_get_got_var (cfg);
#endif
#ifdef TARGET_WASM
if (cfg->lmf_var && !cfg->deopt) {
// mini_llvmonly_pop_lmf () might be called before emit_push_lmf () so initialize the LMF
cfg->cbb = init_localsbb;
EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
int lmf_reg = ins->dreg;
EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_IMM, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), 0);
}
#endif
if (cfg->method == method && cfg->got_var)
mono_emit_load_got_addr (cfg);
if (init_localsbb) {
cfg->cbb = init_localsbb;
cfg->ip = NULL;
for (i = 0; i < header->num_locals; ++i) {
/*
* Vtype initialization might need to be done after CEE_JIT_ATTACH, since it can make calls to memset (),
* which need the trampoline code to work.
*/
if (MONO_TYPE_ISSTRUCT (header->locals [i]))
cfg->cbb = init_localsbb2;
else
cfg->cbb = init_localsbb;
emit_init_local (cfg, i, header->locals [i], init_locals);
}
}
if (cfg->init_ref_vars && cfg->method == method) {
/* Emit initialization for ref vars */
// FIXME: Avoid duplicate initialization for IL locals.
for (i = 0; i < cfg->num_varinfo; ++i) {
MonoInst *ins = cfg->varinfo [i];
if (ins->opcode == OP_LOCAL && ins->type == STACK_OBJ)
MONO_EMIT_NEW_PCONST (cfg, ins->dreg, NULL);
}
}
if (cfg->lmf_var && cfg->method == method && !cfg->llvm_only) {
cfg->cbb = init_localsbb;
emit_push_lmf (cfg);
}
/* emit profiler enter code after a jit attach if there is one */
cfg->cbb = init_localsbb2;
mini_profiler_emit_enter (cfg);
cfg->cbb = init_localsbb;
if (seq_points) {
MonoBasicBlock *bb;
/*
* Make seq points at backward branch targets interruptable.
*/
for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
if (bb->code && bb->in_count > 1 && bb->code->opcode == OP_SEQ_POINT)
bb->code->flags |= MONO_INST_SINGLE_STEP_LOC;
}
/* Add a sequence point for method entry/exit events */
if (seq_points && cfg->gen_sdb_seq_points) {
NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE);
MONO_ADD_INS (init_localsbb, ins);
NEW_SEQ_POINT (cfg, ins, METHOD_EXIT_IL_OFFSET, FALSE);
MONO_ADD_INS (cfg->bb_exit, ins);
}
/*
* Add seq points for IL offsets which have line number info but for which no seq point was generated
* during JITting because the code they refer to was dead (#11880).
*/
if (sym_seq_points) {
for (i = 0; i < header->code_size; ++i) {
if (mono_bitset_test_fast (seq_point_locs, i) && !mono_bitset_test_fast (seq_point_set_locs, i)) {
MonoInst *ins;
NEW_SEQ_POINT (cfg, ins, i, FALSE);
mono_add_seq_point (cfg, NULL, ins, SEQ_POINT_NATIVE_OFFSET_DEAD_CODE);
}
}
}
cfg->ip = NULL;
if (cfg->method == method) {
compute_bb_regions (cfg);
} else {
MonoBasicBlock *bb;
/* get_most_deep_clause () in mini-llvm.c depends on this for inlined bblocks */
for (bb = start_bblock; bb != end_bblock; bb = bb->next_bb) {
bb->real_offset = inline_offset;
}
}
if (inline_costs < 0) {
char *mname;
/* Method is too large */
mname = mono_method_full_name (method, TRUE);
mono_cfg_set_exception_invalid_program (cfg, g_strdup_printf ("Method %s is too complex.", mname));
g_free (mname);
}
if ((cfg->verbose_level > 2) && (cfg->method == method))
mono_print_code (cfg, "AFTER METHOD-TO-IR");
goto cleanup;
mono_error_exit:
if (cfg->verbose_level > 3)
g_print ("exiting due to error");
g_assert (!is_ok (cfg->error));
goto cleanup;
exception_exit:
if (cfg->verbose_level > 3)
g_print ("exiting due to exception");
g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
goto cleanup;
unverified:
if (cfg->verbose_level > 3)
g_print ("exiting due to invalid il");
set_exception_type_from_invalid_il (cfg, method, ip);
goto cleanup;
cleanup:
g_slist_free (class_inits);
mono_basic_block_free (original_bb);
cfg->dont_inline = g_list_remove (cfg->dont_inline, method);
if (cfg->exception_type)
return -1;
else
return inline_costs;
}
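/*
 * store_membase_reg_to_store_membase_imm:
 *
 *   Return the STORE_MEMBASE_IMM variant of OPCODE. Asserts if OPCODE has no such variant.
 */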
static int
store_membase_reg_to_store_membase_imm (int opcode)
{
switch (opcode) {
case OP_STORE_MEMBASE_REG:
return OP_STORE_MEMBASE_IMM;
case OP_STOREI1_MEMBASE_REG:
return OP_STOREI1_MEMBASE_IMM;
case OP_STOREI2_MEMBASE_REG:
return OP_STOREI2_MEMBASE_IMM;
case OP_STOREI4_MEMBASE_REG:
return OP_STOREI4_MEMBASE_IMM;
case OP_STOREI8_MEMBASE_REG:
return OP_STOREI8_MEMBASE_IMM;
default:
g_assert_not_reached ();
}
return -1;
}
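/*
 * mono_op_to_op_imm:
 *
 *   Return the immediate variant of OPCODE, or -1 if none exists.
 */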
int
mono_op_to_op_imm (int opcode)
{
switch (opcode) {
case OP_IADD:
return OP_IADD_IMM;
case OP_ISUB:
return OP_ISUB_IMM;
case OP_IDIV:
return OP_IDIV_IMM;
case OP_IDIV_UN:
return OP_IDIV_UN_IMM;
case OP_IREM:
return OP_IREM_IMM;
case OP_IREM_UN:
return OP_IREM_UN_IMM;
case OP_IMUL:
return OP_IMUL_IMM;
case OP_IAND:
return OP_IAND_IMM;
case OP_IOR:
return OP_IOR_IMM;
case OP_IXOR:
return OP_IXOR_IMM;
case OP_ISHL:
return OP_ISHL_IMM;
case OP_ISHR:
return OP_ISHR_IMM;
case OP_ISHR_UN:
return OP_ISHR_UN_IMM;
case OP_LADD:
return OP_LADD_IMM;
case OP_LSUB:
return OP_LSUB_IMM;
case OP_LAND:
return OP_LAND_IMM;
case OP_LOR:
return OP_LOR_IMM;
case OP_LXOR:
return OP_LXOR_IMM;
case OP_LSHL:
return OP_LSHL_IMM;
case OP_LSHR:
return OP_LSHR_IMM;
case OP_LSHR_UN:
return OP_LSHR_UN_IMM;
#if SIZEOF_REGISTER == 8
case OP_LMUL:
return OP_LMUL_IMM;
case OP_LREM:
return OP_LREM_IMM;
#endif
case OP_COMPARE:
return OP_COMPARE_IMM;
case OP_ICOMPARE:
return OP_ICOMPARE_IMM;
case OP_LCOMPARE:
return OP_LCOMPARE_IMM;
case OP_STORE_MEMBASE_REG:
return OP_STORE_MEMBASE_IMM;
case OP_STOREI1_MEMBASE_REG:
return OP_STOREI1_MEMBASE_IMM;
case OP_STOREI2_MEMBASE_REG:
return OP_STOREI2_MEMBASE_IMM;
case OP_STOREI4_MEMBASE_REG:
return OP_STOREI4_MEMBASE_IMM;
#if defined(TARGET_X86) || defined (TARGET_AMD64)
case OP_X86_PUSH:
return OP_X86_PUSH_IMM;
case OP_X86_COMPARE_MEMBASE_REG:
return OP_X86_COMPARE_MEMBASE_IMM;
#endif
#if defined(TARGET_AMD64)
case OP_AMD64_ICOMPARE_MEMBASE_REG:
return OP_AMD64_ICOMPARE_MEMBASE_IMM;
#endif
case OP_VOIDCALL_REG:
return OP_VOIDCALL;
case OP_CALL_REG:
return OP_CALL;
case OP_LCALL_REG:
return OP_LCALL;
case OP_FCALL_REG:
return OP_FCALL;
case OP_LOCALLOC:
return OP_LOCALLOC_IMM;
}
return -1;
}
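/*
 * Usage sketch (hypothetical, for illustration only): a constant propagation
 * pass can use mono_op_to_op_imm () to fold an OP_ICONST operand into its
 * consumer when the target has an immediate form; -1 means no immediate
 * variant exists. 'defs' below is an assumed sreg -> defining-ins map.
 *
 *   MonoInst *def = defs [ins->sreg2];
 *   if (def && def->opcode == OP_ICONST) {
 *       int opcode2 = mono_op_to_op_imm (ins->opcode);
 *       if (opcode2 != -1) {
 *           ins->opcode = opcode2;
 *           ins->inst_imm = def->inst_c0;
 *           ins->sreg2 = -1;
 *       }
 *   }
 */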
int
mono_load_membase_to_load_mem (int opcode)
{
// FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
#if defined(TARGET_X86) || defined(TARGET_AMD64)
switch (opcode) {
case OP_LOAD_MEMBASE:
return OP_LOAD_MEM;
case OP_LOADU1_MEMBASE:
return OP_LOADU1_MEM;
case OP_LOADU2_MEMBASE:
return OP_LOADU2_MEM;
case OP_LOADI4_MEMBASE:
return OP_LOADI4_MEM;
case OP_LOADU4_MEMBASE:
return OP_LOADU4_MEM;
#if SIZEOF_REGISTER == 8
case OP_LOADI8_MEMBASE:
return OP_LOADI8_MEM;
#endif
}
#endif
return -1;
}
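/*
 * Example (illustrative): when the base register of a load is known to hold a
 * constant address, the load can be turned into an absolute-address form on
 * targets which support it:
 *
 *     R9 = iconst 0x7f0000001000
 *     loadi4_membase R8 <- [R9 + 8]
 *  =>
 *     loadi4_mem     R8 <- [0x7f0000001008]
 *
 * A -1 return means the conversion is not supported for this opcode/target.
 */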
static int
op_to_op_dest_membase (int store_opcode, int opcode)
{
#if defined(TARGET_X86)
if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
return -1;
switch (opcode) {
case OP_IADD:
return OP_X86_ADD_MEMBASE_REG;
case OP_ISUB:
return OP_X86_SUB_MEMBASE_REG;
case OP_IAND:
return OP_X86_AND_MEMBASE_REG;
case OP_IOR:
return OP_X86_OR_MEMBASE_REG;
case OP_IXOR:
return OP_X86_XOR_MEMBASE_REG;
case OP_ADD_IMM:
case OP_IADD_IMM:
return OP_X86_ADD_MEMBASE_IMM;
case OP_SUB_IMM:
case OP_ISUB_IMM:
return OP_X86_SUB_MEMBASE_IMM;
case OP_AND_IMM:
case OP_IAND_IMM:
return OP_X86_AND_MEMBASE_IMM;
case OP_OR_IMM:
case OP_IOR_IMM:
return OP_X86_OR_MEMBASE_IMM;
case OP_XOR_IMM:
case OP_IXOR_IMM:
return OP_X86_XOR_MEMBASE_IMM;
case OP_MOVE:
return OP_NOP;
}
#endif
#if defined(TARGET_AMD64)
if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
return -1;
switch (opcode) {
case OP_IADD:
return OP_X86_ADD_MEMBASE_REG;
case OP_ISUB:
return OP_X86_SUB_MEMBASE_REG;
case OP_IAND:
return OP_X86_AND_MEMBASE_REG;
case OP_IOR:
return OP_X86_OR_MEMBASE_REG;
case OP_IXOR:
return OP_X86_XOR_MEMBASE_REG;
case OP_IADD_IMM:
return OP_X86_ADD_MEMBASE_IMM;
case OP_ISUB_IMM:
return OP_X86_SUB_MEMBASE_IMM;
case OP_IAND_IMM:
return OP_X86_AND_MEMBASE_IMM;
case OP_IOR_IMM:
return OP_X86_OR_MEMBASE_IMM;
case OP_IXOR_IMM:
return OP_X86_XOR_MEMBASE_IMM;
case OP_LADD:
return OP_AMD64_ADD_MEMBASE_REG;
case OP_LSUB:
return OP_AMD64_SUB_MEMBASE_REG;
case OP_LAND:
return OP_AMD64_AND_MEMBASE_REG;
case OP_LOR:
return OP_AMD64_OR_MEMBASE_REG;
case OP_LXOR:
return OP_AMD64_XOR_MEMBASE_REG;
case OP_ADD_IMM:
case OP_LADD_IMM:
return OP_AMD64_ADD_MEMBASE_IMM;
case OP_SUB_IMM:
case OP_LSUB_IMM:
return OP_AMD64_SUB_MEMBASE_IMM;
case OP_AND_IMM:
case OP_LAND_IMM:
return OP_AMD64_AND_MEMBASE_IMM;
case OP_OR_IMM:
case OP_LOR_IMM:
return OP_AMD64_OR_MEMBASE_IMM;
case OP_XOR_IMM:
case OP_LXOR_IMM:
return OP_AMD64_XOR_MEMBASE_IMM;
case OP_MOVE:
return OP_NOP;
}
#endif
return -1;
}
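/*
 * Illustrative example: op_to_op_dest_membase () lets mono_spill_global_vars ()
 * collapse a load/modify/store sequence on a stack-allocated variable into a
 * single read-modify-write opcode:
 *
 *     loadi4_membase      R10 <- [%rbp - 8]
 *     int_add             R10 <- R10 + R11
 *     storei4_membase_reg [%rbp - 8] <- R10
 *  =>
 *     x86_add_membase_reg [%rbp - 8] += R11
 *
 * This is only valid when dreg == sreg1, which the caller checks first.
 */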
static int
op_to_op_store_membase (int store_opcode, int opcode)
{
#if defined(TARGET_X86) || defined(TARGET_AMD64)
switch (opcode) {
	case OP_ICEQ:
		if (store_opcode == OP_STOREI1_MEMBASE_REG)
			return OP_X86_SETEQ_MEMBASE;
		break;
	case OP_CNE:
		if (store_opcode == OP_STOREI1_MEMBASE_REG)
			return OP_X86_SETNE_MEMBASE;
		break;
}
#endif
return -1;
}
static int
op_to_op_src1_membase (MonoCompile *cfg, int load_opcode, int opcode)
{
#ifdef TARGET_X86
/* FIXME: This has sign extension issues */
/*
if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
return OP_X86_COMPARE_MEMBASE8_IMM;
*/
if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
return -1;
switch (opcode) {
case OP_X86_PUSH:
return OP_X86_PUSH_MEMBASE;
case OP_COMPARE_IMM:
case OP_ICOMPARE_IMM:
return OP_X86_COMPARE_MEMBASE_IMM;
case OP_COMPARE:
case OP_ICOMPARE:
return OP_X86_COMPARE_MEMBASE_REG;
}
#endif
#ifdef TARGET_AMD64
/* FIXME: This has sign extension issues */
/*
if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
return OP_X86_COMPARE_MEMBASE8_IMM;
*/
switch (opcode) {
case OP_X86_PUSH:
if ((load_opcode == OP_LOAD_MEMBASE && !cfg->backend->ilp32) || (load_opcode == OP_LOADI8_MEMBASE))
return OP_X86_PUSH_MEMBASE;
break;
/* FIXME: This only works for 32 bit immediates
case OP_COMPARE_IMM:
case OP_LCOMPARE_IMM:
if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
return OP_AMD64_COMPARE_MEMBASE_IMM;
*/
case OP_ICOMPARE_IMM:
if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
return OP_AMD64_ICOMPARE_MEMBASE_IMM;
break;
case OP_COMPARE:
case OP_LCOMPARE:
if (cfg->backend->ilp32 && load_opcode == OP_LOAD_MEMBASE)
return OP_AMD64_ICOMPARE_MEMBASE_REG;
if ((load_opcode == OP_LOAD_MEMBASE && !cfg->backend->ilp32) || (load_opcode == OP_LOADI8_MEMBASE))
return OP_AMD64_COMPARE_MEMBASE_REG;
break;
case OP_ICOMPARE:
if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
return OP_AMD64_ICOMPARE_MEMBASE_REG;
break;
}
#endif
return -1;
}
static int
op_to_op_src2_membase (MonoCompile *cfg, int load_opcode, int opcode)
{
#ifdef TARGET_X86
if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
return -1;
switch (opcode) {
case OP_COMPARE:
case OP_ICOMPARE:
return OP_X86_COMPARE_REG_MEMBASE;
case OP_IADD:
return OP_X86_ADD_REG_MEMBASE;
case OP_ISUB:
return OP_X86_SUB_REG_MEMBASE;
case OP_IAND:
return OP_X86_AND_REG_MEMBASE;
case OP_IOR:
return OP_X86_OR_REG_MEMBASE;
case OP_IXOR:
return OP_X86_XOR_REG_MEMBASE;
}
#endif
#ifdef TARGET_AMD64
if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE && cfg->backend->ilp32)) {
switch (opcode) {
case OP_ICOMPARE:
return OP_AMD64_ICOMPARE_REG_MEMBASE;
case OP_IADD:
return OP_X86_ADD_REG_MEMBASE;
case OP_ISUB:
return OP_X86_SUB_REG_MEMBASE;
case OP_IAND:
return OP_X86_AND_REG_MEMBASE;
case OP_IOR:
return OP_X86_OR_REG_MEMBASE;
case OP_IXOR:
return OP_X86_XOR_REG_MEMBASE;
}
} else if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE && !cfg->backend->ilp32)) {
switch (opcode) {
case OP_COMPARE:
case OP_LCOMPARE:
return OP_AMD64_COMPARE_REG_MEMBASE;
case OP_LADD:
return OP_AMD64_ADD_REG_MEMBASE;
case OP_LSUB:
return OP_AMD64_SUB_REG_MEMBASE;
case OP_LAND:
return OP_AMD64_AND_REG_MEMBASE;
case OP_LOR:
return OP_AMD64_OR_REG_MEMBASE;
case OP_LXOR:
return OP_AMD64_XOR_REG_MEMBASE;
}
}
#endif
return -1;
}
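/*
 * Sketch of the source-operand fusion done by the two helpers above
 * (illustrative only): instead of loading a stack variable into a temporary
 * lvreg before using it,
 *
 *     loadi4_membase R10 <- [%rbp - 8]
 *     int_add        R9  <- R9 + R10
 *  =>
 *     x86_add_reg_membase R9 += [%rbp - 8]
 *
 * srcindex 0 goes through op_to_op_src1_membase () and srcindex 1 through
 * op_to_op_src2_membase (); see the SREGS section of mono_spill_global_vars ().
 */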
int
mono_op_to_op_imm_noemul (int opcode)
{
MONO_DISABLE_WARNING(4065) // switch with default but no case
switch (opcode) {
#if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
case OP_LSHR:
case OP_LSHL:
case OP_LSHR_UN:
return -1;
#endif
#if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
case OP_IDIV:
case OP_IDIV_UN:
case OP_IREM:
case OP_IREM_UN:
return -1;
#endif
#if defined(MONO_ARCH_EMULATE_MUL_DIV)
case OP_IMUL:
return -1;
#endif
default:
return mono_op_to_op_imm (opcode);
}
MONO_RESTORE_WARNING
}
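/*
 * Example of why the _noemul variant exists (editor's illustration): on a
 * 32 bit target with MONO_ARCH_EMULATE_MUL_DIV defined, OP_IDIV is lowered to
 * a runtime call, and a call cannot take an immediate operand, so
 *
 *     mono_op_to_op_imm_noemul (OP_IDIV)
 *
 * returns -1 where mono_op_to_op_imm (OP_IDIV) would return OP_IDIV_IMM.
 */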
gboolean
mono_op_no_side_effects (int opcode)
{
/* FIXME: Add more instructions */
/* INEG sets the condition codes, and the OP_LNEG decomposition depends on this on x86 */
switch (opcode) {
case OP_MOVE:
case OP_FMOVE:
case OP_VMOVE:
case OP_XMOVE:
case OP_RMOVE:
case OP_VZERO:
case OP_XZERO:
case OP_ICONST:
case OP_I8CONST:
case OP_ADD_IMM:
case OP_R8CONST:
case OP_LADD_IMM:
case OP_ISUB_IMM:
case OP_IADD_IMM:
case OP_LNEG:
case OP_ISUB:
case OP_CMOV_IGE:
case OP_ISHL_IMM:
case OP_ISHR_IMM:
case OP_ISHR_UN_IMM:
case OP_IAND_IMM:
case OP_ICONV_TO_U1:
case OP_ICONV_TO_I1:
case OP_SEXT_I4:
case OP_LCONV_TO_U1:
case OP_ICONV_TO_U2:
case OP_ICONV_TO_I2:
case OP_LCONV_TO_I2:
case OP_LDADDR:
case OP_PHI:
case OP_NOP:
case OP_ZEXT_I4:
case OP_NOT_NULL:
case OP_IL_SEQ_POINT:
case OP_RTTYPE:
return TRUE;
default:
return FALSE;
}
}
gboolean
mono_ins_no_side_effects (MonoInst *ins)
{
if (mono_op_no_side_effects (ins->opcode))
return TRUE;
if (ins->opcode == OP_AOTCONST) {
MonoJumpInfoType type = (MonoJumpInfoType)(intptr_t)ins->inst_p1;
// Some AOTCONSTs have side effects
switch (type) {
case MONO_PATCH_INFO_TYPE_FROM_HANDLE:
case MONO_PATCH_INFO_LDSTR:
case MONO_PATCH_INFO_VTABLE:
case MONO_PATCH_INFO_METHOD_RGCTX:
return TRUE;
}
}
return FALSE;
}
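/*
 * Usage sketch (hypothetical): a simple dead code elimination pass can drop
 * instructions whose result is unused, but only when they are side effect
 * free. 'is_used' is an assumed per-vreg use bitmap computed beforehand.
 *
 *   MONO_BB_FOR_EACH_INS (bb, ins) {
 *       if (ins->dreg != -1 && !is_used [ins->dreg] && mono_ins_no_side_effects (ins))
 *           NULLIFY_INS (ins);   // turns the instruction into an OP_NOP
 *   }
 */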
/**
* mono_handle_global_vregs:
*
* Make vregs used in more than one bblock 'global', i.e. allocate a variable
* for them.
*/
void
mono_handle_global_vregs (MonoCompile *cfg)
{
gint32 *vreg_to_bb;
MonoBasicBlock *bb;
int i, pos;
	vreg_to_bb = (gint32 *)mono_mempool_alloc0 (cfg->mempool, sizeof (gint32) * (cfg->next_vreg + 1));
#ifdef MONO_ARCH_SIMD_INTRINSICS
if (cfg->uses_simd_intrinsics & MONO_CFG_USES_SIMD_INTRINSICS_SIMPLIFY_INDIRECTION)
mono_simd_simplify_indirection (cfg);
#endif
/* Find local vregs used in more than one bb */
for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
MonoInst *ins = bb->code;
int block_num = bb->block_num;
if (cfg->verbose_level > 2)
printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
cfg->cbb = bb;
for (; ins; ins = ins->next) {
const char *spec = INS_INFO (ins->opcode);
int regtype = 0, regindex;
gint32 prev_bb;
if (G_UNLIKELY (cfg->verbose_level > 2))
mono_print_ins (ins);
g_assert (ins->opcode >= MONO_CEE_LAST);
for (regindex = 0; regindex < 4; regindex ++) {
int vreg = 0;
if (regindex == 0) {
regtype = spec [MONO_INST_DEST];
if (regtype == ' ')
continue;
vreg = ins->dreg;
} else if (regindex == 1) {
regtype = spec [MONO_INST_SRC1];
if (regtype == ' ')
continue;
vreg = ins->sreg1;
} else if (regindex == 2) {
regtype = spec [MONO_INST_SRC2];
if (regtype == ' ')
continue;
vreg = ins->sreg2;
} else if (regindex == 3) {
regtype = spec [MONO_INST_SRC3];
if (regtype == ' ')
continue;
vreg = ins->sreg3;
}
#if SIZEOF_REGISTER == 4
/* In the LLVM case, the long opcodes are not decomposed */
if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
/*
* Since some instructions reference the original long vreg,
* and some reference the two component vregs, it is quite hard
* to determine when it needs to be global. So be conservative.
*/
if (!get_vreg_to_inst (cfg, vreg)) {
mono_compile_create_var_for_vreg (cfg, m_class_get_byval_arg (mono_defaults.int64_class), OP_LOCAL, vreg);
if (cfg->verbose_level > 2)
printf ("LONG VREG R%d made global.\n", vreg);
}
/*
* Make the component vregs volatile since the optimizations can
* get confused otherwise.
*/
get_vreg_to_inst (cfg, MONO_LVREG_LS (vreg))->flags |= MONO_INST_VOLATILE;
get_vreg_to_inst (cfg, MONO_LVREG_MS (vreg))->flags |= MONO_INST_VOLATILE;
}
#endif
g_assert (vreg != -1);
prev_bb = vreg_to_bb [vreg];
if (prev_bb == 0) {
/* 0 is a valid block num */
vreg_to_bb [vreg] = block_num + 1;
} else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
continue;
if (!get_vreg_to_inst (cfg, vreg)) {
if (G_UNLIKELY (cfg->verbose_level > 2))
printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
switch (regtype) {
case 'i':
if (vreg_is_ref (cfg, vreg))
mono_compile_create_var_for_vreg (cfg, mono_get_object_type (), OP_LOCAL, vreg);
else
mono_compile_create_var_for_vreg (cfg, mono_get_int_type (), OP_LOCAL, vreg);
break;
case 'l':
mono_compile_create_var_for_vreg (cfg, m_class_get_byval_arg (mono_defaults.int64_class), OP_LOCAL, vreg);
break;
case 'f':
mono_compile_create_var_for_vreg (cfg, m_class_get_byval_arg (mono_defaults.double_class), OP_LOCAL, vreg);
break;
case 'v':
case 'x':
mono_compile_create_var_for_vreg (cfg, m_class_get_byval_arg (ins->klass), OP_LOCAL, vreg);
break;
default:
g_assert_not_reached ();
}
}
/* Flag as having been used in more than one bb */
vreg_to_bb [vreg] = -1;
}
}
}
}
/* If a variable is used in only one bblock, convert it into a local vreg */
for (i = 0; i < cfg->num_varinfo; i++) {
MonoInst *var = cfg->varinfo [i];
MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
switch (var->type) {
case STACK_I4:
case STACK_OBJ:
case STACK_PTR:
case STACK_MP:
case STACK_VTYPE:
#if SIZEOF_REGISTER == 8
case STACK_I8:
#endif
#if !defined(TARGET_X86)
/* Enabling this screws up the fp stack on x86 */
case STACK_R8:
#endif
if (mono_arch_is_soft_float ())
break;
/*
if (var->type == STACK_VTYPE && cfg->gsharedvt && mini_is_gsharedvt_variable_type (var->inst_vtype))
break;
*/
/* Arguments are implicitly global */
/* Putting R4 vars into registers doesn't work currently */
/* The gsharedvt vars are implicitly referenced by ldaddr opcodes, but those opcodes are only generated later */
if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (m_class_get_byval_arg (var->klass)->type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg && var != cfg->gsharedvt_info_var && var != cfg->gsharedvt_locals_var && var != cfg->lmf_addr_var) {
/*
			 * Make sure the variable's liveness interval doesn't contain a call, since
* that would cause the lvreg to be spilled, making the whole optimization
* useless.
*/
/* This is too slow for JIT compilation */
#if 0
if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
MonoInst *ins;
int def_index, call_index, ins_index;
gboolean spilled = FALSE;
def_index = -1;
call_index = -1;
ins_index = 0;
for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
const char *spec = INS_INFO (ins->opcode);
if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
def_index = ins_index;
if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
					((spec [MONO_INST_SRC2] != ' ') && (ins->sreg2 == var->dreg))) {
if (call_index > def_index) {
spilled = TRUE;
break;
}
}
if (MONO_IS_CALL (ins))
call_index = ins_index;
ins_index ++;
}
if (spilled)
break;
}
#endif
if (G_UNLIKELY (cfg->verbose_level > 2))
printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
var->flags |= MONO_INST_IS_DEAD;
cfg->vreg_to_inst [var->dreg] = NULL;
}
break;
}
}
/*
* Compress the varinfo and vars tables so the liveness computation is faster and
* takes up less space.
*/
pos = 0;
for (i = 0; i < cfg->num_varinfo; ++i) {
MonoInst *var = cfg->varinfo [i];
if (pos < i && cfg->locals_start == i)
cfg->locals_start = pos;
if (!(var->flags & MONO_INST_IS_DEAD)) {
if (pos < i) {
cfg->varinfo [pos] = cfg->varinfo [i];
cfg->varinfo [pos]->inst_c0 = pos;
memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
cfg->vars [pos].idx = pos;
#if SIZEOF_REGISTER == 4
if (cfg->varinfo [pos]->type == STACK_I8) {
/* Modify the two component vars too */
MonoInst *var1;
var1 = get_vreg_to_inst (cfg, MONO_LVREG_LS (cfg->varinfo [pos]->dreg));
var1->inst_c0 = pos;
var1 = get_vreg_to_inst (cfg, MONO_LVREG_MS (cfg->varinfo [pos]->dreg));
var1->inst_c0 = pos;
}
#endif
}
pos ++;
}
}
cfg->num_varinfo = pos;
if (cfg->locals_start > cfg->num_varinfo)
cfg->locals_start = cfg->num_varinfo;
}
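/*
 * Worked example (illustrative): given
 *
 *   BB1:  iconst  R10 <- 1
 *         br BB2
 *   BB2:  int_add R11 <- R10 + R12
 *
 * R10 is referenced in both BB1 and BB2, so the pass above makes it global by
 * creating an OP_LOCAL variable for it, while R11, used only inside BB2,
 * remains a local vreg and is later assigned a hard register by the local
 * register allocator.
 */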
/*
* mono_allocate_gsharedvt_vars:
*
* Allocate variables with gsharedvt types to entries in the MonoGSharedVtMethodRuntimeInfo.entries array.
* Initialize cfg->gsharedvt_vreg_to_idx with the mapping between vregs and indexes.
*/
void
mono_allocate_gsharedvt_vars (MonoCompile *cfg)
{
int i;
cfg->gsharedvt_vreg_to_idx = (int *)mono_mempool_alloc0 (cfg->mempool, sizeof (int) * cfg->next_vreg);
for (i = 0; i < cfg->num_varinfo; ++i) {
MonoInst *ins = cfg->varinfo [i];
int idx;
if (mini_is_gsharedvt_variable_type (ins->inst_vtype)) {
if (i >= cfg->locals_start) {
/* Local */
idx = get_gsharedvt_info_slot (cfg, ins->inst_vtype, MONO_RGCTX_INFO_LOCAL_OFFSET);
cfg->gsharedvt_vreg_to_idx [ins->dreg] = idx + 1;
ins->opcode = OP_GSHAREDVT_LOCAL;
ins->inst_imm = idx;
} else {
/* Arg */
cfg->gsharedvt_vreg_to_idx [ins->dreg] = -1;
ins->opcode = OP_GSHAREDVT_ARG_REGOFFSET;
}
}
}
}
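/*
 * Runtime-side sketch (illustrative): at run time,
 * MonoGSharedVtMethodRuntimeInfo.entries [idx] holds the frame offset of the
 * gsharedvt local whose slot index was allocated above, so its address is
 *
 *   locals_area_base + info->entries [idx]
 *
 * which matches the load/load/OP_PADD sequence emitted for OP_LDADDR on
 * OP_GSHAREDVT_LOCAL variables in mono_spill_global_vars ().
 */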
/**
* mono_spill_global_vars:
*
* Generate spill code for variables which are not allocated to registers,
* and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
* code is generated which could be optimized by the local optimization passes.
*/
void
mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
{
MonoBasicBlock *bb;
char spec2 [16];
int orig_next_vreg;
guint32 *vreg_to_lvreg;
guint32 *lvregs;
guint32 i, lvregs_len, lvregs_size;
gboolean dest_has_lvreg = FALSE;
MonoStackType stacktypes [128];
MonoInst **live_range_start, **live_range_end;
MonoBasicBlock **live_range_start_bb, **live_range_end_bb;
*need_local_opts = FALSE;
memset (spec2, 0, sizeof (spec2));
/* FIXME: Move this function to mini.c */
stacktypes [(int)'i'] = STACK_PTR;
stacktypes [(int)'l'] = STACK_I8;
stacktypes [(int)'f'] = STACK_R8;
#ifdef MONO_ARCH_SIMD_INTRINSICS
stacktypes [(int)'x'] = STACK_VTYPE;
#endif
#if SIZEOF_REGISTER == 4
/* Create MonoInsts for longs */
for (i = 0; i < cfg->num_varinfo; i++) {
MonoInst *ins = cfg->varinfo [i];
if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
switch (ins->type) {
case STACK_R8:
case STACK_I8: {
MonoInst *tree;
if (ins->type == STACK_R8 && !COMPILE_SOFT_FLOAT (cfg))
break;
g_assert (ins->opcode == OP_REGOFFSET);
tree = get_vreg_to_inst (cfg, MONO_LVREG_LS (ins->dreg));
g_assert (tree);
tree->opcode = OP_REGOFFSET;
tree->inst_basereg = ins->inst_basereg;
tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
tree = get_vreg_to_inst (cfg, MONO_LVREG_MS (ins->dreg));
g_assert (tree);
tree->opcode = OP_REGOFFSET;
tree->inst_basereg = ins->inst_basereg;
tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
break;
}
default:
break;
}
}
}
#endif
if (cfg->compute_gc_maps) {
		/* registers need liveness info even for non-ref types */
for (i = 0; i < cfg->num_varinfo; i++) {
MonoInst *ins = cfg->varinfo [i];
if (ins->opcode == OP_REGVAR)
ins->flags |= MONO_INST_GC_TRACK;
}
}
/* FIXME: widening and truncation */
/*
* As an optimization, when a variable allocated to the stack is first loaded into
* an lvreg, we will remember the lvreg and use it the next time instead of loading
* the variable again.
*/
orig_next_vreg = cfg->next_vreg;
vreg_to_lvreg = (guint32 *)mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
lvregs_size = 1024;
lvregs = (guint32 *)mono_mempool_alloc (cfg->mempool, sizeof (guint32) * lvregs_size);
lvregs_len = 0;
/*
* These arrays contain the first and last instructions accessing a given
* variable.
* Since we emit bblocks in the same order we process them here, and we
* don't split live ranges, these will precisely describe the live range of
* the variable, i.e. the instruction range where a valid value can be found
	 * in the variable's location.
* The live range is computed using the liveness info computed by the liveness pass.
* We can't use vmv->range, since that is an abstract live range, and we need
* one which is instruction precise.
* FIXME: Variables used in out-of-line bblocks have a hole in their live range.
*/
/* FIXME: Only do this if debugging info is requested */
live_range_start = g_new0 (MonoInst*, cfg->next_vreg);
live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
/* Add spill loads/stores */
for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
MonoInst *ins;
if (cfg->verbose_level > 2)
printf ("\nSPILL BLOCK %d:\n", bb->block_num);
/* Clear vreg_to_lvreg array */
for (i = 0; i < lvregs_len; i++)
vreg_to_lvreg [lvregs [i]] = 0;
lvregs_len = 0;
cfg->cbb = bb;
MONO_BB_FOR_EACH_INS (bb, ins) {
const char *spec = INS_INFO (ins->opcode);
int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
gboolean store, no_lvreg;
int sregs [MONO_MAX_SRC_REGS];
if (G_UNLIKELY (cfg->verbose_level > 2))
mono_print_ins (ins);
if (ins->opcode == OP_NOP)
continue;
/*
* We handle LDADDR here as well, since it can only be decomposed
* when variable addresses are known.
*/
if (ins->opcode == OP_LDADDR) {
MonoInst *var = (MonoInst *)ins->inst_p0;
if (var->opcode == OP_VTARG_ADDR) {
/* Happens on SPARC/S390 where vtypes are passed by reference */
MonoInst *vtaddr = var->inst_left;
if (vtaddr->opcode == OP_REGVAR) {
ins->opcode = OP_MOVE;
ins->sreg1 = vtaddr->dreg;
}
					else if (vtaddr->opcode == OP_REGOFFSET) {
ins->opcode = OP_LOAD_MEMBASE;
ins->inst_basereg = vtaddr->inst_basereg;
ins->inst_offset = vtaddr->inst_offset;
} else
NOT_IMPLEMENTED;
} else if (cfg->gsharedvt && cfg->gsharedvt_vreg_to_idx [var->dreg] < 0) {
/* gsharedvt arg passed by ref */
g_assert (var->opcode == OP_GSHAREDVT_ARG_REGOFFSET);
ins->opcode = OP_LOAD_MEMBASE;
ins->inst_basereg = var->inst_basereg;
ins->inst_offset = var->inst_offset;
} else if (cfg->gsharedvt && cfg->gsharedvt_vreg_to_idx [var->dreg]) {
MonoInst *load, *load2, *load3;
int idx = cfg->gsharedvt_vreg_to_idx [var->dreg] - 1;
int reg1, reg2, reg3;
MonoInst *info_var = cfg->gsharedvt_info_var;
MonoInst *locals_var = cfg->gsharedvt_locals_var;
/*
* gsharedvt local.
* Compute the address of the local as gsharedvt_locals_var + gsharedvt_info_var->locals_offsets [idx].
*/
g_assert (var->opcode == OP_GSHAREDVT_LOCAL);
g_assert (info_var);
g_assert (locals_var);
/* Mark the instruction used to compute the locals var as used */
cfg->gsharedvt_locals_var_ins = NULL;
/* Load the offset */
if (info_var->opcode == OP_REGOFFSET) {
reg1 = alloc_ireg (cfg);
NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, reg1, info_var->inst_basereg, info_var->inst_offset);
} else if (info_var->opcode == OP_REGVAR) {
load = NULL;
reg1 = info_var->dreg;
} else {
g_assert_not_reached ();
}
reg2 = alloc_ireg (cfg);
NEW_LOAD_MEMBASE (cfg, load2, OP_LOADI4_MEMBASE, reg2, reg1, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * TARGET_SIZEOF_VOID_P));
/* Load the locals area address */
reg3 = alloc_ireg (cfg);
if (locals_var->opcode == OP_REGOFFSET) {
NEW_LOAD_MEMBASE (cfg, load3, OP_LOAD_MEMBASE, reg3, locals_var->inst_basereg, locals_var->inst_offset);
} else if (locals_var->opcode == OP_REGVAR) {
NEW_UNALU (cfg, load3, OP_MOVE, reg3, locals_var->dreg);
} else {
g_assert_not_reached ();
}
/* Compute the address */
ins->opcode = OP_PADD;
ins->sreg1 = reg3;
ins->sreg2 = reg2;
mono_bblock_insert_before_ins (bb, ins, load3);
mono_bblock_insert_before_ins (bb, load3, load2);
if (load)
mono_bblock_insert_before_ins (bb, load2, load);
} else {
g_assert (var->opcode == OP_REGOFFSET);
ins->opcode = OP_ADD_IMM;
ins->sreg1 = var->inst_basereg;
ins->inst_imm = var->inst_offset;
}
*need_local_opts = TRUE;
spec = INS_INFO (ins->opcode);
}
if (ins->opcode < MONO_CEE_LAST) {
mono_print_ins (ins);
g_assert_not_reached ();
}
/*
* Store opcodes have destbasereg in the dreg, but in reality, it is an
* src register.
* FIXME:
*/
if (MONO_IS_STORE_MEMBASE (ins)) {
tmp_reg = ins->dreg;
ins->dreg = ins->sreg2;
ins->sreg2 = tmp_reg;
store = TRUE;
spec2 [MONO_INST_DEST] = ' ';
spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
spec2 [MONO_INST_SRC3] = ' ';
spec = spec2;
} else if (MONO_IS_STORE_MEMINDEX (ins))
g_assert_not_reached ();
else
store = FALSE;
no_lvreg = FALSE;
if (G_UNLIKELY (cfg->verbose_level > 2)) {
printf ("\t %.3s %d", spec, ins->dreg);
num_sregs = mono_inst_get_src_registers (ins, sregs);
for (srcindex = 0; srcindex < num_sregs; ++srcindex)
printf (" %d", sregs [srcindex]);
printf ("\n");
}
/***************/
/* DREG */
/***************/
regtype = spec [MONO_INST_DEST];
g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
prev_dreg = -1;
int dreg_using_dest_to_membase_op = -1;
if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
MonoInst *store_ins;
int store_opcode;
MonoInst *def_ins = ins;
int dreg = ins->dreg; /* The original vreg */
store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
if (var->opcode == OP_REGVAR) {
ins->dreg = var->dreg;
} else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
/*
* Instead of emitting a load+store, use a _membase opcode.
*/
g_assert (var->opcode == OP_REGOFFSET);
if (ins->opcode == OP_MOVE) {
NULLIFY_INS (ins);
def_ins = NULL;
} else {
dreg_using_dest_to_membase_op = ins->dreg;
ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
ins->inst_basereg = var->inst_basereg;
ins->inst_offset = var->inst_offset;
ins->dreg = -1;
}
spec = INS_INFO (ins->opcode);
} else {
guint32 lvreg;
g_assert (var->opcode == OP_REGOFFSET);
prev_dreg = ins->dreg;
/* Invalidate any previous lvreg for this vreg */
vreg_to_lvreg [ins->dreg] = 0;
lvreg = 0;
if (COMPILE_SOFT_FLOAT (cfg) && store_opcode == OP_STORER8_MEMBASE_REG) {
regtype = 'l';
store_opcode = OP_STOREI8_MEMBASE_REG;
}
ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
#if SIZEOF_REGISTER != 8
if (regtype == 'l') {
NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, MONO_LVREG_LS (ins->dreg));
mono_bblock_insert_after_ins (bb, ins, store_ins);
NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, MONO_LVREG_MS (ins->dreg));
mono_bblock_insert_after_ins (bb, ins, store_ins);
def_ins = store_ins;
}
else
#endif
{
g_assert (store_opcode != OP_STOREV_MEMBASE);
/* Try to fuse the store into the instruction itself */
/* FIXME: Add more instructions */
if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
ins->inst_imm = ins->inst_c0;
ins->inst_destbasereg = var->inst_basereg;
ins->inst_offset = var->inst_offset;
spec = INS_INFO (ins->opcode);
} else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE) || (ins->opcode == OP_RMOVE))) {
ins->opcode = store_opcode;
ins->inst_destbasereg = var->inst_basereg;
ins->inst_offset = var->inst_offset;
no_lvreg = TRUE;
tmp_reg = ins->dreg;
ins->dreg = ins->sreg2;
ins->sreg2 = tmp_reg;
store = TRUE;
spec2 [MONO_INST_DEST] = ' ';
spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
spec2 [MONO_INST_SRC3] = ' ';
spec = spec2;
} else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
// FIXME: The backends expect the base reg to be in inst_basereg
ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
ins->dreg = -1;
ins->inst_basereg = var->inst_basereg;
ins->inst_offset = var->inst_offset;
spec = INS_INFO (ins->opcode);
} else {
/* printf ("INS: "); mono_print_ins (ins); */
/* Create a store instruction */
NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
/* Insert it after the instruction */
mono_bblock_insert_after_ins (bb, ins, store_ins);
def_ins = store_ins;
/*
* We can't assign ins->dreg to var->dreg here, since the
* sregs could use it. So set a flag, and do it after
* the sregs.
*/
if ((!cfg->backend->use_fpstack || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
dest_has_lvreg = TRUE;
}
}
}
if (def_ins && !live_range_start [dreg]) {
live_range_start [dreg] = def_ins;
live_range_start_bb [dreg] = bb;
}
if (cfg->compute_gc_maps && def_ins && (var->flags & MONO_INST_GC_TRACK)) {
MonoInst *tmp;
MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_DEF);
tmp->inst_c1 = dreg;
mono_bblock_insert_after_ins (bb, def_ins, tmp);
}
}
/************/
/* SREGS */
/************/
num_sregs = mono_inst_get_src_registers (ins, sregs);
for (srcindex = 0; srcindex < 3; ++srcindex) {
regtype = spec [MONO_INST_SRC1 + srcindex];
sreg = sregs [srcindex];
g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
MonoInst *var = get_vreg_to_inst (cfg, sreg);
MonoInst *use_ins = ins;
MonoInst *load_ins;
guint32 load_opcode;
if (var->opcode == OP_REGVAR) {
sregs [srcindex] = var->dreg;
//mono_inst_set_src_registers (ins, sregs);
live_range_end [sreg] = use_ins;
live_range_end_bb [sreg] = bb;
if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
MonoInst *tmp;
MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
/* var->dreg is a hreg */
tmp->inst_c1 = sreg;
mono_bblock_insert_after_ins (bb, ins, tmp);
}
continue;
}
g_assert (var->opcode == OP_REGOFFSET);
load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
g_assert (load_opcode != OP_LOADV_MEMBASE);
if (vreg_to_lvreg [sreg]) {
g_assert (vreg_to_lvreg [sreg] != -1);
/* The variable is already loaded to an lvreg */
if (G_UNLIKELY (cfg->verbose_level > 2))
printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
sregs [srcindex] = vreg_to_lvreg [sreg];
//mono_inst_set_src_registers (ins, sregs);
continue;
}
/* Try to fuse the load into the instruction */
if ((srcindex == 0) && (op_to_op_src1_membase (cfg, load_opcode, ins->opcode) != -1)) {
ins->opcode = op_to_op_src1_membase (cfg, load_opcode, ins->opcode);
sregs [0] = var->inst_basereg;
//mono_inst_set_src_registers (ins, sregs);
ins->inst_offset = var->inst_offset;
} else if ((srcindex == 1) && (op_to_op_src2_membase (cfg, load_opcode, ins->opcode) != -1)) {
ins->opcode = op_to_op_src2_membase (cfg, load_opcode, ins->opcode);
sregs [1] = var->inst_basereg;
//mono_inst_set_src_registers (ins, sregs);
ins->inst_offset = var->inst_offset;
} else {
if (MONO_IS_REAL_MOVE (ins)) {
ins->opcode = OP_NOP;
sreg = ins->dreg;
} else {
//printf ("%d ", srcindex); mono_print_ins (ins);
sreg = alloc_dreg (cfg, stacktypes [regtype]);
if ((!cfg->backend->use_fpstack || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
if (var->dreg == prev_dreg) {
/*
* sreg refers to the value loaded by the load
* emitted below, but we need to use ins->dreg
* since it refers to the store emitted earlier.
*/
sreg = ins->dreg;
}
g_assert (sreg != -1);
if (var->dreg == dreg_using_dest_to_membase_op) {
if (cfg->verbose_level > 2)
printf ("\tCan't cache R%d because it's part of a dreg dest_membase optimization\n", var->dreg);
} else {
vreg_to_lvreg [var->dreg] = sreg;
}
if (lvregs_len >= lvregs_size) {
guint32 *new_lvregs = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * lvregs_size * 2);
memcpy (new_lvregs, lvregs, sizeof (guint32) * lvregs_size);
lvregs = new_lvregs;
lvregs_size *= 2;
}
lvregs [lvregs_len ++] = var->dreg;
}
}
sregs [srcindex] = sreg;
//mono_inst_set_src_registers (ins, sregs);
#if SIZEOF_REGISTER != 8
if (regtype == 'l') {
NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, MONO_LVREG_MS (sreg), var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
mono_bblock_insert_before_ins (bb, ins, load_ins);
NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, MONO_LVREG_LS (sreg), var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
mono_bblock_insert_before_ins (bb, ins, load_ins);
use_ins = load_ins;
}
else
#endif
{
#if SIZEOF_REGISTER == 4
g_assert (load_opcode != OP_LOADI8_MEMBASE);
#endif
NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
mono_bblock_insert_before_ins (bb, ins, load_ins);
use_ins = load_ins;
}
if (cfg->verbose_level > 2)
mono_print_ins_index (0, use_ins);
}
if (var->dreg < orig_next_vreg) {
live_range_end [var->dreg] = use_ins;
live_range_end_bb [var->dreg] = bb;
}
if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
MonoInst *tmp;
MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
tmp->inst_c1 = var->dreg;
mono_bblock_insert_after_ins (bb, ins, tmp);
}
}
}
mono_inst_set_src_registers (ins, sregs);
if (dest_has_lvreg) {
g_assert (ins->dreg != -1);
vreg_to_lvreg [prev_dreg] = ins->dreg;
if (lvregs_len >= lvregs_size) {
guint32 *new_lvregs = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * lvregs_size * 2);
memcpy (new_lvregs, lvregs, sizeof (guint32) * lvregs_size);
lvregs = new_lvregs;
lvregs_size *= 2;
}
lvregs [lvregs_len ++] = prev_dreg;
dest_has_lvreg = FALSE;
}
if (store) {
tmp_reg = ins->dreg;
ins->dreg = ins->sreg2;
ins->sreg2 = tmp_reg;
}
if (MONO_IS_CALL (ins)) {
/* Clear vreg_to_lvreg array */
for (i = 0; i < lvregs_len; i++)
vreg_to_lvreg [lvregs [i]] = 0;
lvregs_len = 0;
} else if (ins->opcode == OP_NOP) {
ins->dreg = -1;
MONO_INST_NULLIFY_SREGS (ins);
}
if (cfg->verbose_level > 2)
mono_print_ins_index (1, ins);
}
/* Extend the live range based on the liveness info */
if (cfg->compute_precise_live_ranges && bb->live_out_set && bb->code) {
for (i = 0; i < cfg->num_varinfo; i ++) {
MonoMethodVar *vi = MONO_VARINFO (cfg, i);
if (vreg_is_volatile (cfg, vi->vreg))
/* The liveness info is incomplete */
continue;
if (mono_bitset_test_fast (bb->live_in_set, i) && !live_range_start [vi->vreg]) {
/* Live from at least the first ins of this bb */
live_range_start [vi->vreg] = bb->code;
live_range_start_bb [vi->vreg] = bb;
}
if (mono_bitset_test_fast (bb->live_out_set, i)) {
/* Live at least until the last ins of this bb */
live_range_end [vi->vreg] = bb->last_ins;
live_range_end_bb [vi->vreg] = bb;
}
}
}
}
/*
* Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
* by storing the current native offset into MonoMethodVar->live_range_start/end.
*/
if (cfg->compute_precise_live_ranges && cfg->comp_done & MONO_COMP_LIVENESS) {
for (i = 0; i < cfg->num_varinfo; ++i) {
int vreg = MONO_VARINFO (cfg, i)->vreg;
MonoInst *ins;
if (live_range_start [vreg]) {
MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
ins->inst_c0 = i;
ins->inst_c1 = vreg;
mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
}
if (live_range_end [vreg]) {
MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
ins->inst_c0 = i;
ins->inst_c1 = vreg;
if (live_range_end [vreg] == live_range_end_bb [vreg]->last_ins)
mono_add_ins_to_end (live_range_end_bb [vreg], ins);
else
mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
}
}
}
if (cfg->gsharedvt_locals_var_ins) {
/* Nullify if unused */
cfg->gsharedvt_locals_var_ins->opcode = OP_PCONST;
cfg->gsharedvt_locals_var_ins->inst_imm = 0;
}
g_free (live_range_start);
g_free (live_range_end);
g_free (live_range_start_bb);
g_free (live_range_end_bb);
}
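/*
 * Worked example (editor's illustration): for a global variable V allocated
 * to [%rbp - 8] (OP_REGOFFSET), the pass above rewrites
 *
 *     int_add V <- V + R9
 *
 * either into a fused read-modify-write opcode (see op_to_op_dest_membase ())
 * or, in the generic case, into a load/compute/store triple on fresh lvregs:
 *
 *     loadi4_membase      R20 <- [%rbp - 8]
 *     int_add             R21 <- R20 + R9
 *     storei4_membase_reg [%rbp - 8] <- R21
 *
 * The vreg_to_lvreg cache then lets later uses of V in the same bblock reuse
 * R21 instead of reloading it, until a call clobbers the cache.
 */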
/**
* FIXME:
* - use 'iadd' instead of 'int_add'
* - handling ovf opcodes: decompose in method_to_ir.
* - unify iregs/fregs
* -> partly done, the missing parts are:
* - a more complete unification would involve unifying the hregs as well, so
* code wouldn't need if (fp) all over the place. but that would mean the hregs
* would no longer map to the machine hregs, so the code generators would need to
* be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
* wouldn't work any more. Duplicating the code in mono_local_regalloc () into
* fp/non-fp branches speeds it up by about 15%.
* - use sext/zext opcodes instead of shifts
* - add OP_ICALL
* - get rid of TEMPLOADs if possible and use vregs instead
* - clean up usage of OP_P/OP_ opcodes
* - cleanup usage of DUMMY_USE
* - cleanup the setting of ins->type for MonoInst's which are pushed on the
* stack
* - set the stack type and allocate a dreg in the EMIT_NEW macros
* - get rid of all the <foo>2 stuff when the new JIT is ready.
* - make sure handle_stack_args () is called before the branch is emitted
* - when the new IR is done, get rid of all unused stuff
* - COMPARE/BEQ as separate instructions or unify them ?
* - keeping them separate allows specialized compare instructions like
* compare_imm, compare_membase
* - most back ends unify fp compare+branch, fp compare+ceq
* - integrate mono_save_args into inline_method
 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRANCH_BLOCK2
* - handle long shift opts on 32 bit platforms somehow: they require
* 3 sregs (2 for arg1 and 1 for arg2)
* - make byref a 'normal' type.
 * - use vregs for bb->out_stacks if possible, handle_global_vregs () will make them a
* variable if needed.
* - do not start a new IL level bblock when cfg->cbb is changed by a function call
* like inline_method.
* - remove inlining restrictions
* - fix LNEG and enable cfold of INEG
* - generalize x86 optimizations like ldelema as a peephole optimization
* - add store_mem_imm for amd64
* - optimize the loading of the interruption flag in the managed->native wrappers
* - avoid special handling of OP_NOP in passes
* - move code inserting instructions into one function/macro.
* - try a coalescing phase after liveness analysis
* - add float -> vreg conversion + local optimizations on !x86
* - figure out how to handle decomposed branches during optimizations, ie.
* compare+branch, op_jump_table+op_br etc.
* - promote RuntimeXHandles to vregs
* - vtype cleanups:
* - add a NEW_VARLOADA_VREG macro
* - the vtype optimizations are blocked by the LDADDR opcodes generated for
* accessing vtype fields.
* - get rid of I8CONST on 64 bit platforms
* - dealing with the increase in code size due to branches created during opcode
* decomposition:
* - use extended basic blocks
* - all parts of the JIT
* - handle_global_vregs () && local regalloc
* - avoid introducing global vregs during decomposition, like 'vtable' in isinst
* - sources of increase in code size:
* - vtypes
* - long compares
* - isinst and castclass
* - lvregs not allocated to global registers even if used multiple times
* - call cctors outside the JIT, to make -v output more readable and JIT timings more
* meaningful.
* - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
* - add all micro optimizations from the old JIT
* - put tree optimizations into the deadce pass
* - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
* specific function.
* - unify the float comparison opcodes with the other comparison opcodes, i.e.
* fcompare + branchCC.
* - create a helper function for allocating a stack slot, taking into account
* MONO_CFG_HAS_SPILLUP.
* - merge r68207.
* - optimize mono_regstate2_alloc_int/float.
* - fix the pessimistic handling of variables accessed in exception handler blocks.
* - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
* parts of the tree could be separated by other instructions, killing the tree
* arguments, or stores killing loads etc. Also, should we fold loads into other
* instructions if the result of the load is used multiple times ?
* - make the REM_IMM optimization in mini-x86.c arch-independent.
* - LAST MERGE: 108395.
* - when returning vtypes in registers, generate IR and append it to the end of the
* last bb instead of doing it in the epilog.
* - change the store opcodes so they use sreg1 instead of dreg to store the base register.
*/
/*
NOTES
-----
- When to decompose opcodes:
- earlier: this makes some optimizations hard to implement, since the low level IR
no longer contains the necessary information. But it is easier to do.
- later: harder to implement, enables more optimizations.
- Branches inside bblocks:
- created when decomposing complex opcodes.
- branches to another bblock: harmless, but not tracked by the branch
optimizations, so need to branch to a label at the start of the bblock.
- branches to inside the same bblock: very problematic, trips up the local
   reg allocator. Can be fixed by splitting the current bblock, but that is a
complex operation, since some local vregs can become global vregs etc.
- Local/global vregs:
- local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
local register allocator.
- global vregs: used in more than one bblock. Have an associated MonoMethodVar
structure, created by mono_create_var (). Assigned to hregs or the stack by
the global register allocator.
- When to do optimizations like alu->alu_imm:
- earlier -> saves work later on since the IR will be smaller/simpler
- later -> can work on more instructions
- Handling of valuetypes:
- When a vtype is pushed on the stack, a new temporary is created, an
instruction computing its address (LDADDR) is emitted and pushed on
the stack. Need to optimize cases when the vtype is used immediately as in
argument passing, stloc etc.
- Instead of the to_end stuff in the old JIT, simply call the function handling
the values on the stack before emitting the last instruction of the bb.
*/
#else /* !DISABLE_JIT */
MONO_EMPTY_SOURCE_FILE (method_to_ir);
#endif /* !DISABLE_JIT */
| 1 |
dotnet/runtime | 66,006 | Revert "[mono][jit] Remove support for -O=-float32, i.e. treating r4 values a…" | Reverts this as x86 still uses it.
| vargaz | 2022-03-01T15:15:53Z | 2022-03-01T20:09:47Z | af71404d9a3b38e63408af47cc847ac1a1fcf4e1 | 2dd232a53c38ac874b15fe504df275b660988294 | Revert "[mono][jit] Remove support for -O=-float32, i.e. treating r4 values a…". Reverts this as x86 still uses it.
| ./src/mono/mono/mini/mini-amd64.c | /**
* \file
* AMD64 backend for the Mono code generator
*
* Based on mini-x86.c.
*
* Authors:
* Paolo Molaro ([email protected])
* Dietmar Maurer ([email protected])
* Patrik Torstensson
* Zoltan Varga ([email protected])
* Johan Lorensson ([email protected])
*
* (C) 2003 Ximian, Inc.
* Copyright 2003-2011 Novell, Inc (http://www.novell.com)
* Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
* Licensed under the MIT license. See LICENSE file in the project root for full license information.
*/
#include "mini.h"
#include <string.h>
#include <math.h>
#include <assert.h>
#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif
#include <mono/metadata/abi-details.h>
#include <mono/metadata/appdomain.h>
#include <mono/metadata/debug-helpers.h>
#include <mono/metadata/threads.h>
#include <mono/metadata/profiler-private.h>
#include <mono/metadata/mono-debug.h>
#include <mono/metadata/gc-internals.h>
#include <mono/metadata/tokentype.h>
#include <mono/utils/mono-math.h>
#include <mono/utils/mono-mmap.h>
#include <mono/utils/mono-memory-model.h>
#include <mono/utils/mono-tls.h>
#include <mono/utils/mono-hwcap.h>
#include <mono/utils/mono-threads.h>
#include <mono/utils/unlocked.h>
#include "interp/interp.h"
#include "ir-emit.h"
#include "mini-amd64.h"
#include "cpu-amd64.h"
#include "mini-gc.h"
#include "mini-runtime.h"
#include "aot-runtime.h"
#ifdef MONO_XEN_OPT
static gboolean optimize_for_xen = TRUE;
#else
#define optimize_for_xen 0
#endif
static GENERATE_TRY_GET_CLASS_WITH_CACHE (math, "System", "Math")
#define IS_IMM32(val) ((((guint64)val) >> 32) == 0)
#define IS_REX(inst) (((inst) >= 0x40) && ((inst) <= 0x4f))
/* The single step trampoline */
static gpointer ss_trampoline;
/* The breakpoint trampoline */
static gpointer bp_trampoline;
/* Offset between fp and the first argument in the callee */
#define ARGS_OFFSET 16
#define GP_SCRATCH_REG AMD64_R11
/* Max number of bblocks before we bail from using more advanced branch placement code */
#define MAX_BBLOCKS_FOR_BRANCH_OPTS 800
/*
* AMD64 register usage:
* - callee saved registers are used for global register allocation
* - %r11 is used for materializing 64 bit constants in opcodes
* - the rest is used for local allocation
*/
/*
* Floating point comparison results:
* ZF PF CF
* A > B 0 0 0
* A < B 0 0 1
* A = B 1 0 0
* UNORDERED 1 1 1
*/
const char*
mono_arch_regname (int reg)
{
switch (reg) {
case AMD64_RAX: return "%rax";
case AMD64_RBX: return "%rbx";
case AMD64_RCX: return "%rcx";
case AMD64_RDX: return "%rdx";
case AMD64_RSP: return "%rsp";
case AMD64_RBP: return "%rbp";
case AMD64_RDI: return "%rdi";
case AMD64_RSI: return "%rsi";
case AMD64_R8: return "%r8";
case AMD64_R9: return "%r9";
case AMD64_R10: return "%r10";
case AMD64_R11: return "%r11";
case AMD64_R12: return "%r12";
case AMD64_R13: return "%r13";
case AMD64_R14: return "%r14";
case AMD64_R15: return "%r15";
}
return "unknown";
}
static const char * const packed_xmmregs [] = {
"p:xmm0", "p:xmm1", "p:xmm2", "p:xmm3", "p:xmm4", "p:xmm5", "p:xmm6", "p:xmm7", "p:xmm8",
"p:xmm9", "p:xmm10", "p:xmm11", "p:xmm12", "p:xmm13", "p:xmm14", "p:xmm15"
};
static const char * const single_xmmregs [] = {
"s:xmm0", "s:xmm1", "s:xmm2", "s:xmm3", "s:xmm4", "s:xmm5", "s:xmm6", "s:xmm7", "s:xmm8",
"s:xmm9", "s:xmm10", "s:xmm11", "s:xmm12", "s:xmm13", "s:xmm14", "s:xmm15"
};
const char*
mono_arch_fregname (int reg)
{
if (reg < AMD64_XMM_NREG)
return single_xmmregs [reg];
else
return "unknown";
}
const char *
mono_arch_xregname (int reg)
{
if (reg < AMD64_XMM_NREG)
return packed_xmmregs [reg];
else
return "unknown";
}
static gboolean
debug_omit_fp (void)
{
#if 0
return mono_debug_count ();
#else
return TRUE;
#endif
}
static gboolean
amd64_is_near_call (guint8 *code)
{
/* Skip REX */
if ((code [0] >= 0x40) && (code [0] <= 0x4f))
code += 1;
return code [0] == 0xe8;
}
static gboolean
amd64_use_imm32 (gint64 val)
{
if (mini_debug_options.single_imm_size)
return FALSE;
return amd64_is_imm32 (val);
}
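/*
 * Example (illustrative): amd64 ALU encodings only accept 32 bit
 * sign-extended immediates, so a value like 0x100000000 has to be
 * materialized into a scratch register first:
 *
 *   if (amd64_use_imm32 (val))
 *       amd64_alu_reg_imm (code, X86_ADD, AMD64_RAX, val);
 *   else {
 *       amd64_mov_reg_imm (code, GP_SCRATCH_REG, val);
 *       amd64_alu_reg_reg (code, X86_ADD, AMD64_RAX, GP_SCRATCH_REG);
 *   }
 *
 * This is the pattern used throughout the code emitter below; with
 * mini_debug_options.single_imm_size set, the helper forces the long form so
 * that instruction sizes stay predictable.
 */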
void
mono_x86_patch (unsigned char* code, gpointer target)
{
mono_x86_patch_inline (code, target);
}
static void
amd64_patch (unsigned char* code, gpointer target)
{
// NOTE: Sometimes code has just been generated, is not running yet,
// and has no alignment requirements. Sometimes it could be running while we patch it,
// and there are alignment requirements.
// FIXME Assert alignment.
guint8 rex = 0;
/* Skip REX */
if ((code [0] >= 0x40) && (code [0] <= 0x4f)) {
rex = code [0];
code += 1;
}
if ((code [0] & 0xf8) == 0xb8) {
/* amd64_set_reg_template */
*(guint64*)(code + 1) = (guint64)target;
}
else if ((code [0] == 0x8b) && rex && x86_modrm_mod (code [1]) == 0 && x86_modrm_rm (code [1]) == 5) {
/* mov 0(%rip), %dreg */
g_assert (!1); // Historical code was incorrect.
ptrdiff_t const offset = (guchar*)target - (code + 6);
g_assert (offset == (gint32)offset);
*(gint32*)(code + 2) = (gint32)offset;
}
else if (code [0] == 0xff && (code [1] == 0x15 || code [1] == 0x25)) {
/* call or jmp *<OFFSET>(%rip) */
// Patch the data, not the code.
g_assert (!2); // For possible use later.
*(void**)(code + 6 + *(gint32*)(code + 2)) = target;
}
else
x86_patch (code, target);
}
void
mono_amd64_patch (unsigned char* code, gpointer target)
{
amd64_patch (code, target);
}
#define DEBUG(a) if (cfg->verbose_level > 1) a
static void inline
add_general (guint32 *gr, guint32 *stack_size, ArgInfo *ainfo)
{
ainfo->offset = *stack_size;
if (*gr >= PARAM_REGS) {
ainfo->storage = ArgOnStack;
ainfo->arg_size = sizeof (target_mgreg_t);
/* Since the same stack slot size is used for all arg */
/* types, it needs to be big enough to hold them all */
(*stack_size) += sizeof (target_mgreg_t);
}
else {
ainfo->storage = ArgInIReg;
ainfo->reg = param_regs [*gr];
(*gr) ++;
}
}
static void inline
add_float (guint32 *gr, guint32 *stack_size, ArgInfo *ainfo, gboolean is_double)
{
ainfo->offset = *stack_size;
if (*gr >= FLOAT_PARAM_REGS) {
ainfo->storage = ArgOnStack;
ainfo->arg_size = sizeof (target_mgreg_t);
/* Since the same stack slot size is used for both float */
/* types, it needs to be big enough to hold them both */
(*stack_size) += sizeof (target_mgreg_t);
}
else {
/* A double register */
if (is_double)
ainfo->storage = ArgInDoubleSSEReg;
else
ainfo->storage = ArgInFloatSSEReg;
ainfo->reg = *gr;
(*gr) += 1;
}
}
typedef enum ArgumentClass {
ARG_CLASS_NO_CLASS,
ARG_CLASS_MEMORY,
ARG_CLASS_INTEGER,
ARG_CLASS_SSE
} ArgumentClass;
static ArgumentClass
merge_argument_class_from_type (MonoType *type, ArgumentClass class1)
{
ArgumentClass class2 = ARG_CLASS_NO_CLASS;
MonoType *ptype;
ptype = mini_get_underlying_type (type);
switch (ptype->type) {
case MONO_TYPE_I1:
case MONO_TYPE_U1:
case MONO_TYPE_I2:
case MONO_TYPE_U2:
case MONO_TYPE_I4:
case MONO_TYPE_U4:
case MONO_TYPE_I:
case MONO_TYPE_U:
case MONO_TYPE_OBJECT:
case MONO_TYPE_PTR:
case MONO_TYPE_FNPTR:
case MONO_TYPE_I8:
case MONO_TYPE_U8:
class2 = ARG_CLASS_INTEGER;
break;
case MONO_TYPE_R4:
case MONO_TYPE_R8:
#ifdef TARGET_WIN32
class2 = ARG_CLASS_INTEGER;
#else
class2 = ARG_CLASS_SSE;
#endif
break;
case MONO_TYPE_TYPEDBYREF:
g_assert_not_reached ();
case MONO_TYPE_GENERICINST:
if (!mono_type_generic_inst_is_valuetype (ptype)) {
class2 = ARG_CLASS_INTEGER;
break;
}
/* fall through */
case MONO_TYPE_VALUETYPE: {
MonoMarshalType *info = mono_marshal_load_type_info (ptype->data.klass);
int i;
for (i = 0; i < info->num_fields; ++i) {
class2 = class1;
class2 = merge_argument_class_from_type (info->fields [i].field->type, class2);
}
break;
}
default:
g_assert_not_reached ();
}
/* Merge */
if (class1 == class2)
;
else if (class1 == ARG_CLASS_NO_CLASS)
class1 = class2;
else if ((class1 == ARG_CLASS_MEMORY) || (class2 == ARG_CLASS_MEMORY))
class1 = ARG_CLASS_MEMORY;
else if ((class1 == ARG_CLASS_INTEGER) || (class2 == ARG_CLASS_INTEGER))
class1 = ARG_CLASS_INTEGER;
else
class1 = ARG_CLASS_SSE;
return class1;
}
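/*
 * Worked example (illustrative, SysV): for
 *
 *   struct { int a; float b; };
 *
 * the fields classify as INTEGER and SSE respectively, and the merge rules
 * above resolve the conflict in favour of INTEGER, so the struct is passed in
 * a general purpose register. A struct { float a; float b; } merges to SSE
 * and travels in an XMM register instead. On Windows both cases classify as
 * INTEGER, since the win64 ABI never passes aggregates in SSE registers.
 */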
typedef struct {
MonoType *type;
int size, offset;
} StructFieldInfo;
/*
* collect_field_info_nested:
*
* Collect field info from KLASS recursively into FIELDS.
*/
static void
collect_field_info_nested (MonoClass *klass, GArray *fields_array, int offset, gboolean pinvoke, gboolean unicode)
{
MonoMarshalType *info;
int i;
if (pinvoke) {
info = mono_marshal_load_type_info (klass);
g_assert(info);
for (i = 0; i < info->num_fields; ++i) {
if (MONO_TYPE_ISSTRUCT (info->fields [i].field->type)) {
collect_field_info_nested (mono_class_from_mono_type_internal (info->fields [i].field->type), fields_array, info->fields [i].offset, pinvoke, unicode);
} else {
guint32 align;
StructFieldInfo f;
f.type = info->fields [i].field->type;
f.size = mono_marshal_type_size (info->fields [i].field->type,
info->fields [i].mspec,
&align, TRUE, unicode);
f.offset = offset + info->fields [i].offset;
if (i == info->num_fields - 1 && f.size + f.offset < info->native_size) {
					/* This can happen with .pack directives e.g. 'fixed' arrays */
if (MONO_TYPE_IS_PRIMITIVE (f.type)) {
/* Replicate the last field to fill out the remaining place, since the code in add_valuetype () needs type information */
g_array_append_val (fields_array, f);
while (f.size + f.offset < info->native_size) {
f.offset += f.size;
g_array_append_val (fields_array, f);
}
} else {
f.size = info->native_size - f.offset;
g_array_append_val (fields_array, f);
}
} else {
g_array_append_val (fields_array, f);
}
}
}
} else {
gpointer iter;
MonoClassField *field;
iter = NULL;
while ((field = mono_class_get_fields_internal (klass, &iter))) {
if (field->type->attrs & FIELD_ATTRIBUTE_STATIC)
continue;
if (MONO_TYPE_ISSTRUCT (field->type)) {
collect_field_info_nested (mono_class_from_mono_type_internal (field->type), fields_array, field->offset - MONO_ABI_SIZEOF (MonoObject), pinvoke, unicode);
} else {
int align;
StructFieldInfo f;
f.type = field->type;
f.size = mono_type_size (field->type, &align);
f.offset = field->offset - MONO_ABI_SIZEOF (MonoObject) + offset;
g_array_append_val (fields_array, f);
}
}
}
}
#ifdef TARGET_WIN32
/* Windows x64 ABI can pass/return value types in registers of size 1, 2, 4 or 8 bytes. */
#define MONO_WIN64_VALUE_TYPE_FITS_REG(arg_size) (arg_size <= SIZEOF_REGISTER && (arg_size == 1 || arg_size == 2 || arg_size == 4 || arg_size == 8))
static gboolean
allocate_register_for_valuetype_win64 (ArgInfo *arg_info, ArgumentClass arg_class, guint32 arg_size, const AMD64_Reg_No int_regs [], int int_reg_count, const AMD64_XMM_Reg_No float_regs [], int float_reg_count, guint32 *current_int_reg, guint32 *current_float_reg)
{
gboolean result = FALSE;
assert (arg_info != NULL && int_regs != NULL && float_regs != NULL && current_int_reg != NULL && current_float_reg != NULL);
assert (arg_info->storage == ArgValuetypeInReg || arg_info->storage == ArgValuetypeAddrInIReg);
arg_info->pair_storage [0] = arg_info->pair_storage [1] = ArgNone;
arg_info->pair_regs [0] = arg_info->pair_regs [1] = ArgNone;
arg_info->pair_size [0] = 0;
arg_info->pair_size [1] = 0;
arg_info->nregs = 0;
if (arg_class == ARG_CLASS_INTEGER && *current_int_reg < int_reg_count) {
/* Pass parameter in integer register. */
arg_info->pair_storage [0] = ArgInIReg;
arg_info->pair_regs [0] = int_regs [*current_int_reg];
(*current_int_reg) ++;
result = TRUE;
} else if (arg_class == ARG_CLASS_SSE && *current_float_reg < float_reg_count) {
/* Pass parameter in float register. */
arg_info->pair_storage [0] = (arg_size <= sizeof (gfloat)) ? ArgInFloatSSEReg : ArgInDoubleSSEReg;
arg_info->pair_regs [0] = float_regs [*current_float_reg];
(*current_float_reg) ++;
result = TRUE;
}
if (result == TRUE) {
arg_info->pair_size [0] = arg_size;
arg_info->nregs = 1;
}
return result;
}
static gboolean
allocate_parameter_register_for_valuetype_win64 (ArgInfo *arg_info, ArgumentClass arg_class, guint32 arg_size, guint32 *current_int_reg, guint32 *current_float_reg)
{
return allocate_register_for_valuetype_win64 (arg_info, arg_class, arg_size, param_regs, PARAM_REGS, float_param_regs, FLOAT_PARAM_REGS, current_int_reg, current_float_reg);
}
static gboolean
allocate_return_register_for_valuetype_win64 (ArgInfo *arg_info, ArgumentClass arg_class, guint32 arg_size, guint32 *current_int_reg, guint32 *current_float_reg)
{
return allocate_register_for_valuetype_win64 (arg_info, arg_class, arg_size, return_regs, RETURN_REGS, float_return_regs, FLOAT_RETURN_REGS, current_int_reg, current_float_reg);
}
static void
allocate_storage_for_valuetype_win64 (ArgInfo *arg_info, MonoType *type, gboolean is_return, ArgumentClass arg_class,
guint32 arg_size, guint32 *current_int_reg, guint32 *current_float_reg, guint32 *stack_size)
{
/* Windows x64 value type ABI.
*
* Parameters: https://msdn.microsoft.com/en-us/library/zthk2dkh.aspx
*
	 * Integer/Float types smaller than or equal to 8 bytes or properly sized structs/unions (1,2,4,8)
	 * Try to pass in a register using ArgValuetypeInReg/(ArgInIReg|ArgInFloatSSEReg|ArgInDoubleSSEReg) as storage and size of parameter(1,2,4,8), if no more registers, pass on stack using ArgOnStack as storage and size of parameter(1,2,4,8).
	 * Integer/Float types bigger than 8 bytes or structs/unions larger than 8 bytes or of size (3,5,6,7).
	 * Try to pass a pointer in a register using ArgValuetypeAddrInIReg, if no more registers, pass the pointer on the stack using ArgValuetypeAddrOnStack as storage and parameter size of register (8 bytes).
*
* Return values: https://msdn.microsoft.com/en-us/library/7572ztz4.aspx.
*
	 * Integer/Float types smaller than or equal to 8 bytes
	 * Return in the corresponding register RAX/XMM0 using ArgValuetypeInReg/(ArgInIReg|ArgInFloatSSEReg|ArgInDoubleSSEReg) as storage and size of parameter(1,2,4,8).
	 * Properly sized structs/unions (1,2,4,8)
	 * Return in register RAX using ArgValuetypeInReg as storage and size of parameter(1,2,4,8).
	 * Types bigger than 8 bytes or structs/unions larger than 8 bytes or of size (3,5,6,7).
	 * Return a pointer to the allocated stack space (allocated by the caller) using ArgValuetypeAddrInIReg as storage and parameter size.
*/
assert (arg_info != NULL && type != NULL && current_int_reg != NULL && current_float_reg != NULL && stack_size != NULL);
if (!is_return) {
/* Parameter cases. */
if (arg_class != ARG_CLASS_MEMORY && MONO_WIN64_VALUE_TYPE_FITS_REG (arg_size)) {
assert (arg_size == 1 || arg_size == 2 || arg_size == 4 || arg_size == 8);
/* First, try to use registers for parameter. If type is struct it can only be passed by value in integer register. */
arg_info->storage = ArgValuetypeInReg;
if (!allocate_parameter_register_for_valuetype_win64 (arg_info, !MONO_TYPE_ISSTRUCT (type) ? arg_class : ARG_CLASS_INTEGER, arg_size, current_int_reg, current_float_reg)) {
/* No more registers, fallback passing parameter on stack as value. */
assert (arg_info->pair_storage [0] == ArgNone && arg_info->pair_storage [1] == ArgNone && arg_info->pair_size [0] == 0 && arg_info->pair_size [1] == 0 && arg_info->nregs == 0);
/* Passing value directly on stack, so use size of value. */
arg_info->storage = ArgOnStack;
arg_size = ALIGN_TO (arg_size, sizeof (target_mgreg_t));
arg_info->offset = *stack_size;
arg_info->arg_size = arg_size;
*stack_size += arg_size;
}
} else {
/* Fall back to the stack: try to pass the address of the parameter in a register. Always use an integer register to represent a stack address. */
arg_info->storage = ArgValuetypeAddrInIReg;
if (!allocate_parameter_register_for_valuetype_win64 (arg_info, ARG_CLASS_INTEGER, arg_size, current_int_reg, current_float_reg)) {
/* No more registers; fall back to passing the address of the parameter on the stack. */
assert (arg_info->pair_storage [0] == ArgNone && arg_info->pair_storage [1] == ArgNone && arg_info->pair_size [0] == 0 && arg_info->pair_size [1] == 0 && arg_info->nregs == 0);
/* Passing an address to a value on the stack, so use the register size as the argument size. */
arg_info->storage = ArgValuetypeAddrOnStack;
arg_size = sizeof (target_mgreg_t);
arg_info->offset = *stack_size;
arg_info->arg_size = arg_size;
*stack_size += arg_size;
}
}
} else {
/* Return value cases. */
if (arg_class != ARG_CLASS_MEMORY && MONO_WIN64_VALUE_TYPE_FITS_REG (arg_size)) {
assert (arg_size == 1 || arg_size == 2 || arg_size == 4 || arg_size == 8);
/* The return value fits into the return registers. If the type is a struct, it can only be returned by value in an integer register. */
arg_info->storage = ArgValuetypeInReg;
allocate_return_register_for_valuetype_win64 (arg_info, !MONO_TYPE_ISSTRUCT (type) ? arg_class : ARG_CLASS_INTEGER, arg_size, current_int_reg, current_float_reg);
/* Only RAX/XMM0 should be used to return valuetype. */
assert ((arg_info->pair_regs[0] == AMD64_RAX && arg_info->pair_regs[1] == ArgNone) || (arg_info->pair_regs[0] == AMD64_XMM0 && arg_info->pair_regs[1] == ArgNone));
} else {
/* Return value doesn't fit into return register, return address to allocated stack space (allocated by caller and passed as input). */
arg_info->storage = ArgValuetypeAddrInIReg;
allocate_return_register_for_valuetype_win64 (arg_info, ARG_CLASS_INTEGER, arg_size, current_int_reg, current_float_reg);
/* Only RAX should be used to return valuetype address. */
assert (arg_info->pair_regs[0] == AMD64_RAX && arg_info->pair_regs[1] == ArgNone);
arg_size = ALIGN_TO (arg_size, sizeof (target_mgreg_t));
arg_info->offset = *stack_size;
*stack_size += arg_size;
}
}
}
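/*
 * get_valuetype_size_win64:
 *
 *   Compute the argument class and size of the value type KLASS, using the
 * marshalled size for pinvoke signatures and the managed size otherwise.
 */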
static void
get_valuetype_size_win64 (MonoClass *klass, gboolean pinvoke, ArgInfo *arg_info, MonoType *type, ArgumentClass *arg_class, guint32 *arg_size)
{
*arg_size = 0;
*arg_class = ARG_CLASS_NO_CLASS;
assert (klass != NULL && arg_info != NULL && type != NULL && arg_class != NULL && arg_size != NULL);
if (pinvoke) {
/* Calculate argument class type and size of marshalled type. */
MonoMarshalType *info = mono_marshal_load_type_info (klass);
*arg_size = info->native_size;
} else {
/* Calculate argument class type and size of managed type. */
*arg_size = mono_class_value_size (klass, NULL);
}
/* The Windows ABI only handles value types on the stack or passed in an integer register (if the value fits the register size). */
*arg_class = MONO_WIN64_VALUE_TYPE_FITS_REG (*arg_size) ? ARG_CLASS_INTEGER : ARG_CLASS_MEMORY;
if (*arg_class == ARG_CLASS_MEMORY) {
/* The value type has a size that doesn't fit a register according to the ABI. Use the full stack size of the type. */
*arg_size = mini_type_stack_size_full (m_class_get_byval_arg (klass), NULL, pinvoke);
}
/*
 * Standard C and C++ don't allow empty structs; an empty struct will always have a size of 1 byte.
 * GCC has an extension that allows empty structs, https://gcc.gnu.org/onlinedocs/gcc/Empty-Structures.html.
 * This causes a dilemma, since a runtime built with a non-GCC compiler will not be compatible with
 * GCC-built C libraries, and the other way around. On platforms where an empty struct has a size of
 * 1 byte, it must be represented in the call and cannot be dropped.
*/
if (*arg_size == 0 && MONO_TYPE_ISSTRUCT (type)) {
arg_info->pass_empty_struct = TRUE;
*arg_size = SIZEOF_REGISTER;
*arg_class = ARG_CLASS_INTEGER;
}
assert (*arg_class != ARG_CLASS_NO_CLASS);
}
static void
add_valuetype_win64 (MonoMethodSignature *signature, ArgInfo *arg_info, MonoType *type,
gboolean is_return, guint32 *current_int_reg, guint32 *current_float_reg, guint32 *stack_size)
{
guint32 arg_size = SIZEOF_REGISTER;
MonoClass *klass = NULL;
ArgumentClass arg_class;
assert (signature != NULL && arg_info != NULL && type != NULL && current_int_reg != NULL && current_float_reg != NULL && stack_size != NULL);
klass = mono_class_from_mono_type_internal (type);
get_valuetype_size_win64 (klass, signature->pinvoke && !signature->marshalling_disabled, arg_info, type, &arg_class, &arg_size);
/* Only drop the value type if it is not an empty struct that must be represented in the call */
if ((arg_size == 0 && !arg_info->pass_empty_struct) || (arg_info->pass_empty_struct && is_return)) {
arg_info->storage = ArgValuetypeInReg;
arg_info->pair_storage [0] = arg_info->pair_storage [1] = ArgNone;
} else {
/* Allocate storage for the value type. */
allocate_storage_for_valuetype_win64 (arg_info, type, is_return, arg_class, arg_size, current_int_reg, current_float_reg, stack_size);
}
}
#endif /* TARGET_WIN32 */
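/*
 * add_valuetype:
 *
 *   Compute the storage used to pass or return the value type TYPE, updating
 * the integer/float register counters and the stack size accordingly.
 */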
static void
add_valuetype (MonoMethodSignature *sig, ArgInfo *ainfo, MonoType *type,
gboolean is_return,
guint32 *gr, guint32 *fr, guint32 *stack_size)
{
#ifdef TARGET_WIN32
add_valuetype_win64 (sig, ainfo, type, is_return, gr, fr, stack_size);
#else
guint32 size, quad, nquads, i, nfields;
/* Keep track of the size used in each quad so we can */
/* use the right size when copying args/return vars. */
guint32 quadsize [2] = {8, 8};
ArgumentClass args [2];
StructFieldInfo *fields = NULL;
GArray *fields_array;
MonoClass *klass;
gboolean pass_on_stack = FALSE;
int struct_size;
klass = mono_class_from_mono_type_internal (type);
size = mini_type_stack_size_full (m_class_get_byval_arg (klass), NULL, sig->pinvoke && !sig->marshalling_disabled);
if (!sig->pinvoke && ((is_return && (size == 8)) || (!is_return && (size <= 16)))) {
/* We return vtypes of size 8 and pass vtypes of size <= 16 in registers */
} else if (!sig->pinvoke || (size == 0) || (size > 16)) {
pass_on_stack = TRUE;
}
/* If this struct can't be split up naturally into 8-byte */
/* chunks (registers), pass it on the stack. */
if (sig->pinvoke && !sig->marshalling_disabled) {
MonoMarshalType *info = mono_marshal_load_type_info (klass);
g_assert (info);
struct_size = info->native_size;
} else {
struct_size = mono_class_value_size (klass, NULL);
}
/*
* Collect field information recursively to be able to
* handle nested structures.
*/
fields_array = g_array_new (FALSE, TRUE, sizeof (StructFieldInfo));
collect_field_info_nested (klass, fields_array, 0, sig->pinvoke && !sig->marshalling_disabled, m_class_is_unicode (klass));
fields = (StructFieldInfo*)fields_array->data;
nfields = fields_array->len;
for (i = 0; i < nfields; ++i) {
if ((fields [i].offset < 8) && (fields [i].offset + fields [i].size) > 8) {
pass_on_stack = TRUE;
break;
}
}
if (size == 0) {
ainfo->storage = ArgValuetypeInReg;
ainfo->pair_storage [0] = ainfo->pair_storage [1] = ArgNone;
return;
}
if (pass_on_stack) {
/* Always pass in memory */
ainfo->offset = *stack_size;
*stack_size += ALIGN_TO (size, 8);
ainfo->storage = is_return ? ArgValuetypeAddrInIReg : ArgOnStack;
if (!is_return)
ainfo->arg_size = ALIGN_TO (size, 8);
g_array_free (fields_array, TRUE);
return;
}
if (size > 8)
nquads = 2;
else
nquads = 1;
if (!sig->pinvoke) {
int n = mono_class_value_size (klass, NULL);
quadsize [0] = n >= 8 ? 8 : n;
quadsize [1] = n >= 8 ? MAX (n - 8, 8) : 0;
/* Always pass in 1 or 2 integer registers */
args [0] = ARG_CLASS_INTEGER;
args [1] = ARG_CLASS_INTEGER;
/* Only the simplest cases are supported */
if (is_return && nquads != 1) {
args [0] = ARG_CLASS_MEMORY;
args [1] = ARG_CLASS_MEMORY;
}
} else {
/*
* Implement the algorithm from section 3.2.3 of the X86_64 ABI.
* The X87 and SSEUP stuff is left out since there are no such types in
* the CLR.
*/
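/*
 * For example, struct { int a; float b; } occupies a single eightbyte whose
 * INTEGER and SSE field classes merge to INTEGER, so it is passed in one
 * integer register, while struct { double a; double b; } classifies as
 * SSE+SSE and is passed in two XMM registers.
 */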
if (!nfields) {
ainfo->storage = ArgValuetypeInReg;
ainfo->pair_storage [0] = ainfo->pair_storage [1] = ArgNone;
return;
}
if (struct_size > 16) {
ainfo->offset = *stack_size;
*stack_size += ALIGN_TO (struct_size, 8);
ainfo->storage = is_return ? ArgValuetypeAddrInIReg : ArgOnStack;
if (!is_return)
ainfo->arg_size = ALIGN_TO (struct_size, 8);
g_array_free (fields_array, TRUE);
return;
}
args [0] = ARG_CLASS_NO_CLASS;
args [1] = ARG_CLASS_NO_CLASS;
for (quad = 0; quad < nquads; ++quad) {
ArgumentClass class1;
if (nfields == 0)
class1 = ARG_CLASS_MEMORY;
else
class1 = ARG_CLASS_NO_CLASS;
for (i = 0; i < nfields; ++i) {
if ((fields [i].offset < 8) && (fields [i].offset + fields [i].size) > 8) {
/* Unaligned field */
NOT_IMPLEMENTED;
}
/* Skip fields in other quad */
if ((quad == 0) && (fields [i].offset >= 8))
continue;
if ((quad == 1) && (fields [i].offset < 8))
continue;
/* How far into this quad this data extends (8 is the size of a quad). */
quadsize [quad] = fields [i].offset + fields [i].size - (quad * 8);
class1 = merge_argument_class_from_type (fields [i].type, class1);
}
/* Empty structs have a nonzero size, causing this assert to be hit */
if (sig->pinvoke)
g_assert (class1 != ARG_CLASS_NO_CLASS);
args [quad] = class1;
}
}
g_array_free (fields_array, TRUE);
/* Post merger cleanup */
if ((args [0] == ARG_CLASS_MEMORY) || (args [1] == ARG_CLASS_MEMORY))
args [0] = args [1] = ARG_CLASS_MEMORY;
/* Allocate registers */
{
int orig_gr = *gr;
int orig_fr = *fr;
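/* Round each quad size up to the next supported size (1, 2, 4 or 8 bytes) */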
while (quadsize [0] != 1 && quadsize [0] != 2 && quadsize [0] != 4 && quadsize [0] != 8)
quadsize [0] ++;
while (quadsize [1] != 0 && quadsize [1] != 1 && quadsize [1] != 2 && quadsize [1] != 4 && quadsize [1] != 8)
quadsize [1] ++;
ainfo->storage = ArgValuetypeInReg;
ainfo->pair_storage [0] = ainfo->pair_storage [1] = ArgNone;
g_assert (quadsize [0] <= 8);
g_assert (quadsize [1] <= 8);
ainfo->pair_size [0] = quadsize [0];
ainfo->pair_size [1] = quadsize [1];
ainfo->nregs = nquads;
for (quad = 0; quad < nquads; ++quad) {
switch (args [quad]) {
case ARG_CLASS_INTEGER:
if (*gr >= PARAM_REGS)
args [quad] = ARG_CLASS_MEMORY;
else {
ainfo->pair_storage [quad] = ArgInIReg;
if (is_return)
ainfo->pair_regs [quad] = return_regs [*gr];
else
ainfo->pair_regs [quad] = param_regs [*gr];
(*gr) ++;
}
break;
case ARG_CLASS_SSE:
if (*fr >= FLOAT_PARAM_REGS)
args [quad] = ARG_CLASS_MEMORY;
else {
if (quadsize[quad] <= 4)
ainfo->pair_storage [quad] = ArgInFloatSSEReg;
else ainfo->pair_storage [quad] = ArgInDoubleSSEReg;
ainfo->pair_regs [quad] = *fr;
(*fr) ++;
}
break;
case ARG_CLASS_MEMORY:
break;
case ARG_CLASS_NO_CLASS:
break;
default:
g_assert_not_reached ();
}
}
if ((args [0] == ARG_CLASS_MEMORY) || (args [1] == ARG_CLASS_MEMORY)) {
int arg_size;
/* Revert possible register assignments */
*gr = orig_gr;
*fr = orig_fr;
ainfo->offset = *stack_size;
if (sig->pinvoke)
arg_size = ALIGN_TO (struct_size, 8);
else
arg_size = nquads * sizeof (target_mgreg_t);
*stack_size += arg_size;
ainfo->storage = is_return ? ArgValuetypeAddrInIReg : ArgOnStack;
if (!is_return)
ainfo->arg_size = arg_size;
}
}
#endif /* !TARGET_WIN32 */
}
/*
* get_call_info:
*
* Obtain information about a call according to the calling convention.
* For AMD64 System V, see the "System V ABI, x86-64 Architecture Processor Supplement
* Draft Version 0.23" document for more information.
* For AMD64 Windows, see "Overview of x64 Calling Conventions",
* https://msdn.microsoft.com/en-us/library/ms235286.aspx
*/
static CallInfo*
get_call_info (MonoMemPool *mp, MonoMethodSignature *sig)
{
guint32 i, gr, fr, pstart;
MonoType *ret_type;
int n = sig->hasthis + sig->param_count;
guint32 stack_size = 0;
CallInfo *cinfo;
gboolean is_pinvoke = sig->pinvoke;
if (mp)
cinfo = (CallInfo *)mono_mempool_alloc0 (mp, sizeof (CallInfo) + (sizeof (ArgInfo) * n));
else
cinfo = (CallInfo *)g_malloc0 (sizeof (CallInfo) + (sizeof (ArgInfo) * n));
cinfo->nargs = n;
cinfo->gsharedvt = mini_is_gsharedvt_variable_signature (sig);
gr = 0;
fr = 0;
#ifdef TARGET_WIN32
/* Reserve space where the callee can save the argument registers */
stack_size = 4 * sizeof (target_mgreg_t);
#endif
/* return value */
ret_type = mini_get_underlying_type (sig->ret);
switch (ret_type->type) {
case MONO_TYPE_I1:
case MONO_TYPE_U1:
case MONO_TYPE_I2:
case MONO_TYPE_U2:
case MONO_TYPE_I4:
case MONO_TYPE_U4:
case MONO_TYPE_I:
case MONO_TYPE_U:
case MONO_TYPE_PTR:
case MONO_TYPE_FNPTR:
case MONO_TYPE_OBJECT:
cinfo->ret.storage = ArgInIReg;
cinfo->ret.reg = AMD64_RAX;
break;
case MONO_TYPE_U8:
case MONO_TYPE_I8:
cinfo->ret.storage = ArgInIReg;
cinfo->ret.reg = AMD64_RAX;
break;
case MONO_TYPE_R4:
cinfo->ret.storage = ArgInFloatSSEReg;
cinfo->ret.reg = AMD64_XMM0;
break;
case MONO_TYPE_R8:
cinfo->ret.storage = ArgInDoubleSSEReg;
cinfo->ret.reg = AMD64_XMM0;
break;
case MONO_TYPE_GENERICINST:
if (!mono_type_generic_inst_is_valuetype (ret_type)) {
cinfo->ret.storage = ArgInIReg;
cinfo->ret.reg = AMD64_RAX;
break;
}
if (mini_is_gsharedvt_type (ret_type)) {
cinfo->ret.storage = ArgGsharedvtVariableInReg;
break;
}
/* fall through */
case MONO_TYPE_VALUETYPE:
case MONO_TYPE_TYPEDBYREF: {
guint32 tmp_gr = 0, tmp_fr = 0, tmp_stacksize = 0;
add_valuetype (sig, &cinfo->ret, ret_type, TRUE, &tmp_gr, &tmp_fr, &tmp_stacksize);
g_assert (cinfo->ret.storage != ArgInIReg);
break;
}
case MONO_TYPE_VAR:
case MONO_TYPE_MVAR:
g_assert (mini_is_gsharedvt_type (ret_type));
cinfo->ret.storage = ArgGsharedvtVariableInReg;
break;
case MONO_TYPE_VOID:
break;
default:
g_error ("Can't handle as return value 0x%x", ret_type->type);
}
pstart = 0;
/*
* To simplify get_this_arg_reg () and LLVM integration, emit the vret arg after
* the first argument, allowing 'this' to be always passed in the first arg reg.
* Also do this if the first argument is a reference type, since virtual calls
* are sometimes made using calli without sig->hasthis set, like in the delegate
* invoke wrappers.
*/
ArgStorage ret_storage = cinfo->ret.storage;
if ((ret_storage == ArgValuetypeAddrInIReg || ret_storage == ArgGsharedvtVariableInReg) && !is_pinvoke && (sig->hasthis || (sig->param_count > 0 && MONO_TYPE_IS_REFERENCE (mini_get_underlying_type (sig->params [0]))))) {
if (sig->hasthis) {
add_general (&gr, &stack_size, cinfo->args + 0);
} else {
add_general (&gr, &stack_size, &cinfo->args [sig->hasthis + 0]);
pstart = 1;
}
add_general (&gr, &stack_size, &cinfo->ret);
cinfo->ret.storage = ret_storage;
cinfo->vret_arg_index = 1;
} else {
/* this */
if (sig->hasthis)
add_general (&gr, &stack_size, cinfo->args + 0);
if (ret_storage == ArgValuetypeAddrInIReg || ret_storage == ArgGsharedvtVariableInReg) {
add_general (&gr, &stack_size, &cinfo->ret);
cinfo->ret.storage = ret_storage;
}
}
if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (n == 0)) {
gr = PARAM_REGS;
fr = FLOAT_PARAM_REGS;
/* Emit the signature cookie just before the implicit arguments */
add_general (&gr, &stack_size, &cinfo->sig_cookie);
}
for (i = pstart; i < sig->param_count; ++i) {
ArgInfo *ainfo = &cinfo->args [sig->hasthis + i];
MonoType *ptype;
#ifdef TARGET_WIN32
/* On Windows x64, the float and integer parameter registers share positional slots, so both counters must be kept in sync. */
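/* E.g. for a signature (int, double, int) the arguments go in RCX, XMM1 and R8, since each parameter position consumes both an integer and a float slot. */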
if (gr > fr)
fr = gr;
else if (fr > gr)
gr = fr;
#endif
if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
/* We always pass the sig cookie on the stack for simplicity */
/*
* Prevent implicit arguments + the sig cookie from being passed
* in registers.
*/
gr = PARAM_REGS;
fr = FLOAT_PARAM_REGS;
/* Emit the signature cookie just before the implicit arguments */
add_general (&gr, &stack_size, &cinfo->sig_cookie);
}
ptype = mini_get_underlying_type (sig->params [i]);
switch (ptype->type) {
case MONO_TYPE_I1:
ainfo->is_signed = 1;
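/* fall through */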
case MONO_TYPE_U1:
add_general (&gr, &stack_size, ainfo);
ainfo->byte_arg_size = 1;
break;
case MONO_TYPE_I2:
ainfo->is_signed = 1;
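/* fall through */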
case MONO_TYPE_U2:
add_general (&gr, &stack_size, ainfo);
ainfo->byte_arg_size = 2;
break;
case MONO_TYPE_I4:
ainfo->is_signed = 1;
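/* fall through */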
case MONO_TYPE_U4:
add_general (&gr, &stack_size, ainfo);
ainfo->byte_arg_size = 4;
break;
case MONO_TYPE_I:
case MONO_TYPE_U:
case MONO_TYPE_PTR:
case MONO_TYPE_FNPTR:
case MONO_TYPE_OBJECT:
add_general (&gr, &stack_size, ainfo);
break;
case MONO_TYPE_GENERICINST:
if (!mono_type_generic_inst_is_valuetype (ptype)) {
add_general (&gr, &stack_size, ainfo);
break;
}
if (mini_is_gsharedvt_variable_type (ptype)) {
/* gsharedvt arguments are passed by ref */
add_general (&gr, &stack_size, ainfo);
if (ainfo->storage == ArgInIReg)
ainfo->storage = ArgGSharedVtInReg;
else
ainfo->storage = ArgGSharedVtOnStack;
break;
}
/* fall through */
case MONO_TYPE_VALUETYPE:
case MONO_TYPE_TYPEDBYREF:
add_valuetype (sig, ainfo, ptype, FALSE, &gr, &fr, &stack_size);
break;
case MONO_TYPE_U8:
case MONO_TYPE_I8:
add_general (&gr, &stack_size, ainfo);
break;
case MONO_TYPE_R4:
add_float (&fr, &stack_size, ainfo, FALSE);
break;
case MONO_TYPE_R8:
add_float (&fr, &stack_size, ainfo, TRUE);
break;
case MONO_TYPE_VAR:
case MONO_TYPE_MVAR:
/* gsharedvt arguments are passed by ref */
g_assert (mini_is_gsharedvt_type (ptype));
add_general (&gr, &stack_size, ainfo);
if (ainfo->storage == ArgInIReg)
ainfo->storage = ArgGSharedVtInReg;
else
ainfo->storage = ArgGSharedVtOnStack;
break;
default:
g_assert_not_reached ();
}
}
if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (n > 0) && (sig->sentinelpos == sig->param_count)) {
gr = PARAM_REGS;
fr = FLOAT_PARAM_REGS;
/* Emit the signature cookie just before the implicit arguments */
add_general (&gr, &stack_size, &cinfo->sig_cookie);
}
cinfo->stack_usage = stack_size;
cinfo->reg_usage = gr;
cinfo->freg_usage = fr;
return cinfo;
}
static int
arg_need_temp (ArgInfo *ainfo)
{
// Value types using a single register don't need a temp.
if (ainfo->storage == ArgValuetypeInReg && ainfo->nregs > 1)
return ainfo->nregs * sizeof (host_mgreg_t);
return 0;
}
static gpointer
arg_get_storage (CallContext *ccontext, ArgInfo *ainfo)
{
switch (ainfo->storage) {
case ArgInIReg:
return &ccontext->gregs [ainfo->reg];
case ArgInFloatSSEReg:
case ArgInDoubleSSEReg:
return &ccontext->fregs [ainfo->reg];
case ArgOnStack:
case ArgValuetypeAddrOnStack:
return ccontext->stack + ainfo->offset;
case ArgValuetypeInReg:
// Empty struct
if (ainfo->nregs == 0)
return NULL;
// Value type using one register can be stored
// directly in its context gregs/fregs slot.
g_assert (ainfo->nregs == 1);
switch (ainfo->pair_storage [0]) {
case ArgInIReg:
return &ccontext->gregs [ainfo->pair_regs [0]];
case ArgInFloatSSEReg:
case ArgInDoubleSSEReg:
return &ccontext->fregs [ainfo->pair_regs [0]];
default:
g_assert_not_reached ();
}
case ArgValuetypeAddrInIReg:
g_assert (ainfo->pair_storage [0] == ArgInIReg && ainfo->pair_storage [1] == ArgNone);
return &ccontext->gregs [ainfo->pair_regs [0]];
default:
g_error ("Arg storage type not yet supported");
}
}
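/* Reassemble a value type spread over multiple registers from CCONTEXT into DEST */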
static void
arg_get_val (CallContext *ccontext, ArgInfo *ainfo, gpointer dest)
{
g_assert (arg_need_temp (ainfo));
host_mgreg_t *dest_cast = (host_mgreg_t*)dest;
/* Reconstruct the value type */
for (int k = 0; k < ainfo->nregs; k++) {
int storage_type = ainfo->pair_storage [k];
int reg_storage = ainfo->pair_regs [k];
switch (storage_type) {
case ArgInIReg:
*dest_cast = ccontext->gregs [reg_storage];
break;
case ArgInFloatSSEReg:
case ArgInDoubleSSEReg:
*(double*)dest_cast = ccontext->fregs [reg_storage];
break;
default:
g_assert_not_reached ();
}
dest_cast++;
}
}
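/* Scatter the value type stored at SRC into the registers described by AINFO */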
static void
arg_set_val (CallContext *ccontext, ArgInfo *ainfo, gpointer src)
{
g_assert (arg_need_temp (ainfo));
host_mgreg_t *src_cast = (host_mgreg_t*)src;
for (int k = 0; k < ainfo->nregs; k++) {
int storage_type = ainfo->pair_storage [k];
int reg_storage = ainfo->pair_regs [k];
switch (storage_type) {
case ArgInIReg:
ccontext->gregs [reg_storage] = *src_cast;
break;
case ArgInFloatSSEReg:
case ArgInDoubleSSEReg:
ccontext->fregs [reg_storage] = *(double*)src_cast;
break;
default:
g_assert_not_reached ();
}
src_cast++;
}
}
void
mono_arch_set_native_call_context_args (CallContext *ccontext, gpointer frame, MonoMethodSignature *sig)
{
CallInfo *cinfo = get_call_info (NULL, sig);
const MonoEECallbacks *interp_cb = mini_get_interp_callbacks ();
gpointer storage;
ArgInfo *ainfo;
memset (ccontext, 0, sizeof (CallContext));
ccontext->stack_size = ALIGN_TO (cinfo->stack_usage, MONO_ARCH_FRAME_ALIGNMENT);
if (ccontext->stack_size)
ccontext->stack = (guint8*)g_calloc (1, ccontext->stack_size);
if (sig->ret->type != MONO_TYPE_VOID) {
ainfo = &cinfo->ret;
if (ainfo->storage == ArgValuetypeAddrInIReg) {
storage = interp_cb->frame_arg_to_storage ((MonoInterpFrameHandle)frame, sig, -1);
ccontext->gregs [cinfo->ret.reg] = (host_mgreg_t)storage;
}
}
g_assert (!sig->hasthis);
for (int i = 0; i < sig->param_count; i++) {
ainfo = &cinfo->args [i];
if (ainfo->storage == ArgValuetypeAddrInIReg || ainfo->storage == ArgValuetypeAddrOnStack) {
storage = arg_get_storage (ccontext, ainfo);
*(gpointer *)storage = interp_cb->frame_arg_to_storage (frame, sig, i);
continue;
}
int temp_size = arg_need_temp (ainfo);
if (temp_size)
storage = alloca (temp_size); // FIXME? alloca in a loop
else
storage = arg_get_storage (ccontext, ainfo);
interp_cb->frame_arg_to_data ((MonoInterpFrameHandle)frame, sig, i, storage);
if (temp_size)
arg_set_val (ccontext, ainfo, storage);
}
g_free (cinfo);
}
void
mono_arch_set_native_call_context_ret (CallContext *ccontext, gpointer frame, MonoMethodSignature *sig, gpointer retp)
{
const MonoEECallbacks *interp_cb;
CallInfo *cinfo;
gpointer storage;
ArgInfo *ainfo;
if (sig->ret->type == MONO_TYPE_VOID)
return;
interp_cb = mini_get_interp_callbacks ();
cinfo = get_call_info (NULL, sig);
ainfo = &cinfo->ret;
if (retp) {
g_assert (cinfo->ret.storage == ArgValuetypeAddrInIReg);
interp_cb->frame_arg_to_data ((MonoInterpFrameHandle)frame, sig, -1, retp);
#ifdef TARGET_WIN32
// The Windows x64 ABI ainfo implementation includes info on how to return the value type address back to the caller.
storage = arg_get_storage (ccontext, ainfo);
*(gpointer *)storage = retp;
#endif
} else {
g_assert (cinfo->ret.storage != ArgValuetypeAddrInIReg);
int temp_size = arg_need_temp (ainfo);
if (temp_size)
storage = alloca (temp_size);
else
storage = arg_get_storage (ccontext, ainfo);
memset (ccontext, 0, sizeof (CallContext)); // FIXME
interp_cb->frame_arg_to_data ((MonoInterpFrameHandle)frame, sig, -1, storage);
if (temp_size)
arg_set_val (ccontext, ainfo, storage);
}
g_free (cinfo);
}
gpointer
mono_arch_get_native_call_context_args (CallContext *ccontext, gpointer frame, MonoMethodSignature *sig)
{
const MonoEECallbacks *interp_cb = mini_get_interp_callbacks ();
CallInfo *cinfo = get_call_info (NULL, sig);
gpointer storage;
ArgInfo *ainfo;
for (int i = 0; i < sig->param_count + sig->hasthis; i++) {
ainfo = &cinfo->args [i];
if (ainfo->storage == ArgValuetypeAddrInIReg || ainfo->storage == ArgValuetypeAddrOnStack) {
storage = arg_get_storage (ccontext, ainfo);
interp_cb->data_to_frame_arg ((MonoInterpFrameHandle)frame, sig, i, *(gpointer *)storage);
continue;
}
int temp_size = arg_need_temp (ainfo);
if (temp_size) {
storage = alloca (temp_size); // FIXME? alloca in a loop
arg_get_val (ccontext, ainfo, storage);
} else {
storage = arg_get_storage (ccontext, ainfo);
}
interp_cb->data_to_frame_arg ((MonoInterpFrameHandle)frame, sig, i, storage);
}
storage = NULL;
if (sig->ret->type != MONO_TYPE_VOID) {
ainfo = &cinfo->ret;
if (ainfo->storage == ArgValuetypeAddrInIReg)
storage = (gpointer) ccontext->gregs [cinfo->ret.reg];
}
g_free (cinfo);
return storage;
}
void
mono_arch_get_native_call_context_ret (CallContext *ccontext, gpointer frame, MonoMethodSignature *sig)
{
const MonoEECallbacks *interp_cb;
CallInfo *cinfo;
ArgInfo *ainfo;
gpointer storage;
/* No return value */
if (sig->ret->type == MONO_TYPE_VOID)
return;
interp_cb = mini_get_interp_callbacks ();
cinfo = get_call_info (NULL, sig);
ainfo = &cinfo->ret;
/* The return value was stored directly at the address passed in a register */
if (cinfo->ret.storage != ArgValuetypeAddrInIReg) {
int temp_size = arg_need_temp (ainfo);
if (temp_size) {
storage = alloca (temp_size);
arg_get_val (ccontext, ainfo, storage);
} else {
storage = arg_get_storage (ccontext, ainfo);
}
interp_cb->data_to_frame_arg ((MonoInterpFrameHandle)frame, sig, -1, storage);
}
g_free (cinfo);
}
/*
* mono_arch_get_argument_info:
* @csig: a method signature
* @param_count: the number of parameters to consider
* @arg_info: an array to store the result infos
*
* Gathers information on parameters such as size, alignment and
 * padding. arg_info should be large enough to hold param_count + 1 entries.
*
* Returns the size of the argument area on the stack.
*/
int
mono_arch_get_argument_info (MonoMethodSignature *csig, int param_count, MonoJitArgumentInfo *arg_info)
{
int k;
CallInfo *cinfo = get_call_info (NULL, csig);
guint32 args_size = cinfo->stack_usage;
/* The arguments are saved to a stack area in mono_arch_instrument_prolog */
if (csig->hasthis) {
arg_info [0].offset = 0;
}
for (k = 0; k < param_count; k++) {
arg_info [k + 1].offset = ((k + csig->hasthis) * 8);
/* FIXME: */
arg_info [k + 1].size = 0;
}
g_free (cinfo);
return args_size;
}
#ifndef DISABLE_JIT
gboolean
mono_arch_tailcall_supported (MonoCompile *cfg, MonoMethodSignature *caller_sig, MonoMethodSignature *callee_sig, gboolean virtual_)
{
CallInfo *caller_info = get_call_info (NULL, caller_sig);
CallInfo *callee_info = get_call_info (NULL, callee_sig);
gboolean res = IS_SUPPORTED_TAILCALL (callee_info->stack_usage <= caller_info->stack_usage)
&& IS_SUPPORTED_TAILCALL (callee_info->ret.storage == caller_info->ret.storage);
// Limit stack_usage to 1G. Assume 32bit limits when we move parameters.
res &= IS_SUPPORTED_TAILCALL (callee_info->stack_usage < (1 << 30));
res &= IS_SUPPORTED_TAILCALL (caller_info->stack_usage < (1 << 30));
// valuetype parameters are the address of a local
const ArgInfo *ainfo;
ainfo = callee_info->args + callee_sig->hasthis;
for (int i = 0; res && i < callee_sig->param_count; ++i) {
res = IS_SUPPORTED_TAILCALL (ainfo [i].storage != ArgValuetypeAddrInIReg)
&& IS_SUPPORTED_TAILCALL (ainfo [i].storage != ArgValuetypeAddrOnStack);
}
g_free (caller_info);
g_free (callee_info);
return res;
}
#endif /* DISABLE_JIT */
/*
* Initialize the cpu to execute managed code.
*/
void
mono_arch_cpu_init (void)
{
#ifndef _MSC_VER
guint16 fpcw;
/* spec compliance requires running with double precision */
__asm__ __volatile__ ("fnstcw %0\n": "=m" (fpcw));
fpcw &= ~X86_FPCW_PRECC_MASK;
fpcw |= X86_FPCW_PREC_DOUBLE;
__asm__ __volatile__ ("fldcw %0\n": : "m" (fpcw));
__asm__ __volatile__ ("fnstcw %0\n": "=m" (fpcw));
#else
/* TODO: This is crashing on Win64 right now.
* _control87 (_PC_53, MCW_PC);
*/
#endif
}
/*
* Initialize architecture specific code.
*/
void
mono_arch_init (void)
{
#ifndef DISABLE_JIT
if (!mono_aot_only)
bp_trampoline = mini_get_breakpoint_trampoline ();
#endif
}
/*
* Cleanup architecture specific code.
*/
void
mono_arch_cleanup (void)
{
}
/*
* This function returns the optimizations supported on this cpu.
*/
guint32
mono_arch_cpu_optimizations (guint32 *exclude_mask)
{
guint32 opts = 0;
*exclude_mask = 0;
if (mono_hwcap_x86_has_cmov) {
opts |= MONO_OPT_CMOV;
if (mono_hwcap_x86_has_fcmov)
opts |= MONO_OPT_FCMOV;
else
*exclude_mask |= MONO_OPT_FCMOV;
} else {
*exclude_mask |= MONO_OPT_CMOV;
}
return opts;
}
MonoCPUFeatures
mono_arch_get_cpu_features (void)
{
guint64 features = MONO_CPU_INITED;
if (mono_hwcap_x86_has_sse1)
features |= MONO_CPU_X86_SSE;
if (mono_hwcap_x86_has_sse2)
features |= MONO_CPU_X86_SSE2;
if (mono_hwcap_x86_has_sse3)
features |= MONO_CPU_X86_SSE3;
if (mono_hwcap_x86_has_ssse3)
features |= MONO_CPU_X86_SSSE3;
if (mono_hwcap_x86_has_sse41)
features |= MONO_CPU_X86_SSE41;
if (mono_hwcap_x86_has_sse42)
features |= MONO_CPU_X86_SSE42;
if (mono_hwcap_x86_has_popcnt)
features |= MONO_CPU_X86_POPCNT;
if (mono_hwcap_x86_has_lzcnt)
features |= MONO_CPU_X86_LZCNT;
return (MonoCPUFeatures)features;
}
#ifndef DISABLE_JIT
GList *
mono_arch_get_allocatable_int_vars (MonoCompile *cfg)
{
GList *vars = NULL;
int i;
for (i = 0; i < cfg->num_varinfo; i++) {
MonoInst *ins = cfg->varinfo [i];
MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
/* unused vars */
if (vmv->range.first_use.abs_pos >= vmv->range.last_use.abs_pos)
continue;
if ((ins->flags & (MONO_INST_IS_DEAD|MONO_INST_VOLATILE|MONO_INST_INDIRECT)) ||
(ins->opcode != OP_LOCAL && ins->opcode != OP_ARG))
continue;
if (mono_is_regsize_var (ins->inst_vtype)) {
g_assert (MONO_VARINFO (cfg, i)->reg == -1);
g_assert (i == vmv->idx);
vars = g_list_prepend (vars, vmv);
}
}
vars = mono_varlist_sort (cfg, vars, 0);
return vars;
}
/**
* mono_arch_compute_omit_fp:
* Determine whether the frame pointer can be eliminated.
*/
static void
mono_arch_compute_omit_fp (MonoCompile *cfg)
{
MonoMethodSignature *sig;
MonoMethodHeader *header;
int i, locals_size;
CallInfo *cinfo;
if (cfg->arch.omit_fp_computed)
return;
header = cfg->header;
sig = mono_method_signature_internal (cfg->method);
if (!cfg->arch.cinfo)
cfg->arch.cinfo = get_call_info (cfg->mempool, sig);
cinfo = cfg->arch.cinfo;
/*
* FIXME: Remove some of the restrictions.
*/
cfg->arch.omit_fp = TRUE;
cfg->arch.omit_fp_computed = TRUE;
if (cfg->disable_omit_fp)
cfg->arch.omit_fp = FALSE;
if (!debug_omit_fp ())
cfg->arch.omit_fp = FALSE;
/*
if (cfg->method->save_lmf)
cfg->arch.omit_fp = FALSE;
*/
if (cfg->flags & MONO_CFG_HAS_ALLOCA)
cfg->arch.omit_fp = FALSE;
if (header->num_clauses)
cfg->arch.omit_fp = FALSE;
if (cfg->param_area)
cfg->arch.omit_fp = FALSE;
if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG))
cfg->arch.omit_fp = FALSE;
for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
ArgInfo *ainfo = &cinfo->args [i];
if (ainfo->storage == ArgOnStack || ainfo->storage == ArgValuetypeAddrInIReg || ainfo->storage == ArgValuetypeAddrOnStack) {
/*
* The stack offset can only be determined when the frame
* size is known.
*/
cfg->arch.omit_fp = FALSE;
}
}
locals_size = 0;
for (i = cfg->locals_start; i < cfg->num_varinfo; i++) {
MonoInst *ins = cfg->varinfo [i];
int ialign;
locals_size += mono_type_size (ins->inst_vtype, &ialign);
}
}
GList *
mono_arch_get_global_int_regs (MonoCompile *cfg)
{
GList *regs = NULL;
mono_arch_compute_omit_fp (cfg);
if (cfg->arch.omit_fp)
regs = g_list_prepend (regs, (gpointer)AMD64_RBP);
/* We use the callee saved registers for global allocation */
regs = g_list_prepend (regs, (gpointer)AMD64_RBX);
regs = g_list_prepend (regs, (gpointer)AMD64_R12);
regs = g_list_prepend (regs, (gpointer)AMD64_R13);
regs = g_list_prepend (regs, (gpointer)AMD64_R14);
regs = g_list_prepend (regs, (gpointer)AMD64_R15);
#ifdef TARGET_WIN32
regs = g_list_prepend (regs, (gpointer)AMD64_RDI);
regs = g_list_prepend (regs, (gpointer)AMD64_RSI);
#endif
return regs;
}
/*
* mono_arch_regalloc_cost:
*
* Return the cost, in number of memory references, of the action of
* allocating the variable VMV into a register during global register
* allocation.
*/
guint32
mono_arch_regalloc_cost (MonoCompile *cfg, MonoMethodVar *vmv)
{
MonoInst *ins = cfg->varinfo [vmv->idx];
if (cfg->method->save_lmf)
/* The register is already saved */
/* subtract 1 for the invisible store in the prolog */
return (ins->opcode == OP_ARG) ? 0 : 1;
else
/* push+pop */
return (ins->opcode == OP_ARG) ? 1 : 2;
}
/*
* mono_arch_fill_argument_info:
*
* Populate cfg->args, cfg->ret and cfg->vret_addr with information about the arguments
* of the method.
*/
void
mono_arch_fill_argument_info (MonoCompile *cfg)
{
MonoMethodSignature *sig;
MonoInst *ins;
int i;
CallInfo *cinfo;
sig = mono_method_signature_internal (cfg->method);
cinfo = cfg->arch.cinfo;
/*
* Contrary to mono_arch_allocate_vars (), the information should describe
* where the arguments are at the beginning of the method, not where they can be
 * accessed during the execution of the method. The latter makes no sense for the
* global register allocator, since a variable can be in more than one location.
*/
switch (cinfo->ret.storage) {
case ArgInIReg:
case ArgInFloatSSEReg:
case ArgInDoubleSSEReg:
cfg->ret->opcode = OP_REGVAR;
cfg->ret->inst_c0 = cinfo->ret.reg;
break;
case ArgValuetypeInReg:
cfg->ret->opcode = OP_REGOFFSET;
cfg->ret->inst_basereg = -1;
cfg->ret->inst_offset = -1;
break;
case ArgNone:
break;
default:
g_assert_not_reached ();
}
for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
ArgInfo *ainfo = &cinfo->args [i];
ins = cfg->args [i];
switch (ainfo->storage) {
case ArgInIReg:
case ArgInFloatSSEReg:
case ArgInDoubleSSEReg:
ins->opcode = OP_REGVAR;
ins->inst_c0 = ainfo->reg;
break;
case ArgOnStack:
ins->opcode = OP_REGOFFSET;
ins->inst_basereg = -1;
ins->inst_offset = -1;
break;
case ArgValuetypeInReg:
/* Dummy */
ins->opcode = OP_NOP;
break;
default:
g_assert_not_reached ();
}
}
}
void
mono_arch_allocate_vars (MonoCompile *cfg)
{
MonoType *sig_ret;
MonoMethodSignature *sig;
MonoInst *ins;
int i, offset;
guint32 locals_stack_size, locals_stack_align;
gint32 *offsets;
CallInfo *cinfo;
sig = mono_method_signature_internal (cfg->method);
cinfo = cfg->arch.cinfo;
sig_ret = mini_get_underlying_type (sig->ret);
mono_arch_compute_omit_fp (cfg);
/*
* We use the ABI calling conventions for managed code as well.
* Exception: valuetypes are only sometimes passed or returned in registers.
*/
/*
* The stack looks like this:
* <incoming arguments passed on the stack>
* <return value>
* <lmf/caller saved registers>
* <locals>
* <spill area>
* <localloc area> -> grows dynamically
* <params area>
*/
if (cfg->arch.omit_fp) {
cfg->flags |= MONO_CFG_HAS_SPILLUP;
cfg->frame_reg = AMD64_RSP;
offset = 0;
} else {
/* Locals are allocated backwards from %fp */
cfg->frame_reg = AMD64_RBP;
offset = 0;
}
cfg->arch.saved_iregs = cfg->used_int_regs;
if (cfg->method->save_lmf) {
/* Save all callee-saved registers normally (except RBP, if not already used), and restore them when unwinding through an LMF */
guint32 iregs_to_save = AMD64_CALLEE_SAVED_REGS & ~(1<<AMD64_RBP);
cfg->arch.saved_iregs |= iregs_to_save;
}
if (cfg->arch.omit_fp)
cfg->arch.reg_save_area_offset = offset;
/* Reserve space for callee saved registers */
for (i = 0; i < AMD64_NREG; ++i)
if (AMD64_IS_CALLEE_SAVED_REG (i) && (cfg->arch.saved_iregs & (1 << i))) {
offset += sizeof (target_mgreg_t);
}
if (!cfg->arch.omit_fp)
cfg->arch.reg_save_area_offset = -offset;
if (sig_ret->type != MONO_TYPE_VOID) {
switch (cinfo->ret.storage) {
case ArgInIReg:
case ArgInFloatSSEReg:
case ArgInDoubleSSEReg:
cfg->ret->opcode = OP_REGVAR;
cfg->ret->inst_c0 = cinfo->ret.reg;
cfg->ret->dreg = cinfo->ret.reg;
break;
case ArgValuetypeAddrInIReg:
case ArgGsharedvtVariableInReg:
/* The register is volatile */
cfg->vret_addr->opcode = OP_REGOFFSET;
cfg->vret_addr->inst_basereg = cfg->frame_reg;
if (cfg->arch.omit_fp) {
cfg->vret_addr->inst_offset = offset;
offset += 8;
} else {
offset += 8;
cfg->vret_addr->inst_offset = -offset;
}
if (G_UNLIKELY (cfg->verbose_level > 1)) {
printf ("vret_addr =");
mono_print_ins (cfg->vret_addr);
}
break;
case ArgValuetypeInReg:
/* Allocate a local to hold the result; the epilog will copy it to the correct place */
cfg->ret->opcode = OP_REGOFFSET;
cfg->ret->inst_basereg = cfg->frame_reg;
if (cfg->arch.omit_fp) {
cfg->ret->inst_offset = offset;
offset += cinfo->ret.pair_storage [1] == ArgNone ? 8 : 16;
} else {
offset += cinfo->ret.pair_storage [1] == ArgNone ? 8 : 16;
cfg->ret->inst_offset = - offset;
}
break;
default:
g_assert_not_reached ();
}
}
/* Allocate locals */
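/* The stack slots are allocated backwards (growing towards lower addresses) when a frame pointer is used */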
offsets = mono_allocate_stack_slots (cfg, cfg->arch.omit_fp ? FALSE: TRUE, &locals_stack_size, &locals_stack_align);
if (locals_stack_align) {
offset += (locals_stack_align - 1);
offset &= ~(locals_stack_align - 1);
}
if (cfg->arch.omit_fp) {
cfg->locals_min_stack_offset = offset;
cfg->locals_max_stack_offset = offset + locals_stack_size;
} else {
cfg->locals_min_stack_offset = - (offset + locals_stack_size);
cfg->locals_max_stack_offset = - offset;
}
for (i = cfg->locals_start; i < cfg->num_varinfo; i++) {
if (offsets [i] != -1) {
MonoInst *ins = cfg->varinfo [i];
ins->opcode = OP_REGOFFSET;
ins->inst_basereg = cfg->frame_reg;
if (cfg->arch.omit_fp)
ins->inst_offset = (offset + offsets [i]);
else
ins->inst_offset = - (offset + offsets [i]);
//printf ("allocated local %d to ", i); mono_print_tree_nl (ins);
}
}
offset += locals_stack_size;
if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG)) {
g_assert (!cfg->arch.omit_fp);
g_assert (cinfo->sig_cookie.storage == ArgOnStack);
cfg->sig_cookie = cinfo->sig_cookie.offset + ARGS_OFFSET;
}
for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
ins = cfg->args [i];
if (ins->opcode != OP_REGVAR) {
ArgInfo *ainfo = &cinfo->args [i];
gboolean inreg = TRUE;
/* FIXME: Allocate volatile arguments to registers */
if (ins->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT))
inreg = FALSE;
/*
* Under AMD64, all registers used to pass arguments to functions
* are volatile across calls.
* FIXME: Optimize this.
*/
if ((ainfo->storage == ArgInIReg) || (ainfo->storage == ArgInFloatSSEReg) || (ainfo->storage == ArgInDoubleSSEReg) || (ainfo->storage == ArgValuetypeInReg) || (ainfo->storage == ArgGSharedVtInReg))
inreg = FALSE;
ins->opcode = OP_REGOFFSET;
switch (ainfo->storage) {
case ArgInIReg:
case ArgInFloatSSEReg:
case ArgInDoubleSSEReg:
case ArgGSharedVtInReg:
if (inreg) {
ins->opcode = OP_REGVAR;
ins->dreg = ainfo->reg;
}
break;
case ArgOnStack:
case ArgGSharedVtOnStack:
g_assert (!cfg->arch.omit_fp);
ins->opcode = OP_REGOFFSET;
ins->inst_basereg = cfg->frame_reg;
ins->inst_offset = ainfo->offset + ARGS_OFFSET;
break;
case ArgValuetypeInReg:
break;
case ArgValuetypeAddrInIReg:
case ArgValuetypeAddrOnStack: {
MonoInst *indir;
g_assert (!cfg->arch.omit_fp);
g_assert (ainfo->storage == ArgValuetypeAddrInIReg || (ainfo->storage == ArgValuetypeAddrOnStack && ainfo->pair_storage [0] == ArgNone));
MONO_INST_NEW (cfg, indir, 0);
indir->opcode = OP_REGOFFSET;
if (ainfo->pair_storage [0] == ArgInIReg) {
indir->inst_basereg = cfg->frame_reg;
offset = ALIGN_TO (offset, sizeof (target_mgreg_t));
offset += sizeof (target_mgreg_t);
indir->inst_offset = - offset;
}
else {
indir->inst_basereg = cfg->frame_reg;
indir->inst_offset = ainfo->offset + ARGS_OFFSET;
}
ins->opcode = OP_VTARG_ADDR;
ins->inst_left = indir;
break;
}
default:
NOT_IMPLEMENTED;
}
if (!inreg && (ainfo->storage != ArgOnStack) && (ainfo->storage != ArgValuetypeAddrInIReg) && (ainfo->storage != ArgValuetypeAddrOnStack) && (ainfo->storage != ArgGSharedVtOnStack)) {
ins->opcode = OP_REGOFFSET;
ins->inst_basereg = cfg->frame_reg;
/* These arguments are saved to the stack in the prolog */
offset = ALIGN_TO (offset, sizeof (target_mgreg_t));
if (cfg->arch.omit_fp) {
ins->inst_offset = offset;
offset += (ainfo->storage == ArgValuetypeInReg) ? ainfo->nregs * sizeof (target_mgreg_t) : sizeof (target_mgreg_t);
// Arguments are not yet supported by the stack map creation code
//cfg->locals_max_stack_offset = MAX (cfg->locals_max_stack_offset, offset);
} else {
offset += (ainfo->storage == ArgValuetypeInReg) ? ainfo->nregs * sizeof (target_mgreg_t) : sizeof (target_mgreg_t);
ins->inst_offset = - offset;
//cfg->locals_min_stack_offset = MIN (cfg->locals_min_stack_offset, offset);
}
}
}
}
cfg->stack_offset = offset;
}
void
mono_arch_create_vars (MonoCompile *cfg)
{
MonoMethodSignature *sig;
CallInfo *cinfo;
sig = mono_method_signature_internal (cfg->method);
if (!cfg->arch.cinfo)
cfg->arch.cinfo = get_call_info (cfg->mempool, sig);
cinfo = cfg->arch.cinfo;
if (cinfo->ret.storage == ArgValuetypeInReg)
cfg->ret_var_is_local = TRUE;
if (cinfo->ret.storage == ArgValuetypeAddrInIReg || cinfo->ret.storage == ArgGsharedvtVariableInReg) {
cfg->vret_addr = mono_compile_create_var (cfg, mono_get_int_type (), OP_ARG);
if (G_UNLIKELY (cfg->verbose_level > 1)) {
printf ("vret_addr = ");
mono_print_ins (cfg->vret_addr);
}
}
if (cfg->gen_sdb_seq_points) {
MonoInst *ins;
if (cfg->compile_aot) {
MonoInst *ins = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL);
ins->flags |= MONO_INST_VOLATILE;
cfg->arch.seq_point_info_var = ins;
}
ins = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL);
ins->flags |= MONO_INST_VOLATILE;
cfg->arch.ss_tramp_var = ins;
ins = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL);
ins->flags |= MONO_INST_VOLATILE;
cfg->arch.bp_tramp_var = ins;
}
if (cfg->method->save_lmf)
cfg->create_lmf_var = TRUE;
if (cfg->method->save_lmf) {
cfg->lmf_ir = TRUE;
}
}
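/* Emit a move of TREE into a new vreg and register it as the outgoing argument register REG of CALL */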
static void
add_outarg_reg (MonoCompile *cfg, MonoCallInst *call, ArgStorage storage, int reg, MonoInst *tree)
{
MonoInst *ins;
switch (storage) {
case ArgInIReg:
MONO_INST_NEW (cfg, ins, OP_MOVE);
ins->dreg = mono_alloc_ireg_copy (cfg, tree->dreg);
ins->sreg1 = tree->dreg;
MONO_ADD_INS (cfg->cbb, ins);
mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, reg, FALSE);
break;
case ArgInFloatSSEReg:
MONO_INST_NEW (cfg, ins, OP_AMD64_SET_XMMREG_R4);
ins->dreg = mono_alloc_freg (cfg);
ins->sreg1 = tree->dreg;
MONO_ADD_INS (cfg->cbb, ins);
mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, reg, TRUE);
break;
case ArgInDoubleSSEReg:
MONO_INST_NEW (cfg, ins, OP_FMOVE);
ins->dreg = mono_alloc_freg (cfg);
ins->sreg1 = tree->dreg;
MONO_ADD_INS (cfg->cbb, ins);
mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, reg, TRUE);
break;
default:
g_assert_not_reached ();
}
}
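/* Return the OP_LOAD*_MEMBASE opcode matching the given argument storage */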
static int
arg_storage_to_load_membase (ArgStorage storage)
{
switch (storage) {
case ArgInIReg:
#if defined(MONO_ARCH_ILP32)
return OP_LOADI8_MEMBASE;
#else
return OP_LOAD_MEMBASE;
#endif
case ArgInDoubleSSEReg:
return OP_LOADR8_MEMBASE;
case ArgInFloatSSEReg:
return OP_LOADR4_MEMBASE;
default:
g_assert_not_reached ();
}
return -1;
}
static void
emit_sig_cookie (MonoCompile *cfg, MonoCallInst *call, CallInfo *cinfo)
{
MonoMethodSignature *tmp_sig;
int sig_reg;
if (call->tailcall) // FIXME: tailcall is not always initialized yet.
NOT_IMPLEMENTED;
g_assert (cinfo->sig_cookie.storage == ArgOnStack);
/*
* mono_ArgIterator_Setup assumes the signature cookie is
* passed first and all the arguments which were before it are
* passed on the stack after the signature. So compensate by
* passing a different signature.
*/
tmp_sig = mono_metadata_signature_dup_full (m_class_get_image (cfg->method->klass), call->signature);
tmp_sig->param_count -= call->signature->sentinelpos;
tmp_sig->sentinelpos = 0;
memcpy (tmp_sig->params, call->signature->params + call->signature->sentinelpos, tmp_sig->param_count * sizeof (MonoType*));
sig_reg = mono_alloc_ireg (cfg);
MONO_EMIT_NEW_SIGNATURECONST (cfg, sig_reg, tmp_sig);
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, AMD64_RSP, cinfo->sig_cookie.offset, sig_reg);
}
#ifdef ENABLE_LLVM
static LLVMArgStorage
arg_storage_to_llvm_arg_storage (MonoCompile *cfg, ArgStorage storage)
{
switch (storage) {
case ArgInIReg:
return LLVMArgInIReg;
case ArgNone:
return LLVMArgNone;
case ArgGSharedVtInReg:
case ArgGSharedVtOnStack:
return LLVMArgGSharedVt;
default:
g_assert_not_reached ();
return LLVMArgNone;
}
}
LLVMCallInfo*
mono_arch_get_llvm_call_info (MonoCompile *cfg, MonoMethodSignature *sig)
{
int i, n;
CallInfo *cinfo;
ArgInfo *ainfo;
int j;
LLVMCallInfo *linfo;
MonoType *t, *sig_ret;
n = sig->param_count + sig->hasthis;
sig_ret = mini_get_underlying_type (sig->ret);
cinfo = get_call_info (cfg->mempool, sig);
linfo = mono_mempool_alloc0 (cfg->mempool, sizeof (LLVMCallInfo) + (sizeof (LLVMArgInfo) * n));
/*
 * LLVM always uses the native ABI while we use our own ABI; the
 * only difference is the handling of vtypes:
* - we only pass/receive them in registers in some cases, and only
* in 1 or 2 integer registers.
*/
switch (cinfo->ret.storage) {
case ArgNone:
linfo->ret.storage = LLVMArgNone;
break;
case ArgInIReg:
case ArgInFloatSSEReg:
case ArgInDoubleSSEReg:
linfo->ret.storage = LLVMArgNormal;
break;
case ArgValuetypeInReg: {
ainfo = &cinfo->ret;
if (sig->pinvoke &&
(ainfo->pair_storage [0] == ArgInFloatSSEReg || ainfo->pair_storage [0] == ArgInDoubleSSEReg ||
ainfo->pair_storage [1] == ArgInFloatSSEReg || ainfo->pair_storage [1] == ArgInDoubleSSEReg)) {
cfg->exception_message = g_strdup ("pinvoke + vtype ret");
cfg->disable_llvm = TRUE;
return linfo;
}
linfo->ret.storage = LLVMArgVtypeInReg;
for (j = 0; j < 2; ++j)
linfo->ret.pair_storage [j] = arg_storage_to_llvm_arg_storage (cfg, ainfo->pair_storage [j]);
break;
}
case ArgValuetypeAddrInIReg:
case ArgGsharedvtVariableInReg:
/* Vtype returned using a hidden argument */
linfo->ret.storage = LLVMArgVtypeRetAddr;
linfo->vret_arg_index = cinfo->vret_arg_index;
break;
default:
g_assert_not_reached ();
break;
}
for (i = 0; i < n; ++i) {
ainfo = cinfo->args + i;
if (i >= sig->hasthis)
t = sig->params [i - sig->hasthis];
else
t = mono_get_int_type ();
t = mini_type_get_underlying_type (t);
linfo->args [i].storage = LLVMArgNone;
switch (ainfo->storage) {
case ArgInIReg:
linfo->args [i].storage = LLVMArgNormal;
break;
case ArgInDoubleSSEReg:
case ArgInFloatSSEReg:
linfo->args [i].storage = LLVMArgNormal;
break;
case ArgOnStack:
if (MONO_TYPE_ISSTRUCT (t))
linfo->args [i].storage = LLVMArgVtypeByVal;
else
linfo->args [i].storage = LLVMArgNormal;
break;
case ArgValuetypeInReg:
if (sig->pinvoke &&
(ainfo->pair_storage [0] == ArgInFloatSSEReg || ainfo->pair_storage [0] == ArgInDoubleSSEReg ||
ainfo->pair_storage [1] == ArgInFloatSSEReg || ainfo->pair_storage [1] == ArgInDoubleSSEReg)) {
cfg->exception_message = g_strdup ("pinvoke + vtypes");
cfg->disable_llvm = TRUE;
return linfo;
}
linfo->args [i].storage = LLVMArgVtypeInReg;
for (j = 0; j < 2; ++j)
linfo->args [i].pair_storage [j] = arg_storage_to_llvm_arg_storage (cfg, ainfo->pair_storage [j]);
break;
case ArgGSharedVtInReg:
case ArgGSharedVtOnStack:
linfo->args [i].storage = LLVMArgGSharedVt;
break;
case ArgValuetypeAddrInIReg:
case ArgValuetypeAddrOnStack:
linfo->args [i].storage = LLVMArgVtypeAddr;
break;
default:
cfg->exception_message = g_strdup ("ainfo->storage");
cfg->disable_llvm = TRUE;
break;
}
}
return linfo;
}
#endif
void
mono_arch_emit_call (MonoCompile *cfg, MonoCallInst *call)
{
MonoInst *arg, *in;
MonoMethodSignature *sig;
int i, n;
CallInfo *cinfo;
ArgInfo *ainfo;
sig = call->signature;
n = sig->param_count + sig->hasthis;
cinfo = get_call_info (cfg->mempool, sig);
if (COMPILE_LLVM (cfg)) {
/* We shouldn't be called in the llvm case */
cfg->disable_llvm = TRUE;
return;
}
/*
* Emit all arguments which are passed on the stack to prevent register
* allocation problems.
*/
for (i = 0; i < n; ++i) {
MonoType *t;
ainfo = cinfo->args + i;
in = call->args [i];
if (sig->hasthis && i == 0)
t = mono_get_object_type ();
else
t = sig->params [i - sig->hasthis];
t = mini_get_underlying_type (t);
//XXX what about ArgGSharedVtOnStack here?
if (ainfo->storage == ArgOnStack && !MONO_TYPE_ISSTRUCT (t)) {
if (!m_type_is_byref (t)) {
if (t->type == MONO_TYPE_R4)
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG, AMD64_RSP, ainfo->offset, in->dreg);
else if (t->type == MONO_TYPE_R8)
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, AMD64_RSP, ainfo->offset, in->dreg);
else
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, AMD64_RSP, ainfo->offset, in->dreg);
} else {
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, AMD64_RSP, ainfo->offset, in->dreg);
}
if (cfg->compute_gc_maps) {
MonoInst *def;
EMIT_NEW_GC_PARAM_SLOT_LIVENESS_DEF (cfg, def, ainfo->offset, t);
}
}
}
/*
* Emit all parameters passed in registers in non-reverse order for better readability
* and to help the optimization in emit_prolog ().
*/
for (i = 0; i < n; ++i) {
ainfo = cinfo->args + i;
in = call->args [i];
if (ainfo->storage == ArgInIReg)
add_outarg_reg (cfg, call, ainfo->storage, ainfo->reg, in);
}
for (i = n - 1; i >= 0; --i) {
MonoType *t;
ainfo = cinfo->args + i;
in = call->args [i];
if (sig->hasthis && i == 0)
t = mono_get_object_type ();
else
t = sig->params [i - sig->hasthis];
t = mini_get_underlying_type (t);
switch (ainfo->storage) {
case ArgInIReg:
/* Already done */
break;
case ArgInFloatSSEReg:
case ArgInDoubleSSEReg:
add_outarg_reg (cfg, call, ainfo->storage, ainfo->reg, in);
break;
case ArgOnStack:
case ArgValuetypeInReg:
case ArgValuetypeAddrInIReg:
case ArgValuetypeAddrOnStack:
case ArgGSharedVtInReg:
case ArgGSharedVtOnStack: {
if (ainfo->storage == ArgOnStack && !MONO_TYPE_ISSTRUCT (t))
/* Already emitted above */
break;
guint32 align;
guint32 size;
if (sig->pinvoke && !sig->marshalling_disabled)
size = mono_type_native_stack_size (t, &align);
else {
/*
* Other backends use mono_type_stack_size (), but that
* aligns the size to 8, which is larger than the size of
* the source, leading to reads of invalid memory if the
* source is at the end of address space.
*/
size = mono_class_value_size (mono_class_from_mono_type_internal (t), &align);
}
if (size >= 10000) {
/* Avoid asserts in emit_memcpy () */
mono_cfg_set_exception_invalid_program (cfg, g_strdup_printf ("Passing an argument of size '%d'.", size));
/* Continue normally */
}
if (size > 0 || ainfo->pass_empty_struct) {
MONO_INST_NEW (cfg, arg, OP_OUTARG_VT);
arg->sreg1 = in->dreg;
arg->klass = mono_class_from_mono_type_internal (t);
arg->backend.size = size;
arg->inst_p0 = call;
arg->inst_p1 = mono_mempool_alloc (cfg->mempool, sizeof (ArgInfo));
memcpy (arg->inst_p1, ainfo, sizeof (ArgInfo));
MONO_ADD_INS (cfg->cbb, arg);
}
break;
}
default:
g_assert_not_reached ();
}
if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos))
/* Emit the signature cookie just before the implicit arguments */
emit_sig_cookie (cfg, call, cinfo);
}
/* Handle the case where there are no implicit arguments */
if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (n == sig->sentinelpos))
emit_sig_cookie (cfg, call, cinfo);
switch (cinfo->ret.storage) {
case ArgValuetypeInReg:
if (cinfo->ret.pair_storage [0] == ArgInIReg && cinfo->ret.pair_storage [1] == ArgNone) {
/*
* Tell the JIT to use a more efficient calling convention: call using
* OP_CALL, compute the result location after the call, and save the
* result there.
*/
call->vret_in_reg = TRUE;
/*
* Nullify the instruction computing the vret addr to enable
* future optimizations.
*/
if (call->vret_var)
NULLIFY_INS (call->vret_var);
} else {
if (call->tailcall)
NOT_IMPLEMENTED;
/*
 * The valuetype is in RAX:RDX after the call and needs to be copied to
 * the stack. Push the address here, so the call instruction can
* access it.
*/
if (!cfg->arch.vret_addr_loc) {
cfg->arch.vret_addr_loc = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL);
/* Prevent it from being register allocated or optimized away */
cfg->arch.vret_addr_loc->flags |= MONO_INST_VOLATILE;
}
MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->arch.vret_addr_loc->dreg, call->vret_var->dreg);
}
break;
case ArgValuetypeAddrInIReg:
case ArgGsharedvtVariableInReg: {
MonoInst *vtarg;
MONO_INST_NEW (cfg, vtarg, OP_MOVE);
vtarg->sreg1 = call->vret_var->dreg;
vtarg->dreg = mono_alloc_preg (cfg);
MONO_ADD_INS (cfg->cbb, vtarg);
mono_call_inst_add_outarg_reg (cfg, call, vtarg->dreg, cinfo->ret.reg, FALSE);
break;
}
default:
break;
}
if (cfg->method->save_lmf) {
MONO_INST_NEW (cfg, arg, OP_AMD64_SAVE_SP_TO_LMF);
MONO_ADD_INS (cfg->cbb, arg);
}
call->stack_usage = cinfo->stack_usage;
}
void
mono_arch_emit_outarg_vt (MonoCompile *cfg, MonoInst *ins, MonoInst *src)
{
MonoInst *arg;
MonoCallInst *call = (MonoCallInst*)ins->inst_p0;
ArgInfo *ainfo = (ArgInfo*)ins->inst_p1;
int size = ins->backend.size;
switch (ainfo->storage) {
case ArgValuetypeInReg: {
MonoInst *load;
int part;
for (part = 0; part < 2; ++part) {
if (ainfo->pair_storage [part] == ArgNone)
continue;
if (ainfo->pass_empty_struct) {
// Pass the empty struct value as 0 on platforms representing empty structs as 1 byte.
NEW_ICONST (cfg, load, 0);
}
else {
MONO_INST_NEW (cfg, load, arg_storage_to_load_membase (ainfo->pair_storage [part]));
load->inst_basereg = src->dreg;
load->inst_offset = part * sizeof (target_mgreg_t);
switch (ainfo->pair_storage [part]) {
case ArgInIReg:
load->dreg = mono_alloc_ireg (cfg);
break;
case ArgInDoubleSSEReg:
case ArgInFloatSSEReg:
load->dreg = mono_alloc_freg (cfg);
break;
default:
g_assert_not_reached ();
}
}
MONO_ADD_INS (cfg->cbb, load);
add_outarg_reg (cfg, call, ainfo->pair_storage [part], ainfo->pair_regs [part], load);
}
break;
}
case ArgValuetypeAddrInIReg:
case ArgValuetypeAddrOnStack: {
MonoInst *vtaddr, *load;
g_assert (ainfo->storage == ArgValuetypeAddrInIReg || (ainfo->storage == ArgValuetypeAddrOnStack && ainfo->pair_storage [0] == ArgNone));
vtaddr = mono_compile_create_var (cfg, m_class_get_byval_arg (ins->klass), OP_LOCAL);
vtaddr->backend.is_pinvoke = call->signature->pinvoke && !call->signature->marshalling_disabled;
MONO_INST_NEW (cfg, load, OP_LDADDR);
cfg->has_indirection = TRUE;
load->inst_p0 = vtaddr;
vtaddr->flags |= MONO_INST_INDIRECT;
load->type = STACK_MP;
load->klass = vtaddr->klass;
load->dreg = mono_alloc_ireg (cfg);
MONO_ADD_INS (cfg->cbb, load);
mini_emit_memcpy (cfg, load->dreg, 0, src->dreg, 0, size, TARGET_SIZEOF_VOID_P);
if (ainfo->pair_storage [0] == ArgInIReg) {
MONO_INST_NEW (cfg, arg, OP_AMD64_LEA_MEMBASE);
arg->dreg = mono_alloc_ireg (cfg);
arg->sreg1 = load->dreg;
arg->inst_imm = 0;
MONO_ADD_INS (cfg->cbb, arg);
mono_call_inst_add_outarg_reg (cfg, call, arg->dreg, ainfo->pair_regs [0], FALSE);
} else {
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, AMD64_RSP, ainfo->offset, load->dreg);
}
break;
}
case ArgGSharedVtInReg:
/* Pass by addr */
mono_call_inst_add_outarg_reg (cfg, call, src->dreg, ainfo->reg, FALSE);
break;
case ArgGSharedVtOnStack:
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, AMD64_RSP, ainfo->offset, src->dreg);
break;
default:
if (size == 8) {
int dreg = mono_alloc_ireg (cfg);
MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, src->dreg, 0);
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, AMD64_RSP, ainfo->offset, dreg);
} else if (size <= 40) {
mini_emit_memcpy (cfg, AMD64_RSP, ainfo->offset, src->dreg, 0, size, TARGET_SIZEOF_VOID_P);
} else {
// FIXME: Code growth
mini_emit_memcpy (cfg, AMD64_RSP, ainfo->offset, src->dreg, 0, size, TARGET_SIZEOF_VOID_P);
}
if (cfg->compute_gc_maps) {
MonoInst *def;
EMIT_NEW_GC_PARAM_SLOT_LIVENESS_DEF (cfg, def, ainfo->offset, m_class_get_byval_arg (ins->klass));
}
}
}
void
mono_arch_emit_setret (MonoCompile *cfg, MonoMethod *method, MonoInst *val)
{
MonoType *ret = mini_get_underlying_type (mono_method_signature_internal (method)->ret);
if (ret->type == MONO_TYPE_R4) {
if (COMPILE_LLVM (cfg))
MONO_EMIT_NEW_UNALU (cfg, OP_FMOVE, cfg->ret->dreg, val->dreg);
else
MONO_EMIT_NEW_UNALU (cfg, OP_AMD64_SET_XMMREG_R4, cfg->ret->dreg, val->dreg);
return;
} else if (ret->type == MONO_TYPE_R8) {
MONO_EMIT_NEW_UNALU (cfg, OP_FMOVE, cfg->ret->dreg, val->dreg);
return;
}
MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
}
#endif /* DISABLE_JIT */
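/* Emit a conditional branch to ins->inst_true_bb, using a short (imm8) branch when branch prediction optimization is enabled and the target is within range */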
#define EMIT_COND_BRANCH(ins,cond,sign) \
if (ins->inst_true_bb->native_offset) { \
x86_branch (code, cond, cfg->native_code + ins->inst_true_bb->native_offset, sign); \
} else { \
mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_true_bb); \
if (optimize_branch_pred && \
x86_is_imm8 (ins->inst_true_bb->max_offset - offset)) \
x86_branch8 (code, cond, 0, sign); \
else \
x86_branch32 (code, cond, 0, sign); \
}
typedef struct {
MonoMethodSignature *sig;
CallInfo *cinfo;
int nstack_args, nullable_area;
} ArchDynCallInfo;
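/* Return whether the OP_DYN_CALL infrastructure can handle calls with the given signature, based on the computed argument storage */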
static gboolean
dyn_call_supported (MonoMethodSignature *sig, CallInfo *cinfo)
{
int i;
switch (cinfo->ret.storage) {
case ArgNone:
case ArgInIReg:
case ArgInFloatSSEReg:
case ArgInDoubleSSEReg:
case ArgValuetypeAddrInIReg:
case ArgValuetypeInReg:
break;
default:
return FALSE;
}
for (i = 0; i < cinfo->nargs; ++i) {
ArgInfo *ainfo = &cinfo->args [i];
switch (ainfo->storage) {
case ArgInIReg:
case ArgInFloatSSEReg:
case ArgInDoubleSSEReg:
case ArgValuetypeInReg:
case ArgValuetypeAddrInIReg:
case ArgValuetypeAddrOnStack:
case ArgOnStack:
break;
default:
return FALSE;
}
}
return TRUE;
}
/*
* mono_arch_dyn_call_prepare:
*
* Return a pointer to an arch-specific structure which contains information
* needed by mono_arch_get_dyn_call_args (). Return NULL if OP_DYN_CALL is not
* supported for SIG.
* This function is equivalent to ffi_prep_cif in libffi.
*/
MonoDynCallInfo*
mono_arch_dyn_call_prepare (MonoMethodSignature *sig)
{
ArchDynCallInfo *info;
CallInfo *cinfo;
int i, aindex;
cinfo = get_call_info (NULL, sig);
if (!dyn_call_supported (sig, cinfo)) {
g_free (cinfo);
return NULL;
}
info = g_new0 (ArchDynCallInfo, 1);
// FIXME: Preprocess the info to speed up get_dyn_call_args ().
info->sig = sig;
info->cinfo = cinfo;
info->nstack_args = 0;
for (i = 0; i < cinfo->nargs; ++i) {
ArgInfo *ainfo = &cinfo->args [i];
switch (ainfo->storage) {
case ArgOnStack:
case ArgValuetypeAddrOnStack:
info->nstack_args = MAX (info->nstack_args, (ainfo->offset / sizeof (target_mgreg_t)) + (ainfo->arg_size / sizeof (target_mgreg_t)));
break;
default:
break;
}
}
for (aindex = 0; aindex < sig->param_count; aindex++) {
MonoType *t = sig->params [aindex];
ArgInfo *ainfo = &cinfo->args [aindex + sig->hasthis];
if (m_type_is_byref (t))
continue;
switch (t->type) {
case MONO_TYPE_GENERICINST:
			if (mono_class_is_nullable (mono_class_from_mono_type_internal (t))) {
MonoClass *klass = mono_class_from_mono_type_internal (t);
int size;
if (!(ainfo->storage == ArgValuetypeInReg || ainfo->storage == ArgOnStack)) {
					/* Nullables need a temporary buffer; it's stored at the end of DynCallArgs.regs after the stack args */
size = mono_class_value_size (klass, NULL);
info->nullable_area += size;
}
}
break;
default:
break;
}
}
info->nullable_area = ALIGN_TO (info->nullable_area, 16);
	/* Keep the number of stack slots even so the stack area stays 16 byte aligned (each slot is 8 bytes) */
if (info->nstack_args & 1)
info->nstack_args ++;
return (MonoDynCallInfo*)info;
}
/*
* mono_arch_dyn_call_free:
*
* Free a MonoDynCallInfo structure.
*/
void
mono_arch_dyn_call_free (MonoDynCallInfo *info)
{
ArchDynCallInfo *ainfo = (ArchDynCallInfo*)info;
g_free (ainfo->cinfo);
g_free (ainfo);
}
int
mono_arch_dyn_call_get_buf_size (MonoDynCallInfo *info)
{
ArchDynCallInfo *ainfo = (ArchDynCallInfo*)info;
/* Extend the 'regs' field dynamically */
return sizeof (DynCallArgs) + (ainfo->nstack_args * sizeof (target_mgreg_t)) + ainfo->nullable_area;
}
#define PTR_TO_GREG(ptr) ((host_mgreg_t)(ptr))
#define GREG_TO_PTR(greg) ((gpointer)(greg))
/*
* mono_arch_get_start_dyn_call:
*
* Convert the arguments ARGS to a format which can be passed to OP_DYN_CALL, and
* store the result into BUF.
* ARGS should be an array of pointers pointing to the arguments.
 * RET should point to a memory buffer large enough to hold the result of the
* call.
* This function should be as fast as possible, any work which does not depend
* on the actual values of the arguments should be done in
* mono_arch_dyn_call_prepare ().
* start_dyn_call + OP_DYN_CALL + finish_dyn_call is equivalent to ffi_call in
* libffi.
*/
void
mono_arch_start_dyn_call (MonoDynCallInfo *info, gpointer **args, guint8 *ret, guint8 *buf)
{
ArchDynCallInfo *dinfo = (ArchDynCallInfo*)info;
DynCallArgs *p = (DynCallArgs*)buf;
int arg_index, greg, i, pindex;
MonoMethodSignature *sig = dinfo->sig;
int buffer_offset = 0;
guint8 *nullable_buffer;
static int general_param_reg_to_index [MONO_MAX_IREGS];
static int float_param_reg_to_index [MONO_MAX_FREGS];
static gboolean param_reg_to_index_inited;
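	/* Lazily build reverse maps from hard register numbers to argument register indexes */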
if (!param_reg_to_index_inited) {
for (i = 0; i < PARAM_REGS; ++i)
general_param_reg_to_index [param_regs[i]] = i;
for (i = 0; i < FLOAT_PARAM_REGS; ++i)
float_param_reg_to_index [float_param_regs[i]] = i;
mono_memory_barrier ();
param_reg_to_index_inited = 1;
} else {
mono_memory_barrier ();
}
p->res = 0;
p->ret = ret;
p->nstack_args = dinfo->nstack_args;
arg_index = 0;
greg = 0;
pindex = 0;
/* Stored after the stack arguments */
nullable_buffer = (guint8*)&(p->regs [PARAM_REGS + dinfo->nstack_args]);
if (sig->hasthis || dinfo->cinfo->vret_arg_index == 1) {
p->regs [greg ++] = PTR_TO_GREG(*(args [arg_index ++]));
if (!sig->hasthis)
pindex = 1;
}
if (dinfo->cinfo->ret.storage == ArgValuetypeAddrInIReg || dinfo->cinfo->ret.storage == ArgGsharedvtVariableInReg)
p->regs [greg ++] = PTR_TO_GREG (ret);
for (; pindex < sig->param_count; pindex++) {
MonoType *t = mini_get_underlying_type (sig->params [pindex]);
gpointer *arg = args [arg_index ++];
ArgInfo *ainfo = &dinfo->cinfo->args [pindex + sig->hasthis];
int slot;
if (ainfo->storage == ArgOnStack || ainfo->storage == ArgValuetypeAddrOnStack) {
slot = PARAM_REGS + (ainfo->offset / sizeof (target_mgreg_t));
} else if (ainfo->storage == ArgValuetypeAddrInIReg) {
g_assert (ainfo->pair_storage [0] == ArgInIReg && ainfo->pair_storage [1] == ArgNone);
slot = general_param_reg_to_index [ainfo->pair_regs [0]];
} else if (ainfo->storage == ArgInFloatSSEReg || ainfo->storage == ArgInDoubleSSEReg) {
slot = float_param_reg_to_index [ainfo->reg];
} else {
slot = general_param_reg_to_index [ainfo->reg];
}
if (m_type_is_byref (t)) {
p->regs [slot] = PTR_TO_GREG (*(arg));
continue;
}
switch (t->type) {
case MONO_TYPE_OBJECT:
case MONO_TYPE_PTR:
case MONO_TYPE_I:
case MONO_TYPE_U:
#if !defined(MONO_ARCH_ILP32)
case MONO_TYPE_I8:
case MONO_TYPE_U8:
#endif
p->regs [slot] = PTR_TO_GREG (*(arg));
break;
#if defined(MONO_ARCH_ILP32)
case MONO_TYPE_I8:
case MONO_TYPE_U8:
p->regs [slot] = *(guint64*)(arg);
break;
#endif
case MONO_TYPE_U1:
p->regs [slot] = *(guint8*)(arg);
break;
case MONO_TYPE_I1:
p->regs [slot] = *(gint8*)(arg);
break;
case MONO_TYPE_I2:
p->regs [slot] = *(gint16*)(arg);
break;
case MONO_TYPE_U2:
p->regs [slot] = *(guint16*)(arg);
break;
case MONO_TYPE_I4:
p->regs [slot] = *(gint32*)(arg);
break;
case MONO_TYPE_U4:
p->regs [slot] = *(guint32*)(arg);
break;
case MONO_TYPE_R4: {
double d;
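			/* The float bits are stored in the low 32 bits of a double sized slot; the callee only reads the float part */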
*(float*)&d = *(float*)(arg);
if (ainfo->storage == ArgOnStack) {
*(double *)(p->regs + slot) = d;
} else {
p->has_fp = 1;
p->fregs [slot] = d;
}
break;
}
case MONO_TYPE_R8:
if (ainfo->storage == ArgOnStack) {
*(double *)(p->regs + slot) = *(double*)(arg);
} else {
p->has_fp = 1;
p->fregs [slot] = *(double*)(arg);
}
break;
case MONO_TYPE_GENERICINST:
if (MONO_TYPE_IS_REFERENCE (t)) {
p->regs [slot] = PTR_TO_GREG (*(arg));
break;
			} else if (mono_class_is_nullable (mono_class_from_mono_type_internal (t))) {
MonoClass *klass = mono_class_from_mono_type_internal (t);
guint8 *nullable_buf;
int size;
size = mono_class_value_size (klass, NULL);
if (ainfo->storage == ArgValuetypeInReg || ainfo->storage == ArgOnStack) {
nullable_buf = g_alloca (size);
} else {
nullable_buf = nullable_buffer + buffer_offset;
buffer_offset += size;
g_assert (buffer_offset <= dinfo->nullable_area);
}
/* The argument pointed to by arg is either a boxed vtype or null */
mono_nullable_init (nullable_buf, (MonoObject*)arg, klass);
arg = (gpointer*)nullable_buf;
				/* Fall through */
} else {
/* Fall through */
}
case MONO_TYPE_VALUETYPE: {
switch (ainfo->storage) {
case ArgValuetypeInReg:
for (i = 0; i < 2; ++i) {
switch (ainfo->pair_storage [i]) {
case ArgNone:
break;
case ArgInIReg:
slot = general_param_reg_to_index [ainfo->pair_regs [i]];
p->regs [slot] = ((target_mgreg_t*)(arg))[i];
break;
case ArgInFloatSSEReg: {
double d;
p->has_fp = 1;
slot = float_param_reg_to_index [ainfo->pair_regs [i]];
*(float*)&d = ((float*)(arg))[i];
p->fregs [slot] = d;
break;
}
case ArgInDoubleSSEReg:
p->has_fp = 1;
slot = float_param_reg_to_index [ainfo->pair_regs [i]];
p->fregs [slot] = ((double*)(arg))[i];
break;
default:
g_assert_not_reached ();
break;
}
}
break;
case ArgValuetypeAddrInIReg:
case ArgValuetypeAddrOnStack:
// In DYNCALL use case value types are already copied when included in parameter array.
// Currently no need to make an extra temporary value type on stack for this use case.
p->regs [slot] = (target_mgreg_t)arg;
break;
case ArgOnStack:
for (i = 0; i < ainfo->arg_size / 8; ++i)
p->regs [slot + i] = ((target_mgreg_t*)(arg))[i];
break;
default:
g_assert_not_reached ();
break;
}
break;
}
default:
g_assert_not_reached ();
}
}
}
/*
* mono_arch_finish_dyn_call:
*
* Store the result of a dyn call into the return value buffer passed to
* start_dyn_call ().
* This function should be as fast as possible, any work which does not depend
* on the actual values of the arguments should be done in
* mono_arch_dyn_call_prepare ().
*/
void
mono_arch_finish_dyn_call (MonoDynCallInfo *info, guint8 *buf)
{
ArchDynCallInfo *dinfo = (ArchDynCallInfo*)info;
MonoMethodSignature *sig = dinfo->sig;
DynCallArgs *dargs = (DynCallArgs*)buf;
guint8 *ret = dargs->ret;
host_mgreg_t res = dargs->res;
MonoType *sig_ret = mini_get_underlying_type (sig->ret);
int i;
switch (sig_ret->type) {
case MONO_TYPE_VOID:
*(gpointer*)ret = NULL;
break;
case MONO_TYPE_OBJECT:
case MONO_TYPE_I:
case MONO_TYPE_U:
case MONO_TYPE_PTR:
*(gpointer*)ret = GREG_TO_PTR (res);
break;
case MONO_TYPE_I1:
*(gint8*)ret = res;
break;
case MONO_TYPE_U1:
*(guint8*)ret = res;
break;
case MONO_TYPE_I2:
*(gint16*)ret = res;
break;
case MONO_TYPE_U2:
*(guint16*)ret = res;
break;
case MONO_TYPE_I4:
*(gint32*)ret = res;
break;
case MONO_TYPE_U4:
*(guint32*)ret = res;
break;
case MONO_TYPE_I8:
*(gint64*)ret = res;
break;
case MONO_TYPE_U8:
*(guint64*)ret = res;
break;
case MONO_TYPE_R4:
*(float*)ret = *(float*)&(dargs->fregs [0]);
break;
case MONO_TYPE_R8:
*(double*)ret = dargs->fregs [0];
break;
case MONO_TYPE_GENERICINST:
if (MONO_TYPE_IS_REFERENCE (sig_ret)) {
*(gpointer*)ret = GREG_TO_PTR(res);
break;
} else {
/* Fall through */
}
case MONO_TYPE_VALUETYPE:
if (dinfo->cinfo->ret.storage == ArgValuetypeAddrInIReg || dinfo->cinfo->ret.storage == ArgGsharedvtVariableInReg) {
/* Nothing to do */
} else {
ArgInfo *ainfo = &dinfo->cinfo->ret;
g_assert (ainfo->storage == ArgValuetypeInReg);
for (i = 0; i < 2; ++i) {
switch (ainfo->pair_storage [0]) {
case ArgInIReg:
((host_mgreg_t*)ret)[i] = res;
break;
case ArgInDoubleSSEReg:
((double*)ret)[i] = dargs->fregs [i];
break;
case ArgNone:
break;
default:
g_assert_not_reached ();
break;
}
}
}
break;
default:
g_assert_not_reached ();
}
}
#undef PTR_TO_GREG
#undef GREG_TO_PTR
/* emit an exception if condition is fail */
#define EMIT_COND_SYSTEM_EXCEPTION(cond,signed,exc_name) \
do { \
MonoInst *tins = mono_branch_optimize_exception_target (cfg, bb, exc_name); \
if (tins == NULL) { \
mono_add_patch_info (cfg, code - cfg->native_code, \
MONO_PATCH_INFO_EXC, exc_name); \
x86_branch32 (code, cond, 0, signed); \
} else { \
EMIT_COND_BRANCH (tins, cond, signed); \
} \
} while (0);
#define EMIT_SSE2_FPFUNC(code, op, dreg, sreg1) do { \
amd64_movsd_membase_reg (code, AMD64_RSP, -8, (sreg1)); \
amd64_fld_membase (code, AMD64_RSP, -8, TRUE); \
amd64_ ##op (code); \
amd64_fst_membase (code, AMD64_RSP, -8, TRUE, TRUE); \
amd64_movsd_reg_membase (code, (dreg), AMD64_RSP, -8); \
} while (0);
#ifndef DISABLE_JIT
static guint8*
emit_call (MonoCompile *cfg, MonoCallInst *call, guint8 *code, MonoJitICallId jit_icall_id)
{
gboolean no_patch = FALSE;
MonoJumpInfoTarget patch;
// FIXME? This is similar to mono_call_to_patch, except it favors MONO_PATCH_INFO_ABS over call->jit_icall_id.
if (jit_icall_id) {
g_assert (!call);
patch.type = MONO_PATCH_INFO_JIT_ICALL_ID;
patch.target = GUINT_TO_POINTER (jit_icall_id);
} else if (call->inst.flags & MONO_INST_HAS_METHOD) {
patch.type = MONO_PATCH_INFO_METHOD;
patch.target = call->method;
} else {
patch.type = MONO_PATCH_INFO_ABS;
patch.target = call->fptr;
}
/*
* FIXME: Add support for thunks
*/
{
gboolean near_call = FALSE;
/*
* Indirect calls are expensive so try to make a near call if possible.
* The caller memory is allocated by the code manager so it is
* guaranteed to be at a 32 bit offset.
*/
if (patch.type != MONO_PATCH_INFO_ABS) {
/* The target is in memory allocated using the code manager */
near_call = TRUE;
if (patch.type == MONO_PATCH_INFO_METHOD) {
MonoMethod* const method = call->method;
if (m_class_get_image (method->klass)->aot_module)
/* The callee might be an AOT method */
near_call = FALSE;
if (method->dynamic)
/* The target is in malloc-ed memory */
near_call = FALSE;
} else {
/*
* The call might go directly to a native function without
* the wrapper.
*/
MonoJitICallInfo * const mi = mono_find_jit_icall_info (jit_icall_id);
gconstpointer target = mono_icall_get_wrapper (mi);
if ((((guint64)target) >> 32) != 0)
near_call = FALSE;
}
} else {
MonoJumpInfo *jinfo = NULL;
if (cfg->abs_patches)
jinfo = (MonoJumpInfo *)g_hash_table_lookup (cfg->abs_patches, call->fptr);
if (jinfo) {
if (jinfo->type == MONO_PATCH_INFO_JIT_ICALL_ADDR) {
MonoJitICallInfo *mi = mono_find_jit_icall_info (jinfo->data.jit_icall_id);
if (mi && (((guint64)mi->func) >> 32) == 0)
near_call = TRUE;
no_patch = TRUE;
} else {
/*
* This is not really an optimization, but required because the
* generic class init trampolines use R11 to pass the vtable.
*/
near_call = TRUE;
}
} else {
jit_icall_id = call->jit_icall_id;
if (jit_icall_id) {
MonoJitICallInfo const *info = mono_find_jit_icall_info (jit_icall_id);
// Change patch from MONO_PATCH_INFO_ABS to MONO_PATCH_INFO_JIT_ICALL_ID.
patch.type = MONO_PATCH_INFO_JIT_ICALL_ID;
patch.target = GUINT_TO_POINTER (jit_icall_id);
if (info->func == info->wrapper) {
/* No wrapper */
if ((((guint64)info->func) >> 32) == 0)
near_call = TRUE;
} else {
					/* See the comment in mono_codegen () */
near_call = TRUE;
}
}
else if ((((guint64)patch.target) >> 32) == 0) {
near_call = TRUE;
no_patch = TRUE;
}
}
}
if (cfg->method->dynamic)
/* These methods are allocated using malloc */
near_call = FALSE;
#ifdef MONO_ARCH_NOMAP32BIT
near_call = FALSE;
#endif
/* The 64bit XEN kernel does not honour the MAP_32BIT flag. (#522894) */
if (optimize_for_xen)
near_call = FALSE;
if (cfg->compile_aot) {
near_call = TRUE;
no_patch = TRUE;
}
if (near_call) {
/*
* Align the call displacement to an address divisible by 4 so it does
* not span cache lines. This is required for code patching to work on SMP
* systems.
*/
if (!no_patch && ((guint32)(code + 1 - cfg->native_code) % 4) != 0) {
guint32 pad_size = 4 - ((guint32)(code + 1 - cfg->native_code) % 4);
amd64_padding (code, pad_size);
}
mono_add_patch_info (cfg, code - cfg->native_code, patch.type, patch.target);
amd64_call_code (code, 0);
}
else {
if (!no_patch && ((guint32)(code + 2 - cfg->native_code) % 8) != 0) {
guint32 pad_size = 8 - ((guint32)(code + 2 - cfg->native_code) % 8);
amd64_padding (code, pad_size);
g_assert ((guint64)(code + 2 - cfg->native_code) % 8 == 0);
}
mono_add_patch_info (cfg, code - cfg->native_code, patch.type, patch.target);
amd64_set_reg_template (code, GP_SCRATCH_REG);
amd64_call_reg (code, GP_SCRATCH_REG);
}
}
set_code_cursor (cfg, code);
return code;
}
static int
store_membase_imm_to_store_membase_reg (int opcode)
{
switch (opcode) {
case OP_STORE_MEMBASE_IMM:
return OP_STORE_MEMBASE_REG;
case OP_STOREI4_MEMBASE_IMM:
return OP_STOREI4_MEMBASE_REG;
case OP_STOREI8_MEMBASE_IMM:
return OP_STOREI8_MEMBASE_REG;
}
return -1;
}
#define INST_IGNORES_CFLAGS(opcode) (!(((opcode) == OP_ADC) || ((opcode) == OP_ADC_IMM) || ((opcode) == OP_IADC) || ((opcode) == OP_IADC_IMM) || ((opcode) == OP_SBB) || ((opcode) == OP_SBB_IMM) || ((opcode) == OP_ISBB) || ((opcode) == OP_ISBB_IMM)))
/*
* mono_arch_peephole_pass_1:
*
* Perform peephole opts which should/can be performed before local regalloc
*/
void
mono_arch_peephole_pass_1 (MonoCompile *cfg, MonoBasicBlock *bb)
{
MonoInst *ins, *n;
MONO_BB_FOR_EACH_INS_SAFE (bb, n, ins) {
MonoInst *last_ins = mono_inst_prev (ins, FILTER_IL_SEQ_POINT);
switch (ins->opcode) {
case OP_ADD_IMM:
case OP_IADD_IMM:
case OP_LADD_IMM:
if ((ins->sreg1 < MONO_MAX_IREGS) && (ins->dreg >= MONO_MAX_IREGS) && (ins->inst_imm > 0)) {
/*
* X86_LEA is like ADD, but doesn't have the
* sreg1==dreg restriction. inst_imm > 0 is needed since LEA sign-extends
* its operand to 64 bit.
*/
ins->opcode = ins->opcode == OP_IADD_IMM ? OP_X86_LEA_MEMBASE : OP_AMD64_LEA_MEMBASE;
ins->inst_basereg = ins->sreg1;
}
break;
case OP_LXOR:
case OP_IXOR:
if ((ins->sreg1 == ins->sreg2) && (ins->sreg1 == ins->dreg)) {
MonoInst *ins2;
/*
* Replace STORE_MEMBASE_IMM 0 with STORE_MEMBASE_REG since
* the latter has length 2-3 instead of 6 (reverse constant
* propagation). These instruction sequences are very common
* in the initlocals bblock.
*/
for (ins2 = ins->next; ins2; ins2 = ins2->next) {
				if (((ins2->opcode == OP_STORE_MEMBASE_IMM) || (ins2->opcode == OP_STOREI4_MEMBASE_IMM) || (ins2->opcode == OP_STOREI8_MEMBASE_IMM)) && (ins2->inst_imm == 0)) {
ins2->opcode = store_membase_imm_to_store_membase_reg (ins2->opcode);
ins2->sreg1 = ins->dreg;
} else if ((ins2->opcode == OP_STOREI1_MEMBASE_IMM) || (ins2->opcode == OP_STOREI2_MEMBASE_IMM) || (ins2->opcode == OP_STOREI8_MEMBASE_REG) || (ins2->opcode == OP_STORE_MEMBASE_REG)) {
/* Continue */
} else if (((ins2->opcode == OP_ICONST) || (ins2->opcode == OP_I8CONST)) && (ins2->dreg == ins->dreg) && (ins2->inst_c0 == 0)) {
NULLIFY_INS (ins2);
/* Continue */
} else if (ins2->opcode == OP_IL_SEQ_POINT) {
/* Continue */
} else {
break;
}
}
}
break;
case OP_COMPARE_IMM:
case OP_LCOMPARE_IMM:
/* OP_COMPARE_IMM (reg, 0)
* -->
* OP_AMD64_TEST_NULL (reg)
*/
if (!ins->inst_imm)
ins->opcode = OP_AMD64_TEST_NULL;
break;
case OP_ICOMPARE_IMM:
if (!ins->inst_imm)
ins->opcode = OP_X86_TEST_NULL;
break;
case OP_AMD64_ICOMPARE_MEMBASE_IMM:
/*
* OP_STORE_MEMBASE_REG reg, offset(basereg)
* OP_X86_COMPARE_MEMBASE_IMM offset(basereg), imm
* -->
* OP_STORE_MEMBASE_REG reg, offset(basereg)
* OP_COMPARE_IMM reg, imm
*
* Note: if imm = 0 then OP_COMPARE_IMM replaced with OP_X86_TEST_NULL
*/
if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_REG) &&
ins->inst_basereg == last_ins->inst_destbasereg &&
ins->inst_offset == last_ins->inst_offset) {
ins->opcode = OP_ICOMPARE_IMM;
ins->sreg1 = last_ins->sreg1;
/* check if we can remove cmp reg,0 with test null */
if (!ins->inst_imm)
ins->opcode = OP_X86_TEST_NULL;
}
break;
}
mono_peephole_ins (bb, ins);
}
}
void
mono_arch_peephole_pass_2 (MonoCompile *cfg, MonoBasicBlock *bb)
{
MonoInst *ins, *n;
MONO_BB_FOR_EACH_INS_SAFE (bb, n, ins) {
switch (ins->opcode) {
case OP_ICONST:
case OP_I8CONST: {
MonoInst *next = mono_inst_next (ins, FILTER_IL_SEQ_POINT);
/* reg = 0 -> XOR (reg, reg) */
			/* XOR sets cflags on x86, so we can't always do it */
			if (ins->inst_c0 == 0 && (!next || INST_IGNORES_CFLAGS (next->opcode))) {
ins->opcode = OP_LXOR;
ins->sreg1 = ins->dreg;
ins->sreg2 = ins->dreg;
/* Fall through */
} else {
break;
}
}
case OP_LXOR:
/*
			 * Use IXOR to avoid a rex prefix if possible. Writing the 32 bit register
			 * zero-extends the 0 result into 64 bits.
*/
if ((ins->sreg1 == ins->sreg2) && (ins->sreg1 == ins->dreg)) {
ins->opcode = OP_IXOR;
}
/* Fall through */
case OP_IXOR:
if ((ins->sreg1 == ins->sreg2) && (ins->sreg1 == ins->dreg)) {
MonoInst *ins2;
/*
* Replace STORE_MEMBASE_IMM 0 with STORE_MEMBASE_REG since
* the latter has length 2-3 instead of 6 (reverse constant
* propagation). These instruction sequences are very common
* in the initlocals bblock.
*/
for (ins2 = ins->next; ins2; ins2 = ins2->next) {
				if (((ins2->opcode == OP_STORE_MEMBASE_IMM) || (ins2->opcode == OP_STOREI4_MEMBASE_IMM) || (ins2->opcode == OP_STOREI8_MEMBASE_IMM)) && (ins2->inst_imm == 0)) {
ins2->opcode = store_membase_imm_to_store_membase_reg (ins2->opcode);
ins2->sreg1 = ins->dreg;
} else if ((ins2->opcode == OP_STOREI1_MEMBASE_IMM) || (ins2->opcode == OP_STOREI2_MEMBASE_IMM) || (ins2->opcode == OP_STOREI4_MEMBASE_REG) || (ins2->opcode == OP_STOREI8_MEMBASE_REG) || (ins2->opcode == OP_STORE_MEMBASE_REG) || (ins2->opcode == OP_LIVERANGE_START) || (ins2->opcode == OP_GC_LIVENESS_DEF) || (ins2->opcode == OP_GC_LIVENESS_USE)) {
/* Continue */
} else if (((ins2->opcode == OP_ICONST) || (ins2->opcode == OP_I8CONST)) && (ins2->dreg == ins->dreg) && (ins2->inst_c0 == 0)) {
NULLIFY_INS (ins2);
/* Continue */
} else if (ins2->opcode == OP_IL_SEQ_POINT) {
/* Continue */
} else {
break;
}
}
}
break;
case OP_IADD_IMM:
if ((ins->inst_imm == 1) && (ins->dreg == ins->sreg1))
ins->opcode = OP_X86_INC_REG;
break;
case OP_ISUB_IMM:
if ((ins->inst_imm == 1) && (ins->dreg == ins->sreg1))
ins->opcode = OP_X86_DEC_REG;
break;
}
mono_peephole_ins (bb, ins);
}
}
#define NEW_INS(cfg,ins,dest,op) do { \
MONO_INST_NEW ((cfg), (dest), (op)); \
(dest)->cil_code = (ins)->cil_code; \
mono_bblock_insert_before_ins (bb, ins, (dest)); \
} while (0)
#define NEW_SIMD_INS(cfg,ins,dest,op,d,s1,s2) do { \
MONO_INST_NEW ((cfg), (dest), (op)); \
(dest)->cil_code = (ins)->cil_code; \
(dest)->dreg = d; \
(dest)->sreg1 = s1; \
(dest)->sreg2 = s2; \
(dest)->type = STACK_VTYPE; \
(dest)->klass = ins->klass; \
mono_bblock_insert_before_ins (bb, ins, (dest)); \
} while (0)
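/* The simd_type_to_*_op helpers map a MONO_TYPE_* element type to the SSE opcode operating on lanes of that width */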
static int
simd_type_to_comp_op (int t)
{
switch (t) {
case MONO_TYPE_I1:
case MONO_TYPE_U1:
return OP_PCMPEQB;
case MONO_TYPE_I2:
case MONO_TYPE_U2:
return OP_PCMPEQW;
case MONO_TYPE_I4:
case MONO_TYPE_U4:
return OP_PCMPEQD;
case MONO_TYPE_I8:
case MONO_TYPE_U8:
return OP_PCMPEQQ; // SSE 4.1
case MONO_TYPE_I:
case MONO_TYPE_U:
#if TARGET_SIZEOF_VOID_P == 8
return OP_PCMPEQQ; // SSE 4.1
#else
return OP_PCMPEQD;
#endif
default:
g_assert_not_reached ();
return -1;
}
}
static int
simd_type_to_sub_op (int t)
{
switch (t) {
case MONO_TYPE_I1:
case MONO_TYPE_U1:
return OP_PSUBB;
case MONO_TYPE_I2:
case MONO_TYPE_U2:
return OP_PSUBW;
case MONO_TYPE_I4:
case MONO_TYPE_U4:
return OP_PSUBD;
case MONO_TYPE_I8:
case MONO_TYPE_U8:
return OP_PSUBQ;
case MONO_TYPE_I:
case MONO_TYPE_U:
#if TARGET_SIZEOF_VOID_P == 8
return OP_PSUBQ;
#else
return OP_PSUBD;
#endif
default:
g_assert_not_reached ();
return -1;
}
}
static int
simd_type_to_shl_op (int t)
{
switch (t) {
case MONO_TYPE_I2:
case MONO_TYPE_U2:
return OP_PSHLW;
case MONO_TYPE_I4:
case MONO_TYPE_U4:
return OP_PSHLD;
case MONO_TYPE_I8:
case MONO_TYPE_U8:
return OP_PSHLQ;
case MONO_TYPE_I:
case MONO_TYPE_U:
#if TARGET_SIZEOF_VOID_P == 8
		return OP_PSHLQ;
#else
		return OP_PSHLD;
#endif
default:
g_assert_not_reached ();
return -1;
}
}
static int
simd_type_to_gt_op (int t)
{
switch (t) {
case MONO_TYPE_I1:
case MONO_TYPE_U1:
return OP_PCMPGTB;
case MONO_TYPE_I2:
case MONO_TYPE_U2:
return OP_PCMPGTW;
case MONO_TYPE_I4:
case MONO_TYPE_U4:
return OP_PCMPGTD;
case MONO_TYPE_I8:
case MONO_TYPE_U8:
return OP_PCMPGTQ; // SSE 4.2
case MONO_TYPE_I:
case MONO_TYPE_U:
#if TARGET_SIZEOF_VOID_P == 8
return OP_PCMPGTQ; // SSE 4.2
#else
return OP_PCMPGTD;
#endif
default:
g_assert_not_reached ();
return -1;
}
}
static int
simd_type_to_max_un_op (int t)
{
switch (t) {
case MONO_TYPE_U1:
return OP_PMAXB_UN;
case MONO_TYPE_U2:
return OP_PMAXW_UN; // SSE 4.1
case MONO_TYPE_U4:
return OP_PMAXD_UN; // SSE 4.1
//case MONO_TYPE_U8:
// return OP_PMAXQ_UN; // AVX
#if TARGET_SIZEOF_VOID_P == 8
//case MONO_TYPE_U:
// return OP_PMAXQ_UN; // AVX
#else
case MONO_TYPE_U:
return OP_PMAXD_UN; // SSE 4.1
#endif
default:
g_assert_not_reached ();
return -1;
}
}
static int
simd_type_to_add_op (int t)
{
switch (t) {
case MONO_TYPE_I1:
case MONO_TYPE_U1:
return OP_PADDB;
case MONO_TYPE_I2:
case MONO_TYPE_U2:
return OP_PADDW;
case MONO_TYPE_I4:
case MONO_TYPE_U4:
return OP_PADDD;
case MONO_TYPE_I8:
case MONO_TYPE_U8:
return OP_PADDQ;
case MONO_TYPE_I:
case MONO_TYPE_U:
#if TARGET_SIZEOF_VOID_P == 8
return OP_PADDQ;
#else
return OP_PADDD;
#endif
default:
g_assert_not_reached ();
return -1;
}
}
static int
simd_type_to_min_op (int t)
{
switch (t) {
case MONO_TYPE_I1:
return OP_PMINB; // SSE 4.1
case MONO_TYPE_U1:
return OP_PMINB_UN; // SSE 4.1
case MONO_TYPE_I2:
return OP_PMINW;
case MONO_TYPE_U2:
return OP_PMINW_UN;
case MONO_TYPE_I4:
return OP_PMIND; // SSE 4.1
case MONO_TYPE_U4:
return OP_PMIND_UN; // SSE 4.1
// case MONO_TYPE_I8: // AVX
// case MONO_TYPE_U8:
#if TARGET_SIZEOF_VOID_P == 8
//case MONO_TYPE_I: // AVX
//case MONO_TYPE_U:
#else
case MONO_TYPE_I:
return OP_PMIND; // SSE 4.1
case MONO_TYPE_U:
return OP_PMIND_UN; // SSE 4.1
#endif
default:
g_assert_not_reached ();
return -1;
}
}
static int
simd_type_to_max_op (int t)
{
switch (t) {
case MONO_TYPE_I1:
return OP_PMAXB; // SSE 4.1
case MONO_TYPE_U1:
return OP_PMAXB_UN; // SSE 4.1
case MONO_TYPE_I2:
return OP_PMAXW;
case MONO_TYPE_U2:
return OP_PMAXW_UN;
case MONO_TYPE_I4:
return OP_PMAXD; // SSE 4.1
case MONO_TYPE_U4:
return OP_PMAXD_UN; // SSE 4.1
// case MONO_TYPE_I8: // AVX
// case MONO_TYPE_U8:
#if TARGET_SIZEOF_VOID_P == 8
//case MONO_TYPE_I: // AVX
//case MONO_TYPE_U:
#else
case MONO_TYPE_I:
return OP_PMAXD; // SSE 4.1
case MONO_TYPE_U:
return OP_PMAXD_UN; // SSE 4.1
#endif
default:
g_assert_not_reached ();
return -1;
}
}
static void
emit_simd_comp_op (MonoCompile *cfg, MonoBasicBlock *bb, MonoInst *ins, int type, int dreg, int sreg1, int sreg2)
{
MonoInst *temp;
gboolean is64BitNativeInt = FALSE;
#if TARGET_SIZEOF_VOID_P == 8
is64BitNativeInt = ins->inst_c1 == MONO_TYPE_I || ins->inst_c1 == MONO_TYPE_U;
#endif
if (!mono_hwcap_x86_has_sse42 && (ins->inst_c1 == MONO_TYPE_I8 || ins->inst_c1 == MONO_TYPE_U8 || is64BitNativeInt)) {
int temp_reg1 = mono_alloc_ireg (cfg);
int temp_reg2 = mono_alloc_ireg (cfg);
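		/* No 64 bit packed compare available: compare 32 bit lanes, swap the lanes within each 64 bit pair and AND, so a 64 bit lane is all ones only if both halves matched */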
NEW_SIMD_INS (cfg, ins, temp, OP_PCMPEQD, temp_reg1, sreg1, sreg2);
NEW_SIMD_INS (cfg, ins, temp, OP_PSHUFLED, temp_reg2, temp_reg1, -1);
temp->inst_c0 = 0xB1;
NEW_SIMD_INS (cfg, ins, temp, OP_ANDPD, dreg, temp_reg1, temp_reg2);
} else {
NEW_SIMD_INS (cfg, ins, temp, simd_type_to_comp_op (type), dreg, sreg1, sreg2);
}
}
static void
emit_simd_gt_op (MonoCompile *cfg, MonoBasicBlock *bb, MonoInst *ins, int type, int dreg, int sreg1, int sreg2);
static void
emit_simd_gt_un_op (MonoCompile *cfg, MonoBasicBlock *bb, MonoInst *ins, int type, int dreg, int sreg1, int sreg2)
{
MonoInst *temp;
switch (type) {
case MONO_TYPE_U2:
case MONO_TYPE_U4:
if (mono_hwcap_x86_has_sse41)
goto USE_MAX;
goto USE_SIGNED_GT;
case MONO_TYPE_U1:
USE_MAX: {
// dreg = max(sreg1, sreg2) != sreg2
int temp_reg1 = mono_alloc_ireg (cfg);
int temp_reg2 = mono_alloc_ireg (cfg);
int temp_reg3 = mono_alloc_ireg (cfg);
NEW_SIMD_INS (cfg, ins, temp, simd_type_to_max_un_op (type), temp_reg1, sreg1, sreg2);
emit_simd_comp_op (cfg, bb, ins, ins->inst_c1, temp_reg2, temp_reg1, ins->sreg2);
NEW_SIMD_INS (cfg, ins, temp, OP_XONES, temp_reg3, -1, -1);
NEW_SIMD_INS (cfg, ins, temp, OP_XORPD, dreg, temp_reg2, temp_reg3);
break;
}
case MONO_TYPE_U8:
USE_SIGNED_GT: {
// convert to signed integer by subtracting (1 << (size - 1)) from each operand
// and then use signed comparison
int temp_c0 = mono_alloc_ireg (cfg);
int temp_c80 = mono_alloc_ireg (cfg);
int temp_s1 = mono_alloc_ireg (cfg);
int temp_s2 = mono_alloc_ireg (cfg);
NEW_SIMD_INS (cfg, ins, temp, OP_XONES, temp_c0, -1, -1);
NEW_SIMD_INS (cfg, ins, temp, simd_type_to_shl_op (type), temp_c80, temp_c0, -1);
temp->inst_imm = type == MONO_TYPE_U2 ? 15 : (type == MONO_TYPE_U4 ? 31 : 63);
NEW_SIMD_INS (cfg, ins, temp, simd_type_to_sub_op (type), temp_s1, sreg1, temp_c80);
NEW_SIMD_INS (cfg, ins, temp, simd_type_to_sub_op (type), temp_s2, sreg2, temp_c80);
emit_simd_gt_op (cfg, bb, ins, type, dreg, temp_s1, temp_s2);
break;
case MONO_TYPE_U:
#if TARGET_SIZEOF_VOID_P == 8
goto USE_SIGNED_GT;
#else
if (mono_hwcap_x86_has_sse41)
goto USE_MAX;
goto USE_SIGNED_GT;
#endif
}
}
}
static void
emit_simd_gt_op (MonoCompile *cfg, MonoBasicBlock *bb, MonoInst *ins, int type, int dreg, int sreg1, int sreg2)
{
MonoInst *temp;
gboolean is64BitNativeInt = FALSE;
#if TARGET_SIZEOF_VOID_P == 8
is64BitNativeInt = ins->inst_c1 == MONO_TYPE_I || ins->inst_c1 == MONO_TYPE_U;
#endif
if (!mono_hwcap_x86_has_sse42 && (type == MONO_TYPE_I8 || type == MONO_TYPE_U8 || is64BitNativeInt)) {
// Decompose 64-bit greater than to 32-bit
//
// t = (v1 > v2)
// u = (v1 == v2)
// v = (v1 > v2) unsigned
//
// z = shuffle(t, (3, 3, 1, 1))
// t1 = shuffle(v, (2, 2, 0, 0))
// u1 = shuffle(u, (3, 3, 1, 1))
// w = and(t1, u1)
// result = bitwise_or(z, w)
int temp_t = mono_alloc_ireg (cfg);
int temp_u = mono_alloc_ireg (cfg);
int temp_v = mono_alloc_ireg (cfg);
int temp_z = temp_t;
int temp_t1 = temp_v;
int temp_u1 = temp_u;
int temp_w = temp_t1;
NEW_SIMD_INS (cfg, ins, temp, OP_PCMPGTD, temp_t, sreg1, sreg2);
NEW_SIMD_INS (cfg, ins, temp, OP_PCMPEQD, temp_u, sreg1, sreg2);
emit_simd_gt_un_op (cfg, bb, ins, MONO_TYPE_U4, temp_v, sreg1, sreg2);
NEW_SIMD_INS (cfg, ins, temp, OP_PSHUFLED, temp_z, temp_t, -1);
temp->inst_c0 = 0xF5;
NEW_SIMD_INS (cfg, ins, temp, OP_PSHUFLED, temp_t1, temp_v, -1);
temp->inst_c0 = 0xA0;
NEW_SIMD_INS (cfg, ins, temp, OP_PSHUFLED, temp_u1, temp_u, -1);
temp->inst_c0 = 0xF5;
NEW_SIMD_INS (cfg, ins, temp, OP_ANDPD, temp_w, temp_t1, temp_u1);
NEW_SIMD_INS (cfg, ins, temp, OP_ORPD, dreg, temp_z, temp_w);
} else {
NEW_SIMD_INS (cfg, ins, temp, simd_type_to_gt_op (type), dreg, sreg1, sreg2);
}
}
static void
emit_simd_min_op (MonoCompile *cfg, MonoBasicBlock *bb, MonoInst *ins, int type, int dreg, int sreg1, int sreg2)
{
MonoInst *temp;
gboolean is64BitNativeInt = FALSE;
#if TARGET_SIZEOF_VOID_P == 8
is64BitNativeInt = ins->inst_c1 == MONO_TYPE_I || ins->inst_c1 == MONO_TYPE_U;
#endif
if (type == MONO_TYPE_I2 || type == MONO_TYPE_U2) {
// SSE2, so always available
NEW_SIMD_INS (cfg, ins, temp, simd_type_to_min_op (type), dreg, sreg1, sreg2);
} else if (!mono_hwcap_x86_has_sse41 || type == MONO_TYPE_I8 || type == MONO_TYPE_U8 || is64BitNativeInt) {
// Decompose to t = (s1 > s2), d = (s1 & !t) | (s2 & t)
int temp_t = mono_alloc_ireg (cfg);
int temp_d1 = mono_alloc_ireg (cfg);
int temp_d2 = mono_alloc_ireg (cfg);
if (type == MONO_TYPE_U8 || type == MONO_TYPE_U4 || type == MONO_TYPE_U1)
emit_simd_gt_un_op (cfg, bb, ins, type, temp_t, sreg1, sreg2);
else
emit_simd_gt_op (cfg, bb, ins, type, temp_t, sreg1, sreg2);
NEW_SIMD_INS (cfg, ins, temp, OP_PANDN, temp_d1, temp_t, sreg1);
NEW_SIMD_INS (cfg, ins, temp, OP_PAND, temp_d2, temp_t, sreg2);
NEW_SIMD_INS (cfg, ins, temp, OP_POR, dreg, temp_d1, temp_d2);
} else {
		// SSE 4.1 has byte and dword operations
NEW_SIMD_INS (cfg, ins, temp, simd_type_to_min_op (type), dreg, sreg1, sreg2);
}
}
static void
emit_simd_max_op (MonoCompile *cfg, MonoBasicBlock *bb, MonoInst *ins, int type, int dreg, int sreg1, int sreg2)
{
MonoInst *temp;
gboolean is64BitNativeInt = FALSE;
#if TARGET_SIZEOF_VOID_P == 8
is64BitNativeInt = ins->inst_c1 == MONO_TYPE_I || ins->inst_c1 == MONO_TYPE_U;
#endif
if (type == MONO_TYPE_I2 || type == MONO_TYPE_U2) {
// SSE2, so always available
NEW_SIMD_INS (cfg, ins, temp, simd_type_to_max_op (type), dreg, sreg1, sreg2);
} else if (!mono_hwcap_x86_has_sse41 || type == MONO_TYPE_I8 || type == MONO_TYPE_U8 || is64BitNativeInt) {
// Decompose to t = (s1 > s2), d = (s1 & t) | (s2 & !t)
int temp_t = mono_alloc_ireg (cfg);
int temp_d1 = mono_alloc_ireg (cfg);
int temp_d2 = mono_alloc_ireg (cfg);
if (type == MONO_TYPE_U8 || type == MONO_TYPE_U4 || type == MONO_TYPE_U1)
emit_simd_gt_un_op (cfg, bb, ins, type, temp_t, sreg1, sreg2);
else
emit_simd_gt_op (cfg, bb, ins, type, temp_t, sreg1, sreg2);
NEW_SIMD_INS (cfg, ins, temp, OP_PAND, temp_d1, temp_t, sreg1);
NEW_SIMD_INS (cfg, ins, temp, OP_PANDN, temp_d2, temp_t, sreg2);
NEW_SIMD_INS (cfg, ins, temp, OP_POR, dreg, temp_d1, temp_d2);
} else {
		// SSE 4.1 has byte and dword operations
NEW_SIMD_INS (cfg, ins, temp, simd_type_to_max_op (type), dreg, sreg1, sreg2);
}
}
/*
* mono_arch_lowering_pass:
*
* Converts complex opcodes into simpler ones so that each IR instruction
* corresponds to one machine instruction.
*/
void
mono_arch_lowering_pass (MonoCompile *cfg, MonoBasicBlock *bb)
{
MonoInst *ins, *n, *temp;
/*
* FIXME: Need to add more instructions, but the current machine
* description can't model some parts of the composite instructions like
* cdq.
*/
MONO_BB_FOR_EACH_INS_SAFE (bb, n, ins) {
switch (ins->opcode) {
case OP_DIV_IMM:
case OP_REM_IMM:
case OP_IDIV_IMM:
case OP_IDIV_UN_IMM:
case OP_IREM_UN_IMM:
case OP_LREM_IMM:
case OP_IREM_IMM:
mono_decompose_op_imm (cfg, bb, ins);
break;
case OP_COMPARE_IMM:
case OP_LCOMPARE_IMM:
if (!amd64_use_imm32 (ins->inst_imm)) {
NEW_INS (cfg, ins, temp, OP_I8CONST);
temp->inst_c0 = ins->inst_imm;
temp->dreg = mono_alloc_ireg (cfg);
ins->opcode = OP_COMPARE;
ins->sreg2 = temp->dreg;
}
break;
#ifndef MONO_ARCH_ILP32
case OP_LOAD_MEMBASE:
#endif
case OP_LOADI8_MEMBASE:
/* Don't generate memindex opcodes (to simplify */
/* read sandboxing) */
if (!amd64_use_imm32 (ins->inst_offset)) {
NEW_INS (cfg, ins, temp, OP_I8CONST);
temp->inst_c0 = ins->inst_offset;
temp->dreg = mono_alloc_ireg (cfg);
ins->opcode = OP_AMD64_LOADI8_MEMINDEX;
ins->inst_indexreg = temp->dreg;
}
break;
#ifndef MONO_ARCH_ILP32
case OP_STORE_MEMBASE_IMM:
#endif
case OP_STOREI8_MEMBASE_IMM:
if (!amd64_use_imm32 (ins->inst_imm)) {
NEW_INS (cfg, ins, temp, OP_I8CONST);
temp->inst_c0 = ins->inst_imm;
temp->dreg = mono_alloc_ireg (cfg);
ins->opcode = OP_STOREI8_MEMBASE_REG;
ins->sreg1 = temp->dreg;
}
break;
#ifdef MONO_ARCH_SIMD_INTRINSICS
case OP_EXPAND_I1: {
int temp_reg1 = mono_alloc_ireg (cfg);
int temp_reg2 = mono_alloc_ireg (cfg);
int original_reg = ins->sreg1;
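			/* Duplicate the byte into the low 16 bits (b | (b << 8)), then expand it as a 16 bit element */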
NEW_INS (cfg, ins, temp, OP_ICONV_TO_U1);
temp->sreg1 = original_reg;
temp->dreg = temp_reg1;
NEW_INS (cfg, ins, temp, OP_SHL_IMM);
temp->sreg1 = temp_reg1;
temp->dreg = temp_reg2;
temp->inst_imm = 8;
NEW_INS (cfg, ins, temp, OP_LOR);
temp->sreg1 = temp->dreg = temp_reg2;
temp->sreg2 = temp_reg1;
ins->opcode = OP_EXPAND_I2;
ins->sreg1 = temp_reg2;
break;
}
case OP_XEQUAL: {
int temp_reg1 = mono_alloc_ireg (cfg);
int temp_reg2 = mono_alloc_ireg (cfg);
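			/* Compare all lanes for equality, extract a 16 bit lane mask and test it against 0xFFFF */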
NEW_SIMD_INS (cfg, ins, temp, OP_PCMPEQD, temp_reg1, ins->sreg1, ins->sreg2);
NEW_SIMD_INS (cfg, ins, temp, OP_EXTRACT_MASK, temp_reg2, temp_reg1, -1);
temp->type = STACK_I4;
NEW_INS (cfg, ins, temp, OP_COMPARE_IMM);
temp->sreg1 = temp_reg2;
temp->inst_imm = 0xFFFF;
temp->klass = ins->klass;
ins->opcode = OP_CEQ;
ins->sreg1 = -1;
ins->sreg2 = -1;
break;
}
case OP_XCOMPARE: {
int temp_reg;
gboolean is64BitNativeInt = FALSE;
switch (ins->inst_c0)
{
case CMP_EQ:
emit_simd_comp_op (cfg, bb, ins, ins->inst_c1, ins->dreg, ins->sreg1, ins->sreg2);
NULLIFY_INS (ins);
break;
case CMP_NE: {
int temp_reg1 = mono_alloc_ireg (cfg);
int temp_reg2 = mono_alloc_ireg (cfg);
emit_simd_comp_op (cfg, bb, ins, ins->inst_c1, temp_reg1, ins->sreg1, ins->sreg2);
NEW_SIMD_INS (cfg, ins, temp, OP_XONES, temp_reg2, -1, -1);
ins->opcode = OP_XORPD;
				ins->sreg1 = temp_reg1;
				ins->sreg2 = temp_reg2;
break;
}
case CMP_LT:
temp_reg = ins->sreg1;
ins->sreg1 = ins->sreg2;
ins->sreg2 = temp_reg;
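			/* Fall through: a < b is b > a with the operands swapped */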
case CMP_GT:
emit_simd_gt_op (cfg, bb, ins, ins->inst_c1, ins->dreg, ins->sreg1, ins->sreg2);
NULLIFY_INS (ins);
break;
case CMP_LE:
temp_reg = ins->sreg1;
ins->sreg1 = ins->sreg2;
ins->sreg2 = temp_reg;
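			/* Fall through: a <= b is b >= a with the operands swapped */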
case CMP_GE: {
int temp_reg1 = mono_alloc_ireg (cfg);
int temp_reg2 = mono_alloc_ireg (cfg);
emit_simd_gt_op (cfg, bb, ins, ins->inst_c1, temp_reg1, ins->sreg1, ins->sreg2);
emit_simd_comp_op (cfg, bb, ins, ins->inst_c1, temp_reg2, ins->sreg1, ins->sreg2);
ins->opcode = OP_POR;
ins->sreg1 = temp_reg1;
ins->sreg2 = temp_reg2;
break;
}
case CMP_LE_UN:
temp_reg = ins->sreg1;
ins->sreg1 = ins->sreg2;
ins->sreg2 = temp_reg;
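			/* Fall through: unsigned a <= b is unsigned b >= a with the operands swapped */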
case CMP_GE_UN:
#if TARGET_SIZEOF_VOID_P == 8
is64BitNativeInt = ins->inst_c1 == MONO_TYPE_U;
#endif
if (mono_hwcap_x86_has_sse41 && ins->inst_c1 != MONO_TYPE_U8 && !is64BitNativeInt) {
int temp_reg1 = mono_alloc_ireg (cfg);
NEW_SIMD_INS (cfg, ins, temp, simd_type_to_max_un_op (ins->inst_c1), temp_reg1, ins->sreg1, ins->sreg2);
emit_simd_comp_op (cfg, bb, ins, ins->inst_c1, ins->dreg, temp_reg1, ins->sreg1);
NULLIFY_INS (ins);
} else {
int temp_reg1 = mono_alloc_ireg (cfg);
int temp_reg2 = mono_alloc_ireg (cfg);
emit_simd_gt_un_op (cfg, bb, ins, ins->inst_c1, temp_reg1, ins->sreg1, ins->sreg2);
emit_simd_comp_op (cfg, bb, ins, ins->inst_c1, temp_reg2, ins->sreg1, ins->sreg2);
ins->opcode = OP_POR;
ins->sreg1 = temp_reg1;
ins->sreg2 = temp_reg2;
}
break;
case CMP_LT_UN:
temp_reg = ins->sreg1;
ins->sreg1 = ins->sreg2;
ins->sreg2 = temp_reg;
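			/* Fall through: unsigned a < b is unsigned b > a with the operands swapped */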
case CMP_GT_UN: {
emit_simd_gt_un_op (cfg, bb, ins, ins->inst_c1, ins->dreg, ins->sreg1, ins->sreg2);
NULLIFY_INS (ins);
break;
}
default:
g_assert_not_reached();
break;
}
ins->type = STACK_VTYPE;
ins->inst_c0 = 0;
break;
}
case OP_XCOMPARE_FP: {
ins->opcode = ins->inst_c1 == MONO_TYPE_R4 ? OP_COMPPS : OP_COMPPD;
switch (ins->inst_c0)
{
case CMP_EQ: ins->inst_c0 = 0; break;
case CMP_NE: ins->inst_c0 = 4; break;
case CMP_LT: ins->inst_c0 = 1; break;
case CMP_LE: ins->inst_c0 = 2; break;
case CMP_GT: ins->inst_c0 = 6; break;
case CMP_GE: ins->inst_c0 = 5; break;
default:
g_assert_not_reached();
break;
}
break;
}
case OP_XCAST: {
ins->opcode = OP_XMOVE;
break;
}
case OP_XBINOP: {
switch (ins->inst_c0)
{
case OP_ISUB:
ins->opcode = simd_type_to_sub_op (ins->inst_c1);
break;
case OP_IADD:
ins->opcode = simd_type_to_add_op (ins->inst_c1);
break;
case OP_IAND:
ins->opcode = OP_ANDPD;
break;
case OP_IXOR:
ins->opcode = OP_XORPD;
break;
case OP_IOR:
ins->opcode = OP_ORPD;
break;
case OP_IMIN:
emit_simd_min_op (cfg, bb, ins, ins->inst_c1, ins->dreg, ins->sreg1, ins->sreg2);
NULLIFY_INS (ins);
break;
case OP_IMAX:
emit_simd_max_op (cfg, bb, ins, ins->inst_c1, ins->dreg, ins->sreg1, ins->sreg2);
NULLIFY_INS (ins);
break;
case OP_FSUB:
ins->opcode = ins->inst_c1 == MONO_TYPE_R8 ? OP_SUBPD : OP_SUBPS;
break;
case OP_FADD:
ins->opcode = ins->inst_c1 == MONO_TYPE_R8 ? OP_ADDPD : OP_ADDPS;
break;
case OP_FDIV:
ins->opcode = ins->inst_c1 == MONO_TYPE_R8 ? OP_DIVPD : OP_DIVPS;
break;
case OP_FMUL:
ins->opcode = ins->inst_c1 == MONO_TYPE_R8 ? OP_MULPD : OP_MULPS;
break;
case OP_FMIN:
ins->opcode = ins->inst_c1 == MONO_TYPE_R8 ? OP_MINPD : OP_MINPS;
break;
case OP_FMAX:
ins->opcode = ins->inst_c1 == MONO_TYPE_R8 ? OP_MAXPD : OP_MAXPS;
break;
default:
g_assert_not_reached();
break;
}
break;
}
case OP_XEXTRACT_R4:
case OP_XEXTRACT_R8:
case OP_XEXTRACT_I4:
case OP_XEXTRACT_I8: {
// TODO
g_assert_not_reached();
break;
}
#endif
default:
break;
}
}
bb->max_vreg = cfg->next_vreg;
}
static const int
branch_cc_table [] = {
X86_CC_EQ, X86_CC_GE, X86_CC_GT, X86_CC_LE, X86_CC_LT,
X86_CC_NE, X86_CC_GE, X86_CC_GT, X86_CC_LE, X86_CC_LT,
X86_CC_O, X86_CC_NO, X86_CC_C, X86_CC_NC
};
/* Maps CMP_... constants to X86_CC_... constants */
static const int
cc_table [] = {
X86_CC_EQ, X86_CC_NE, X86_CC_LE, X86_CC_GE, X86_CC_LT, X86_CC_GT,
X86_CC_LE, X86_CC_GE, X86_CC_LT, X86_CC_GT
};
static const int
cc_signed_table [] = {
TRUE, TRUE, TRUE, TRUE, TRUE, TRUE,
FALSE, FALSE, FALSE, FALSE
};
/*#include "cprop.c"*/
static unsigned char*
emit_float_to_int (MonoCompile *cfg, guchar *code, int dreg, int sreg, int size, gboolean is_signed)
{
	// Use 8 as register size to get NaN/Inf conversion to uint result truncated to 0
if (size == 8 || (!is_signed && size == 4))
amd64_sse_cvttsd2si_reg_reg (code, dreg, sreg);
else
amd64_sse_cvttsd2si_reg_reg_size (code, dreg, sreg, 4);
if (size == 1)
amd64_widen_reg (code, dreg, dreg, is_signed, FALSE);
else if (size == 2)
amd64_widen_reg (code, dreg, dreg, is_signed, TRUE);
return code;
}
static unsigned char*
mono_emit_stack_alloc (MonoCompile *cfg, guchar *code, MonoInst* tree)
{
int sreg = tree->sreg1;
int need_touch = FALSE;
#if defined(TARGET_WIN32)
need_touch = TRUE;
#elif defined(MONO_ARCH_SIGSEGV_ON_ALTSTACK)
if (!(tree->flags & MONO_INST_INIT))
need_touch = TRUE;
#endif
if (need_touch) {
guint8* br[5];
/*
* Under Windows:
* If requested stack size is larger than one page,
* perform stack-touch operation
*/
/*
* Generate stack probe code.
* Under Windows, it is necessary to allocate one page at a time,
* "touching" stack after each successful sub-allocation. This is
* because of the way stack growth is implemented - there is a
	 * guard page before the lowest stack page that is currently committed.
* Stack normally grows sequentially so OS traps access to the
* guard page and commits more pages when needed.
*/
amd64_test_reg_imm (code, sreg, ~0xFFF);
br[0] = code; x86_branch8 (code, X86_CC_Z, 0, FALSE);
br[2] = code; /* loop */
amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 0x1000);
amd64_test_membase_reg (code, AMD64_RSP, 0, AMD64_RSP);
amd64_alu_reg_imm (code, X86_SUB, sreg, 0x1000);
amd64_alu_reg_imm (code, X86_CMP, sreg, 0x1000);
br[3] = code; x86_branch8 (code, X86_CC_AE, 0, FALSE);
amd64_patch (br[3], br[2]);
amd64_test_reg_reg (code, sreg, sreg);
br[4] = code; x86_branch8 (code, X86_CC_Z, 0, FALSE);
amd64_alu_reg_reg (code, X86_SUB, AMD64_RSP, sreg);
br[1] = code; x86_jump8 (code, 0);
amd64_patch (br[0], code);
amd64_alu_reg_reg (code, X86_SUB, AMD64_RSP, sreg);
amd64_patch (br[1], code);
amd64_patch (br[4], code);
}
else
amd64_alu_reg_reg (code, X86_SUB, AMD64_RSP, tree->sreg1);
if (tree->flags & MONO_INST_INIT) {
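		/* Zero the allocated area with rep stos, saving/restoring RAX, RCX and RDI if they are live */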
int offset = 0;
if (tree->dreg != AMD64_RAX && sreg != AMD64_RAX) {
amd64_push_reg (code, AMD64_RAX);
offset += 8;
}
if (tree->dreg != AMD64_RCX && sreg != AMD64_RCX) {
amd64_push_reg (code, AMD64_RCX);
offset += 8;
}
if (tree->dreg != AMD64_RDI && sreg != AMD64_RDI) {
amd64_push_reg (code, AMD64_RDI);
offset += 8;
}
amd64_shift_reg_imm (code, X86_SHR, sreg, 3);
if (sreg != AMD64_RCX)
amd64_mov_reg_reg (code, AMD64_RCX, sreg, 8);
amd64_alu_reg_reg (code, X86_XOR, AMD64_RAX, AMD64_RAX);
amd64_lea_membase (code, AMD64_RDI, AMD64_RSP, offset);
if (cfg->param_area)
amd64_alu_reg_imm (code, X86_ADD, AMD64_RDI, cfg->param_area);
amd64_cld (code);
amd64_prefix (code, X86_REP_PREFIX);
amd64_stosl (code);
if (tree->dreg != AMD64_RDI && sreg != AMD64_RDI)
amd64_pop_reg (code, AMD64_RDI);
if (tree->dreg != AMD64_RCX && sreg != AMD64_RCX)
amd64_pop_reg (code, AMD64_RCX);
if (tree->dreg != AMD64_RAX && sreg != AMD64_RAX)
amd64_pop_reg (code, AMD64_RAX);
}
return code;
}
static guint8*
emit_move_return_value (MonoCompile *cfg, MonoInst *ins, guint8 *code)
{
CallInfo *cinfo;
guint32 quad;
/* Move return value to the target register */
/* FIXME: do this in the local reg allocator */
switch (ins->opcode) {
case OP_CALL:
case OP_CALL_REG:
case OP_CALL_MEMBASE:
case OP_LCALL:
case OP_LCALL_REG:
case OP_LCALL_MEMBASE:
g_assert (ins->dreg == AMD64_RAX);
break;
case OP_FCALL:
case OP_FCALL_REG:
case OP_FCALL_MEMBASE: {
MonoType *rtype = mini_get_underlying_type (((MonoCallInst*)ins)->signature->ret);
if (rtype->type == MONO_TYPE_R4) {
amd64_sse_cvtss2sd_reg_reg (code, ins->dreg, AMD64_XMM0);
}
else {
if (ins->dreg != AMD64_XMM0)
amd64_sse_movsd_reg_reg (code, ins->dreg, AMD64_XMM0);
}
break;
}
case OP_RCALL:
case OP_RCALL_REG:
case OP_RCALL_MEMBASE:
if (ins->dreg != AMD64_XMM0)
amd64_sse_movss_reg_reg (code, ins->dreg, AMD64_XMM0);
break;
case OP_VCALL:
case OP_VCALL_REG:
case OP_VCALL_MEMBASE:
case OP_VCALL2:
case OP_VCALL2_REG:
case OP_VCALL2_MEMBASE:
cinfo = get_call_info (cfg->mempool, ((MonoCallInst*)ins)->signature);
if (cinfo->ret.storage == ArgValuetypeInReg) {
MonoInst *loc = cfg->arch.vret_addr_loc;
/* Load the destination address */
g_assert (loc->opcode == OP_REGOFFSET);
amd64_mov_reg_membase (code, AMD64_RCX, loc->inst_basereg, loc->inst_offset, sizeof(gpointer));
for (quad = 0; quad < 2; quad ++) {
switch (cinfo->ret.pair_storage [quad]) {
case ArgInIReg:
amd64_mov_membase_reg (code, AMD64_RCX, (quad * sizeof (target_mgreg_t)), cinfo->ret.pair_regs [quad], sizeof (target_mgreg_t));
break;
case ArgInFloatSSEReg:
amd64_movss_membase_reg (code, AMD64_RCX, (quad * 8), cinfo->ret.pair_regs [quad]);
break;
case ArgInDoubleSSEReg:
amd64_movsd_membase_reg (code, AMD64_RCX, (quad * 8), cinfo->ret.pair_regs [quad]);
break;
case ArgNone:
break;
default:
NOT_IMPLEMENTED;
}
}
}
break;
}
return code;
}
#endif /* DISABLE_JIT */
#ifdef TARGET_MACH
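/* Offset of the pthread TLS array from %gs, recovered by disassembling pthread_getspecific */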
static int tls_gs_offset;
#endif
gboolean
mono_arch_have_fast_tls (void)
{
#ifdef TARGET_MACH
static gboolean have_fast_tls = FALSE;
static gboolean inited = FALSE;
guint8 *ins;
if (mini_debug_options.use_fallback_tls)
return FALSE;
if (inited)
return have_fast_tls;
ins = (guint8*)pthread_getspecific;
/*
* We're looking for these two instructions:
*
* mov %gs:[offset](,%rdi,8),%rax
* retq
*/
have_fast_tls = ins [0] == 0x65 &&
ins [1] == 0x48 &&
ins [2] == 0x8b &&
ins [3] == 0x04 &&
ins [4] == 0xfd &&
ins [6] == 0x00 &&
ins [7] == 0x00 &&
ins [8] == 0x00 &&
ins [9] == 0xc3;
tls_gs_offset = ins[5];
/*
* Apple now loads a different version of pthread_getspecific when launched from Xcode
* For that version we're looking for these instructions:
*
* pushq %rbp
* movq %rsp, %rbp
* mov %gs:[offset](,%rdi,8),%rax
* popq %rbp
* retq
*/
if (!have_fast_tls) {
have_fast_tls = ins [0] == 0x55 &&
ins [1] == 0x48 &&
ins [2] == 0x89 &&
ins [3] == 0xe5 &&
ins [4] == 0x65 &&
ins [5] == 0x48 &&
ins [6] == 0x8b &&
ins [7] == 0x04 &&
ins [8] == 0xfd &&
ins [10] == 0x00 &&
ins [11] == 0x00 &&
ins [12] == 0x00 &&
ins [13] == 0x5d &&
ins [14] == 0xc3;
tls_gs_offset = ins[9];
}
inited = TRUE;
return have_fast_tls;
#elif defined(TARGET_ANDROID)
return FALSE;
#else
if (mini_debug_options.use_fallback_tls)
return FALSE;
return TRUE;
#endif
}
int
mono_amd64_get_tls_gs_offset (void)
{
#ifdef TARGET_OSX
return tls_gs_offset;
#else
g_assert_not_reached ();
return -1;
#endif
}
/*
* \param code buffer to store code to
* \param dreg hard register where to place the result
* \param tls_offset offset info
* \return a pointer to the end of the stored code
*
* mono_amd64_emit_tls_get emits in \p code the native code that puts in
* the dreg register the item in the thread local storage identified
* by tls_offset.
*/
static guint8*
mono_amd64_emit_tls_get (guint8* code, int dreg, int tls_offset)
{
#ifdef TARGET_WIN32
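	/* The first 64 TLS slots live directly in the TEB (TlsSlots, offset 0x1480); later slots are reached through the TlsExpansionSlots pointer at offset 0x1780 */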
if (tls_offset < 64) {
x86_prefix (code, X86_GS_PREFIX);
amd64_mov_reg_mem (code, dreg, (tls_offset * 8) + 0x1480, 8);
} else {
guint8 *buf [16];
g_assert (tls_offset < 0x440);
/* Load TEB->TlsExpansionSlots */
x86_prefix (code, X86_GS_PREFIX);
amd64_mov_reg_mem (code, dreg, 0x1780, 8);
amd64_test_reg_reg (code, dreg, dreg);
buf [0] = code;
amd64_branch (code, X86_CC_EQ, code, TRUE);
amd64_mov_reg_membase (code, dreg, dreg, (tls_offset * 8) - 0x200, 8);
amd64_patch (buf [0], code);
}
#elif defined(TARGET_MACH)
x86_prefix (code, X86_GS_PREFIX);
amd64_mov_reg_mem (code, dreg, tls_gs_offset + (tls_offset * 8), 8);
#else
if (optimize_for_xen) {
x86_prefix (code, X86_FS_PREFIX);
amd64_mov_reg_mem (code, dreg, 0, 8);
amd64_mov_reg_membase (code, dreg, dreg, tls_offset, 8);
} else {
x86_prefix (code, X86_FS_PREFIX);
amd64_mov_reg_mem (code, dreg, tls_offset, 8);
}
#endif
return code;
}
static guint8*
mono_amd64_emit_tls_set (guint8 *code, int sreg, int tls_offset)
{
#ifdef TARGET_WIN32
g_assert_not_reached ();
#elif defined(TARGET_MACH)
x86_prefix (code, X86_GS_PREFIX);
amd64_mov_mem_reg (code, tls_gs_offset + (tls_offset * 8), sreg, 8);
#else
g_assert (!optimize_for_xen);
x86_prefix (code, X86_FS_PREFIX);
amd64_mov_mem_reg (code, tls_offset, sreg, 8);
#endif
return code;
}
/*
* emit_setup_lmf:
*
* Emit code to initialize an LMF structure at LMF_OFFSET.
*/
static guint8*
emit_setup_lmf (MonoCompile *cfg, guint8 *code, gint32 lmf_offset, int cfa_offset)
{
/*
* The ip field is not set, the exception handling code will obtain it from the stack location pointed to by the sp field.
*/
/*
* sp is saved right before calls but we need to save it here too so
* async stack walks would work.
*/
amd64_mov_membase_reg (code, cfg->frame_reg, lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, rsp), AMD64_RSP, 8);
/* Save rbp */
amd64_mov_membase_reg (code, cfg->frame_reg, lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, rbp), AMD64_RBP, 8);
if (cfg->arch.omit_fp && cfa_offset != -1)
mono_emit_unwind_op_offset (cfg, code, AMD64_RBP, - (cfa_offset - (lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, rbp))));
/* These can't contain refs */
mini_gc_set_slot_type_from_fp (cfg, lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), SLOT_NOREF);
mini_gc_set_slot_type_from_fp (cfg, lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, rsp), SLOT_NOREF);
/* These are handled automatically by the stack marking code */
mini_gc_set_slot_type_from_fp (cfg, lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, rbp), SLOT_NOREF);
return code;
}
#ifdef TARGET_WIN32
#define TEB_LAST_ERROR_OFFSET 0x68
static guint8*
emit_get_last_error (guint8* code, int dreg)
{
/* Threads last error value is located in TEB_LAST_ERROR_OFFSET. */
x86_prefix (code, X86_GS_PREFIX);
amd64_mov_reg_mem (code, dreg, TEB_LAST_ERROR_OFFSET, sizeof (guint32));
return code;
}
#else
static guint8*
emit_get_last_error (guint8* code, int dreg)
{
g_assert_not_reached ();
}
#endif
/* benchmark and set based on cpu */
#define LOOP_ALIGNMENT 8
#define bb_is_loop_start(bb) ((bb)->loop_body_start && (bb)->nesting)
#ifndef DISABLE_JIT
static guint8*
amd64_handle_varargs_nregs (guint8 *code, guint32 nregs)
{
#ifndef TARGET_WIN32
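	/* The SysV AMD64 ABI requires AL to hold the number of vector registers used by a varargs call */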
if (nregs)
amd64_mov_reg_imm (code, AMD64_RAX, nregs);
else
amd64_alu_reg_reg (code, X86_XOR, AMD64_RAX, AMD64_RAX);
#endif
return code;
}
static guint8*
amd64_handle_varargs_call (MonoCompile *cfg, guint8 *code, MonoCallInst *call, gboolean free_rax)
{
#ifdef TARGET_WIN32
return code;
#else
/*
* The AMD64 ABI forces callers to know about varargs.
*/
guint32 nregs = 0;
if (call->signature->call_convention == MONO_CALL_VARARG && call->signature->pinvoke) {
		// deliberately nothing -- but nregs = 0 and do not return
} else if (cfg->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE && m_class_get_image (cfg->method->klass) != mono_defaults.corlib) {
/*
* Since the unmanaged calling convention doesn't contain a
* 'vararg' entry, we have to treat every pinvoke call as a
* potential vararg call.
*/
for (guint32 i = 0; i < AMD64_XMM_NREG; ++i)
nregs += (call->used_fregs & (1 << i)) != 0;
} else {
return code;
}
MonoInst *ins = (MonoInst*)call;
if (free_rax && ins->sreg1 == AMD64_RAX) {
amd64_mov_reg_reg (code, AMD64_R11, AMD64_RAX, 8);
ins->sreg1 = AMD64_R11;
}
return amd64_handle_varargs_nregs (code, nregs);
#endif
}
void
mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
{
MonoInst *ins;
MonoCallInst *call;
guint8 *code = cfg->native_code + cfg->code_len;
/* Fix max_offset estimate for each successor bb */
gboolean optimize_branch_pred = (cfg->opt & MONO_OPT_BRANCH) && (cfg->max_block_num < MAX_BBLOCKS_FOR_BRANCH_OPTS);
if (optimize_branch_pred) {
int current_offset = cfg->code_len;
MonoBasicBlock *current_bb;
for (current_bb = bb; current_bb != NULL; current_bb = current_bb->next_bb) {
current_bb->max_offset = current_offset;
current_offset += current_bb->max_length;
}
}
if (cfg->opt & MONO_OPT_LOOP) {
int pad, align = LOOP_ALIGNMENT;
/* set alignment depending on cpu */
if (bb_is_loop_start (bb) && (pad = (cfg->code_len & (align - 1)))) {
pad = align - pad;
/*g_print ("adding %d pad at %x to loop in %s\n", pad, cfg->code_len, cfg->method->name);*/
amd64_padding (code, pad);
cfg->code_len += pad;
bb->native_offset = cfg->code_len;
}
}
if (cfg->verbose_level > 2)
g_print ("Basic block %d starting at offset 0x%x\n", bb->block_num, bb->native_offset);
set_code_cursor (cfg, code);
mono_debug_open_block (cfg, bb, code - cfg->native_code);
if (mono_break_at_bb_method && mono_method_desc_full_match (mono_break_at_bb_method, cfg->method) && bb->block_num == mono_break_at_bb_bb_num)
x86_breakpoint (code);
MONO_BB_FOR_EACH_INS (bb, ins) {
const guint offset = code - cfg->native_code;
set_code_cursor (cfg, code);
int max_len = ins_get_size (ins->opcode);
code = realloc_code (cfg, max_len);
if (cfg->debug_info)
mono_debug_record_line_number (cfg, ins, offset);
switch (ins->opcode) {
case OP_BIGMUL:
amd64_mul_reg (code, ins->sreg2, TRUE);
break;
case OP_BIGMUL_UN:
amd64_mul_reg (code, ins->sreg2, FALSE);
break;
case OP_X86_SETEQ_MEMBASE:
amd64_set_membase (code, X86_CC_EQ, ins->inst_basereg, ins->inst_offset, TRUE);
break;
case OP_STOREI1_MEMBASE_IMM:
amd64_mov_membase_imm (code, ins->inst_destbasereg, ins->inst_offset, ins->inst_imm, 1);
break;
case OP_STOREI2_MEMBASE_IMM:
amd64_mov_membase_imm (code, ins->inst_destbasereg, ins->inst_offset, ins->inst_imm, 2);
break;
case OP_STOREI4_MEMBASE_IMM:
amd64_mov_membase_imm (code, ins->inst_destbasereg, ins->inst_offset, ins->inst_imm, 4);
break;
case OP_STOREI1_MEMBASE_REG:
amd64_mov_membase_reg (code, ins->inst_destbasereg, ins->inst_offset, ins->sreg1, 1);
break;
case OP_STOREI2_MEMBASE_REG:
amd64_mov_membase_reg (code, ins->inst_destbasereg, ins->inst_offset, ins->sreg1, 2);
break;
/* In AMD64 NaCl, pointers are 4 bytes, */
/* so STORE_* != STOREI8_*. Likewise below. */
case OP_STORE_MEMBASE_REG:
amd64_mov_membase_reg (code, ins->inst_destbasereg, ins->inst_offset, ins->sreg1, sizeof(gpointer));
break;
case OP_STOREI8_MEMBASE_REG:
amd64_mov_membase_reg (code, ins->inst_destbasereg, ins->inst_offset, ins->sreg1, 8);
break;
case OP_STOREI4_MEMBASE_REG:
amd64_mov_membase_reg (code, ins->inst_destbasereg, ins->inst_offset, ins->sreg1, 4);
break;
case OP_STORE_MEMBASE_IMM:
/* In NaCl, this could be a PCONST type, which could */
/* mean a pointer type was copied directly into the */
/* lower 32-bits of inst_imm, so for InvalidPtr==-1 */
/* the value would be 0x00000000FFFFFFFF which is */
/* not proper for an imm32 unless you cast it. */
g_assert (amd64_is_imm32 (ins->inst_imm));
amd64_mov_membase_imm (code, ins->inst_destbasereg, ins->inst_offset, (gint32)ins->inst_imm, sizeof(gpointer));
break;
case OP_STOREI8_MEMBASE_IMM:
g_assert (amd64_is_imm32 (ins->inst_imm));
amd64_mov_membase_imm (code, ins->inst_destbasereg, ins->inst_offset, ins->inst_imm, 8);
break;
case OP_LOAD_MEM:
#ifdef MONO_ARCH_ILP32
/* In ILP32, pointers are 4 bytes, so separate these */
/* cases, use literal 8 below where we really want 8 */
amd64_mov_reg_imm (code, ins->dreg, ins->inst_imm);
amd64_mov_reg_membase (code, ins->dreg, ins->dreg, 0, sizeof(gpointer));
break;
#endif
case OP_LOADI8_MEM:
// FIXME: Decompose this earlier
if (amd64_use_imm32 (ins->inst_imm))
amd64_mov_reg_mem (code, ins->dreg, ins->inst_imm, 8);
else {
amd64_mov_reg_imm_size (code, ins->dreg, ins->inst_imm, sizeof(gpointer));
amd64_mov_reg_membase (code, ins->dreg, ins->dreg, 0, 8);
}
break;
case OP_LOADI4_MEM:
amd64_mov_reg_imm (code, ins->dreg, ins->inst_imm);
amd64_movsxd_reg_membase (code, ins->dreg, ins->dreg, 0);
break;
case OP_LOADU4_MEM:
// FIXME: Decompose this earlier
if (amd64_use_imm32 (ins->inst_imm))
amd64_mov_reg_mem (code, ins->dreg, ins->inst_imm, 4);
else {
amd64_mov_reg_imm_size (code, ins->dreg, ins->inst_imm, sizeof(gpointer));
amd64_mov_reg_membase (code, ins->dreg, ins->dreg, 0, 4);
}
break;
case OP_LOADU1_MEM:
amd64_mov_reg_imm (code, ins->dreg, ins->inst_imm);
amd64_widen_membase (code, ins->dreg, ins->dreg, 0, FALSE, FALSE);
break;
case OP_LOADU2_MEM:
/* For NaCl, pointers are 4 bytes, so separate these */
/* cases, use literal 8 below where we really want 8 */
amd64_mov_reg_imm (code, ins->dreg, ins->inst_imm);
amd64_widen_membase (code, ins->dreg, ins->dreg, 0, FALSE, TRUE);
break;
case OP_LOAD_MEMBASE:
g_assert (amd64_is_imm32 (ins->inst_offset));
amd64_mov_reg_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset, sizeof(gpointer));
break;
case OP_LOADI8_MEMBASE:
/* Use literal 8 instead of sizeof pointer or */
/* register, we really want 8 for this opcode */
g_assert (amd64_is_imm32 (ins->inst_offset));
amd64_mov_reg_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset, 8);
break;
case OP_LOADI4_MEMBASE:
amd64_movsxd_reg_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
break;
case OP_LOADU4_MEMBASE:
amd64_mov_reg_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset, 4);
break;
case OP_LOADU1_MEMBASE:
/* The cpu zero extends the result into 64 bits */
amd64_widen_membase_size (code, ins->dreg, ins->inst_basereg, ins->inst_offset, FALSE, FALSE, 4);
break;
case OP_LOADI1_MEMBASE:
amd64_widen_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset, TRUE, FALSE);
break;
case OP_LOADU2_MEMBASE:
/* The cpu zero extends the result into 64 bits */
amd64_widen_membase_size (code, ins->dreg, ins->inst_basereg, ins->inst_offset, FALSE, TRUE, 4);
break;
case OP_LOADI2_MEMBASE:
amd64_widen_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset, TRUE, TRUE);
break;
case OP_AMD64_LOADI8_MEMINDEX:
amd64_mov_reg_memindex_size (code, ins->dreg, ins->inst_basereg, 0, ins->inst_indexreg, 0, 8);
break;
case OP_LCONV_TO_I1:
case OP_ICONV_TO_I1:
case OP_SEXT_I1:
amd64_widen_reg (code, ins->dreg, ins->sreg1, TRUE, FALSE);
break;
case OP_LCONV_TO_I2:
case OP_ICONV_TO_I2:
case OP_SEXT_I2:
amd64_widen_reg (code, ins->dreg, ins->sreg1, TRUE, TRUE);
break;
case OP_LCONV_TO_U1:
case OP_ICONV_TO_U1:
amd64_widen_reg (code, ins->dreg, ins->sreg1, FALSE, FALSE);
break;
case OP_LCONV_TO_U2:
case OP_ICONV_TO_U2:
amd64_widen_reg (code, ins->dreg, ins->sreg1, FALSE, TRUE);
break;
case OP_ZEXT_I4:
/* Clean out the upper word */
amd64_mov_reg_reg (code, ins->dreg, ins->sreg1, 4);
break;
case OP_SEXT_I4:
amd64_movsxd_reg_reg (code, ins->dreg, ins->sreg1);
break;
case OP_COMPARE:
case OP_LCOMPARE:
amd64_alu_reg_reg (code, X86_CMP, ins->sreg1, ins->sreg2);
break;
case OP_COMPARE_IMM:
#if defined(MONO_ARCH_ILP32)
/* Comparison of pointer immediates should be 4 bytes to avoid sign-extend problems */
g_assert (amd64_is_imm32 (ins->inst_imm));
amd64_alu_reg_imm_size (code, X86_CMP, ins->sreg1, ins->inst_imm, 4);
break;
#endif
case OP_LCOMPARE_IMM:
g_assert (amd64_is_imm32 (ins->inst_imm));
amd64_alu_reg_imm (code, X86_CMP, ins->sreg1, ins->inst_imm);
break;
case OP_X86_COMPARE_REG_MEMBASE:
amd64_alu_reg_membase (code, X86_CMP, ins->sreg1, ins->sreg2, ins->inst_offset);
break;
case OP_X86_TEST_NULL:
amd64_test_reg_reg_size (code, ins->sreg1, ins->sreg1, 4);
break;
case OP_AMD64_TEST_NULL:
amd64_test_reg_reg (code, ins->sreg1, ins->sreg1);
break;
case OP_X86_ADD_REG_MEMBASE:
amd64_alu_reg_membase_size (code, X86_ADD, ins->sreg1, ins->sreg2, ins->inst_offset, 4);
break;
case OP_X86_SUB_REG_MEMBASE:
amd64_alu_reg_membase_size (code, X86_SUB, ins->sreg1, ins->sreg2, ins->inst_offset, 4);
break;
case OP_X86_AND_REG_MEMBASE:
amd64_alu_reg_membase_size (code, X86_AND, ins->sreg1, ins->sreg2, ins->inst_offset, 4);
break;
case OP_X86_OR_REG_MEMBASE:
amd64_alu_reg_membase_size (code, X86_OR, ins->sreg1, ins->sreg2, ins->inst_offset, 4);
break;
case OP_X86_XOR_REG_MEMBASE:
amd64_alu_reg_membase_size (code, X86_XOR, ins->sreg1, ins->sreg2, ins->inst_offset, 4);
break;
case OP_X86_ADD_MEMBASE_IMM:
/* FIXME: Make a 64 version too */
amd64_alu_membase_imm_size (code, X86_ADD, ins->inst_basereg, ins->inst_offset, ins->inst_imm, 4);
break;
case OP_X86_SUB_MEMBASE_IMM:
g_assert (amd64_is_imm32 (ins->inst_imm));
amd64_alu_membase_imm_size (code, X86_SUB, ins->inst_basereg, ins->inst_offset, ins->inst_imm, 4);
break;
case OP_X86_AND_MEMBASE_IMM:
g_assert (amd64_is_imm32 (ins->inst_imm));
amd64_alu_membase_imm_size (code, X86_AND, ins->inst_basereg, ins->inst_offset, ins->inst_imm, 4);
break;
case OP_X86_OR_MEMBASE_IMM:
g_assert (amd64_is_imm32 (ins->inst_imm));
amd64_alu_membase_imm_size (code, X86_OR, ins->inst_basereg, ins->inst_offset, ins->inst_imm, 4);
break;
case OP_X86_XOR_MEMBASE_IMM:
g_assert (amd64_is_imm32 (ins->inst_imm));
amd64_alu_membase_imm_size (code, X86_XOR, ins->inst_basereg, ins->inst_offset, ins->inst_imm, 4);
break;
case OP_X86_ADD_MEMBASE_REG:
amd64_alu_membase_reg_size (code, X86_ADD, ins->inst_basereg, ins->inst_offset, ins->sreg2, 4);
break;
case OP_X86_SUB_MEMBASE_REG:
amd64_alu_membase_reg_size (code, X86_SUB, ins->inst_basereg, ins->inst_offset, ins->sreg2, 4);
break;
case OP_X86_AND_MEMBASE_REG:
amd64_alu_membase_reg_size (code, X86_AND, ins->inst_basereg, ins->inst_offset, ins->sreg2, 4);
break;
case OP_X86_OR_MEMBASE_REG:
amd64_alu_membase_reg_size (code, X86_OR, ins->inst_basereg, ins->inst_offset, ins->sreg2, 4);
break;
case OP_X86_XOR_MEMBASE_REG:
amd64_alu_membase_reg_size (code, X86_XOR, ins->inst_basereg, ins->inst_offset, ins->sreg2, 4);
break;
case OP_X86_INC_MEMBASE:
amd64_inc_membase_size (code, ins->inst_basereg, ins->inst_offset, 4);
break;
case OP_X86_INC_REG:
amd64_inc_reg_size (code, ins->dreg, 4);
break;
case OP_X86_DEC_MEMBASE:
amd64_dec_membase_size (code, ins->inst_basereg, ins->inst_offset, 4);
break;
case OP_X86_DEC_REG:
amd64_dec_reg_size (code, ins->dreg, 4);
break;
case OP_X86_MUL_REG_MEMBASE:
case OP_X86_MUL_MEMBASE_REG:
amd64_imul_reg_membase_size (code, ins->sreg1, ins->sreg2, ins->inst_offset, 4);
break;
case OP_AMD64_ICOMPARE_MEMBASE_REG:
amd64_alu_membase_reg_size (code, X86_CMP, ins->inst_basereg, ins->inst_offset, ins->sreg2, 4);
break;
case OP_AMD64_ICOMPARE_MEMBASE_IMM:
amd64_alu_membase_imm_size (code, X86_CMP, ins->inst_basereg, ins->inst_offset, ins->inst_imm, 4);
break;
case OP_AMD64_COMPARE_MEMBASE_REG:
amd64_alu_membase_reg_size (code, X86_CMP, ins->inst_basereg, ins->inst_offset, ins->sreg2, 8);
break;
case OP_AMD64_COMPARE_MEMBASE_IMM:
g_assert (amd64_is_imm32 (ins->inst_imm));
amd64_alu_membase_imm_size (code, X86_CMP, ins->inst_basereg, ins->inst_offset, ins->inst_imm, 8);
break;
case OP_X86_COMPARE_MEMBASE8_IMM:
amd64_alu_membase8_imm_size (code, X86_CMP, ins->inst_basereg, ins->inst_offset, ins->inst_imm, 4);
break;
case OP_AMD64_ICOMPARE_REG_MEMBASE:
amd64_alu_reg_membase_size (code, X86_CMP, ins->sreg1, ins->sreg2, ins->inst_offset, 4);
break;
case OP_AMD64_COMPARE_REG_MEMBASE:
amd64_alu_reg_membase_size (code, X86_CMP, ins->sreg1, ins->sreg2, ins->inst_offset, 8);
break;
case OP_AMD64_ADD_REG_MEMBASE:
amd64_alu_reg_membase_size (code, X86_ADD, ins->sreg1, ins->sreg2, ins->inst_offset, 8);
break;
case OP_AMD64_SUB_REG_MEMBASE:
amd64_alu_reg_membase_size (code, X86_SUB, ins->sreg1, ins->sreg2, ins->inst_offset, 8);
break;
case OP_AMD64_AND_REG_MEMBASE:
amd64_alu_reg_membase_size (code, X86_AND, ins->sreg1, ins->sreg2, ins->inst_offset, 8);
break;
case OP_AMD64_OR_REG_MEMBASE:
amd64_alu_reg_membase_size (code, X86_OR, ins->sreg1, ins->sreg2, ins->inst_offset, 8);
break;
case OP_AMD64_XOR_REG_MEMBASE:
amd64_alu_reg_membase_size (code, X86_XOR, ins->sreg1, ins->sreg2, ins->inst_offset, 8);
break;
case OP_AMD64_ADD_MEMBASE_REG:
amd64_alu_membase_reg_size (code, X86_ADD, ins->inst_basereg, ins->inst_offset, ins->sreg2, 8);
break;
case OP_AMD64_SUB_MEMBASE_REG:
amd64_alu_membase_reg_size (code, X86_SUB, ins->inst_basereg, ins->inst_offset, ins->sreg2, 8);
break;
case OP_AMD64_AND_MEMBASE_REG:
amd64_alu_membase_reg_size (code, X86_AND, ins->inst_basereg, ins->inst_offset, ins->sreg2, 8);
break;
case OP_AMD64_OR_MEMBASE_REG:
amd64_alu_membase_reg_size (code, X86_OR, ins->inst_basereg, ins->inst_offset, ins->sreg2, 8);
break;
case OP_AMD64_XOR_MEMBASE_REG:
amd64_alu_membase_reg_size (code, X86_XOR, ins->inst_basereg, ins->inst_offset, ins->sreg2, 8);
break;
case OP_AMD64_ADD_MEMBASE_IMM:
g_assert (amd64_is_imm32 (ins->inst_imm));
amd64_alu_membase_imm_size (code, X86_ADD, ins->inst_basereg, ins->inst_offset, ins->inst_imm, 8);
break;
case OP_AMD64_SUB_MEMBASE_IMM:
g_assert (amd64_is_imm32 (ins->inst_imm));
amd64_alu_membase_imm_size (code, X86_SUB, ins->inst_basereg, ins->inst_offset, ins->inst_imm, 8);
break;
case OP_AMD64_AND_MEMBASE_IMM:
g_assert (amd64_is_imm32 (ins->inst_imm));
amd64_alu_membase_imm_size (code, X86_AND, ins->inst_basereg, ins->inst_offset, ins->inst_imm, 8);
break;
case OP_AMD64_OR_MEMBASE_IMM:
g_assert (amd64_is_imm32 (ins->inst_imm));
amd64_alu_membase_imm_size (code, X86_OR, ins->inst_basereg, ins->inst_offset, ins->inst_imm, 8);
break;
case OP_AMD64_XOR_MEMBASE_IMM:
g_assert (amd64_is_imm32 (ins->inst_imm));
amd64_alu_membase_imm_size (code, X86_XOR, ins->inst_basereg, ins->inst_offset, ins->inst_imm, 8);
break;
case OP_BREAK:
amd64_breakpoint (code);
break;
case OP_RELAXED_NOP:
x86_prefix (code, X86_REP_PREFIX);
x86_nop (code);
break;
case OP_HARD_NOP:
x86_nop (code);
break;
case OP_NOP:
case OP_DUMMY_USE:
case OP_DUMMY_ICONST:
case OP_DUMMY_I8CONST:
case OP_DUMMY_R8CONST:
case OP_DUMMY_R4CONST:
case OP_NOT_REACHED:
case OP_NOT_NULL:
break;
case OP_IL_SEQ_POINT:
mono_add_seq_point (cfg, bb, ins, code - cfg->native_code);
break;
case OP_SEQ_POINT: {
if (ins->flags & MONO_INST_SINGLE_STEP_LOC) {
MonoInst *var = cfg->arch.ss_tramp_var;
guint8 *label;
/* Load ss_tramp_var */
/* This is equal to &ss_trampoline */
amd64_mov_reg_membase (code, AMD64_R11, var->inst_basereg, var->inst_offset, 8);
/* Load the trampoline address */
amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, 0, 8);
/* Call it if it is non-null */
amd64_test_reg_reg (code, AMD64_R11, AMD64_R11);
label = code;
amd64_branch8 (code, X86_CC_Z, 0, FALSE);
amd64_call_reg (code, AMD64_R11);
amd64_patch (label, code);
}
/*
 * This is the address which is saved in seq points.
 */
mono_add_seq_point (cfg, bb, ins, code - cfg->native_code);
if (cfg->compile_aot) {
const guint32 offset = code - cfg->native_code;
guint32 val;
MonoInst *info_var = cfg->arch.seq_point_info_var;
guint8 *label;
/* Load info var */
amd64_mov_reg_membase (code, AMD64_R11, info_var->inst_basereg, info_var->inst_offset, 8);
val = ((offset) * sizeof (target_mgreg_t)) + MONO_STRUCT_OFFSET (SeqPointInfo, bp_addrs);
/* Load the info->bp_addrs [offset], which is either NULL or the address of the breakpoint trampoline */
amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, val, 8);
amd64_test_reg_reg (code, AMD64_R11, AMD64_R11);
label = code;
amd64_branch8 (code, X86_CC_Z, 0, FALSE);
/* Call the trampoline */
amd64_call_reg (code, AMD64_R11);
amd64_patch (label, code);
} else {
MonoInst *var = cfg->arch.bp_tramp_var;
guint8 *label;
/*
* Emit a test+branch against a constant, the constant will be overwritten
* by mono_arch_set_breakpoint () to cause the test to fail.
*/
amd64_mov_reg_imm (code, AMD64_R11, 0);
amd64_test_reg_reg (code, AMD64_R11, AMD64_R11);
label = code;
amd64_branch8 (code, X86_CC_Z, 0, FALSE);
g_assert (var);
g_assert (var->opcode == OP_REGOFFSET);
/* Load bp_tramp_var */
/* This is equal to &bp_trampoline */
amd64_mov_reg_membase (code, AMD64_R11, var->inst_basereg, var->inst_offset, 8);
/* Call the trampoline */
amd64_call_membase (code, AMD64_R11, 0);
amd64_patch (label, code);
}
/*
* Add an additional nop so skipping the bp doesn't cause the ip to point
* to another IL offset.
*/
x86_nop (code);
break;
}
case OP_ADDCC:
case OP_LADDCC:
case OP_LADD:
amd64_alu_reg_reg (code, X86_ADD, ins->sreg1, ins->sreg2);
break;
case OP_ADC:
amd64_alu_reg_reg (code, X86_ADC, ins->sreg1, ins->sreg2);
break;
case OP_ADD_IMM:
case OP_LADD_IMM:
g_assert (amd64_is_imm32 (ins->inst_imm));
amd64_alu_reg_imm (code, X86_ADD, ins->dreg, ins->inst_imm);
break;
case OP_ADC_IMM:
g_assert (amd64_is_imm32 (ins->inst_imm));
amd64_alu_reg_imm (code, X86_ADC, ins->dreg, ins->inst_imm);
break;
case OP_SUBCC:
case OP_LSUBCC:
case OP_LSUB:
amd64_alu_reg_reg (code, X86_SUB, ins->sreg1, ins->sreg2);
break;
case OP_SBB:
amd64_alu_reg_reg (code, X86_SBB, ins->sreg1, ins->sreg2);
break;
case OP_SUB_IMM:
case OP_LSUB_IMM:
g_assert (amd64_is_imm32 (ins->inst_imm));
amd64_alu_reg_imm (code, X86_SUB, ins->dreg, ins->inst_imm);
break;
case OP_SBB_IMM:
g_assert (amd64_is_imm32 (ins->inst_imm));
amd64_alu_reg_imm (code, X86_SBB, ins->dreg, ins->inst_imm);
break;
case OP_LAND:
amd64_alu_reg_reg (code, X86_AND, ins->sreg1, ins->sreg2);
break;
case OP_AND_IMM:
case OP_LAND_IMM:
g_assert (amd64_is_imm32 (ins->inst_imm));
amd64_alu_reg_imm (code, X86_AND, ins->sreg1, ins->inst_imm);
break;
case OP_LMUL:
amd64_imul_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_MUL_IMM:
case OP_LMUL_IMM:
case OP_IMUL_IMM: {
guint32 size = (ins->opcode == OP_IMUL_IMM) ? 4 : 8;
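/* Strength-reduce multiplications by selected small constants into
 * LEA/ADD/SHL sequences, which are typically cheaper than IMUL. */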
switch (ins->inst_imm) {
case 2:
/* MOV r1, r2 */
/* ADD r1, r1 */
if (ins->dreg != ins->sreg1)
amd64_mov_reg_reg (code, ins->dreg, ins->sreg1, size);
amd64_alu_reg_reg (code, X86_ADD, ins->dreg, ins->dreg);
break;
case 3:
/* LEA r1, [r2 + r2*2] */
amd64_lea_memindex (code, ins->dreg, ins->sreg1, 0, ins->sreg1, 1);
break;
case 5:
/* LEA r1, [r2 + r2*4] */
amd64_lea_memindex (code, ins->dreg, ins->sreg1, 0, ins->sreg1, 2);
break;
case 6:
/* LEA r1, [r2 + r2*2] */
/* ADD r1, r1 */
amd64_lea_memindex (code, ins->dreg, ins->sreg1, 0, ins->sreg1, 1);
amd64_alu_reg_reg (code, X86_ADD, ins->dreg, ins->dreg);
break;
case 9:
/* LEA r1, [r2 + r2*8] */
amd64_lea_memindex (code, ins->dreg, ins->sreg1, 0, ins->sreg1, 3);
break;
case 10:
/* LEA r1, [r2 + r2*4] */
/* ADD r1, r1 */
amd64_lea_memindex (code, ins->dreg, ins->sreg1, 0, ins->sreg1, 2);
amd64_alu_reg_reg (code, X86_ADD, ins->dreg, ins->dreg);
break;
case 12:
/* LEA r1, [r2 + r2*2] */
/* SHL r1, 2 */
amd64_lea_memindex (code, ins->dreg, ins->sreg1, 0, ins->sreg1, 1);
amd64_shift_reg_imm (code, X86_SHL, ins->dreg, 2);
break;
case 25:
/* LEA r1, [r2 + r2*4] */
/* LEA r1, [r1 + r1*4] */
amd64_lea_memindex (code, ins->dreg, ins->sreg1, 0, ins->sreg1, 2);
amd64_lea_memindex (code, ins->dreg, ins->dreg, 0, ins->dreg, 2);
break;
case 100:
/* LEA r1, [r2 + r2*4] */
/* SHL r1, 2 */
/* LEA r1, [r1 + r1*4] */
amd64_lea_memindex (code, ins->dreg, ins->sreg1, 0, ins->sreg1, 2);
amd64_shift_reg_imm (code, X86_SHL, ins->dreg, 2);
amd64_lea_memindex (code, ins->dreg, ins->dreg, 0, ins->dreg, 2);
break;
default:
amd64_imul_reg_reg_imm_size (code, ins->dreg, ins->sreg1, ins->inst_imm, size);
break;
}
break;
}
case OP_LDIV:
case OP_LREM:
/* Regalloc magic makes the div/rem cases the same */
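/* IDIV divides RDX:RAX by its operand and CDQ sign-extends RAX into RDX,
 * so a divisor living in RDX must first be spilled to the red zone and
 * divided from memory instead. */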
if (ins->sreg2 == AMD64_RDX) {
amd64_mov_membase_reg (code, AMD64_RSP, -8, AMD64_RDX, 8);
amd64_cdq (code);
amd64_div_membase (code, AMD64_RSP, -8, TRUE);
} else {
amd64_cdq (code);
amd64_div_reg (code, ins->sreg2, TRUE);
}
break;
case OP_LDIV_UN:
case OP_LREM_UN:
if (ins->sreg2 == AMD64_RDX) {
amd64_mov_membase_reg (code, AMD64_RSP, -8, AMD64_RDX, 8);
amd64_alu_reg_reg (code, X86_XOR, AMD64_RDX, AMD64_RDX);
amd64_div_membase (code, AMD64_RSP, -8, FALSE);
} else {
amd64_alu_reg_reg (code, X86_XOR, AMD64_RDX, AMD64_RDX);
amd64_div_reg (code, ins->sreg2, FALSE);
}
break;
case OP_IDIV:
case OP_IREM:
if (ins->sreg2 == AMD64_RDX) {
amd64_mov_membase_reg (code, AMD64_RSP, -8, AMD64_RDX, 8);
amd64_cdq_size (code, 4);
amd64_div_membase_size (code, AMD64_RSP, -8, TRUE, 4);
} else {
amd64_cdq_size (code, 4);
amd64_div_reg_size (code, ins->sreg2, TRUE, 4);
}
break;
case OP_IDIV_UN:
case OP_IREM_UN:
if (ins->sreg2 == AMD64_RDX) {
amd64_mov_membase_reg (code, AMD64_RSP, -8, AMD64_RDX, 8);
amd64_alu_reg_reg (code, X86_XOR, AMD64_RDX, AMD64_RDX);
amd64_div_membase_size (code, AMD64_RSP, -8, FALSE, 4);
} else {
amd64_alu_reg_reg (code, X86_XOR, AMD64_RDX, AMD64_RDX);
amd64_div_reg_size (code, ins->sreg2, FALSE, 4);
}
break;
case OP_LMUL_OVF:
amd64_imul_reg_reg (code, ins->sreg1, ins->sreg2);
EMIT_COND_SYSTEM_EXCEPTION (X86_CC_O, FALSE, "OverflowException");
break;
case OP_LOR:
amd64_alu_reg_reg (code, X86_OR, ins->sreg1, ins->sreg2);
break;
case OP_OR_IMM:
case OP_LOR_IMM:
g_assert (amd64_is_imm32 (ins->inst_imm));
amd64_alu_reg_imm (code, X86_OR, ins->sreg1, ins->inst_imm);
break;
case OP_LXOR:
amd64_alu_reg_reg (code, X86_XOR, ins->sreg1, ins->sreg2);
break;
case OP_XOR_IMM:
case OP_LXOR_IMM:
g_assert (amd64_is_imm32 (ins->inst_imm));
amd64_alu_reg_imm (code, X86_XOR, ins->sreg1, ins->inst_imm);
break;
case OP_LSHL:
g_assert (ins->sreg2 == AMD64_RCX);
amd64_shift_reg (code, X86_SHL, ins->dreg);
break;
case OP_LSHR:
g_assert (ins->sreg2 == AMD64_RCX);
amd64_shift_reg (code, X86_SAR, ins->dreg);
break;
case OP_SHR_IMM:
case OP_LSHR_IMM:
g_assert (amd64_is_imm32 (ins->inst_imm));
amd64_shift_reg_imm (code, X86_SAR, ins->dreg, ins->inst_imm);
break;
case OP_SHR_UN_IMM:
g_assert (amd64_is_imm32 (ins->inst_imm));
amd64_shift_reg_imm_size (code, X86_SHR, ins->dreg, ins->inst_imm, 4);
break;
case OP_LSHR_UN_IMM:
g_assert (amd64_is_imm32 (ins->inst_imm));
amd64_shift_reg_imm (code, X86_SHR, ins->dreg, ins->inst_imm);
break;
case OP_LSHR_UN:
g_assert (ins->sreg2 == AMD64_RCX);
amd64_shift_reg (code, X86_SHR, ins->dreg);
break;
case OP_SHL_IMM:
case OP_LSHL_IMM:
g_assert (amd64_is_imm32 (ins->inst_imm));
amd64_shift_reg_imm (code, X86_SHL, ins->dreg, ins->inst_imm);
break;
case OP_IADDCC:
case OP_IADD:
amd64_alu_reg_reg_size (code, X86_ADD, ins->sreg1, ins->sreg2, 4);
break;
case OP_IADC:
amd64_alu_reg_reg_size (code, X86_ADC, ins->sreg1, ins->sreg2, 4);
break;
case OP_IADD_IMM:
amd64_alu_reg_imm_size (code, X86_ADD, ins->dreg, ins->inst_imm, 4);
break;
case OP_IADC_IMM:
amd64_alu_reg_imm_size (code, X86_ADC, ins->dreg, ins->inst_imm, 4);
break;
case OP_ISUBCC:
case OP_ISUB:
amd64_alu_reg_reg_size (code, X86_SUB, ins->sreg1, ins->sreg2, 4);
break;
case OP_ISBB:
amd64_alu_reg_reg_size (code, X86_SBB, ins->sreg1, ins->sreg2, 4);
break;
case OP_ISUB_IMM:
amd64_alu_reg_imm_size (code, X86_SUB, ins->dreg, ins->inst_imm, 4);
break;
case OP_ISBB_IMM:
amd64_alu_reg_imm_size (code, X86_SBB, ins->dreg, ins->inst_imm, 4);
break;
case OP_IAND:
amd64_alu_reg_reg_size (code, X86_AND, ins->sreg1, ins->sreg2, 4);
break;
case OP_IAND_IMM:
amd64_alu_reg_imm_size (code, X86_AND, ins->sreg1, ins->inst_imm, 4);
break;
case OP_IOR:
amd64_alu_reg_reg_size (code, X86_OR, ins->sreg1, ins->sreg2, 4);
break;
case OP_IOR_IMM:
amd64_alu_reg_imm_size (code, X86_OR, ins->sreg1, ins->inst_imm, 4);
break;
case OP_IXOR:
amd64_alu_reg_reg_size (code, X86_XOR, ins->sreg1, ins->sreg2, 4);
break;
case OP_IXOR_IMM:
amd64_alu_reg_imm_size (code, X86_XOR, ins->sreg1, ins->inst_imm, 4);
break;
case OP_INEG:
amd64_neg_reg_size (code, ins->sreg1, 4);
break;
case OP_INOT:
amd64_not_reg_size (code, ins->sreg1, 4);
break;
case OP_ISHL:
g_assert (ins->sreg2 == AMD64_RCX);
amd64_shift_reg_size (code, X86_SHL, ins->dreg, 4);
break;
case OP_ISHR:
g_assert (ins->sreg2 == AMD64_RCX);
amd64_shift_reg_size (code, X86_SAR, ins->dreg, 4);
break;
case OP_ISHR_IMM:
amd64_shift_reg_imm_size (code, X86_SAR, ins->dreg, ins->inst_imm, 4);
break;
case OP_ISHR_UN_IMM:
amd64_shift_reg_imm_size (code, X86_SHR, ins->dreg, ins->inst_imm, 4);
break;
case OP_ISHR_UN:
g_assert (ins->sreg2 == AMD64_RCX);
amd64_shift_reg_size (code, X86_SHR, ins->dreg, 4);
break;
case OP_ISHL_IMM:
amd64_shift_reg_imm_size (code, X86_SHL, ins->dreg, ins->inst_imm, 4);
break;
case OP_IMUL:
amd64_imul_reg_reg_size (code, ins->sreg1, ins->sreg2, 4);
break;
case OP_IMUL_OVF:
amd64_imul_reg_reg_size (code, ins->sreg1, ins->sreg2, 4);
EMIT_COND_SYSTEM_EXCEPTION (X86_CC_O, FALSE, "OverflowException");
break;
case OP_IMUL_OVF_UN:
case OP_LMUL_OVF_UN: {
/* the mul operation and the exception check should most likely be split */
int non_eax_reg, saved_eax = FALSE, saved_edx = FALSE;
int size = (ins->opcode == OP_IMUL_OVF_UN) ? 4 : 8;
/*g_assert (ins->sreg2 == X86_EAX);
g_assert (ins->dreg == X86_EAX);*/
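/* Unsigned MUL implicitly multiplies RAX by its operand, writes the
 * double-width result to RDX:RAX and sets OF/CF on overflow, hence the
 * shuffling below to route one operand through RAX and to save/restore
 * RAX and RDX when they hold live values. */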
if (ins->sreg2 == X86_EAX) {
non_eax_reg = ins->sreg1;
} else if (ins->sreg1 == X86_EAX) {
non_eax_reg = ins->sreg2;
} else {
/* no need to save since we're going to store to it anyway */
if (ins->dreg != X86_EAX) {
saved_eax = TRUE;
amd64_push_reg (code, X86_EAX);
}
amd64_mov_reg_reg (code, X86_EAX, ins->sreg1, size);
non_eax_reg = ins->sreg2;
}
if (ins->dreg == X86_EDX) {
if (!saved_eax) {
saved_eax = TRUE;
amd64_push_reg (code, X86_EAX);
}
} else {
saved_edx = TRUE;
amd64_push_reg (code, X86_EDX);
}
amd64_mul_reg_size (code, non_eax_reg, FALSE, size);
/* save before the check since pop and mov don't change the flags */
if (ins->dreg != X86_EAX)
amd64_mov_reg_reg (code, ins->dreg, X86_EAX, size);
if (saved_edx)
amd64_pop_reg (code, X86_EDX);
if (saved_eax)
amd64_pop_reg (code, X86_EAX);
EMIT_COND_SYSTEM_EXCEPTION (X86_CC_O, FALSE, "OverflowException");
break;
}
case OP_ICOMPARE:
amd64_alu_reg_reg_size (code, X86_CMP, ins->sreg1, ins->sreg2, 4);
break;
case OP_ICOMPARE_IMM:
amd64_alu_reg_imm_size (code, X86_CMP, ins->sreg1, ins->inst_imm, 4);
break;
case OP_IBEQ:
case OP_IBLT:
case OP_IBGT:
case OP_IBGE:
case OP_IBLE:
case OP_LBEQ:
case OP_LBLT:
case OP_LBGT:
case OP_LBGE:
case OP_LBLE:
case OP_IBNE_UN:
case OP_IBLT_UN:
case OP_IBGT_UN:
case OP_IBGE_UN:
case OP_IBLE_UN:
case OP_LBNE_UN:
case OP_LBLT_UN:
case OP_LBGT_UN:
case OP_LBGE_UN:
case OP_LBLE_UN:
EMIT_COND_BRANCH (ins, cc_table [mono_opcode_to_cond (ins->opcode)], cc_signed_table [mono_opcode_to_cond (ins->opcode)]);
break;
case OP_CMOV_IEQ:
case OP_CMOV_IGE:
case OP_CMOV_IGT:
case OP_CMOV_ILE:
case OP_CMOV_ILT:
case OP_CMOV_INE_UN:
case OP_CMOV_IGE_UN:
case OP_CMOV_IGT_UN:
case OP_CMOV_ILE_UN:
case OP_CMOV_ILT_UN:
case OP_CMOV_LEQ:
case OP_CMOV_LGE:
case OP_CMOV_LGT:
case OP_CMOV_LLE:
case OP_CMOV_LLT:
case OP_CMOV_LNE_UN:
case OP_CMOV_LGE_UN:
case OP_CMOV_LGT_UN:
case OP_CMOV_LLE_UN:
case OP_CMOV_LLT_UN:
g_assert (ins->dreg == ins->sreg1);
/* This needs to operate on 64 bit values */
amd64_cmov_reg (code, cc_table [mono_opcode_to_cond (ins->opcode)], cc_signed_table [mono_opcode_to_cond (ins->opcode)], ins->dreg, ins->sreg2);
break;
case OP_LNOT:
amd64_not_reg (code, ins->sreg1);
break;
case OP_LNEG:
amd64_neg_reg (code, ins->sreg1);
break;
case OP_ICONST:
case OP_I8CONST:
if ((((guint64)ins->inst_c0) >> 32) == 0 && !mini_debug_options.single_imm_size)
amd64_mov_reg_imm_size (code, ins->dreg, ins->inst_c0, 4);
else
amd64_mov_reg_imm_size (code, ins->dreg, ins->inst_c0, 8);
break;
case OP_AOTCONST:
mono_add_patch_info (cfg, offset, (MonoJumpInfoType)(gsize)ins->inst_i1, ins->inst_p0);
amd64_mov_reg_membase (code, ins->dreg, AMD64_RIP, 0, sizeof(gpointer));
break;
case OP_JUMP_TABLE:
mono_add_patch_info (cfg, offset, (MonoJumpInfoType)(gsize)ins->inst_i1, ins->inst_p0);
amd64_mov_reg_imm_size (code, ins->dreg, 0, 8);
break;
case OP_MOVE:
if (ins->dreg != ins->sreg1)
amd64_mov_reg_reg (code, ins->dreg, ins->sreg1, sizeof (target_mgreg_t));
break;
case OP_AMD64_SET_XMMREG_R4: {
if (ins->dreg != ins->sreg1)
amd64_sse_movss_reg_reg (code, ins->dreg, ins->sreg1);
break;
}
case OP_AMD64_SET_XMMREG_R8: {
if (ins->dreg != ins->sreg1)
amd64_sse_movsd_reg_reg (code, ins->dreg, ins->sreg1);
break;
}
case OP_TAILCALL_PARAMETER:
// This opcode helps compute sizes, i.e.
// of the subsequent OP_TAILCALL, but contributes no code.
g_assert (ins->next);
break;
case OP_TAILCALL:
case OP_TAILCALL_REG:
case OP_TAILCALL_MEMBASE: {
call = (MonoCallInst*)ins;
int i, save_area_offset;
gboolean tailcall_membase = (ins->opcode == OP_TAILCALL_MEMBASE);
gboolean tailcall_reg = (ins->opcode == OP_TAILCALL_REG);
g_assert (!cfg->method->save_lmf);
max_len += AMD64_NREG * 4;
max_len += call->stack_usage / sizeof (target_mgreg_t) * ins_get_size (OP_TAILCALL_PARAMETER);
code = realloc_code (cfg, max_len);
// FIXME hardcoding RAX here is not ideal.
if (tailcall_reg) {
int const reg = ins->sreg1;
g_assert (reg > -1);
if (reg != AMD64_RAX)
amd64_mov_reg_reg (code, AMD64_RAX, reg, 8);
} else if (tailcall_membase) {
int const reg = ins->sreg1;
g_assert (reg > -1);
amd64_mov_reg_membase (code, AMD64_RAX, reg, ins->inst_offset, 8);
} else {
if (cfg->compile_aot) {
mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_METHOD_JUMP, call->method);
amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RIP, 0, 8);
} else {
// FIXME Patch data instead of code.
guint32 pad_size = (guint32)((code + 2 - cfg->native_code) % 8);
if (pad_size)
amd64_padding (code, 8 - pad_size);
mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_METHOD_JUMP, call->method);
amd64_set_reg_template (code, AMD64_RAX);
}
}
/* Restore callee saved registers */
save_area_offset = cfg->arch.reg_save_area_offset;
for (i = 0; i < AMD64_NREG; ++i)
if (AMD64_IS_CALLEE_SAVED_REG (i) && (cfg->used_int_regs & ((regmask_t)1 << i))) {
amd64_mov_reg_membase (code, i, cfg->frame_reg, save_area_offset, 8);
save_area_offset += 8;
}
if (cfg->arch.omit_fp) {
if (cfg->arch.stack_alloc_size)
amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, cfg->arch.stack_alloc_size);
// FIXME:
if (call->stack_usage)
NOT_IMPLEMENTED;
} else {
amd64_push_reg (code, AMD64_RAX);
/* Copy arguments on the stack to our argument area */
// FIXME use rep mov for constant code size, before nonvolatiles
// restored, first saving rsi, rdi into volatiles
for (i = 0; i < call->stack_usage; i += sizeof (target_mgreg_t)) {
amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RSP, i + 8, sizeof (target_mgreg_t));
amd64_mov_membase_reg (code, AMD64_RBP, ARGS_OFFSET + i, AMD64_RAX, sizeof (target_mgreg_t));
}
amd64_pop_reg (code, AMD64_RAX);
#ifdef TARGET_WIN32
amd64_lea_membase (code, AMD64_RSP, AMD64_RBP, 0);
amd64_pop_reg (code, AMD64_RBP);
mono_emit_unwind_op_same_value (cfg, code, AMD64_RBP);
#else
amd64_leave (code);
#endif
}
#ifdef TARGET_WIN32
// Redundant REX byte indicates a tailcall to the native unwinder. It means nothing to the processor.
// https://github.com/dotnet/coreclr/blob/966dabb5bb3c4bf1ea885e1e8dc6528e8c64dc4f/src/unwinder/amd64/unwinder_amd64.cpp#L1394
// FIXME This should be jmp rip+32 for AOT direct to same assembly.
// FIXME This should be jmp [rip+32] for AOT direct to not-same assembly (through data).
// FIXME This should be jmp [rip+32] for JIT direct -- patch data instead of code.
// This is only close to ideal for tailcall_membase, and even then it should
// have a more dynamic register allocation.
x86_imm_emit8 (code, 0x48);
amd64_jump_reg (code, AMD64_RAX);
#else
// NT does not have varargs rax use, and NT ABI does not have red zone.
// Use red-zone mov/jmp instead of push/ret to preserve call/ret speculation stack.
// FIXME Just like NT, the direct cases are not ideal.
amd64_mov_membase_reg (code, AMD64_RSP, -8, AMD64_RAX, 8);
code = amd64_handle_varargs_call (cfg, code, call, FALSE);
amd64_jump_membase (code, AMD64_RSP, -8);
#endif
ins->flags |= MONO_INST_GC_CALLSITE;
ins->backend.pc_offset = code - cfg->native_code;
break;
}
case OP_CHECK_THIS:
/* ensure ins->sreg1 is not NULL */
amd64_alu_membase_imm_size (code, X86_CMP, ins->sreg1, 0, 0, 4);
break;
case OP_ARGLIST: {
amd64_lea_membase (code, AMD64_R11, cfg->frame_reg, cfg->sig_cookie);
amd64_mov_membase_reg (code, ins->sreg1, 0, AMD64_R11, sizeof(gpointer));
break;
}
case OP_CALL:
case OP_FCALL:
case OP_RCALL:
case OP_LCALL:
case OP_VCALL:
case OP_VCALL2:
case OP_VOIDCALL:
call = (MonoCallInst*)ins;
code = amd64_handle_varargs_call (cfg, code, call, FALSE);
code = emit_call (cfg, call, code, MONO_JIT_ICALL_ZeroIsReserved);
ins->flags |= MONO_INST_GC_CALLSITE;
ins->backend.pc_offset = code - cfg->native_code;
code = emit_move_return_value (cfg, ins, code);
break;
case OP_FCALL_REG:
case OP_RCALL_REG:
case OP_LCALL_REG:
case OP_VCALL_REG:
case OP_VCALL2_REG:
case OP_VOIDCALL_REG:
case OP_CALL_REG:
call = (MonoCallInst*)ins;
if (AMD64_IS_ARGUMENT_REG (ins->sreg1)) {
amd64_mov_reg_reg (code, AMD64_R11, ins->sreg1, 8);
ins->sreg1 = AMD64_R11;
}
code = amd64_handle_varargs_call (cfg, code, call, TRUE);
amd64_call_reg (code, ins->sreg1);
ins->flags |= MONO_INST_GC_CALLSITE;
ins->backend.pc_offset = code - cfg->native_code;
code = emit_move_return_value (cfg, ins, code);
break;
case OP_FCALL_MEMBASE:
case OP_RCALL_MEMBASE:
case OP_LCALL_MEMBASE:
case OP_VCALL_MEMBASE:
case OP_VCALL2_MEMBASE:
case OP_VOIDCALL_MEMBASE:
case OP_CALL_MEMBASE:
call = (MonoCallInst*)ins;
amd64_call_membase (code, ins->sreg1, ins->inst_offset);
ins->flags |= MONO_INST_GC_CALLSITE;
ins->backend.pc_offset = code - cfg->native_code;
code = emit_move_return_value (cfg, ins, code);
break;
case OP_DYN_CALL: {
int i, limit_reg, index_reg, src_reg, dst_reg;
MonoInst *var = cfg->dyn_call_var;
guint8 *label;
guint8 *buf [16];
g_assert (var->opcode == OP_REGOFFSET);
/* r11 = args buffer filled by mono_arch_get_dyn_call_args () */
amd64_mov_reg_reg (code, AMD64_R11, ins->sreg1, 8);
/* r10 = ftn */
amd64_mov_reg_reg (code, AMD64_R10, ins->sreg2, 8);
/* Save args buffer */
amd64_mov_membase_reg (code, var->inst_basereg, var->inst_offset, AMD64_R11, 8);
/* Set fp arg regs */
amd64_mov_reg_membase (code, AMD64_RAX, AMD64_R11, MONO_STRUCT_OFFSET (DynCallArgs, has_fp), sizeof (target_mgreg_t));
amd64_test_reg_reg (code, AMD64_RAX, AMD64_RAX);
label = code;
amd64_branch8 (code, X86_CC_Z, -1, 1);
for (i = 0; i < FLOAT_PARAM_REGS; ++i)
amd64_sse_movsd_reg_membase (code, i, AMD64_R11, MONO_STRUCT_OFFSET (DynCallArgs, fregs) + (i * sizeof (double)));
amd64_patch (label, code);
/* Allocate param area */
/* This doesn't need to be freed since OP_DYN_CALL is never called in a loop */
amd64_mov_reg_membase (code, AMD64_RAX, AMD64_R11, MONO_STRUCT_OFFSET (DynCallArgs, nstack_args), 8);
amd64_shift_reg_imm (code, X86_SHL, AMD64_RAX, 3);
amd64_alu_reg_reg (code, X86_SUB, AMD64_RSP, AMD64_RAX);
/* Set stack args */
/* rax/rcx/rdx/r8/r9 is scratch */
limit_reg = AMD64_RAX;
index_reg = AMD64_RCX;
src_reg = AMD64_R8;
dst_reg = AMD64_R9;
amd64_mov_reg_membase (code, limit_reg, AMD64_R11, MONO_STRUCT_OFFSET (DynCallArgs, nstack_args), 8);
amd64_mov_reg_imm (code, index_reg, 0);
amd64_lea_membase (code, src_reg, AMD64_R11, MONO_STRUCT_OFFSET (DynCallArgs, regs) + ((PARAM_REGS) * sizeof (target_mgreg_t)));
amd64_mov_reg_reg (code, dst_reg, AMD64_RSP, 8);
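/* Copy nstack_args machine words from the args buffer (past the register
 * slots) into the freshly allocated param area. The loop tests its
 * condition at the bottom, so we enter by jumping straight to the check
 * to handle a zero count. */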
buf [0] = code;
x86_jump8 (code, 0);
buf [1] = code;
amd64_mov_reg_membase (code, AMD64_RDX, src_reg, 0, 8);
amd64_mov_membase_reg (code, dst_reg, 0, AMD64_RDX, 8);
amd64_alu_reg_imm (code, X86_ADD, index_reg, 1);
amd64_alu_reg_imm (code, X86_ADD, src_reg, 8);
amd64_alu_reg_imm (code, X86_ADD, dst_reg, 8);
amd64_patch (buf [0], code);
amd64_alu_reg_reg (code, X86_CMP, index_reg, limit_reg);
buf [2] = code;
x86_branch8 (code, X86_CC_LT, 0, FALSE);
amd64_patch (buf [2], buf [1]);
/* Set argument registers */
for (i = 0; i < PARAM_REGS; ++i)
amd64_mov_reg_membase (code, param_regs [i], AMD64_R11, MONO_STRUCT_OFFSET (DynCallArgs, regs) + (i * sizeof (target_mgreg_t)), sizeof (target_mgreg_t));
/* Make the call */
amd64_call_reg (code, AMD64_R10);
ins->flags |= MONO_INST_GC_CALLSITE;
ins->backend.pc_offset = code - cfg->native_code;
/* Save result */
amd64_mov_reg_membase (code, AMD64_R11, var->inst_basereg, var->inst_offset, 8);
amd64_mov_membase_reg (code, AMD64_R11, MONO_STRUCT_OFFSET (DynCallArgs, res), AMD64_RAX, 8);
amd64_sse_movsd_membase_reg (code, AMD64_R11, MONO_STRUCT_OFFSET (DynCallArgs, fregs), AMD64_XMM0);
amd64_sse_movsd_membase_reg (code, AMD64_R11, MONO_STRUCT_OFFSET (DynCallArgs, fregs) + sizeof (double), AMD64_XMM1);
break;
}
case OP_AMD64_SAVE_SP_TO_LMF: {
MonoInst *lmf_var = cfg->lmf_var;
amd64_mov_membase_reg (code, lmf_var->inst_basereg, lmf_var->inst_offset + MONO_STRUCT_OFFSET (MonoLMF, rsp), AMD64_RSP, 8);
break;
}
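/* The stack-push opcodes below should have been lowered away on amd64;
 * the asserts catch any that slip through decomposition. */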
case OP_X86_PUSH:
g_assert_not_reached ();
amd64_push_reg (code, ins->sreg1);
break;
case OP_X86_PUSH_IMM:
g_assert_not_reached ();
g_assert (amd64_is_imm32 (ins->inst_imm));
amd64_push_imm (code, ins->inst_imm);
break;
case OP_X86_PUSH_MEMBASE:
g_assert_not_reached ();
amd64_push_membase (code, ins->inst_basereg, ins->inst_offset);
break;
case OP_X86_PUSH_OBJ: {
int size = ALIGN_TO (ins->inst_imm, 8);
g_assert_not_reached ();
amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, size);
amd64_push_reg (code, AMD64_RDI);
amd64_push_reg (code, AMD64_RSI);
amd64_push_reg (code, AMD64_RCX);
if (ins->inst_offset)
amd64_lea_membase (code, AMD64_RSI, ins->inst_basereg, ins->inst_offset);
else
amd64_mov_reg_reg (code, AMD64_RSI, ins->inst_basereg, 8);
amd64_lea_membase (code, AMD64_RDI, AMD64_RSP, (3 * 8));
amd64_mov_reg_imm (code, AMD64_RCX, (size >> 3));
amd64_cld (code);
amd64_prefix (code, X86_REP_PREFIX);
amd64_movsd (code);
amd64_pop_reg (code, AMD64_RCX);
amd64_pop_reg (code, AMD64_RSI);
amd64_pop_reg (code, AMD64_RDI);
break;
}
case OP_GENERIC_CLASS_INIT: {
guint8 *jump;
g_assert (ins->sreg1 == MONO_AMD64_ARG_REG1);
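/* Fast path: test the vtable's initialized flag inline and skip the
 * class-init icall when it is already set. */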
amd64_test_membase_imm_size (code, ins->sreg1, MONO_STRUCT_OFFSET (MonoVTable, initialized), 1, 1);
jump = code;
amd64_branch8 (code, X86_CC_NZ, -1, 1);
code = emit_call (cfg, NULL, code, MONO_JIT_ICALL_mono_generic_class_init);
ins->flags |= MONO_INST_GC_CALLSITE;
ins->backend.pc_offset = code - cfg->native_code;
x86_patch (jump, code);
break;
}
case OP_X86_LEA:
amd64_lea_memindex (code, ins->dreg, ins->sreg1, ins->inst_imm, ins->sreg2, ins->backend.shift_amount);
break;
case OP_X86_LEA_MEMBASE:
amd64_lea4_membase (code, ins->dreg, ins->sreg1, ins->inst_imm);
break;
case OP_AMD64_LEA_MEMBASE:
amd64_lea_membase (code, ins->dreg, ins->sreg1, ins->inst_imm);
break;
case OP_X86_XCHG:
amd64_xchg_reg_reg (code, ins->sreg1, ins->sreg2, 4);
break;
case OP_LOCALLOC:
/* keep alignment */
amd64_alu_reg_imm (code, X86_ADD, ins->sreg1, MONO_ARCH_FRAME_ALIGNMENT - 1);
amd64_alu_reg_imm (code, X86_AND, ins->sreg1, ~(MONO_ARCH_FRAME_ALIGNMENT - 1));
code = mono_emit_stack_alloc (cfg, code, ins);
amd64_mov_reg_reg (code, ins->dreg, AMD64_RSP, 8);
if (cfg->param_area)
amd64_alu_reg_imm (code, X86_ADD, ins->dreg, cfg->param_area);
break;
case OP_LOCALLOC_IMM: {
guint32 size = ins->inst_imm;
size = (size + (MONO_ARCH_FRAME_ALIGNMENT - 1)) & ~ (MONO_ARCH_FRAME_ALIGNMENT - 1);
if (ins->flags & MONO_INST_INIT) {
if (size < 64) {
int i;
amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, size);
amd64_alu_reg_reg (code, X86_XOR, ins->dreg, ins->dreg);
for (i = 0; i < size; i += 8)
amd64_mov_membase_reg (code, AMD64_RSP, i, ins->dreg, 8);
amd64_mov_reg_reg (code, ins->dreg, AMD64_RSP, 8);
} else {
amd64_mov_reg_imm (code, ins->dreg, size);
ins->sreg1 = ins->dreg;
code = mono_emit_stack_alloc (cfg, code, ins);
amd64_mov_reg_reg (code, ins->dreg, AMD64_RSP, 8);
}
} else {
amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, size);
amd64_mov_reg_reg (code, ins->dreg, AMD64_RSP, 8);
}
if (cfg->param_area)
amd64_alu_reg_imm (code, X86_ADD, ins->dreg, cfg->param_area);
break;
}
case OP_THROW: {
amd64_mov_reg_reg (code, AMD64_ARG_REG1, ins->sreg1, 8);
code = emit_call (cfg, NULL, code, MONO_JIT_ICALL_mono_arch_throw_exception);
ins->flags |= MONO_INST_GC_CALLSITE;
ins->backend.pc_offset = code - cfg->native_code;
break;
}
case OP_RETHROW: {
amd64_mov_reg_reg (code, AMD64_ARG_REG1, ins->sreg1, 8);
code = emit_call (cfg, NULL, code, MONO_JIT_ICALL_mono_arch_rethrow_exception);
ins->flags |= MONO_INST_GC_CALLSITE;
ins->backend.pc_offset = code - cfg->native_code;
break;
}
case OP_CALL_HANDLER:
/* Align stack */
amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);
mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_target_bb);
amd64_call_imm (code, 0);
/*
* ins->inst_eh_blocks and bb->clause_holes are part of same GList.
* Holes from bb->clause_holes will be added separately for the entire
* basic block. Add only the rest of them.
*/
for (GList *tmp = ins->inst_eh_blocks; tmp != bb->clause_holes; tmp = tmp->prev)
mono_cfg_add_try_hole (cfg, ((MonoLeaveClause *) tmp->data)->clause, code, bb);
/* Restore stack alignment */
amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);
break;
case OP_START_HANDLER: {
/* Even though we're saving RSP, use sizeof */
/* gpointer because spvar is of type IntPtr */
/* see: mono_create_spvar_for_region */
MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
amd64_mov_membase_reg (code, spvar->inst_basereg, spvar->inst_offset, AMD64_RSP, sizeof(gpointer));
if ((MONO_BBLOCK_IS_IN_REGION (bb, MONO_REGION_FINALLY) ||
MONO_BBLOCK_IS_IN_REGION (bb, MONO_REGION_FILTER) ||
MONO_BBLOCK_IS_IN_REGION (bb, MONO_REGION_FAULT)) &&
cfg->param_area) {
amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, ALIGN_TO (cfg->param_area, MONO_ARCH_FRAME_ALIGNMENT));
}
break;
}
case OP_ENDFINALLY: {
MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
amd64_mov_reg_membase (code, AMD64_RSP, spvar->inst_basereg, spvar->inst_offset, sizeof(gpointer));
amd64_ret (code);
break;
}
case OP_ENDFILTER: {
MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
amd64_mov_reg_membase (code, AMD64_RSP, spvar->inst_basereg, spvar->inst_offset, sizeof(gpointer));
/* The local allocator will put the result into RAX */
amd64_ret (code);
break;
}
case OP_GET_EX_OBJ:
if (ins->dreg != AMD64_RAX)
amd64_mov_reg_reg (code, ins->dreg, AMD64_RAX, sizeof (target_mgreg_t));
break;
case OP_LABEL:
ins->inst_c0 = code - cfg->native_code;
break;
case OP_BR:
//g_print ("target: %p, next: %p, curr: %p, last: %p\n", ins->inst_target_bb, bb->next_bb, ins, bb->last_ins);
//if ((ins->inst_target_bb == bb->next_bb) && ins == bb->last_ins)
//break;
if (ins->inst_target_bb->native_offset) {
amd64_jump_code (code, cfg->native_code + ins->inst_target_bb->native_offset);
} else {
mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_target_bb);
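/* Use a 2-byte short jump when the target's estimated offset fits in a
 * signed byte, otherwise fall back to the 5-byte 32-bit form. */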
if (optimize_branch_pred &&
x86_is_imm8 (ins->inst_target_bb->max_offset - offset))
x86_jump8 (code, 0);
else
x86_jump32 (code, 0);
}
break;
case OP_BR_REG:
amd64_jump_reg (code, ins->sreg1);
break;
case OP_ICNEQ:
case OP_ICGE:
case OP_ICLE:
case OP_ICGE_UN:
case OP_ICLE_UN:
case OP_CEQ:
case OP_LCEQ:
case OP_ICEQ:
case OP_CLT:
case OP_LCLT:
case OP_ICLT:
case OP_CGT:
case OP_ICGT:
case OP_LCGT:
case OP_CLT_UN:
case OP_LCLT_UN:
case OP_ICLT_UN:
case OP_CGT_UN:
case OP_LCGT_UN:
case OP_ICGT_UN:
amd64_set_reg (code, cc_table [mono_opcode_to_cond (ins->opcode)], ins->dreg, cc_signed_table [mono_opcode_to_cond (ins->opcode)]);
amd64_widen_reg (code, ins->dreg, ins->dreg, FALSE, FALSE);
break;
case OP_COND_EXC_EQ:
case OP_COND_EXC_NE_UN:
case OP_COND_EXC_LT:
case OP_COND_EXC_LT_UN:
case OP_COND_EXC_GT:
case OP_COND_EXC_GT_UN:
case OP_COND_EXC_GE:
case OP_COND_EXC_GE_UN:
case OP_COND_EXC_LE:
case OP_COND_EXC_LE_UN:
case OP_COND_EXC_IEQ:
case OP_COND_EXC_INE_UN:
case OP_COND_EXC_ILT:
case OP_COND_EXC_ILT_UN:
case OP_COND_EXC_IGT:
case OP_COND_EXC_IGT_UN:
case OP_COND_EXC_IGE:
case OP_COND_EXC_IGE_UN:
case OP_COND_EXC_ILE:
case OP_COND_EXC_ILE_UN:
EMIT_COND_SYSTEM_EXCEPTION (cc_table [mono_opcode_to_cond (ins->opcode)], cc_signed_table [mono_opcode_to_cond (ins->opcode)], (const char *)ins->inst_p1);
break;
case OP_COND_EXC_OV:
case OP_COND_EXC_NO:
case OP_COND_EXC_C:
case OP_COND_EXC_NC:
EMIT_COND_SYSTEM_EXCEPTION (branch_cc_table [ins->opcode - OP_COND_EXC_EQ],
(ins->opcode < OP_COND_EXC_NE_UN), (const char *)ins->inst_p1);
break;
case OP_COND_EXC_IOV:
case OP_COND_EXC_INO:
case OP_COND_EXC_IC:
case OP_COND_EXC_INC:
EMIT_COND_SYSTEM_EXCEPTION (branch_cc_table [ins->opcode - OP_COND_EXC_IEQ],
(ins->opcode < OP_COND_EXC_INE_UN), (const char *)ins->inst_p1);
break;
/* floating point opcodes */
case OP_R8CONST: {
double d = *(double *)ins->inst_p0;
if ((d == 0.0) && (mono_signbit (d) == 0)) {
amd64_sse_xorpd_reg_reg (code, ins->dreg, ins->dreg);
} else if (cfg->compile_aot && cfg->code_exec_only) {
mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_R8_GOT, ins->inst_p0);
amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, sizeof(gpointer));
amd64_sse_movsd_reg_membase (code, ins->dreg, AMD64_R11, 0);
} else {
mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_R8, ins->inst_p0);
amd64_sse_movsd_reg_membase (code, ins->dreg, AMD64_RIP, 0);
}
break;
}
case OP_R4CONST: {
float f = *(float *)ins->inst_p0;
if ((f == 0.0) && (mono_signbit (f) == 0)) {
amd64_sse_xorps_reg_reg (code, ins->dreg, ins->dreg);
} else {
if (cfg->compile_aot && cfg->code_exec_only) {
mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_R4_GOT, ins->inst_p0);
amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, sizeof(gpointer));
amd64_sse_movss_reg_membase (code, ins->dreg, AMD64_R11, 0);
} else {
mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_R4, ins->inst_p0);
amd64_sse_movss_reg_membase (code, ins->dreg, AMD64_RIP, 0);
}
}
break;
}
case OP_STORER8_MEMBASE_REG:
amd64_sse_movsd_membase_reg (code, ins->inst_destbasereg, ins->inst_offset, ins->sreg1);
break;
case OP_LOADR8_MEMBASE:
amd64_sse_movsd_reg_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
break;
case OP_STORER4_MEMBASE_REG:
amd64_sse_movss_membase_reg (code, ins->inst_destbasereg, ins->inst_offset, ins->sreg1);
break;
case OP_LOADR4_MEMBASE:
amd64_sse_movss_reg_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
break;
case OP_ICONV_TO_R4:
amd64_sse_cvtsi2ss_reg_reg_size (code, ins->dreg, ins->sreg1, 4);
break;
case OP_ICONV_TO_R8:
amd64_sse_cvtsi2sd_reg_reg_size (code, ins->dreg, ins->sreg1, 4);
break;
case OP_LCONV_TO_R4:
amd64_sse_cvtsi2ss_reg_reg (code, ins->dreg, ins->sreg1);
break;
case OP_LCONV_TO_R8:
amd64_sse_cvtsi2sd_reg_reg (code, ins->dreg, ins->sreg1);
break;
case OP_FCONV_TO_R4:
amd64_sse_cvtsd2ss_reg_reg (code, ins->dreg, ins->sreg1);
break;
case OP_FCONV_TO_I1:
code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 1, TRUE);
break;
case OP_FCONV_TO_U1:
code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 1, FALSE);
break;
case OP_FCONV_TO_I2:
code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 2, TRUE);
break;
case OP_FCONV_TO_U2:
code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 2, FALSE);
break;
case OP_FCONV_TO_U4:
code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 4, FALSE);
break;
case OP_FCONV_TO_I4:
code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 4, TRUE);
break;
case OP_FCONV_TO_I:
case OP_FCONV_TO_I8:
code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 8, TRUE);
break;
case OP_RCONV_TO_I1:
amd64_sse_cvtss2si_reg_reg_size (code, ins->dreg, ins->sreg1, 4);
amd64_widen_reg (code, ins->dreg, ins->dreg, TRUE, FALSE);
break;
case OP_RCONV_TO_U1:
amd64_sse_cvtss2si_reg_reg_size (code, ins->dreg, ins->sreg1, 4);
amd64_widen_reg (code, ins->dreg, ins->dreg, FALSE, FALSE);
break;
case OP_RCONV_TO_I2:
amd64_sse_cvtss2si_reg_reg_size (code, ins->dreg, ins->sreg1, 4);
amd64_widen_reg (code, ins->dreg, ins->dreg, TRUE, TRUE);
break;
case OP_RCONV_TO_U2:
amd64_sse_cvtss2si_reg_reg_size (code, ins->dreg, ins->sreg1, 4);
amd64_widen_reg (code, ins->dreg, ins->dreg, FALSE, TRUE);
break;
case OP_RCONV_TO_I4:
amd64_sse_cvtss2si_reg_reg_size (code, ins->dreg, ins->sreg1, 4);
break;
case OP_RCONV_TO_U4:
// Use 8 as register size to get NaN/Inf conversion result truncated to 0
amd64_sse_cvtss2si_reg_reg (code, ins->dreg, ins->sreg1);
break;
case OP_RCONV_TO_I8:
case OP_RCONV_TO_I:
amd64_sse_cvtss2si_reg_reg_size (code, ins->dreg, ins->sreg1, 8);
break;
case OP_RCONV_TO_R8:
amd64_sse_cvtss2sd_reg_reg (code, ins->dreg, ins->sreg1);
break;
case OP_RCONV_TO_R4:
if (ins->dreg != ins->sreg1)
amd64_sse_movss_reg_reg (code, ins->dreg, ins->sreg1);
break;
case OP_LCONV_TO_R_UN: {
guint8 *br [2];
/* Based on gcc code */
amd64_test_reg_reg (code, ins->sreg1, ins->sreg1);
br [0] = code; x86_branch8 (code, X86_CC_S, 0, TRUE);
/* Positive case */
amd64_sse_cvtsi2sd_reg_reg (code, ins->dreg, ins->sreg1);
br [1] = code; x86_jump8 (code, 0);
amd64_patch (br [0], code);
/* Negative case */
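/* CVTSI2SD treats its input as signed, so for values with the top bit
 * set we halve the value while keeping a sticky low bit (to preserve
 * rounding), convert, then double the result. */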
/* Save to the red zone */
amd64_mov_membase_reg (code, AMD64_RSP, -8, AMD64_RAX, 8);
amd64_mov_membase_reg (code, AMD64_RSP, -16, AMD64_RCX, 8);
amd64_mov_reg_reg (code, AMD64_RCX, ins->sreg1, 8);
amd64_mov_reg_reg (code, AMD64_RAX, ins->sreg1, 8);
amd64_alu_reg_imm (code, X86_AND, AMD64_RCX, 1);
amd64_shift_reg_imm (code, X86_SHR, AMD64_RAX, 1);
amd64_alu_reg_reg (code, X86_OR, AMD64_RAX, AMD64_RCX);
amd64_sse_cvtsi2sd_reg_reg (code, ins->dreg, AMD64_RAX);
amd64_sse_addsd_reg_reg (code, ins->dreg, ins->dreg);
/* Restore */
amd64_mov_reg_membase (code, AMD64_RCX, AMD64_RSP, -16, 8);
amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RSP, -8, 8);
amd64_patch (br [1], code);
break;
}
case OP_LCONV_TO_OVF_U4:
amd64_alu_reg_imm (code, X86_CMP, ins->sreg1, 0);
EMIT_COND_SYSTEM_EXCEPTION (X86_CC_LT, TRUE, "OverflowException");
amd64_mov_reg_reg (code, ins->dreg, ins->sreg1, 8);
break;
case OP_LCONV_TO_OVF_I4_UN:
amd64_alu_reg_imm (code, X86_CMP, ins->sreg1, 0x7fffffff);
EMIT_COND_SYSTEM_EXCEPTION (X86_CC_GT, FALSE, "OverflowException");
amd64_mov_reg_reg (code, ins->dreg, ins->sreg1, 8);
break;
case OP_FMOVE:
if (ins->dreg != ins->sreg1)
amd64_sse_movsd_reg_reg (code, ins->dreg, ins->sreg1);
break;
case OP_RMOVE:
if (ins->dreg != ins->sreg1)
amd64_sse_movss_reg_reg (code, ins->dreg, ins->sreg1);
break;
case OP_MOVE_F_TO_I4:
amd64_movd_reg_xreg_size (code, ins->dreg, ins->sreg1, 8);
break;
case OP_MOVE_I4_TO_F:
amd64_movd_xreg_reg_size (code, ins->dreg, ins->sreg1, 8);
break;
case OP_MOVE_F_TO_I8:
amd64_movd_reg_xreg_size (code, ins->dreg, ins->sreg1, 8);
break;
case OP_MOVE_I8_TO_F:
amd64_movd_xreg_reg_size (code, ins->dreg, ins->sreg1, 8);
break;
case OP_FADD:
amd64_sse_addsd_reg_reg (code, ins->dreg, ins->sreg2);
break;
case OP_FSUB:
amd64_sse_subsd_reg_reg (code, ins->dreg, ins->sreg2);
break;
case OP_FMUL:
amd64_sse_mulsd_reg_reg (code, ins->dreg, ins->sreg2);
break;
case OP_FDIV:
amd64_sse_divsd_reg_reg (code, ins->dreg, ins->sreg2);
break;
case OP_FNEG: {
static double r8_0 = -0.0;
g_assert (ins->sreg1 == ins->dreg);
if (cfg->compile_aot && cfg->code_exec_only) {
mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_R8_GOT, &r8_0);
amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, sizeof (target_mgreg_t));
amd64_sse_movsd_reg_membase (code, MONO_ARCH_FP_SCRATCH_REG, AMD64_R11, 0);
amd64_sse_xorpd_reg_reg (code, ins->dreg, MONO_ARCH_FP_SCRATCH_REG);
} else {
mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_R8, &r8_0);
amd64_sse_xorpd_reg_membase (code, ins->dreg, AMD64_RIP, 0);
}
break;
}
case OP_ABS: {
static guint64 d = 0x7fffffffffffffffUL;
g_assert (ins->sreg1 == ins->dreg);
if (cfg->compile_aot && cfg->code_exec_only) {
mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_R8_GOT, &d);
amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, sizeof (target_mgreg_t));
amd64_sse_movsd_reg_membase (code, MONO_ARCH_FP_SCRATCH_REG, AMD64_R11, 0);
amd64_sse_andpd_reg_reg (code, ins->dreg, MONO_ARCH_FP_SCRATCH_REG);
} else {
mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_R8, &d);
amd64_sse_andpd_reg_membase (code, ins->dreg, AMD64_RIP, 0);
}
break;
}
case OP_SQRT:
EMIT_SSE2_FPFUNC (code, fsqrt, ins->dreg, ins->sreg1);
break;
case OP_RADD:
amd64_sse_addss_reg_reg (code, ins->dreg, ins->sreg2);
break;
case OP_RSUB:
amd64_sse_subss_reg_reg (code, ins->dreg, ins->sreg2);
break;
case OP_RMUL:
amd64_sse_mulss_reg_reg (code, ins->dreg, ins->sreg2);
break;
case OP_RDIV:
amd64_sse_divss_reg_reg (code, ins->dreg, ins->sreg2);
break;
case OP_RNEG: {
static float r4_0 = -0.0;
g_assert (ins->sreg1 == ins->dreg);
if (cfg->compile_aot && cfg->code_exec_only) {
mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_R4_GOT, &r4_0);
amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, sizeof (target_mgreg_t));
amd64_sse_movss_reg_membase (code, MONO_ARCH_FP_SCRATCH_REG, AMD64_R11, 0);
} else {
mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_R4, &r4_0);
amd64_sse_movss_reg_membase (code, MONO_ARCH_FP_SCRATCH_REG, AMD64_RIP, 0);
}
amd64_sse_xorps_reg_reg (code, ins->dreg, MONO_ARCH_FP_SCRATCH_REG);
break;
}
case OP_IMIN:
g_assert (cfg->opt & MONO_OPT_CMOV);
g_assert (ins->dreg == ins->sreg1);
amd64_alu_reg_reg_size (code, X86_CMP, ins->sreg1, ins->sreg2, 4);
amd64_cmov_reg_size (code, X86_CC_GT, TRUE, ins->dreg, ins->sreg2, 4);
break;
case OP_IMIN_UN:
g_assert (cfg->opt & MONO_OPT_CMOV);
g_assert (ins->dreg == ins->sreg1);
amd64_alu_reg_reg_size (code, X86_CMP, ins->sreg1, ins->sreg2, 4);
amd64_cmov_reg_size (code, X86_CC_GT, FALSE, ins->dreg, ins->sreg2, 4);
break;
case OP_IMAX:
g_assert (cfg->opt & MONO_OPT_CMOV);
g_assert (ins->dreg == ins->sreg1);
amd64_alu_reg_reg_size (code, X86_CMP, ins->sreg1, ins->sreg2, 4);
amd64_cmov_reg_size (code, X86_CC_LT, TRUE, ins->dreg, ins->sreg2, 4);
break;
case OP_IMAX_UN:
g_assert (cfg->opt & MONO_OPT_CMOV);
g_assert (ins->dreg == ins->sreg1);
amd64_alu_reg_reg_size (code, X86_CMP, ins->sreg1, ins->sreg2, 4);
amd64_cmov_reg_size (code, X86_CC_LT, FALSE, ins->dreg, ins->sreg2, 4);
break;
case OP_LMIN:
g_assert (cfg->opt & MONO_OPT_CMOV);
g_assert (ins->dreg == ins->sreg1);
amd64_alu_reg_reg (code, X86_CMP, ins->sreg1, ins->sreg2);
amd64_cmov_reg (code, X86_CC_GT, TRUE, ins->dreg, ins->sreg2);
break;
case OP_LMIN_UN:
g_assert (cfg->opt & MONO_OPT_CMOV);
g_assert (ins->dreg == ins->sreg1);
amd64_alu_reg_reg (code, X86_CMP, ins->sreg1, ins->sreg2);
amd64_cmov_reg (code, X86_CC_GT, FALSE, ins->dreg, ins->sreg2);
break;
case OP_LMAX:
g_assert (cfg->opt & MONO_OPT_CMOV);
g_assert (ins->dreg == ins->sreg1);
amd64_alu_reg_reg (code, X86_CMP, ins->sreg1, ins->sreg2);
amd64_cmov_reg (code, X86_CC_LT, TRUE, ins->dreg, ins->sreg2);
break;
case OP_LMAX_UN:
g_assert (cfg->opt & MONO_OPT_CMOV);
g_assert (ins->dreg == ins->sreg1);
amd64_alu_reg_reg (code, X86_CMP, ins->sreg1, ins->sreg2);
amd64_cmov_reg (code, X86_CC_LT, FALSE, ins->dreg, ins->sreg2);
break;
case OP_X86_FPOP:
break;
case OP_FCOMPARE:
/*
* The two arguments are swapped because the fbranch instructions
* depend on this for the non-sse case to work.
*/
amd64_sse_comisd_reg_reg (code, ins->sreg2, ins->sreg1);
break;
case OP_RCOMPARE:
/*
* FIXME: Get rid of this.
* The two arguments are swapped because the fbranch instructions
* depend on this for the non-sse case to work.
*/
amd64_sse_comiss_reg_reg (code, ins->sreg2, ins->sreg1);
break;
case OP_FCNEQ:
case OP_FCEQ: {
/* zeroing the register at the start results in
* shorter and faster code (we can also remove the widening op)
*/
guchar *unordered_check;
amd64_alu_reg_reg (code, X86_XOR, ins->dreg, ins->dreg);
amd64_sse_comisd_reg_reg (code, ins->sreg1, ins->sreg2);
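/* COMISD raises PF when either operand is NaN (unordered); branch past
 * the setcc so the unordered result is 0 for FCEQ and 1 for FCNEQ. */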
unordered_check = code;
x86_branch8 (code, X86_CC_P, 0, FALSE);
if (ins->opcode == OP_FCEQ) {
amd64_set_reg (code, X86_CC_EQ, ins->dreg, FALSE);
amd64_patch (unordered_check, code);
} else {
guchar *jump_to_end;
amd64_set_reg (code, X86_CC_NE, ins->dreg, FALSE);
jump_to_end = code;
x86_jump8 (code, 0);
amd64_patch (unordered_check, code);
amd64_inc_reg (code, ins->dreg);
amd64_patch (jump_to_end, code);
}
break;
}
case OP_FCLT:
case OP_FCLT_UN: {
/* zeroing the register at the start results in
* shorter and faster code (we can also remove the widening op)
*/
amd64_alu_reg_reg (code, X86_XOR, ins->dreg, ins->dreg);
amd64_sse_comisd_reg_reg (code, ins->sreg2, ins->sreg1);
if (ins->opcode == OP_FCLT_UN) {
guchar *unordered_check = code;
guchar *jump_to_end;
x86_branch8 (code, X86_CC_P, 0, FALSE);
amd64_set_reg (code, X86_CC_GT, ins->dreg, FALSE);
jump_to_end = code;
x86_jump8 (code, 0);
amd64_patch (unordered_check, code);
amd64_inc_reg (code, ins->dreg);
amd64_patch (jump_to_end, code);
} else {
amd64_set_reg (code, X86_CC_GT, ins->dreg, FALSE);
}
break;
}
case OP_FCLE: {
guchar *unordered_check;
amd64_alu_reg_reg (code, X86_XOR, ins->dreg, ins->dreg);
amd64_sse_comisd_reg_reg (code, ins->sreg2, ins->sreg1);
unordered_check = code;
x86_branch8 (code, X86_CC_P, 0, FALSE);
amd64_set_reg (code, X86_CC_NB, ins->dreg, FALSE);
amd64_patch (unordered_check, code);
break;
}
case OP_FCGT:
case OP_FCGT_UN: {
/* zeroing the register at the start results in
* shorter and faster code (we can also remove the widening op)
*/
guchar *unordered_check;
amd64_alu_reg_reg (code, X86_XOR, ins->dreg, ins->dreg);
amd64_sse_comisd_reg_reg (code, ins->sreg2, ins->sreg1);
if (ins->opcode == OP_FCGT) {
unordered_check = code;
x86_branch8 (code, X86_CC_P, 0, FALSE);
amd64_set_reg (code, X86_CC_LT, ins->dreg, FALSE);
amd64_patch (unordered_check, code);
} else {
amd64_set_reg (code, X86_CC_LT, ins->dreg, FALSE);
}
break;
}
case OP_FCGE: {
guchar *unordered_check;
amd64_alu_reg_reg (code, X86_XOR, ins->dreg, ins->dreg);
amd64_sse_comisd_reg_reg (code, ins->sreg2, ins->sreg1);
unordered_check = code;
x86_branch8 (code, X86_CC_P, 0, FALSE);
amd64_set_reg (code, X86_CC_NA, ins->dreg, FALSE);
amd64_patch (unordered_check, code);
break;
}
case OP_RCEQ:
case OP_RCGT:
case OP_RCLT:
case OP_RCLT_UN:
case OP_RCGT_UN: {
int x86_cond;
amd64_alu_reg_reg (code, X86_XOR, ins->dreg, ins->dreg);
amd64_sse_comiss_reg_reg (code, ins->sreg2, ins->sreg1);
switch (ins->opcode) {
case OP_RCEQ:
x86_cond = X86_CC_EQ;
break;
case OP_RCGT:
x86_cond = X86_CC_LT;
break;
case OP_RCLT:
x86_cond = X86_CC_GT;
break;
case OP_RCLT_UN:
x86_cond = X86_CC_GT;
break;
case OP_RCGT_UN:
x86_cond = X86_CC_LT;
break;
default:
g_assert_not_reached ();
break;
}
guchar *unordered_check;
switch (ins->opcode) {
case OP_RCEQ:
case OP_RCGT:
unordered_check = code;
x86_branch8 (code, X86_CC_P, 0, FALSE);
amd64_set_reg (code, x86_cond, ins->dreg, FALSE);
amd64_patch (unordered_check, code);
break;
case OP_RCLT_UN:
case OP_RCGT_UN: {
guchar *jump_to_end;
unordered_check = code;
x86_branch8 (code, X86_CC_P, 0, FALSE);
amd64_set_reg (code, x86_cond, ins->dreg, FALSE);
jump_to_end = code;
x86_jump8 (code, 0);
amd64_patch (unordered_check, code);
amd64_inc_reg (code, ins->dreg);
amd64_patch (jump_to_end, code);
break;
}
case OP_RCLT:
amd64_set_reg (code, x86_cond, ins->dreg, FALSE);
break;
default:
g_assert_not_reached ();
break;
}
break;
}
case OP_FCLT_MEMBASE:
case OP_FCGT_MEMBASE:
case OP_FCLT_UN_MEMBASE:
case OP_FCGT_UN_MEMBASE:
case OP_FCEQ_MEMBASE: {
guchar *unordered_check, *jump_to_end;
int x86_cond;
amd64_alu_reg_reg (code, X86_XOR, ins->dreg, ins->dreg);
amd64_sse_comisd_reg_membase (code, ins->sreg1, ins->sreg2, ins->inst_offset);
switch (ins->opcode) {
case OP_FCEQ_MEMBASE:
x86_cond = X86_CC_EQ;
break;
case OP_FCLT_MEMBASE:
case OP_FCLT_UN_MEMBASE:
x86_cond = X86_CC_LT;
break;
case OP_FCGT_MEMBASE:
case OP_FCGT_UN_MEMBASE:
x86_cond = X86_CC_GT;
break;
default:
g_assert_not_reached ();
}
unordered_check = code;
x86_branch8 (code, X86_CC_P, 0, FALSE);
amd64_set_reg (code, x86_cond, ins->dreg, FALSE);
switch (ins->opcode) {
case OP_FCEQ_MEMBASE:
case OP_FCLT_MEMBASE:
case OP_FCGT_MEMBASE:
amd64_patch (unordered_check, code);
break;
case OP_FCLT_UN_MEMBASE:
case OP_FCGT_UN_MEMBASE:
jump_to_end = code;
x86_jump8 (code, 0);
amd64_patch (unordered_check, code);
amd64_inc_reg (code, ins->dreg);
amd64_patch (jump_to_end, code);
break;
default:
break;
}
break;
}
case OP_FBEQ: {
guchar *jump = code;
x86_branch8 (code, X86_CC_P, 0, TRUE);
EMIT_COND_BRANCH (ins, X86_CC_EQ, FALSE);
amd64_patch (jump, code);
break;
}
case OP_FBNE_UN:
/* Branch if C013 != 100 */
/* branch if !ZF or (PF|CF) */
EMIT_COND_BRANCH (ins, X86_CC_NE, FALSE);
EMIT_COND_BRANCH (ins, X86_CC_P, FALSE);
EMIT_COND_BRANCH (ins, X86_CC_B, FALSE);
break;
case OP_FBLT:
EMIT_COND_BRANCH (ins, X86_CC_GT, FALSE);
break;
case OP_FBLT_UN:
EMIT_COND_BRANCH (ins, X86_CC_P, FALSE);
EMIT_COND_BRANCH (ins, X86_CC_GT, FALSE);
break;
case OP_FBGT:
case OP_FBGT_UN:
if (ins->opcode == OP_FBGT) {
guchar *br1;
/* skip branch if C1=1 */
br1 = code;
x86_branch8 (code, X86_CC_P, 0, FALSE);
/* branch if (C0 | C3) = 1 */
EMIT_COND_BRANCH (ins, X86_CC_LT, FALSE);
amd64_patch (br1, code);
break;
} else {
EMIT_COND_BRANCH (ins, X86_CC_LT, FALSE);
}
break;
case OP_FBGE: {
/* Branch if C013 == 100 or 001 */
guchar *br1;
/* skip branch if C1=1 */
br1 = code;
x86_branch8 (code, X86_CC_P, 0, FALSE);
/* branch if (C0 | C3) = 1 */
EMIT_COND_BRANCH (ins, X86_CC_BE, FALSE);
amd64_patch (br1, code);
break;
}
case OP_FBGE_UN:
/* Branch if C013 == 000 */
EMIT_COND_BRANCH (ins, X86_CC_LE, FALSE);
break;
case OP_FBLE: {
/* Branch if C013=000 or 100 */
guchar *br1;
/* skip branch if C1=1 */
br1 = code;
x86_branch8 (code, X86_CC_P, 0, FALSE);
/* branch if C0=0 */
EMIT_COND_BRANCH (ins, X86_CC_NB, FALSE);
amd64_patch (br1, code);
break;
}
case OP_FBLE_UN:
/* Branch if C013 != 001 */
EMIT_COND_BRANCH (ins, X86_CC_P, FALSE);
EMIT_COND_BRANCH (ins, X86_CC_GE, FALSE);
break;
case OP_CKFINITE:
/* Transfer value to the fp stack */
amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 16);
amd64_movsd_membase_reg (code, AMD64_RSP, 0, ins->sreg1);
amd64_fld_membase (code, AMD64_RSP, 0, TRUE);
amd64_push_reg (code, AMD64_RAX);
amd64_fxam (code);
amd64_fnstsw (code);
amd64_alu_reg_imm (code, X86_AND, AMD64_RAX, 0x4100);
amd64_alu_reg_imm (code, X86_CMP, AMD64_RAX, X86_FP_C0);
amd64_pop_reg (code, AMD64_RAX);
amd64_fstp (code, 0);
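/* 0x4100 masks the C3 and C0 bits of the FXAM status word; the masked
 * value equals C0 exactly when the class is NaN or infinity, so equality
 * triggers the exception below. */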
EMIT_COND_SYSTEM_EXCEPTION (X86_CC_EQ, FALSE, "OverflowException");
amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 16);
break;
case OP_TLS_GET: {
code = mono_amd64_emit_tls_get (code, ins->dreg, ins->inst_offset);
break;
}
case OP_TLS_SET: {
code = mono_amd64_emit_tls_set (code, ins->sreg1, ins->inst_offset);
break;
}
case OP_MEMORY_BARRIER: {
if (ins->backend.memory_barrier_kind == MONO_MEMORY_BARRIER_SEQ)
x86_mfence (code);
break;
}
case OP_ATOMIC_ADD_I4:
case OP_ATOMIC_ADD_I8: {
int dreg = ins->dreg;
guint32 size = (ins->opcode == OP_ATOMIC_ADD_I4) ? 4 : 8;
if ((dreg == ins->sreg2) || (dreg == ins->inst_basereg))
dreg = AMD64_R11;
amd64_mov_reg_reg (code, dreg, ins->sreg2, size);
amd64_prefix (code, X86_LOCK_PREFIX);
amd64_xadd_membase_reg (code, ins->inst_basereg, ins->inst_offset, dreg, size);
/* dreg contains the old value, add with sreg2 value */
amd64_alu_reg_reg_size (code, X86_ADD, dreg, ins->sreg2, size);
if (ins->dreg != dreg)
amd64_mov_reg_reg (code, ins->dreg, dreg, size);
break;
}
case OP_ATOMIC_EXCHANGE_I4:
case OP_ATOMIC_EXCHANGE_I8: {
guint32 size = ins->opcode == OP_ATOMIC_EXCHANGE_I4 ? 4 : 8;
/* LOCK prefix is implied. */
amd64_mov_reg_reg (code, GP_SCRATCH_REG, ins->sreg2, size);
amd64_xchg_membase_reg_size (code, ins->sreg1, ins->inst_offset, GP_SCRATCH_REG, size);
amd64_mov_reg_reg (code, ins->dreg, GP_SCRATCH_REG, size);
break;
}
case OP_ATOMIC_CAS_I4:
case OP_ATOMIC_CAS_I8: {
guint32 size;
if (ins->opcode == OP_ATOMIC_CAS_I8)
size = 8;
else
size = 4;
/*
* See http://msdn.microsoft.com/en-us/magazine/cc302329.aspx for
* an explanation of how this works.
*/
g_assert (ins->sreg3 == AMD64_RAX);
g_assert (ins->sreg1 != AMD64_RAX);
g_assert (ins->sreg1 != ins->sreg2);
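/* CMPXCHG compares RAX (sreg3) with the memory operand: on a match it
 * stores sreg2, otherwise it loads the memory value into RAX. Either way
 * RAX ends up holding the old memory value. */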
amd64_prefix (code, X86_LOCK_PREFIX);
amd64_cmpxchg_membase_reg_size (code, ins->sreg1, ins->inst_offset, ins->sreg2, size);
if (ins->dreg != AMD64_RAX)
amd64_mov_reg_reg (code, ins->dreg, AMD64_RAX, size);
break;
}
case OP_ATOMIC_LOAD_I1: {
amd64_widen_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset, TRUE, FALSE);
break;
}
case OP_ATOMIC_LOAD_U1: {
amd64_widen_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset, FALSE, FALSE);
break;
}
case OP_ATOMIC_LOAD_I2: {
amd64_widen_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset, TRUE, TRUE);
break;
}
case OP_ATOMIC_LOAD_U2: {
amd64_widen_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset, FALSE, TRUE);
break;
}
case OP_ATOMIC_LOAD_I4: {
amd64_movsxd_reg_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
break;
}
case OP_ATOMIC_LOAD_U4:
case OP_ATOMIC_LOAD_I8:
case OP_ATOMIC_LOAD_U8: {
amd64_mov_reg_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset, ins->opcode == OP_ATOMIC_LOAD_U4 ? 4 : 8);
break;
}
case OP_ATOMIC_LOAD_R4: {
amd64_sse_movss_reg_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
break;
}
case OP_ATOMIC_LOAD_R8: {
amd64_sse_movsd_reg_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
break;
}
case OP_ATOMIC_STORE_I1:
case OP_ATOMIC_STORE_U1:
case OP_ATOMIC_STORE_I2:
case OP_ATOMIC_STORE_U2:
case OP_ATOMIC_STORE_I4:
case OP_ATOMIC_STORE_U4:
case OP_ATOMIC_STORE_I8:
case OP_ATOMIC_STORE_U8: {
int size;
switch (ins->opcode) {
case OP_ATOMIC_STORE_I1:
case OP_ATOMIC_STORE_U1:
size = 1;
break;
case OP_ATOMIC_STORE_I2:
case OP_ATOMIC_STORE_U2:
size = 2;
break;
case OP_ATOMIC_STORE_I4:
case OP_ATOMIC_STORE_U4:
size = 4;
break;
case OP_ATOMIC_STORE_I8:
case OP_ATOMIC_STORE_U8:
size = 8;
break;
}
amd64_mov_membase_reg (code, ins->inst_destbasereg, ins->inst_offset, ins->sreg1, size);
if (ins->backend.memory_barrier_kind == MONO_MEMORY_BARRIER_SEQ)
x86_mfence (code);
break;
}
case OP_ATOMIC_STORE_R4: {
amd64_sse_movss_membase_reg (code, ins->inst_destbasereg, ins->inst_offset, ins->sreg1);
if (ins->backend.memory_barrier_kind == MONO_MEMORY_BARRIER_SEQ)
x86_mfence (code);
break;
}
case OP_ATOMIC_STORE_R8: {
x86_nop (code);
x86_nop (code);
amd64_sse_movsd_membase_reg (code, ins->inst_destbasereg, ins->inst_offset, ins->sreg1);
x86_nop (code);
x86_nop (code);
if (ins->backend.memory_barrier_kind == MONO_MEMORY_BARRIER_SEQ)
x86_mfence (code);
break;
}
case OP_CARD_TABLE_WBARRIER: {
int ptr = ins->sreg1;
int value = ins->sreg2;
guchar *br = 0;
int nursery_shift, card_table_shift;
gpointer card_table_mask;
size_t nursery_size;
gpointer card_table = mono_gc_get_card_table (&card_table_shift, &card_table_mask);
guint64 nursery_start = (guint64)mono_gc_get_nursery (&nursery_shift, &nursery_size);
guint64 shifted_nursery_start = nursery_start >> nursery_shift;
/* If either points to the stack we can simply avoid the WB. This happens due to
 * optimizations revealing a stack store that was not visible when op_cardtable was emitted.
 */
if (ins->sreg1 == AMD64_RSP || ins->sreg2 == AMD64_RSP)
continue;
/*
 * We need one register we can clobber; we choose EDX and pin sreg1
 * to EAX to work around limitations in the local register allocator.
* sreg2 might get allocated to EDX, but that is not a problem since
* we use it before clobbering EDX.
*/
g_assert (ins->sreg1 == AMD64_RAX);
/*
* This is the code we produce:
*
* edx = value
* edx >>= nursery_shift
* cmp edx, (nursery_start >> nursery_shift)
* jne done
* edx = ptr
* edx >>= card_table_shift
* edx += cardtable
* [edx] = 1
* done:
*/
if (mono_gc_card_table_nursery_check ()) {
if (value != AMD64_RDX)
amd64_mov_reg_reg (code, AMD64_RDX, value, 8);
amd64_shift_reg_imm (code, X86_SHR, AMD64_RDX, nursery_shift);
if (shifted_nursery_start >> 31) {
/*
* The value we need to compare against is 64 bits, so we need
* another spare register. We use RBX, which we save and
* restore.
*/
amd64_mov_membase_reg (code, AMD64_RSP, -8, AMD64_RBX, 8);
amd64_mov_reg_imm (code, AMD64_RBX, shifted_nursery_start);
amd64_alu_reg_reg (code, X86_CMP, AMD64_RDX, AMD64_RBX);
amd64_mov_reg_membase (code, AMD64_RBX, AMD64_RSP, -8, 8);
} else {
amd64_alu_reg_imm (code, X86_CMP, AMD64_RDX, shifted_nursery_start);
}
br = code; x86_branch8 (code, X86_CC_NE, -1, FALSE);
}
amd64_mov_reg_reg (code, AMD64_RDX, ptr, 8);
amd64_shift_reg_imm (code, X86_SHR, AMD64_RDX, card_table_shift);
if (card_table_mask)
amd64_alu_reg_imm (code, X86_AND, AMD64_RDX, (guint32)(guint64)card_table_mask);
mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR, card_table);
amd64_alu_reg_membase (code, X86_ADD, AMD64_RDX, AMD64_RIP, 0);
amd64_mov_membase_imm (code, AMD64_RDX, 0, 1, 1);
if (mono_gc_card_table_nursery_check ())
x86_patch (br, code);
break;
}
#ifdef MONO_ARCH_SIMD_INTRINSICS
/* TODO: Some of these IR opcodes are marked as not clobbering their inputs when they in fact do. */
case OP_ADDPS:
amd64_sse_addps_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_DIVPS:
amd64_sse_divps_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_MULPS:
amd64_sse_mulps_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_SUBPS:
amd64_sse_subps_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_MAXPS:
amd64_sse_maxps_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_MINPS:
amd64_sse_minps_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_COMPPS:
g_assert (ins->inst_c0 >= 0 && ins->inst_c0 <= 7);
amd64_sse_cmpps_reg_reg_imm (code, ins->sreg1, ins->sreg2, ins->inst_c0);
break;
case OP_ANDPS:
amd64_sse_andps_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_ANDNPS:
amd64_sse_andnps_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_ORPS:
amd64_sse_orps_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_XORPS:
amd64_sse_xorps_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_SQRTPS:
amd64_sse_sqrtps_reg_reg (code, ins->dreg, ins->sreg1);
break;
case OP_RSQRTPS:
amd64_sse_rsqrtps_reg_reg (code, ins->dreg, ins->sreg1);
break;
case OP_RCPPS:
amd64_sse_rcpps_reg_reg (code, ins->dreg, ins->sreg1);
break;
case OP_ADDSUBPS:
amd64_sse_addsubps_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_HADDPS:
amd64_sse_haddps_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_HSUBPS:
amd64_sse_hsubps_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_DUPPS_HIGH:
amd64_sse_movshdup_reg_reg (code, ins->dreg, ins->sreg1);
break;
case OP_DUPPS_LOW:
amd64_sse_movsldup_reg_reg (code, ins->dreg, ins->sreg1);
break;
case OP_PSHUFLEW_HIGH:
g_assert (ins->inst_c0 >= 0 && ins->inst_c0 <= 0xFF);
amd64_sse_pshufhw_reg_reg_imm (code, ins->dreg, ins->sreg1, ins->inst_c0);
break;
case OP_PSHUFLEW_LOW:
g_assert (ins->inst_c0 >= 0 && ins->inst_c0 <= 0xFF);
amd64_sse_pshuflw_reg_reg_imm (code, ins->dreg, ins->sreg1, ins->inst_c0);
break;
case OP_PSHUFLED:
g_assert (ins->inst_c0 >= 0 && ins->inst_c0 <= 0xFF);
amd64_sse_pshufd_reg_reg_imm (code, ins->dreg, ins->sreg1, ins->inst_c0);
break;
case OP_SHUFPS:
g_assert (ins->inst_c0 >= 0 && ins->inst_c0 <= 0xFF);
amd64_sse_shufps_reg_reg_imm (code, ins->sreg1, ins->sreg2, ins->inst_c0);
break;
case OP_SHUFPD:
g_assert (ins->inst_c0 >= 0 && ins->inst_c0 <= 0x3);
amd64_sse_shufpd_reg_reg_imm (code, ins->sreg1, ins->sreg2, ins->inst_c0);
break;
case OP_ADDPD:
amd64_sse_addpd_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_DIVPD:
amd64_sse_divpd_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_MULPD:
amd64_sse_mulpd_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_SUBPD:
amd64_sse_subpd_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_MAXPD:
amd64_sse_maxpd_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_MINPD:
amd64_sse_minpd_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_COMPPD:
g_assert (ins->inst_c0 >= 0 && ins->inst_c0 <= 7);
amd64_sse_cmppd_reg_reg_imm (code, ins->sreg1, ins->sreg2, ins->inst_c0);
break;
case OP_ANDPD:
amd64_sse_andpd_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_ANDNPD:
amd64_sse_andnpd_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_ORPD:
amd64_sse_orpd_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_XORPD:
amd64_sse_xorpd_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_SQRTPD:
amd64_sse_sqrtpd_reg_reg (code, ins->dreg, ins->sreg1);
break;
case OP_ADDSUBPD:
amd64_sse_addsubpd_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_HADDPD:
amd64_sse_haddpd_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_HSUBPD:
amd64_sse_hsubpd_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_DUPPD:
amd64_sse_movddup_reg_reg (code, ins->dreg, ins->sreg1);
break;
case OP_EXTRACT_MASK:
amd64_sse_pmovmskb_reg_reg (code, ins->dreg, ins->sreg1);
break;
case OP_PAND:
amd64_sse_pand_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_PANDN:
amd64_sse_pandn_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_POR:
amd64_sse_por_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_PXOR:
amd64_sse_pxor_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_PADDB:
amd64_sse_paddb_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_PADDW:
amd64_sse_paddw_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_PADDD:
amd64_sse_paddd_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_PADDQ:
amd64_sse_paddq_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_PSUBB:
amd64_sse_psubb_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_PSUBW:
amd64_sse_psubw_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_PSUBD:
amd64_sse_psubd_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_PSUBQ:
amd64_sse_psubq_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_PMAXB_UN:
amd64_sse_pmaxub_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_PMAXW_UN:
amd64_sse_pmaxuw_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_PMAXD_UN:
amd64_sse_pmaxud_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_PMAXB:
amd64_sse_pmaxsb_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_PMAXW:
amd64_sse_pmaxsw_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_PMAXD:
amd64_sse_pmaxsd_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_PAVGB_UN:
amd64_sse_pavgb_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_PAVGW_UN:
amd64_sse_pavgw_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_PMINB_UN:
amd64_sse_pminub_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_PMINW_UN:
amd64_sse_pminuw_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_PMIND_UN:
amd64_sse_pminud_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_PMINB:
amd64_sse_pminsb_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_PMINW:
amd64_sse_pminsw_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_PMIND:
amd64_sse_pminsd_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_PCMPEQB:
amd64_sse_pcmpeqb_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_PCMPEQW:
amd64_sse_pcmpeqw_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_PCMPEQD:
amd64_sse_pcmpeqd_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_PCMPEQQ:
amd64_sse_pcmpeqq_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_PCMPGTB:
amd64_sse_pcmpgtb_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_PCMPGTW:
amd64_sse_pcmpgtw_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_PCMPGTD:
amd64_sse_pcmpgtd_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_PCMPGTQ:
amd64_sse_pcmpgtq_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_PSUM_ABS_DIFF:
amd64_sse_psadbw_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_UNPACK_LOWB:
amd64_sse_punpcklbw_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_UNPACK_LOWW:
amd64_sse_punpcklwd_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_UNPACK_LOWD:
amd64_sse_punpckldq_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_UNPACK_LOWQ:
amd64_sse_punpcklqdq_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_UNPACK_LOWPS:
amd64_sse_unpcklps_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_UNPACK_LOWPD:
amd64_sse_unpcklpd_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_UNPACK_HIGHB:
amd64_sse_punpckhbw_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_UNPACK_HIGHW:
amd64_sse_punpckhwd_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_UNPACK_HIGHD:
amd64_sse_punpckhdq_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_UNPACK_HIGHQ:
amd64_sse_punpckhqdq_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_UNPACK_HIGHPS:
amd64_sse_unpckhps_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_UNPACK_HIGHPD:
amd64_sse_unpckhpd_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_PACKW:
amd64_sse_packsswb_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_PACKD:
amd64_sse_packssdw_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_PACKW_UN:
amd64_sse_packuswb_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_PACKD_UN:
amd64_sse_packusdw_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_PADDB_SAT_UN:
amd64_sse_paddusb_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_PSUBB_SAT_UN:
amd64_sse_psubusb_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_PADDW_SAT_UN:
amd64_sse_paddusw_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_PSUBW_SAT_UN:
amd64_sse_psubusw_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_PADDB_SAT:
amd64_sse_paddsb_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_PSUBB_SAT:
amd64_sse_psubsb_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_PADDW_SAT:
amd64_sse_paddsw_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_PSUBW_SAT:
amd64_sse_psubsw_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_PMULW:
amd64_sse_pmullw_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_PMULD:
amd64_sse_pmulld_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_PMULQ:
amd64_sse_pmuludq_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_PMULW_HIGH_UN:
amd64_sse_pmulhuw_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_PMULW_HIGH:
amd64_sse_pmulhw_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_PSHRW:
amd64_sse_psrlw_reg_imm (code, ins->dreg, ins->inst_imm);
break;
case OP_PSHRW_REG:
amd64_sse_psrlw_reg_reg (code, ins->dreg, ins->sreg2);
break;
case OP_PSARW:
amd64_sse_psraw_reg_imm (code, ins->dreg, ins->inst_imm);
break;
case OP_PSARW_REG:
amd64_sse_psraw_reg_reg (code, ins->dreg, ins->sreg2);
break;
case OP_PSHLW:
amd64_sse_psllw_reg_imm (code, ins->dreg, ins->inst_imm);
break;
case OP_PSHLW_REG:
amd64_sse_psllw_reg_reg (code, ins->dreg, ins->sreg2);
break;
case OP_PSHRD:
amd64_sse_psrld_reg_imm (code, ins->dreg, ins->inst_imm);
break;
case OP_PSHRD_REG:
amd64_sse_psrld_reg_reg (code, ins->dreg, ins->sreg2);
break;
case OP_PSARD:
amd64_sse_psrad_reg_imm (code, ins->dreg, ins->inst_imm);
break;
case OP_PSARD_REG:
amd64_sse_psrad_reg_reg (code, ins->dreg, ins->sreg2);
break;
case OP_PSHLD:
amd64_sse_pslld_reg_imm (code, ins->dreg, ins->inst_imm);
break;
case OP_PSHLD_REG:
amd64_sse_pslld_reg_reg (code, ins->dreg, ins->sreg2);
break;
case OP_PSHRQ:
amd64_sse_psrlq_reg_imm (code, ins->dreg, ins->inst_imm);
break;
case OP_PSHRQ_REG:
amd64_sse_psrlq_reg_reg (code, ins->dreg, ins->sreg2);
break;
/* TODO: There is no SSE encoding for a 64-bit arithmetic shift right (psraq is AVX-512 only), so these remain unimplemented:
case OP_PSARQ:
amd64_sse_psraq_reg_imm (code, ins->dreg, ins->inst_imm);
break;
case OP_PSARQ_REG:
amd64_sse_psraq_reg_reg (code, ins->dreg, ins->sreg2);
break;
*/
case OP_PSHLQ:
amd64_sse_psllq_reg_imm (code, ins->dreg, ins->inst_imm);
break;
case OP_PSHLQ_REG:
amd64_sse_psllq_reg_reg (code, ins->dreg, ins->sreg2);
break;
case OP_CVTDQ2PD:
amd64_sse_cvtdq2pd_reg_reg (code, ins->dreg, ins->sreg1);
break;
case OP_CVTDQ2PS:
amd64_sse_cvtdq2ps_reg_reg (code, ins->dreg, ins->sreg1);
break;
case OP_CVTPD2DQ:
amd64_sse_cvtpd2dq_reg_reg (code, ins->dreg, ins->sreg1);
break;
case OP_CVTPD2PS:
amd64_sse_cvtpd2ps_reg_reg (code, ins->dreg, ins->sreg1);
break;
case OP_CVTPS2DQ:
amd64_sse_cvtps2dq_reg_reg (code, ins->dreg, ins->sreg1);
break;
case OP_CVTPS2PD:
amd64_sse_cvtps2pd_reg_reg (code, ins->dreg, ins->sreg1);
break;
case OP_CVTTPD2DQ:
amd64_sse_cvttpd2dq_reg_reg (code, ins->dreg, ins->sreg1);
break;
case OP_CVTTPS2DQ:
amd64_sse_cvttps2dq_reg_reg (code, ins->dreg, ins->sreg1);
break;
case OP_ICONV_TO_X:
amd64_movd_xreg_reg_size (code, ins->dreg, ins->sreg1, 4);
break;
case OP_EXTRACT_I4:
amd64_movd_reg_xreg_size (code, ins->dreg, ins->sreg1, 4);
break;
case OP_EXTRACT_I8:
if (ins->inst_c0) {
amd64_movhlps_reg_reg (code, MONO_ARCH_FP_SCRATCH_REG, ins->sreg1);
amd64_movd_reg_xreg_size (code, ins->dreg, MONO_ARCH_FP_SCRATCH_REG, 8);
} else {
amd64_movd_reg_xreg_size (code, ins->dreg, ins->sreg1, 8);
}
break;
case OP_EXTRACT_I1:
amd64_movd_reg_xreg_size (code, ins->dreg, ins->sreg1, 4);
if (ins->inst_c0)
amd64_shift_reg_imm (code, X86_SHR, ins->dreg, ins->inst_c0 * 8);
amd64_widen_reg (code, ins->dreg, ins->dreg, ins->inst_c1 == MONO_TYPE_I1, FALSE);
break;
case OP_EXTRACT_I2:
/*amd64_movd_reg_xreg_size (code, ins->dreg, ins->sreg1, 4);
if (ins->inst_c0)
amd64_shift_reg_imm_size (code, X86_SHR, ins->dreg, 16, 4);*/
amd64_sse_pextrw_reg_reg_imm (code, ins->dreg, ins->sreg1, ins->inst_c0);
amd64_widen_reg_size (code, ins->dreg, ins->dreg, ins->inst_c1 == MONO_TYPE_I2, TRUE, 4);
break;
case OP_EXTRACT_R8:
if (ins->inst_c0)
amd64_movhlps_reg_reg (code, ins->dreg, ins->sreg1);
else
amd64_sse_movsd_reg_reg (code, ins->dreg, ins->sreg1);
break;
case OP_INSERT_I2:
amd64_sse_pinsrw_reg_reg_imm (code, ins->sreg1, ins->sreg2, ins->inst_c0);
break;
case OP_EXTRACTX_U2:
amd64_sse_pextrw_reg_reg_imm (code, ins->dreg, ins->sreg1, ins->inst_c0);
break;
case OP_INSERTX_U1_SLOW:
/* sreg1 is the extracted ireg (scratch)
 * sreg2 is the ireg to be inserted (scratch)
 * dreg is the xreg to receive the value */
/*clear the bits from the extracted word*/
amd64_alu_reg_imm (code, X86_AND, ins->sreg1, ins->inst_c0 & 1 ? 0x00FF : 0xFF00);
/*shift the value to insert if needed*/
if (ins->inst_c0 & 1)
amd64_shift_reg_imm_size (code, X86_SHL, ins->sreg2, 8, 4);
/*join them together*/
amd64_alu_reg_reg (code, X86_OR, ins->sreg1, ins->sreg2);
amd64_sse_pinsrw_reg_reg_imm (code, ins->dreg, ins->sreg1, ins->inst_c0 / 2);
break;
case OP_INSERTX_I4_SLOW:
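/* The 32-bit insert is composed from two 16-bit pinsrw inserts (low half,
 * then high half of sreg2), so only SSE2 is required. */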
amd64_sse_pinsrw_reg_reg_imm (code, ins->dreg, ins->sreg2, ins->inst_c0 * 2);
amd64_shift_reg_imm (code, X86_SHR, ins->sreg2, 16);
amd64_sse_pinsrw_reg_reg_imm (code, ins->dreg, ins->sreg2, ins->inst_c0 * 2 + 1);
break;
case OP_INSERTX_I8_SLOW:
amd64_movd_xreg_reg_size(code, MONO_ARCH_FP_SCRATCH_REG, ins->sreg2, 8);
if (ins->inst_c0)
amd64_movlhps_reg_reg (code, ins->dreg, MONO_ARCH_FP_SCRATCH_REG);
else
amd64_sse_movsd_reg_reg (code, ins->dreg, MONO_ARCH_FP_SCRATCH_REG);
break;
case OP_INSERTX_R4_SLOW:
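/*
 * movss only writes lane 0, so for lanes 1-3 the target lane is shuffled into
 * position 0, the scalar is inserted, and the same (self-inverse) shuffle puts
 * the lanes back in place.
 */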
switch (ins->inst_c0) {
case 0:
amd64_sse_movss_reg_reg (code, ins->dreg, ins->sreg2);
break;
case 1:
amd64_sse_pshufd_reg_reg_imm (code, ins->dreg, ins->dreg, mono_simd_shuffle_mask(1, 0, 2, 3));
amd64_sse_movss_reg_reg (code, ins->dreg, ins->sreg2);
amd64_sse_pshufd_reg_reg_imm (code, ins->dreg, ins->dreg, mono_simd_shuffle_mask(1, 0, 2, 3));
break;
case 2:
amd64_sse_pshufd_reg_reg_imm (code, ins->dreg, ins->dreg, mono_simd_shuffle_mask(2, 1, 0, 3));
amd64_sse_movss_reg_reg (code, ins->dreg, ins->sreg2);
amd64_sse_pshufd_reg_reg_imm (code, ins->dreg, ins->dreg, mono_simd_shuffle_mask(2, 1, 0, 3));
break;
case 3:
amd64_sse_pshufd_reg_reg_imm (code, ins->dreg, ins->dreg, mono_simd_shuffle_mask(3, 1, 2, 0));
amd64_sse_movss_reg_reg (code, ins->dreg, ins->sreg2);
amd64_sse_pshufd_reg_reg_imm (code, ins->dreg, ins->dreg, mono_simd_shuffle_mask(3, 1, 2, 0));
break;
}
break;
case OP_INSERTX_R8_SLOW:
if (ins->inst_c0)
amd64_movlhps_reg_reg (code, ins->dreg, ins->sreg2);
else
amd64_sse_movsd_reg_reg (code, ins->dreg, ins->sreg2);
break;
case OP_STOREX_MEMBASE_REG:
case OP_STOREX_MEMBASE:
amd64_sse_movups_membase_reg (code, ins->dreg, ins->inst_offset, ins->sreg1);
break;
case OP_LOADX_MEMBASE:
amd64_sse_movups_reg_membase (code, ins->dreg, ins->sreg1, ins->inst_offset);
break;
case OP_LOADX_ALIGNED_MEMBASE:
amd64_sse_movaps_reg_membase (code, ins->dreg, ins->sreg1, ins->inst_offset);
break;
case OP_STOREX_ALIGNED_MEMBASE_REG:
amd64_sse_movaps_membase_reg (code, ins->dreg, ins->inst_offset, ins->sreg1);
break;
case OP_STOREX_NTA_MEMBASE_REG:
amd64_sse_movntps_reg_membase (code, ins->dreg, ins->sreg1, ins->inst_offset);
break;
case OP_PREFETCH_MEMBASE:
amd64_sse_prefetch_reg_membase (code, ins->backend.arg_info, ins->sreg1, ins->inst_offset);
break;
case OP_XMOVE:
/*FIXME the peephole pass should have killed this*/
if (ins->dreg != ins->sreg1)
amd64_sse_movaps_reg_reg (code, ins->dreg, ins->sreg1);
break;
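/* pxor reg,reg and pcmpeqb reg,reg are the standard idioms for materializing
 * all-zeros and all-ones vectors without a memory load. */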
case OP_XZERO:
amd64_sse_pxor_reg_reg (code, ins->dreg, ins->dreg);
break;
case OP_XONES:
amd64_sse_pcmpeqb_reg_reg (code, ins->dreg, ins->dreg);
break;
case OP_ICONV_TO_R4_RAW:
amd64_movd_xreg_reg_size (code, ins->dreg, ins->sreg1, 4);
break;
case OP_FCONV_TO_R8_X:
amd64_sse_movsd_reg_reg (code, ins->dreg, ins->sreg1);
break;
case OP_XCONV_R8_TO_I4:
amd64_sse_cvttsd2si_reg_xreg_size (code, ins->dreg, ins->sreg1, 4);
switch (ins->backend.source_opcode) {
case OP_FCONV_TO_I1:
amd64_widen_reg (code, ins->dreg, ins->dreg, TRUE, FALSE);
break;
case OP_FCONV_TO_U1:
amd64_widen_reg (code, ins->dreg, ins->dreg, FALSE, FALSE);
break;
case OP_FCONV_TO_I2:
amd64_widen_reg (code, ins->dreg, ins->dreg, TRUE, TRUE);
break;
case OP_FCONV_TO_U2:
amd64_widen_reg (code, ins->dreg, ins->dreg, FALSE, TRUE);
break;
}
break;
case OP_EXPAND_I2:
amd64_sse_pinsrw_reg_reg_imm (code, ins->dreg, ins->sreg1, 0);
amd64_sse_pinsrw_reg_reg_imm (code, ins->dreg, ins->sreg1, 1);
amd64_sse_pshufd_reg_reg_imm (code, ins->dreg, ins->dreg, 0);
break;
case OP_EXPAND_I4:
amd64_movd_xreg_reg_size (code, ins->dreg, ins->sreg1, 4);
amd64_sse_pshufd_reg_reg_imm (code, ins->dreg, ins->dreg, 0);
break;
case OP_EXPAND_I8:
amd64_movd_xreg_reg_size (code, ins->dreg, ins->sreg1, 8);
amd64_sse_pshufd_reg_reg_imm (code, ins->dreg, ins->dreg, 0x44);
break;
case OP_EXPAND_R4:
amd64_sse_movsd_reg_reg (code, ins->dreg, ins->sreg1);
amd64_sse_pshufd_reg_reg_imm (code, ins->dreg, ins->dreg, 0);
break;
case OP_EXPAND_R8:
amd64_sse_movsd_reg_reg (code, ins->dreg, ins->sreg1);
amd64_sse_pshufd_reg_reg_imm (code, ins->dreg, ins->dreg, 0x44);
break;
case OP_SSE41_ROUNDP: {
if (ins->inst_c1 == MONO_TYPE_R8)
amd64_sse_roundpd_reg_reg_imm (code, ins->dreg, ins->sreg1, ins->inst_c0);
else
g_assert_not_reached (); // roundps is not used anywhere by the non-llvm back end yet.
break;
}
#endif
case OP_LZCNT32:
amd64_sse_lzcnt_reg_reg_size (code, ins->dreg, ins->sreg1, 4);
break;
case OP_LZCNT64:
amd64_sse_lzcnt_reg_reg_size (code, ins->dreg, ins->sreg1, 8);
break;
case OP_POPCNT32:
amd64_sse_popcnt_reg_reg_size (code, ins->dreg, ins->sreg1, 4);
break;
case OP_POPCNT64:
amd64_sse_popcnt_reg_reg_size (code, ins->dreg, ins->sreg1, 8);
break;
case OP_LIVERANGE_START: {
if (cfg->verbose_level > 1)
printf ("R%d START=0x%x\n", MONO_VARINFO (cfg, ins->inst_c0)->vreg, (int)(code - cfg->native_code));
MONO_VARINFO (cfg, ins->inst_c0)->live_range_start = code - cfg->native_code;
break;
}
case OP_LIVERANGE_END: {
if (cfg->verbose_level > 1)
printf ("R%d END=0x%x\n", MONO_VARINFO (cfg, ins->inst_c0)->vreg, (int)(code - cfg->native_code));
MONO_VARINFO (cfg, ins->inst_c0)->live_range_end = code - cfg->native_code;
break;
}
case OP_GC_SAFE_POINT: {
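/*
 * Poll the per-thread state flag: the fast path is a single test-and-branch,
 * and the call to mono_threads_state_poll is only taken when the flag is set.
 */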
guint8 *br [1];
amd64_test_membase_imm_size (code, ins->sreg1, 0, 1, 4);
br[0] = code; x86_branch8 (code, X86_CC_EQ, 0, FALSE);
code = emit_call (cfg, NULL, code, MONO_JIT_ICALL_mono_threads_state_poll);
amd64_patch (br[0], code);
break;
}
case OP_GC_LIVENESS_DEF:
case OP_GC_LIVENESS_USE:
case OP_GC_PARAM_SLOT_LIVENESS_DEF:
ins->backend.pc_offset = code - cfg->native_code;
break;
case OP_GC_SPILL_SLOT_LIVENESS_DEF:
ins->backend.pc_offset = code - cfg->native_code;
bb->spill_slot_defs = g_slist_prepend_mempool (cfg->mempool, bb->spill_slot_defs, ins);
break;
case OP_GET_LAST_ERROR:
code = emit_get_last_error(code, ins->dreg);
break;
case OP_FILL_PROF_CALL_CTX:
for (int i = 0; i < AMD64_NREG; i++)
if (AMD64_IS_CALLEE_SAVED_REG (i) || i == AMD64_RSP)
amd64_mov_membase_reg (code, ins->sreg1, MONO_STRUCT_OFFSET (MonoContext, gregs) + i * sizeof (target_mgreg_t), i, sizeof (target_mgreg_t));
break;
default:
g_warning ("unknown opcode %s in %s()\n", mono_inst_name (ins->opcode), __FUNCTION__);
g_assert_not_reached ();
}
g_assertf ((code - cfg->native_code - offset) <= max_len,
"wrong maximal instruction length of instruction %s (expected %d, got %d)",
mono_inst_name (ins->opcode), max_len, (int)(code - cfg->native_code - offset));
}
set_code_cursor (cfg, code);
}
#endif /* DISABLE_JIT */
G_BEGIN_DECLS
void __chkstk (void);
void ___chkstk_ms (void);
G_END_DECLS
void
mono_arch_register_lowlevel_calls (void)
{
/* The signature doesn't matter */
mono_register_jit_icall (mono_amd64_throw_exception, mono_icall_sig_void, TRUE);
#if defined(TARGET_WIN32) || defined(HOST_WIN32)
#if _MSC_VER
mono_register_jit_icall_info (&mono_get_jit_icall_info ()->mono_chkstk_win64, __chkstk, "mono_chkstk_win64", NULL, TRUE, "__chkstk");
#else
mono_register_jit_icall_info (&mono_get_jit_icall_info ()->mono_chkstk_win64, ___chkstk_ms, "mono_chkstk_win64", NULL, TRUE, "___chkstk_ms");
#endif
#endif
}
void
mono_arch_patch_code_new (MonoCompile *cfg, guint8 *code, MonoJumpInfo *ji, gpointer target)
{
unsigned char *ip = ji->ip.i + code;
/*
 * Debug code to help track down problems where the target of a near call
 * is not valid.
*/
if (amd64_is_near_call (ip)) {
gint64 disp = (guint8*)target - (guint8*)ip;
if (!amd64_is_imm32 (disp)) {
printf ("TYPE: %d\n", ji->type);
switch (ji->type) {
case MONO_PATCH_INFO_JIT_ICALL_ID:
printf ("V: %s\n", mono_find_jit_icall_info (ji->data.jit_icall_id)->name);
break;
case MONO_PATCH_INFO_METHOD_JUMP:
case MONO_PATCH_INFO_METHOD:
printf ("V: %s\n", ji->data.method->name);
break;
default:
break;
}
}
}
amd64_patch (ip, (gpointer)target);
}
#ifndef DISABLE_JIT
static int
get_max_epilog_size (MonoCompile *cfg)
{
int max_epilog_size = 16;
if (cfg->method->save_lmf)
max_epilog_size += 256;
max_epilog_size += (AMD64_NREG * 2);
return max_epilog_size;
}
/*
 * This macro is used for testing whether the unwinder works correctly at every point
* where an async exception can happen.
*/
/* This will generate a SIGSEGV at the given point in the code */
#define async_exc_point(code) do { \
if (mono_inject_async_exc_method && mono_method_desc_full_match (mono_inject_async_exc_method, cfg->method)) { \
if (cfg->arch.async_point_count == mono_inject_async_exc_pos) \
amd64_mov_reg_mem (code, AMD64_RAX, 0, 4); \
cfg->arch.async_point_count ++; \
} \
} while (0)
#ifdef TARGET_WIN32
static guint8 *
emit_prolog_setup_sp_win64 (MonoCompile *cfg, guint8 *code, int alloc_size, int *cfa_offset_input)
{
int cfa_offset = *cfa_offset_input;
/* Allocate the Windows stack frame using the stack probing method */
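/*
 * Windows commits stack pages lazily behind a guard page, so allocations of a
 * page (0x1000 bytes) or more must go through chkstk, which probes the stack
 * page by page before rsp is adjusted.
 */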
if (alloc_size) {
if (alloc_size >= 0x1000) {
amd64_mov_reg_imm (code, AMD64_RAX, alloc_size);
code = emit_call (cfg, NULL, code, MONO_JIT_ICALL_mono_chkstk_win64);
}
amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, alloc_size);
if (cfg->arch.omit_fp) {
cfa_offset += alloc_size;
mono_emit_unwind_op_def_cfa_offset (cfg, code, cfa_offset);
async_exc_point (code);
}
// NOTE: a standard win64 prolog always emits the alloc unwind info, but since mono
// uses a frame pointer with negative offsets while a standard win64 prolog assumes positive offsets, we can't
// emit sp alloc unwind metadata - the native OS unwinder would incorrectly restore sp. Excluding the alloc
// metadata instead makes the OS unwinder fall back to restoring sp from the frame pointer,
// which yields the expected result.
if (cfg->arch.omit_fp)
mono_emit_unwind_op_sp_alloc (cfg, code, alloc_size);
}
*cfa_offset_input = cfa_offset;
set_code_cursor (cfg, code);
return code;
}
#endif /* TARGET_WIN32 */
guint8 *
mono_arch_emit_prolog (MonoCompile *cfg)
{
MonoMethod *method = cfg->method;
MonoBasicBlock *bb;
MonoMethodSignature *sig;
MonoInst *ins;
int alloc_size, pos, i, cfa_offset, quad, max_epilog_size, save_area_offset;
guint8 *code;
CallInfo *cinfo;
MonoInst *lmf_var = cfg->lmf_var;
gboolean args_clobbered = FALSE;
cfg->code_size = MAX (cfg->header->code_size * 4, 1024);
code = cfg->native_code = (unsigned char *)g_malloc (cfg->code_size);
/* Amount of stack space allocated by register saving code */
pos = 0;
/* Offset between RSP and the CFA */
cfa_offset = 0;
/*
* The prolog consists of the following parts:
* FP present:
* - push rbp
* - mov rbp, rsp
* - save callee saved regs using moves
* - allocate frame
* - save rgctx if needed
* - save lmf if needed
* FP not present:
* - allocate frame
* - save rgctx if needed
* - save lmf if needed
* - save callee saved regs using moves
*/
// CFA = sp + 8
cfa_offset = 8;
mono_emit_unwind_op_def_cfa (cfg, code, AMD64_RSP, 8);
// IP saved at CFA - 8
mono_emit_unwind_op_offset (cfg, code, AMD64_RIP, -cfa_offset);
async_exc_point (code);
mini_gc_set_slot_type_from_cfa (cfg, -cfa_offset, SLOT_NOREF);
if (!cfg->arch.omit_fp) {
amd64_push_reg (code, AMD64_RBP);
cfa_offset += 8;
mono_emit_unwind_op_def_cfa_offset (cfg, code, cfa_offset);
mono_emit_unwind_op_offset (cfg, code, AMD64_RBP, - cfa_offset);
async_exc_point (code);
/* These are handled automatically by the stack marking code */
mini_gc_set_slot_type_from_cfa (cfg, -cfa_offset, SLOT_NOREF);
amd64_mov_reg_reg (code, AMD64_RBP, AMD64_RSP, sizeof (target_mgreg_t));
mono_emit_unwind_op_def_cfa_reg (cfg, code, AMD64_RBP);
mono_emit_unwind_op_fp_alloc (cfg, code, AMD64_RBP, 0);
async_exc_point (code);
}
/* The param area is always at offset 0 from sp */
/* This needs to be allocated here, since it has to come after the spill area */
if (cfg->param_area) {
if (cfg->arch.omit_fp)
// FIXME:
g_assert_not_reached ();
cfg->stack_offset += ALIGN_TO (cfg->param_area, sizeof (target_mgreg_t));
}
if (cfg->arch.omit_fp) {
/*
* On enter, the stack is misaligned by the pushing of the return
* address. It is either made aligned by the pushing of %rbp, or by
* this.
*/
alloc_size = ALIGN_TO (cfg->stack_offset, 8);
if ((alloc_size % 16) == 0) {
alloc_size += 8;
/* Mark the padding slot as NOREF */
mini_gc_set_slot_type_from_cfa (cfg, -cfa_offset - sizeof (target_mgreg_t), SLOT_NOREF);
}
} else {
alloc_size = ALIGN_TO (cfg->stack_offset, MONO_ARCH_FRAME_ALIGNMENT);
if (cfg->stack_offset != alloc_size) {
/* Mark the padding slot as NOREF */
mini_gc_set_slot_type_from_fp (cfg, -alloc_size + cfg->param_area, SLOT_NOREF);
}
cfg->arch.sp_fp_offset = alloc_size;
alloc_size -= pos;
}
cfg->arch.stack_alloc_size = alloc_size;
set_code_cursor (cfg, code);
/* Allocate stack frame */
#ifdef TARGET_WIN32
code = emit_prolog_setup_sp_win64 (cfg, code, alloc_size, &cfa_offset);
#else
if (alloc_size) {
/* See mono_emit_stack_alloc */
#if defined(MONO_ARCH_SIGSEGV_ON_ALTSTACK)
guint32 remaining_size = alloc_size;
/* Use a loop for large sizes */
if (remaining_size > 10 * 0x1000) {
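/*
 * Probe loop: each iteration moves rsp down one page and touches it (the dummy
 * test against [rsp]), so the guard page is hit in order and the stack can grow.
 */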
amd64_mov_reg_imm (code, X86_EAX, remaining_size / 0x1000);
guint8 *label = code;
amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 0x1000);
amd64_test_membase_reg (code, AMD64_RSP, 0, AMD64_RSP);
amd64_alu_reg_imm (code, X86_SUB, AMD64_RAX, 1);
amd64_alu_reg_imm (code, X86_CMP, AMD64_RAX, 0);
guint8 *label2 = code;
x86_branch8 (code, X86_CC_NE, 0, FALSE);
amd64_patch (label2, label);
if (cfg->arch.omit_fp) {
cfa_offset += (remaining_size / 0x1000) * 0x1000;
mono_emit_unwind_op_def_cfa_offset (cfg, code, cfa_offset);
}
remaining_size = remaining_size % 0x1000;
set_code_cursor (cfg, code);
}
guint32 required_code_size = ((remaining_size / 0x1000) + 1) * 11; /* 11 is the max size of amd64_alu_reg_imm + amd64_test_membase_reg */
code = realloc_code (cfg, required_code_size);
while (remaining_size >= 0x1000) {
amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 0x1000);
if (cfg->arch.omit_fp) {
cfa_offset += 0x1000;
mono_emit_unwind_op_def_cfa_offset (cfg, code, cfa_offset);
}
async_exc_point (code);
amd64_test_membase_reg (code, AMD64_RSP, 0, AMD64_RSP);
remaining_size -= 0x1000;
}
if (remaining_size) {
amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, remaining_size);
if (cfg->arch.omit_fp) {
cfa_offset += remaining_size;
mono_emit_unwind_op_def_cfa_offset (cfg, code, cfa_offset);
async_exc_point (code);
}
}
#else
amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, alloc_size);
if (cfg->arch.omit_fp) {
cfa_offset += alloc_size;
mono_emit_unwind_op_def_cfa_offset (cfg, code, cfa_offset);
async_exc_point (code);
}
#endif
}
#endif
/* Stack alignment check */
#if 0
{
guint8 *buf;
amd64_mov_reg_reg (code, AMD64_RAX, AMD64_RSP, 8);
amd64_alu_reg_imm (code, X86_AND, AMD64_RAX, 0xf);
amd64_alu_reg_imm (code, X86_CMP, AMD64_RAX, 0);
buf = code;
x86_branch8 (code, X86_CC_EQ, 1, FALSE);
amd64_breakpoint (code);
amd64_patch (buf, code);
}
#endif
if (mini_debug_options.init_stacks) {
/* Fill the stack frame with a dummy value to force deterministic behavior */
/* Save registers to the red zone */
amd64_mov_membase_reg (code, AMD64_RSP, -8, AMD64_RDI, 8);
amd64_mov_membase_reg (code, AMD64_RSP, -16, AMD64_RCX, 8);
MONO_DISABLE_WARNING (4310) // cast truncates constant value
amd64_mov_reg_imm (code, AMD64_RAX, 0x2a2a2a2a2a2a2a2a);
MONO_RESTORE_WARNING
amd64_mov_reg_imm (code, AMD64_RCX, alloc_size / 8);
amd64_mov_reg_reg (code, AMD64_RDI, AMD64_RSP, 8);
amd64_cld (code);
amd64_prefix (code, X86_REP_PREFIX);
amd64_stosl (code);
amd64_mov_reg_membase (code, AMD64_RDI, AMD64_RSP, -8, 8);
amd64_mov_reg_membase (code, AMD64_RCX, AMD64_RSP, -16, 8);
}
/* Save LMF */
if (method->save_lmf)
code = emit_setup_lmf (cfg, code, lmf_var->inst_offset, cfa_offset);
/* Save callee saved registers */
if (cfg->arch.omit_fp) {
save_area_offset = cfg->arch.reg_save_area_offset;
/* Save callee saved registers after sp is adjusted */
/* The registers are saved at the bottom of the frame */
/* FIXME: Optimize this so the regs are saved at the end of the frame in increasing order */
} else {
/* The registers are saved just below the saved rbp */
save_area_offset = cfg->arch.reg_save_area_offset;
}
for (i = 0; i < AMD64_NREG; ++i) {
if (AMD64_IS_CALLEE_SAVED_REG (i) && (cfg->arch.saved_iregs & (1 << i))) {
amd64_mov_membase_reg (code, cfg->frame_reg, save_area_offset, i, 8);
if (cfg->arch.omit_fp) {
mono_emit_unwind_op_offset (cfg, code, i, - (cfa_offset - save_area_offset));
/* These are handled automatically by the stack marking code */
mini_gc_set_slot_type_from_cfa (cfg, - (cfa_offset - save_area_offset), SLOT_NOREF);
} else {
mono_emit_unwind_op_offset (cfg, code, i, - (-save_area_offset + (2 * 8)));
// FIXME: GC
}
save_area_offset += 8;
async_exc_point (code);
}
}
/* store runtime generic context */
if (cfg->rgctx_var) {
g_assert (cfg->rgctx_var->opcode == OP_REGOFFSET &&
(cfg->rgctx_var->inst_basereg == AMD64_RBP || cfg->rgctx_var->inst_basereg == AMD64_RSP));
amd64_mov_membase_reg (code, cfg->rgctx_var->inst_basereg, cfg->rgctx_var->inst_offset, MONO_ARCH_RGCTX_REG, sizeof(gpointer));
mono_add_var_location (cfg, cfg->rgctx_var, TRUE, MONO_ARCH_RGCTX_REG, 0, 0, code - cfg->native_code);
mono_add_var_location (cfg, cfg->rgctx_var, FALSE, cfg->rgctx_var->inst_basereg, cfg->rgctx_var->inst_offset, code - cfg->native_code, 0);
}
/* compute max_length in order to use short forward jumps */
max_epilog_size = get_max_epilog_size (cfg);
if (cfg->opt & MONO_OPT_BRANCH && cfg->max_block_num < MAX_BBLOCKS_FOR_BRANCH_OPTS) {
for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
MonoInst *ins;
int max_length = 0;
/* max alignment for loops */
if ((cfg->opt & MONO_OPT_LOOP) && bb_is_loop_start (bb))
max_length += LOOP_ALIGNMENT;
MONO_BB_FOR_EACH_INS (bb, ins) {
max_length += ins_get_size (ins->opcode);
}
/* Take prolog and epilog instrumentation into account */
if (bb == cfg->bb_entry || bb == cfg->bb_exit)
max_length += max_epilog_size;
bb->max_length = max_length;
}
}
sig = mono_method_signature_internal (method);
pos = 0;
cinfo = cfg->arch.cinfo;
if (sig->ret->type != MONO_TYPE_VOID) {
/* Save volatile arguments to the stack */
if (cfg->vret_addr && (cfg->vret_addr->opcode != OP_REGVAR))
amd64_mov_membase_reg (code, cfg->vret_addr->inst_basereg, cfg->vret_addr->inst_offset, cinfo->ret.reg, 8);
}
/* Keep this in sync with emit_load_volatile_arguments */
for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
ArgInfo *ainfo = cinfo->args + i;
ins = cfg->args [i];
if (ins->flags & MONO_INST_IS_DEAD && !MONO_CFG_PROFILE (cfg, ENTER_CONTEXT))
/* Unused arguments */
continue;
/* Save volatile arguments to the stack */
if (ins->opcode != OP_REGVAR) {
switch (ainfo->storage) {
case ArgInIReg: {
guint32 size = 8;
/* FIXME: I1 etc */
/*
if (stack_offset & 0x1)
size = 1;
else if (stack_offset & 0x2)
size = 2;
else if (stack_offset & 0x4)
size = 4;
else
size = 8;
*/
amd64_mov_membase_reg (code, ins->inst_basereg, ins->inst_offset, ainfo->reg, size);
/*
* Save the original location of 'this',
* mono_get_generic_info_from_stack_frame () needs this to properly look up
* the argument value during the handling of async exceptions.
*/
if (i == 0 && sig->hasthis) {
mono_add_var_location (cfg, ins, TRUE, ainfo->reg, 0, 0, code - cfg->native_code);
mono_add_var_location (cfg, ins, FALSE, ins->inst_basereg, ins->inst_offset, code - cfg->native_code, 0);
}
break;
}
case ArgInFloatSSEReg:
amd64_movss_membase_reg (code, ins->inst_basereg, ins->inst_offset, ainfo->reg);
break;
case ArgInDoubleSSEReg:
amd64_movsd_membase_reg (code, ins->inst_basereg, ins->inst_offset, ainfo->reg);
break;
case ArgValuetypeInReg:
for (quad = 0; quad < 2; quad ++) {
switch (ainfo->pair_storage [quad]) {
case ArgInIReg:
amd64_mov_membase_reg (code, ins->inst_basereg, ins->inst_offset + (quad * sizeof (target_mgreg_t)), ainfo->pair_regs [quad], sizeof (target_mgreg_t));
break;
case ArgInFloatSSEReg:
amd64_movss_membase_reg (code, ins->inst_basereg, ins->inst_offset + (quad * sizeof (target_mgreg_t)), ainfo->pair_regs [quad]);
break;
case ArgInDoubleSSEReg:
amd64_movsd_membase_reg (code, ins->inst_basereg, ins->inst_offset + (quad * sizeof (target_mgreg_t)), ainfo->pair_regs [quad]);
break;
case ArgNone:
break;
default:
g_assert_not_reached ();
}
}
break;
case ArgValuetypeAddrInIReg:
if (ainfo->pair_storage [0] == ArgInIReg)
amd64_mov_membase_reg (code, ins->inst_left->inst_basereg, ins->inst_left->inst_offset, ainfo->pair_regs [0], sizeof (target_mgreg_t));
break;
case ArgValuetypeAddrOnStack:
break;
case ArgGSharedVtInReg:
amd64_mov_membase_reg (code, ins->inst_basereg, ins->inst_offset, ainfo->reg, 8);
break;
default:
break;
}
} else {
/* Argument allocated to (non-volatile) register */
switch (ainfo->storage) {
case ArgInIReg:
amd64_mov_reg_reg (code, ins->dreg, ainfo->reg, 8);
break;
case ArgOnStack:
amd64_mov_reg_membase (code, ins->dreg, AMD64_RBP, ARGS_OFFSET + ainfo->offset, 8);
break;
default:
g_assert_not_reached ();
}
if (i == 0 && sig->hasthis) {
g_assert (ainfo->storage == ArgInIReg);
mono_add_var_location (cfg, ins, TRUE, ainfo->reg, 0, 0, code - cfg->native_code);
mono_add_var_location (cfg, ins, TRUE, ins->dreg, 0, code - cfg->native_code, 0);
}
}
}
if (cfg->method->save_lmf)
args_clobbered = TRUE;
/*
* Optimize the common case of the first bblock making a call with the same
* arguments as the method. This works because the arguments are still in their
* original argument registers.
* FIXME: Generalize this
*/
if (!args_clobbered) {
MonoBasicBlock *first_bb = cfg->bb_entry;
MonoInst *next;
int filter = FILTER_IL_SEQ_POINT;
next = mono_bb_first_inst (first_bb, filter);
if (!next && first_bb->next_bb) {
first_bb = first_bb->next_bb;
next = mono_bb_first_inst (first_bb, filter);
}
if (first_bb->in_count > 1)
next = NULL;
for (i = 0; next && i < sig->param_count + sig->hasthis; ++i) {
ArgInfo *ainfo = cinfo->args + i;
gboolean match = FALSE;
ins = cfg->args [i];
if (ins->opcode != OP_REGVAR) {
switch (ainfo->storage) {
case ArgInIReg: {
if (((next->opcode == OP_LOAD_MEMBASE) || (next->opcode == OP_LOADI4_MEMBASE)) && next->inst_basereg == ins->inst_basereg && next->inst_offset == ins->inst_offset) {
if (next->dreg == ainfo->reg) {
NULLIFY_INS (next);
match = TRUE;
} else {
next->opcode = OP_MOVE;
next->sreg1 = ainfo->reg;
/* Only continue if the instruction doesn't change argument regs */
if (next->dreg == ainfo->reg || next->dreg == AMD64_RAX)
match = TRUE;
}
}
break;
}
default:
break;
}
} else {
/* Argument allocated to (non-volatile) register */
switch (ainfo->storage) {
case ArgInIReg:
if (next->opcode == OP_MOVE && next->sreg1 == ins->dreg && next->dreg == ainfo->reg) {
NULLIFY_INS (next);
match = TRUE;
}
break;
default:
break;
}
}
if (match) {
next = mono_inst_next (next, filter);
//next = mono_inst_list_next (&next->node, &first_bb->ins_list);
if (!next)
break;
}
}
}
if (cfg->gen_sdb_seq_points) {
MonoInst *info_var = cfg->arch.seq_point_info_var;
/* Initialize seq_point_info_var */
if (cfg->compile_aot) {
/* Initialize the variable from a GOT slot */
/* Same as OP_AOTCONST */
mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_SEQ_POINT_INFO, cfg->method);
amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, sizeof(gpointer));
g_assert (info_var->opcode == OP_REGOFFSET);
amd64_mov_membase_reg (code, info_var->inst_basereg, info_var->inst_offset, AMD64_R11, 8);
}
if (cfg->compile_aot) {
/* Initialize ss_tramp_var */
ins = cfg->arch.ss_tramp_var;
g_assert (ins->opcode == OP_REGOFFSET);
amd64_mov_reg_membase (code, AMD64_R11, info_var->inst_basereg, info_var->inst_offset, 8);
amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, MONO_STRUCT_OFFSET (SeqPointInfo, ss_tramp_addr), 8);
amd64_mov_membase_reg (code, ins->inst_basereg, ins->inst_offset, AMD64_R11, 8);
} else {
/* Initialize ss_tramp_var */
ins = cfg->arch.ss_tramp_var;
g_assert (ins->opcode == OP_REGOFFSET);
amd64_mov_reg_imm (code, AMD64_R11, (guint64)&ss_trampoline);
amd64_mov_membase_reg (code, ins->inst_basereg, ins->inst_offset, AMD64_R11, 8);
/* Initialize bp_tramp_var */
ins = cfg->arch.bp_tramp_var;
g_assert (ins->opcode == OP_REGOFFSET);
amd64_mov_reg_imm (code, AMD64_R11, (guint64)&bp_trampoline);
amd64_mov_membase_reg (code, ins->inst_basereg, ins->inst_offset, AMD64_R11, 8);
}
}
set_code_cursor (cfg, code);
return code;
}
void
mono_arch_emit_epilog (MonoCompile *cfg)
{
MonoMethod *method = cfg->method;
int quad, i;
guint8 *code;
int max_epilog_size;
CallInfo *cinfo;
gint32 lmf_offset = cfg->lmf_var ? cfg->lmf_var->inst_offset : -1;
gint32 save_area_offset = cfg->arch.reg_save_area_offset;
max_epilog_size = get_max_epilog_size (cfg);
code = realloc_code (cfg, max_epilog_size);
cfg->has_unwind_info_for_epilog = TRUE;
/* Mark the start of the epilog */
mono_emit_unwind_op_mark_loc (cfg, code, 0);
/* Save the unwind state which is needed by the out-of-line code */
mono_emit_unwind_op_remember_state (cfg, code);
/* the code restoring the registers must be kept in sync with OP_TAILCALL */
if (method->save_lmf) {
if (cfg->used_int_regs & (1 << AMD64_RBP))
amd64_mov_reg_membase (code, AMD64_RBP, cfg->frame_reg, lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, rbp), 8);
if (cfg->arch.omit_fp)
/*
 * emit_setup_lmf () marks RBP as saved; we have to mark it as 'same value' here,
 * before the stack is torn down, since its stack slot will become invalid.
*/
mono_emit_unwind_op_same_value (cfg, code, AMD64_RBP);
}
/* Restore callee saved regs */
for (i = 0; i < AMD64_NREG; ++i) {
if (AMD64_IS_CALLEE_SAVED_REG (i) && (cfg->arch.saved_iregs & (1 << i))) {
/* Restore only used_int_regs, not arch.saved_iregs */
#if defined(MONO_SUPPORT_TASKLETS)
int restore_reg = 1;
#else
int restore_reg = (cfg->used_int_regs & (1 << i));
#endif
if (restore_reg) {
amd64_mov_reg_membase (code, i, cfg->frame_reg, save_area_offset, 8);
mono_emit_unwind_op_same_value (cfg, code, i);
async_exc_point (code);
}
save_area_offset += 8;
}
}
/* Load returned vtypes into registers if needed */
cinfo = cfg->arch.cinfo;
if (cinfo->ret.storage == ArgValuetypeInReg) {
ArgInfo *ainfo = &cinfo->ret;
MonoInst *inst = cfg->ret;
for (quad = 0; quad < 2; quad ++) {
switch (ainfo->pair_storage [quad]) {
case ArgInIReg:
amd64_mov_reg_membase (code, ainfo->pair_regs [quad], inst->inst_basereg, inst->inst_offset + (quad * sizeof (target_mgreg_t)), ainfo->pair_size [quad]);
break;
case ArgInFloatSSEReg:
amd64_movss_reg_membase (code, ainfo->pair_regs [quad], inst->inst_basereg, inst->inst_offset + (quad * sizeof (target_mgreg_t)));
break;
case ArgInDoubleSSEReg:
amd64_movsd_reg_membase (code, ainfo->pair_regs [quad], inst->inst_basereg, inst->inst_offset + (quad * sizeof (target_mgreg_t)));
break;
case ArgNone:
break;
default:
g_assert_not_reached ();
}
}
}
if (cfg->arch.omit_fp) {
if (cfg->arch.stack_alloc_size) {
amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, cfg->arch.stack_alloc_size);
}
} else {
#ifdef TARGET_WIN32
amd64_lea_membase (code, AMD64_RSP, AMD64_RBP, 0);
amd64_pop_reg (code, AMD64_RBP);
mono_emit_unwind_op_same_value (cfg, code, AMD64_RBP);
#else
amd64_leave (code);
mono_emit_unwind_op_same_value (cfg, code, AMD64_RBP);
#endif
}
mono_emit_unwind_op_def_cfa (cfg, code, AMD64_RSP, 8);
async_exc_point (code);
amd64_ret (code);
/* Restore the unwind state to be the same as before the epilog */
mono_emit_unwind_op_restore_state (cfg, code);
set_code_cursor (cfg, code);
}
void
mono_arch_emit_exceptions (MonoCompile *cfg)
{
MonoJumpInfo *patch_info;
int nthrows, i;
guint8 *code;
MonoClass *exc_classes [16];
guint8 *exc_throw_start [16], *exc_throw_end [16];
guint32 code_size = 0;
/* Compute needed space */
for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
if (patch_info->type == MONO_PATCH_INFO_EXC)
code_size += 40;
if (patch_info->type == MONO_PATCH_INFO_R8)
code_size += 8 + 15; /* sizeof (double) + alignment */
if (patch_info->type == MONO_PATCH_INFO_R4)
code_size += 4 + 15; /* sizeof (float) + alignment */
if (patch_info->type == MONO_PATCH_INFO_GC_CARD_TABLE_ADDR)
code_size += 8 + 7; /*sizeof (void*) + alignment */
}
code = realloc_code (cfg, code_size);
/* add code to raise exceptions */
nthrows = 0;
for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
switch (patch_info->type) {
case MONO_PATCH_INFO_EXC: {
MonoClass *exc_class;
guint8 *buf, *buf2;
guint32 throw_ip;
amd64_patch (patch_info->ip.i + cfg->native_code, code);
exc_class = mono_class_load_from_name (mono_defaults.corlib, "System", patch_info->data.name);
throw_ip = patch_info->ip.i;
//x86_breakpoint (code);
/* Find a throw sequence for the same exception class */
for (i = 0; i < nthrows; ++i)
if (exc_classes [i] == exc_class)
break;
if (i < nthrows) {
amd64_mov_reg_imm (code, AMD64_ARG_REG2, (exc_throw_end [i] - cfg->native_code) - throw_ip);
x86_jump_code (code, exc_throw_start [i]);
patch_info->type = MONO_PATCH_INFO_NONE;
}
else {
buf = code;
amd64_mov_reg_imm_size (code, AMD64_ARG_REG2, 0xf0f0f0f0, 4);
buf2 = code;
if (nthrows < 16) {
exc_classes [nthrows] = exc_class;
exc_throw_start [nthrows] = code;
}
amd64_mov_reg_imm (code, AMD64_ARG_REG1, m_class_get_type_token (exc_class) - MONO_TOKEN_TYPE_DEF);
patch_info->type = MONO_PATCH_INFO_NONE;
code = emit_call (cfg, NULL, code, MONO_JIT_ICALL_mono_arch_throw_corlib_exception);
amd64_mov_reg_imm (buf, AMD64_ARG_REG2, (code - cfg->native_code) - throw_ip);
while (buf < buf2)
x86_nop (buf);
if (nthrows < 16) {
exc_throw_end [nthrows] = code;
nthrows ++;
}
}
break;
}
default:
/* do nothing */
break;
}
set_code_cursor (cfg, code);
}
/* Handle relocations with RIP relative addressing */
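/*
 * FP constants and the card table address are emitted inline after the method
 * body; the 32-bit RIP-relative displacement of each referencing instruction is
 * patched here to point at the freshly emitted data, after which the patch info
 * is removed from the list.
 */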
for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
gboolean remove = FALSE;
guint8 *orig_code = code;
switch (patch_info->type) {
case MONO_PATCH_INFO_R8:
case MONO_PATCH_INFO_R4: {
guint8 *pos, *patch_pos;
guint32 target_pos;
/* The SSE opcodes require a 16 byte alignment */
code = (guint8*)ALIGN_TO (code, 16);
pos = cfg->native_code + patch_info->ip.i;
if (IS_REX (pos [1])) {
patch_pos = pos + 5;
target_pos = code - pos - 9;
}
else {
patch_pos = pos + 4;
target_pos = code - pos - 8;
}
if (patch_info->type == MONO_PATCH_INFO_R8) {
*(double*)code = *(double*)patch_info->data.target;
code += sizeof (double);
} else {
*(float*)code = *(float*)patch_info->data.target;
code += sizeof (float);
}
*(guint32*)(patch_pos) = target_pos;
remove = TRUE;
break;
}
case MONO_PATCH_INFO_GC_CARD_TABLE_ADDR: {
guint8 *pos;
if (cfg->compile_aot)
continue;
/* Loading is faster from aligned addresses. */
code = (guint8*)ALIGN_TO (code, 8);
memset (orig_code, 0, code - orig_code);
pos = cfg->native_code + patch_info->ip.i;
/*alu_op [rex] modr/m imm32 - 7 or 8 bytes */
if (IS_REX (pos [1]))
*(guint32*)(pos + 4) = (guint8*)code - pos - 8;
else
*(guint32*)(pos + 3) = (guint8*)code - pos - 7;
*(gpointer*)code = (gpointer)patch_info->data.target;
code += sizeof (gpointer);
remove = TRUE;
break;
}
default:
break;
}
if (remove) {
if (patch_info == cfg->patch_info)
cfg->patch_info = patch_info->next;
else {
MonoJumpInfo *tmp;
for (tmp = cfg->patch_info; tmp->next != patch_info; tmp = tmp->next)
;
tmp->next = patch_info->next;
}
}
set_code_cursor (cfg, code);
}
set_code_cursor (cfg, code);
}
#endif /* DISABLE_JIT */
MONO_NEVER_INLINE
void
mono_arch_flush_icache (guint8 *code, gint size)
{
/* call/ret required (or likely other control transfer) */
}
void
mono_arch_flush_register_windows (void)
{
}
gboolean
mono_arch_is_inst_imm (int opcode, int imm_opcode, gint64 imm)
{
return amd64_use_imm32 (imm);
}
/*
 * Determine whether the trap whose info is in SIGINFO is caused by
* integer overflow.
*/
gboolean
mono_arch_is_int_overflow (void *sigctx, void *info)
{
MonoContext ctx;
guint8* rip;
int reg;
gint64 value;
mono_sigctx_to_monoctx (sigctx, &ctx);
rip = (guint8*)ctx.gregs [AMD64_RIP];
if (IS_REX (rip [0])) {
reg = amd64_rex_b (rip [0]);
rip ++;
}
else
reg = 0;
if ((rip [0] == 0xf7) && (x86_modrm_mod (rip [1]) == 0x3) && (x86_modrm_reg (rip [1]) == 0x7)) {
/* idiv REG */
reg += x86_modrm_rm (rip [1]);
value = ctx.gregs [reg];
if (value == -1)
return TRUE;
}
return FALSE;
}
guint32
mono_arch_get_patch_offset (guint8 *code)
{
return 3;
}
/**
* \return TRUE if no sw breakpoint was present (always).
*
* Copy \p size bytes from \p code - \p offset to the buffer \p buf. If the debugger inserted software
* breakpoints in the original code, they are removed in the copy.
*/
gboolean
mono_breakpoint_clean_code (guint8 *method_start, guint8 *code, int offset, guint8 *buf, int size)
{
/*
 * If method_start is non-NULL we need to perform bounds checks: since we access memory
 * at code - offset, we could go before the start of the method and end up in a different
 * page of memory that is not mapped, or read incorrect data. We zero-fill those bytes
 * instead.
*/
if (!method_start || code - offset >= method_start) {
memcpy (buf, code - offset, size);
} else {
int diff = code - method_start;
memset (buf, 0, size);
memcpy (buf + offset - diff, method_start, diff + size - offset);
}
return TRUE;
}
int
mono_arch_get_this_arg_reg (guint8 *code)
{
return AMD64_ARG_REG1;
}
gpointer
mono_arch_get_this_arg_from_call (host_mgreg_t *regs, guint8 *code)
{
return (gpointer)regs [mono_arch_get_this_arg_reg (code)];
}
#define MAX_ARCH_DELEGATE_PARAMS 10
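/*
 * get_delegate_invoke_impl:
 *
 * Emit a small invoke thunk for delegates. With a target, the delegate argument
 * in ARG_REG1 is replaced by delegate->target; without one, it is dropped by
 * shifting every parameter register left by one. Both variants then tail-jump
 * to delegate->method_ptr.
 */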
static gpointer
get_delegate_invoke_impl (MonoTrampInfo **info, gboolean has_target, guint32 param_count)
{
guint8 *code, *start;
GSList *unwind_ops = NULL;
int i;
unwind_ops = mono_arch_get_cie_program ();
const int size = 64;
start = code = (guint8 *)mono_global_codeman_reserve (size + MONO_TRAMPOLINE_UNWINDINFO_SIZE(0));
if (has_target) {
/* Replace the this argument with the target */
amd64_mov_reg_reg (code, AMD64_RAX, AMD64_ARG_REG1, 8);
amd64_mov_reg_membase (code, AMD64_ARG_REG1, AMD64_RAX, MONO_STRUCT_OFFSET (MonoDelegate, target), 8);
amd64_jump_membase (code, AMD64_RAX, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr));
} else {
if (param_count == 0) {
amd64_jump_membase (code, AMD64_ARG_REG1, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr));
} else {
/* We have to shift the arguments left */
amd64_mov_reg_reg (code, AMD64_RAX, AMD64_ARG_REG1, 8);
for (i = 0; i < param_count; ++i) {
#ifdef TARGET_WIN32
if (i < 3)
amd64_mov_reg_reg (code, param_regs [i], param_regs [i + 1], 8);
else
amd64_mov_reg_membase (code, param_regs [i], AMD64_RSP, 0x28, 8);
#else
amd64_mov_reg_reg (code, param_regs [i], param_regs [i + 1], 8);
#endif
}
amd64_jump_membase (code, AMD64_RAX, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr));
}
}
g_assertf ((code - start) <= size, "%d %d", (int)(code - start), size);
g_assert_checked (mono_arch_unwindinfo_validate_size (unwind_ops, MONO_TRAMPOLINE_UNWINDINFO_SIZE(0)));
mono_arch_flush_icache (start, code - start);
if (has_target) {
*info = mono_tramp_info_create ("delegate_invoke_impl_has_target", start, code - start, NULL, unwind_ops);
} else {
char *name = g_strdup_printf ("delegate_invoke_impl_target_%d", param_count);
*info = mono_tramp_info_create (name, start, code - start, NULL, unwind_ops);
g_free (name);
}
if (mono_jit_map_is_enabled ()) {
char *buff;
if (has_target)
buff = (char*)"delegate_invoke_has_target";
else
buff = g_strdup_printf ("delegate_invoke_no_target_%d", param_count);
mono_emit_jit_tramp (start, code - start, buff);
if (!has_target)
g_free (buff);
}
MONO_PROFILER_RAISE (jit_code_buffer, (start, code - start, MONO_PROFILER_CODE_BUFFER_DELEGATE_INVOKE, NULL));
return start;
}
#define MAX_VIRTUAL_DELEGATE_OFFSET 32
static gpointer
get_delegate_virtual_invoke_impl (MonoTrampInfo **info, gboolean load_imt_reg, int offset)
{
guint8 *code, *start;
const int size = 20;
char *tramp_name;
GSList *unwind_ops;
if (offset / (int)sizeof (target_mgreg_t) > MAX_VIRTUAL_DELEGATE_OFFSET)
return NULL;
start = code = (guint8 *)mono_global_codeman_reserve (size + MONO_TRAMPOLINE_UNWINDINFO_SIZE(0));
unwind_ops = mono_arch_get_cie_program ();
/* Replace the this argument with the target */
amd64_mov_reg_reg (code, AMD64_RAX, AMD64_ARG_REG1, 8);
amd64_mov_reg_membase (code, AMD64_ARG_REG1, AMD64_RAX, MONO_STRUCT_OFFSET (MonoDelegate, target), 8);
if (load_imt_reg) {
/* Load the IMT reg */
amd64_mov_reg_membase (code, MONO_ARCH_IMT_REG, AMD64_RAX, MONO_STRUCT_OFFSET (MonoDelegate, method), 8);
}
/* Load the vtable */
amd64_mov_reg_membase (code, AMD64_RAX, AMD64_ARG_REG1, MONO_STRUCT_OFFSET (MonoObject, vtable), 8);
amd64_jump_membase (code, AMD64_RAX, offset);
g_assertf ((code - start) <= size, "%d %d", (int)(code - start), size);
MONO_PROFILER_RAISE (jit_code_buffer, (start, code - start, MONO_PROFILER_CODE_BUFFER_DELEGATE_INVOKE, NULL));
tramp_name = mono_get_delegate_virtual_invoke_impl_name (load_imt_reg, offset);
*info = mono_tramp_info_create (tramp_name, start, code - start, NULL, unwind_ops);
g_free (tramp_name);
return start;
}
/*
* mono_arch_get_delegate_invoke_impls:
*
* Return a list of MonoTrampInfo structures for the delegate invoke impl
* trampolines.
*/
GSList*
mono_arch_get_delegate_invoke_impls (void)
{
GSList *res = NULL;
MonoTrampInfo *info;
int i;
get_delegate_invoke_impl (&info, TRUE, 0);
res = g_slist_prepend (res, info);
for (i = 0; i <= MAX_ARCH_DELEGATE_PARAMS; ++i) {
get_delegate_invoke_impl (&info, FALSE, i);
res = g_slist_prepend (res, info);
}
for (i = 1; i <= MONO_IMT_SIZE; ++i) {
get_delegate_virtual_invoke_impl (&info, TRUE, - i * TARGET_SIZEOF_VOID_P);
res = g_slist_prepend (res, info);
}
for (i = 0; i <= MAX_VIRTUAL_DELEGATE_OFFSET; ++i) {
get_delegate_virtual_invoke_impl (&info, FALSE, i * TARGET_SIZEOF_VOID_P);
res = g_slist_prepend (res, info);
get_delegate_virtual_invoke_impl (&info, TRUE, i * TARGET_SIZEOF_VOID_P);
res = g_slist_prepend (res, info);
}
return res;
}
gpointer
mono_arch_get_delegate_invoke_impl (MonoMethodSignature *sig, gboolean has_target)
{
guint8 *code, *start;
int i;
if (sig->param_count > MAX_ARCH_DELEGATE_PARAMS)
return NULL;
/* FIXME: Support more cases */
if (MONO_TYPE_ISSTRUCT (mini_get_underlying_type (sig->ret)))
return NULL;
if (has_target) {
static guint8* cached = NULL;
if (cached)
return cached;
if (mono_ee_features.use_aot_trampolines) {
start = (guint8 *)mono_aot_get_trampoline ("delegate_invoke_impl_has_target");
} else {
MonoTrampInfo *info;
start = (guint8 *)get_delegate_invoke_impl (&info, TRUE, 0);
mono_tramp_info_register (info, NULL);
}
mono_memory_barrier ();
cached = start;
} else {
static guint8* cache [MAX_ARCH_DELEGATE_PARAMS + 1] = {NULL};
for (i = 0; i < sig->param_count; ++i)
if (!mono_is_regsize_var (sig->params [i]))
return NULL;
if (sig->param_count > 4)
return NULL;
code = cache [sig->param_count];
if (code)
return code;
if (mono_ee_features.use_aot_trampolines) {
char *name = g_strdup_printf ("delegate_invoke_impl_target_%d", sig->param_count);
start = (guint8 *)mono_aot_get_trampoline (name);
g_free (name);
} else {
MonoTrampInfo *info;
start = (guint8 *)get_delegate_invoke_impl (&info, FALSE, sig->param_count);
mono_tramp_info_register (info, NULL);
}
mono_memory_barrier ();
cache [sig->param_count] = start;
}
return start;
}
gpointer
mono_arch_get_delegate_virtual_invoke_impl (MonoMethodSignature *sig, MonoMethod *method, int offset, gboolean load_imt_reg)
{
MonoTrampInfo *info;
gpointer code;
code = get_delegate_virtual_invoke_impl (&info, load_imt_reg, offset);
if (code)
mono_tramp_info_register (info, NULL);
return code;
}
void
mono_arch_finish_init (void)
{
#if !defined(HOST_WIN32) && defined(MONO_XEN_OPT)
optimize_for_xen = access ("/proc/xen", F_OK) == 0;
#endif
}
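/*
 * Conservative per-instruction size estimates (in bytes) used to compute the
 * chunk_size of each IMT entry before the trampoline buffer is reserved.
 */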
#define CMP_SIZE (6 + 1)
#define CMP_REG_REG_SIZE (4 + 1)
#define BR_SMALL_SIZE 2
#define BR_LARGE_SIZE 6
#define MOV_REG_IMM_SIZE 10
#define MOV_REG_IMM_32BIT_SIZE 6
#define JUMP_REG_SIZE (2 + 1)
static int
imt_branch_distance (MonoIMTCheckItem **imt_entries, int start, int target)
{
int i, distance = 0;
for (i = start; i < target; ++i)
distance += imt_entries [i]->chunk_size;
return distance;
}
/*
* LOCKING: called with the domain lock held
*/
gpointer
mono_arch_build_imt_trampoline (MonoVTable *vtable, MonoIMTCheckItem **imt_entries, int count,
gpointer fail_tramp)
{
int i;
int size = 0;
guint8 *code, *start;
gboolean vtable_is_32bit = ((gsize)(vtable) == (gsize)(int)(gsize)(vtable));
GSList *unwind_ops;
MonoMemoryManager *mem_manager = m_class_get_mem_manager (vtable->klass);
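/* First pass: compute the chunk size of every IMT entry so that the exact
 * amount of code memory can be reserved before the second pass emits the code. */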
for (i = 0; i < count; ++i) {
MonoIMTCheckItem *item = imt_entries [i];
if (item->is_equals) {
if (item->check_target_idx) {
if (!item->compare_done) {
if (amd64_use_imm32 ((gint64)item->key))
item->chunk_size += CMP_SIZE;
else
item->chunk_size += MOV_REG_IMM_SIZE + CMP_REG_REG_SIZE;
}
if (item->has_target_code) {
item->chunk_size += MOV_REG_IMM_SIZE;
} else {
if (vtable_is_32bit)
item->chunk_size += MOV_REG_IMM_32BIT_SIZE;
else
item->chunk_size += MOV_REG_IMM_SIZE;
}
item->chunk_size += BR_SMALL_SIZE + JUMP_REG_SIZE;
} else {
if (fail_tramp) {
item->chunk_size += MOV_REG_IMM_SIZE * 3 + CMP_REG_REG_SIZE +
BR_SMALL_SIZE + JUMP_REG_SIZE * 2;
} else {
if (vtable_is_32bit)
item->chunk_size += MOV_REG_IMM_32BIT_SIZE;
else
item->chunk_size += MOV_REG_IMM_SIZE;
item->chunk_size += JUMP_REG_SIZE;
/* with assert below:
* item->chunk_size += CMP_SIZE + BR_SMALL_SIZE + 1;
*/
}
}
} else {
if (amd64_use_imm32 ((gint64)item->key))
item->chunk_size += CMP_SIZE;
else
item->chunk_size += MOV_REG_IMM_SIZE + CMP_REG_REG_SIZE;
item->chunk_size += BR_LARGE_SIZE;
imt_entries [item->check_target_idx]->compare_done = TRUE;
}
size += item->chunk_size;
}
if (fail_tramp) {
code = (guint8 *)mini_alloc_generic_virtual_trampoline (vtable, size + MONO_TRAMPOLINE_UNWINDINFO_SIZE(0));
} else {
code = (guint8 *)mono_mem_manager_code_reserve (mem_manager, size + MONO_TRAMPOLINE_UNWINDINFO_SIZE(0));
}
start = code;
unwind_ops = mono_arch_get_cie_program ();
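/* Second pass: emit the code. Leaf (is_equals) entries compare the IMT key
 * and jump through the vtable slot (or to target_code/fail_tramp); non-leaf
 * entries emit a conditional branch (a binary-search step over the sorted
 * keys) to the entry at check_target_idx. */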
for (i = 0; i < count; ++i) {
MonoIMTCheckItem *item = imt_entries [i];
item->code_target = code;
if (item->is_equals) {
gboolean fail_case = !item->check_target_idx && fail_tramp;
if (item->check_target_idx || fail_case) {
if (!item->compare_done || fail_case) {
if (amd64_use_imm32 ((gint64)item->key))
amd64_alu_reg_imm_size (code, X86_CMP, MONO_ARCH_IMT_REG, (guint32)(gssize)item->key, sizeof(gpointer));
else {
amd64_mov_reg_imm_size (code, MONO_ARCH_IMT_SCRATCH_REG, item->key, sizeof(gpointer));
amd64_alu_reg_reg (code, X86_CMP, MONO_ARCH_IMT_REG, MONO_ARCH_IMT_SCRATCH_REG);
}
}
item->jmp_code = code;
amd64_branch8 (code, X86_CC_NE, 0, FALSE);
if (item->has_target_code) {
amd64_mov_reg_imm (code, MONO_ARCH_IMT_SCRATCH_REG, item->value.target_code);
amd64_jump_reg (code, MONO_ARCH_IMT_SCRATCH_REG);
} else {
amd64_mov_reg_imm (code, MONO_ARCH_IMT_SCRATCH_REG, & (vtable->vtable [item->value.vtable_slot]));
amd64_jump_membase (code, MONO_ARCH_IMT_SCRATCH_REG, 0);
}
if (fail_case) {
amd64_patch (item->jmp_code, code);
amd64_mov_reg_imm (code, MONO_ARCH_IMT_SCRATCH_REG, fail_tramp);
amd64_jump_reg (code, MONO_ARCH_IMT_SCRATCH_REG);
item->jmp_code = NULL;
}
} else {
/* enable the commented code to assert on wrong method */
#if 0
if (amd64_is_imm32 (item->key))
amd64_alu_reg_imm_size (code, X86_CMP, MONO_ARCH_IMT_REG, (guint32)(gssize)item->key, sizeof(gpointer));
else {
amd64_mov_reg_imm (code, MONO_ARCH_IMT_SCRATCH_REG, item->key);
amd64_alu_reg_reg (code, X86_CMP, MONO_ARCH_IMT_REG, MONO_ARCH_IMT_SCRATCH_REG);
}
item->jmp_code = code;
amd64_branch8 (code, X86_CC_NE, 0, FALSE);
/* See the comment below about R10 */
amd64_mov_reg_imm (code, MONO_ARCH_IMT_SCRATCH_REG, & (vtable->vtable [item->value.vtable_slot]));
amd64_jump_membase (code, MONO_ARCH_IMT_SCRATCH_REG, 0);
amd64_patch (item->jmp_code, code);
amd64_breakpoint (code);
item->jmp_code = NULL;
#else
/* We're using R10 (MONO_ARCH_IMT_SCRATCH_REG) here because R11 (MONO_ARCH_IMT_REG)
needs to be preserved. R10 needs
to be preserved for calls which
require a runtime generic context,
but interface calls don't. */
amd64_mov_reg_imm (code, MONO_ARCH_IMT_SCRATCH_REG, & (vtable->vtable [item->value.vtable_slot]));
amd64_jump_membase (code, MONO_ARCH_IMT_SCRATCH_REG, 0);
#endif
}
} else {
if (amd64_use_imm32 ((gint64)item->key))
amd64_alu_reg_imm_size (code, X86_CMP, MONO_ARCH_IMT_REG, (guint32)(gssize)item->key, sizeof (target_mgreg_t));
else {
amd64_mov_reg_imm_size (code, MONO_ARCH_IMT_SCRATCH_REG, item->key, sizeof (target_mgreg_t));
amd64_alu_reg_reg (code, X86_CMP, MONO_ARCH_IMT_REG, MONO_ARCH_IMT_SCRATCH_REG);
}
item->jmp_code = code;
if (x86_is_imm8 (imt_branch_distance (imt_entries, i, item->check_target_idx)))
x86_branch8 (code, X86_CC_GE, 0, FALSE);
else
x86_branch32 (code, X86_CC_GE, 0, FALSE);
}
g_assertf (code - item->code_target <= item->chunk_size, "%X %X", (guint)(code - item->code_target), (guint)item->chunk_size);
}
/* patch the branches to get to the target items */
for (i = 0; i < count; ++i) {
MonoIMTCheckItem *item = imt_entries [i];
if (item->jmp_code) {
if (item->check_target_idx) {
amd64_patch (item->jmp_code, imt_entries [item->check_target_idx]->code_target);
}
}
}
if (!fail_tramp)
UnlockedAdd (&mono_stats.imt_trampolines_size, code - start);
g_assert (code - start <= size);
g_assert_checked (mono_arch_unwindinfo_validate_size (unwind_ops, MONO_TRAMPOLINE_UNWINDINFO_SIZE(0)));
MONO_PROFILER_RAISE (jit_code_buffer, (start, code - start, MONO_PROFILER_CODE_BUFFER_IMT_TRAMPOLINE, NULL));
mono_tramp_info_register (mono_tramp_info_create (NULL, start, code - start, NULL, unwind_ops), mem_manager);
return start;
}
MonoMethod*
mono_arch_find_imt_method (host_mgreg_t *regs, guint8 *code)
{
return (MonoMethod*)regs [MONO_ARCH_IMT_REG];
}
MonoVTable*
mono_arch_find_static_call_vtable (host_mgreg_t *regs, guint8 *code)
{
return (MonoVTable*) regs [MONO_ARCH_RGCTX_REG];
}
GSList*
mono_arch_get_cie_program (void)
{
GSList *l = NULL;
mono_add_unwind_op_def_cfa (l, (guint8*)NULL, (guint8*)NULL, AMD64_RSP, 8);
mono_add_unwind_op_offset (l, (guint8*)NULL, (guint8*)NULL, AMD64_RIP, -8);
return l;
}
#ifndef DISABLE_JIT
MonoInst*
mono_arch_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
{
MonoInst *ins = NULL;
int opcode = 0;
if (cmethod->klass == mono_class_try_get_math_class ()) {
if (strcmp (cmethod->name, "Sqrt") == 0) {
opcode = OP_SQRT;
} else if (strcmp (cmethod->name, "Abs") == 0 && fsig->params [0]->type == MONO_TYPE_R8) {
opcode = OP_ABS;
}
if (opcode && fsig->param_count == 1) {
MONO_INST_NEW (cfg, ins, opcode);
ins->type = STACK_R8;
ins->dreg = mono_alloc_freg (cfg);
ins->sreg1 = args [0]->dreg;
MONO_ADD_INS (cfg->cbb, ins);
}
opcode = 0;
if (cfg->opt & MONO_OPT_CMOV) {
if (strcmp (cmethod->name, "Min") == 0) {
if (fsig->params [0]->type == MONO_TYPE_I4)
opcode = OP_IMIN;
if (fsig->params [0]->type == MONO_TYPE_U4)
opcode = OP_IMIN_UN;
else if (fsig->params [0]->type == MONO_TYPE_I8)
opcode = OP_LMIN;
else if (fsig->params [0]->type == MONO_TYPE_U8)
opcode = OP_LMIN_UN;
} else if (strcmp (cmethod->name, "Max") == 0) {
if (fsig->params [0]->type == MONO_TYPE_I4)
opcode = OP_IMAX;
if (fsig->params [0]->type == MONO_TYPE_U4)
opcode = OP_IMAX_UN;
else if (fsig->params [0]->type == MONO_TYPE_I8)
opcode = OP_LMAX;
else if (fsig->params [0]->type == MONO_TYPE_U8)
opcode = OP_LMAX_UN;
}
}
if (opcode && fsig->param_count == 2) {
MONO_INST_NEW (cfg, ins, opcode);
ins->type = fsig->params [0]->type == MONO_TYPE_I4 ? STACK_I4 : STACK_I8;
ins->dreg = mono_alloc_ireg (cfg);
ins->sreg1 = args [0]->dreg;
ins->sreg2 = args [1]->dreg;
MONO_ADD_INS (cfg->cbb, ins);
}
#if 0
/* OP_FREM is not IEEE compatible */
else if (strcmp (cmethod->name, "IEEERemainder") == 0 && fsig->param_count == 2) {
MONO_INST_NEW (cfg, ins, OP_FREM);
ins->inst_i0 = args [0];
ins->inst_i1 = args [1];
}
#endif
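/* Math.Round/Floor/Ceiling map to a single SSE4.1 rounding instruction.
 * The mode value becomes the instruction's rounding-control immediate:
 * 0 = round to nearest (even), 1 = round toward -inf (floor),
 * 2 = round toward +inf (ceiling). */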
if ((mini_get_cpu_features (cfg) & MONO_CPU_X86_SSE41) != 0 && fsig->param_count == 1 && fsig->params [0]->type == MONO_TYPE_R8) {
int mode = -1;
if (!strcmp (cmethod->name, "Round"))
mode = 0;
else if (!strcmp (cmethod->name, "Floor"))
mode = 1;
else if (!strcmp (cmethod->name, "Ceiling"))
mode = 2;
if (mode != -1) {
int xreg = alloc_xreg (cfg);
EMIT_NEW_UNALU (cfg, ins, OP_FCONV_TO_R8_X, xreg, args [0]->dreg);
EMIT_NEW_UNALU (cfg, ins, OP_SSE41_ROUNDP, xreg, xreg);
ins->inst_c0 = mode;
ins->inst_c1 = MONO_TYPE_R8;
int dreg = alloc_freg (cfg);
EMIT_NEW_UNALU (cfg, ins, OP_EXTRACT_R8, dreg, xreg);
ins->inst_c0 = 0;
ins->inst_c1 = MONO_TYPE_R8;
return ins;
}
}
}
return ins;
}
#endif
host_mgreg_t
mono_arch_context_get_int_reg (MonoContext *ctx, int reg)
{
return ctx->gregs [reg];
}
host_mgreg_t *
mono_arch_context_get_int_reg_address (MonoContext *ctx, int reg)
{
return &ctx->gregs [reg];
}
void
mono_arch_context_set_int_reg (MonoContext *ctx, int reg, host_mgreg_t val)
{
ctx->gregs [reg] = val;
}
/*
* mono_arch_emit_load_aotconst:
*
* Emit code to load the contents of the GOT slot identified by TRAMP_TYPE and
* TARGET from the mscorlib GOT in full-aot code.
* On AMD64, the result is placed into R11.
*/
guint8*
mono_arch_emit_load_aotconst (guint8 *start, guint8 *code, MonoJumpInfo **ji, MonoJumpInfoType tramp_type, gconstpointer target)
{
*ji = mono_patch_info_list_prepend (*ji, code - start, tramp_type, target);
amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
return code;
}
/*
* mono_arch_get_trampolines:
*
* Return a list of MonoTrampInfo structures describing arch specific trampolines
* for AOT.
*/
GSList *
mono_arch_get_trampolines (gboolean aot)
{
return mono_amd64_get_exception_trampolines (aot);
}
/* Soft Debug support */
#ifdef MONO_ARCH_SOFT_DEBUG_SUPPORTED
/*
* mono_arch_set_breakpoint:
*
* Set a breakpoint at the native code corresponding to JI at NATIVE_OFFSET.
* The location should contain code emitted by OP_SEQ_POINT.
*/
void
mono_arch_set_breakpoint (MonoJitInfo *ji, guint8 *ip)
{
guint8 *code = ip;
if (ji->from_aot) {
guint32 native_offset = ip - (guint8*)ji->code_start;
SeqPointInfo *info = mono_arch_get_seq_point_info ((guint8 *)ji->code_start);
g_assert (info->bp_addrs [native_offset] == 0);
info->bp_addrs [native_offset] = mini_get_breakpoint_trampoline ();
} else {
/* ip points to a mov r11, 0 */
g_assert (code [0] == 0x41);
g_assert (code [1] == 0xbb);
amd64_mov_reg_imm (code, AMD64_R11, 1);
}
}
/*
* mono_arch_clear_breakpoint:
*
* Clear the breakpoint at IP.
*/
void
mono_arch_clear_breakpoint (MonoJitInfo *ji, guint8 *ip)
{
guint8 *code = ip;
if (ji->from_aot) {
guint32 native_offset = ip - (guint8*)ji->code_start;
SeqPointInfo *info = mono_arch_get_seq_point_info ((guint8 *)ji->code_start);
info->bp_addrs [native_offset] = NULL;
} else {
amd64_mov_reg_imm (code, AMD64_R11, 0);
}
}
gboolean
mono_arch_is_breakpoint_event (void *info, void *sigctx)
{
/* We use soft breakpoints on amd64 */
return FALSE;
}
/*
* mono_arch_skip_breakpoint:
*
* Modify CTX so the ip is placed after the breakpoint instruction, so when
* we resume, the instruction is not executed again.
*/
void
mono_arch_skip_breakpoint (MonoContext *ctx, MonoJitInfo *ji)
{
g_assert_not_reached ();
}
/*
* mono_arch_start_single_stepping:
*
* Start single stepping.
*/
void
mono_arch_start_single_stepping (void)
{
ss_trampoline = mini_get_single_step_trampoline ();
}
/*
* mono_arch_stop_single_stepping:
*
* Stop single stepping.
*/
void
mono_arch_stop_single_stepping (void)
{
ss_trampoline = NULL;
}
/*
* mono_arch_is_single_step_event:
*
 * Return whether the machine state in SIGCTX corresponds to a single
* step event.
*/
gboolean
mono_arch_is_single_step_event (void *info, void *sigctx)
{
/* We use soft breakpoints on amd64 */
return FALSE;
}
/*
* mono_arch_skip_single_step:
*
* Modify CTX so the ip is placed after the single step trigger instruction,
 * so when we resume, the instruction is not executed again.
*/
void
mono_arch_skip_single_step (MonoContext *ctx)
{
g_assert_not_reached ();
}
/*
* mono_arch_create_seq_point_info:
*
* Return a pointer to a data structure which is used by the sequence
* point implementation in AOTed code.
*/
SeqPointInfo*
mono_arch_get_seq_point_info (guint8 *code)
{
SeqPointInfo *info;
MonoJitInfo *ji;
MonoJitMemoryManager *jit_mm;
/*
* We don't have access to the method etc. so use the global
* memory manager for now.
*/
jit_mm = get_default_jit_mm ();
// FIXME: Add a free function
jit_mm_lock (jit_mm);
info = (SeqPointInfo *)g_hash_table_lookup (jit_mm->arch_seq_points, code);
jit_mm_unlock (jit_mm);
if (!info) {
ji = mini_jit_info_table_find (code);
g_assert (ji);
// FIXME: Optimize the size
info = (SeqPointInfo *)g_malloc0 (sizeof (SeqPointInfo) + (ji->code_size * sizeof (gpointer)));
info->ss_tramp_addr = &ss_trampoline;
jit_mm_lock (jit_mm);
g_hash_table_insert (jit_mm->arch_seq_points, code, info);
jit_mm_unlock (jit_mm);
}
return info;
}
#endif
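/*
 * mono_arch_opcode_supported:
 *
 *   Return whether OPCODE can be implemented atomically on amd64. All the
 * opcodes below map to inherently atomic instructions (LOCK XADD, XCHG,
 * LOCK CMPXCHG) or to naturally aligned loads/stores, which are atomic on
 * x86-64.
 */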
gboolean
mono_arch_opcode_supported (int opcode)
{
switch (opcode) {
case OP_ATOMIC_ADD_I4:
case OP_ATOMIC_ADD_I8:
case OP_ATOMIC_EXCHANGE_I4:
case OP_ATOMIC_EXCHANGE_I8:
case OP_ATOMIC_CAS_I4:
case OP_ATOMIC_CAS_I8:
case OP_ATOMIC_LOAD_I1:
case OP_ATOMIC_LOAD_I2:
case OP_ATOMIC_LOAD_I4:
case OP_ATOMIC_LOAD_I8:
case OP_ATOMIC_LOAD_U1:
case OP_ATOMIC_LOAD_U2:
case OP_ATOMIC_LOAD_U4:
case OP_ATOMIC_LOAD_U8:
case OP_ATOMIC_LOAD_R4:
case OP_ATOMIC_LOAD_R8:
case OP_ATOMIC_STORE_I1:
case OP_ATOMIC_STORE_I2:
case OP_ATOMIC_STORE_I4:
case OP_ATOMIC_STORE_I8:
case OP_ATOMIC_STORE_U1:
case OP_ATOMIC_STORE_U2:
case OP_ATOMIC_STORE_U4:
case OP_ATOMIC_STORE_U8:
case OP_ATOMIC_STORE_R4:
case OP_ATOMIC_STORE_R8:
return TRUE;
default:
return FALSE;
}
}
CallInfo*
mono_arch_get_call_info (MonoMemPool *mp, MonoMethodSignature *sig)
{
return get_call_info (mp, sig);
}
gpointer
mono_arch_load_function (MonoJitICallId jit_icall_id)
{
gpointer target = NULL;
switch (jit_icall_id) {
#undef MONO_AOT_ICALL
#define MONO_AOT_ICALL(x) case MONO_JIT_ICALL_ ## x: target = (gpointer)x; break;
MONO_AOT_ICALL (mono_amd64_resume_unwind)
MONO_AOT_ICALL (mono_amd64_start_gsharedvt_call)
MONO_AOT_ICALL (mono_amd64_throw_corlib_exception)
MONO_AOT_ICALL (mono_amd64_throw_exception)
default:
break;
}
return target;
}
| /**
* \file
* AMD64 backend for the Mono code generator
*
* Based on mini-x86.c.
*
* Authors:
* Paolo Molaro ([email protected])
* Dietmar Maurer ([email protected])
* Patrik Torstensson
* Zoltan Varga ([email protected])
* Johan Lorensson ([email protected])
*
* (C) 2003 Ximian, Inc.
* Copyright 2003-2011 Novell, Inc (http://www.novell.com)
* Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
* Licensed under the MIT license. See LICENSE file in the project root for full license information.
*/
#include "mini.h"
#include <string.h>
#include <math.h>
#include <assert.h>
#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif
#include <mono/metadata/abi-details.h>
#include <mono/metadata/appdomain.h>
#include <mono/metadata/debug-helpers.h>
#include <mono/metadata/threads.h>
#include <mono/metadata/profiler-private.h>
#include <mono/metadata/mono-debug.h>
#include <mono/metadata/gc-internals.h>
#include <mono/metadata/tokentype.h>
#include <mono/utils/mono-math.h>
#include <mono/utils/mono-mmap.h>
#include <mono/utils/mono-memory-model.h>
#include <mono/utils/mono-tls.h>
#include <mono/utils/mono-hwcap.h>
#include <mono/utils/mono-threads.h>
#include <mono/utils/unlocked.h>
#include "interp/interp.h"
#include "ir-emit.h"
#include "mini-amd64.h"
#include "cpu-amd64.h"
#include "mini-gc.h"
#include "mini-runtime.h"
#include "aot-runtime.h"
#ifdef MONO_XEN_OPT
static gboolean optimize_for_xen = TRUE;
#else
#define optimize_for_xen 0
#endif
static GENERATE_TRY_GET_CLASS_WITH_CACHE (math, "System", "Math")
#define IS_IMM32(val) ((((guint64)val) >> 32) == 0)
#define IS_REX(inst) (((inst) >= 0x40) && ((inst) <= 0x4f))
/* The single step trampoline */
static gpointer ss_trampoline;
/* The breakpoint trampoline */
static gpointer bp_trampoline;
/* Offset between fp and the first argument in the callee */
#define ARGS_OFFSET 16
#define GP_SCRATCH_REG AMD64_R11
/* Max number of bblocks before we bail from using more advanced branch placement code */
#define MAX_BBLOCKS_FOR_BRANCH_OPTS 800
/*
* AMD64 register usage:
* - callee saved registers are used for global register allocation
* - %r11 is used for materializing 64 bit constants in opcodes
* - the rest is used for local allocation
*/
/*
* Floating point comparison results:
* ZF PF CF
* A > B 0 0 0
* A < B 0 0 1
* A = B 1 0 0
* UNORDERED 1 1 1
*/
const char*
mono_arch_regname (int reg)
{
switch (reg) {
case AMD64_RAX: return "%rax";
case AMD64_RBX: return "%rbx";
case AMD64_RCX: return "%rcx";
case AMD64_RDX: return "%rdx";
case AMD64_RSP: return "%rsp";
case AMD64_RBP: return "%rbp";
case AMD64_RDI: return "%rdi";
case AMD64_RSI: return "%rsi";
case AMD64_R8: return "%r8";
case AMD64_R9: return "%r9";
case AMD64_R10: return "%r10";
case AMD64_R11: return "%r11";
case AMD64_R12: return "%r12";
case AMD64_R13: return "%r13";
case AMD64_R14: return "%r14";
case AMD64_R15: return "%r15";
}
return "unknown";
}
static const char * const packed_xmmregs [] = {
"p:xmm0", "p:xmm1", "p:xmm2", "p:xmm3", "p:xmm4", "p:xmm5", "p:xmm6", "p:xmm7", "p:xmm8",
"p:xmm9", "p:xmm10", "p:xmm11", "p:xmm12", "p:xmm13", "p:xmm14", "p:xmm15"
};
static const char * const single_xmmregs [] = {
"s:xmm0", "s:xmm1", "s:xmm2", "s:xmm3", "s:xmm4", "s:xmm5", "s:xmm6", "s:xmm7", "s:xmm8",
"s:xmm9", "s:xmm10", "s:xmm11", "s:xmm12", "s:xmm13", "s:xmm14", "s:xmm15"
};
const char*
mono_arch_fregname (int reg)
{
if (reg < AMD64_XMM_NREG)
return single_xmmregs [reg];
else
return "unknown";
}
const char *
mono_arch_xregname (int reg)
{
if (reg < AMD64_XMM_NREG)
return packed_xmmregs [reg];
else
return "unknown";
}
static gboolean
debug_omit_fp (void)
{
#if 0
return mono_debug_count ();
#else
return TRUE;
#endif
}
static gboolean
amd64_is_near_call (guint8 *code)
{
/* Skip REX */
if ((code [0] >= 0x40) && (code [0] <= 0x4f))
code += 1;
return code [0] == 0xe8;
}
static gboolean
amd64_use_imm32 (gint64 val)
{
if (mini_debug_options.single_imm_size)
return FALSE;
return amd64_is_imm32 (val);
}
void
mono_x86_patch (unsigned char* code, gpointer target)
{
mono_x86_patch_inline (code, target);
}
static void
amd64_patch (unsigned char* code, gpointer target)
{
// NOTE: Sometimes code has just been generated, is not running yet,
// and has no alignment requirements. Sometimes it could be running while we patch it,
// and there are alignment requirements.
// FIXME Assert alignment.
guint8 rex = 0;
/* Skip REX */
if ((code [0] >= 0x40) && (code [0] <= 0x4f)) {
rex = code [0];
code += 1;
}
if ((code [0] & 0xf8) == 0xb8) {
/* amd64_set_reg_template */
*(guint64*)(code + 1) = (guint64)target;
}
else if ((code [0] == 0x8b) && rex && x86_modrm_mod (code [1]) == 0 && x86_modrm_rm (code [1]) == 5) {
/* mov 0(%rip), %dreg */
g_assert (!1); // Historical code was incorrect.
ptrdiff_t const offset = (guchar*)target - (code + 6);
g_assert (offset == (gint32)offset);
*(gint32*)(code + 2) = (gint32)offset;
}
else if (code [0] == 0xff && (code [1] == 0x15 || code [1] == 0x25)) {
/* call or jmp *<OFFSET>(%rip) */
// Patch the data, not the code.
g_assert (!2); // For possible use later.
*(void**)(code + 6 + *(gint32*)(code + 2)) = target;
}
else
x86_patch (code, target);
}
void
mono_amd64_patch (unsigned char* code, gpointer target)
{
amd64_patch (code, target);
}
#define DEBUG(a) if (cfg->verbose_level > 1) a
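/*
 * add_general:
 *
 *   Assign an integer-classed argument to the next free parameter register,
 * or, once all PARAM_REGS registers are taken, to a register-sized stack
 * slot. AINFO receives the chosen storage, register and stack offset.
 */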
static void inline
add_general (guint32 *gr, guint32 *stack_size, ArgInfo *ainfo)
{
ainfo->offset = *stack_size;
if (*gr >= PARAM_REGS) {
ainfo->storage = ArgOnStack;
ainfo->arg_size = sizeof (target_mgreg_t);
/* Since the same stack slot size is used for all arg */
/* types, it needs to be big enough to hold them all */
(*stack_size) += sizeof (target_mgreg_t);
}
else {
ainfo->storage = ArgInIReg;
ainfo->reg = param_regs [*gr];
(*gr) ++;
}
}
static void inline
add_float (guint32 *gr, guint32 *stack_size, ArgInfo *ainfo, gboolean is_double)
{
ainfo->offset = *stack_size;
if (*gr >= FLOAT_PARAM_REGS) {
ainfo->storage = ArgOnStack;
ainfo->arg_size = sizeof (target_mgreg_t);
/* Since the same stack slot size is used for both float */
/* types, it needs to be big enough to hold them both */
(*stack_size) += sizeof (target_mgreg_t);
}
else {
/* A float or double SSE register */
if (is_double)
ainfo->storage = ArgInDoubleSSEReg;
else
ainfo->storage = ArgInFloatSSEReg;
ainfo->reg = *gr;
(*gr) += 1;
}
}
typedef enum ArgumentClass {
ARG_CLASS_NO_CLASS,
ARG_CLASS_MEMORY,
ARG_CLASS_INTEGER,
ARG_CLASS_SSE
} ArgumentClass;
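/*
 * merge_argument_class_from_type:
 *
 *   Classify TYPE according to the System V AMD64 ABI (section 3.2.3) and
 * merge the result with CLASS1 using the ABI merge rules: equal classes are
 * kept, NO_CLASS yields the other class, MEMORY wins over everything else,
 * and INTEGER wins over SSE. On Windows, R4/R8 classify as INTEGER, since
 * value types are only passed in integer registers there.
 */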
static ArgumentClass
merge_argument_class_from_type (MonoType *type, ArgumentClass class1)
{
ArgumentClass class2 = ARG_CLASS_NO_CLASS;
MonoType *ptype;
ptype = mini_get_underlying_type (type);
switch (ptype->type) {
case MONO_TYPE_I1:
case MONO_TYPE_U1:
case MONO_TYPE_I2:
case MONO_TYPE_U2:
case MONO_TYPE_I4:
case MONO_TYPE_U4:
case MONO_TYPE_I:
case MONO_TYPE_U:
case MONO_TYPE_OBJECT:
case MONO_TYPE_PTR:
case MONO_TYPE_FNPTR:
case MONO_TYPE_I8:
case MONO_TYPE_U8:
class2 = ARG_CLASS_INTEGER;
break;
case MONO_TYPE_R4:
case MONO_TYPE_R8:
#ifdef TARGET_WIN32
class2 = ARG_CLASS_INTEGER;
#else
class2 = ARG_CLASS_SSE;
#endif
break;
case MONO_TYPE_TYPEDBYREF:
g_assert_not_reached ();
case MONO_TYPE_GENERICINST:
if (!mono_type_generic_inst_is_valuetype (ptype)) {
class2 = ARG_CLASS_INTEGER;
break;
}
/* fall through */
case MONO_TYPE_VALUETYPE: {
MonoMarshalType *info = mono_marshal_load_type_info (ptype->data.klass);
int i;
for (i = 0; i < info->num_fields; ++i) {
class2 = class1;
class2 = merge_argument_class_from_type (info->fields [i].field->type, class2);
}
break;
}
default:
g_assert_not_reached ();
}
/* Merge */
if (class1 == class2)
;
else if (class1 == ARG_CLASS_NO_CLASS)
class1 = class2;
else if ((class1 == ARG_CLASS_MEMORY) || (class2 == ARG_CLASS_MEMORY))
class1 = ARG_CLASS_MEMORY;
else if ((class1 == ARG_CLASS_INTEGER) || (class2 == ARG_CLASS_INTEGER))
class1 = ARG_CLASS_INTEGER;
else
class1 = ARG_CLASS_SSE;
return class1;
}
typedef struct {
MonoType *type;
int size, offset;
} StructFieldInfo;
/*
* collect_field_info_nested:
*
* Collect field info from KLASS recursively into FIELDS.
*/
static void
collect_field_info_nested (MonoClass *klass, GArray *fields_array, int offset, gboolean pinvoke, gboolean unicode)
{
MonoMarshalType *info;
int i;
if (pinvoke) {
info = mono_marshal_load_type_info (klass);
g_assert(info);
for (i = 0; i < info->num_fields; ++i) {
if (MONO_TYPE_ISSTRUCT (info->fields [i].field->type)) {
collect_field_info_nested (mono_class_from_mono_type_internal (info->fields [i].field->type), fields_array, info->fields [i].offset, pinvoke, unicode);
} else {
guint32 align;
StructFieldInfo f;
f.type = info->fields [i].field->type;
f.size = mono_marshal_type_size (info->fields [i].field->type,
info->fields [i].mspec,
&align, TRUE, unicode);
f.offset = offset + info->fields [i].offset;
if (i == info->num_fields - 1 && f.size + f.offset < info->native_size) {
/* This can happen with .pack directives, e.g. 'fixed' arrays */
if (MONO_TYPE_IS_PRIMITIVE (f.type)) {
/* Replicate the last field to fill out the remaining space, since the code in add_valuetype () needs type information */
g_array_append_val (fields_array, f);
while (f.size + f.offset < info->native_size) {
f.offset += f.size;
g_array_append_val (fields_array, f);
}
} else {
f.size = info->native_size - f.offset;
g_array_append_val (fields_array, f);
}
} else {
g_array_append_val (fields_array, f);
}
}
}
} else {
gpointer iter;
MonoClassField *field;
iter = NULL;
while ((field = mono_class_get_fields_internal (klass, &iter))) {
if (field->type->attrs & FIELD_ATTRIBUTE_STATIC)
continue;
if (MONO_TYPE_ISSTRUCT (field->type)) {
collect_field_info_nested (mono_class_from_mono_type_internal (field->type), fields_array, field->offset - MONO_ABI_SIZEOF (MonoObject), pinvoke, unicode);
} else {
int align;
StructFieldInfo f;
f.type = field->type;
f.size = mono_type_size (field->type, &align);
f.offset = field->offset - MONO_ABI_SIZEOF (MonoObject) + offset;
g_array_append_val (fields_array, f);
}
}
}
}
#ifdef TARGET_WIN32
/* The Windows x64 ABI can pass/return value types in a register if their size is 1, 2, 4 or 8 bytes. */
#define MONO_WIN64_VALUE_TYPE_FITS_REG(arg_size) (arg_size <= SIZEOF_REGISTER && (arg_size == 1 || arg_size == 2 || arg_size == 4 || arg_size == 8))
static gboolean
allocate_register_for_valuetype_win64 (ArgInfo *arg_info, ArgumentClass arg_class, guint32 arg_size, const AMD64_Reg_No int_regs [], int int_reg_count, const AMD64_XMM_Reg_No float_regs [], int float_reg_count, guint32 *current_int_reg, guint32 *current_float_reg)
{
gboolean result = FALSE;
assert (arg_info != NULL && int_regs != NULL && float_regs != NULL && current_int_reg != NULL && current_float_reg != NULL);
assert (arg_info->storage == ArgValuetypeInReg || arg_info->storage == ArgValuetypeAddrInIReg);
arg_info->pair_storage [0] = arg_info->pair_storage [1] = ArgNone;
arg_info->pair_regs [0] = arg_info->pair_regs [1] = ArgNone;
arg_info->pair_size [0] = 0;
arg_info->pair_size [1] = 0;
arg_info->nregs = 0;
if (arg_class == ARG_CLASS_INTEGER && *current_int_reg < int_reg_count) {
/* Pass parameter in integer register. */
arg_info->pair_storage [0] = ArgInIReg;
arg_info->pair_regs [0] = int_regs [*current_int_reg];
(*current_int_reg) ++;
result = TRUE;
} else if (arg_class == ARG_CLASS_SSE && *current_float_reg < float_reg_count) {
/* Pass parameter in float register. */
arg_info->pair_storage [0] = (arg_size <= sizeof (gfloat)) ? ArgInFloatSSEReg : ArgInDoubleSSEReg;
arg_info->pair_regs [0] = float_regs [*current_float_reg];
(*current_float_reg) ++;
result = TRUE;
}
if (result == TRUE) {
arg_info->pair_size [0] = arg_size;
arg_info->nregs = 1;
}
return result;
}
static gboolean
allocate_parameter_register_for_valuetype_win64 (ArgInfo *arg_info, ArgumentClass arg_class, guint32 arg_size, guint32 *current_int_reg, guint32 *current_float_reg)
{
return allocate_register_for_valuetype_win64 (arg_info, arg_class, arg_size, param_regs, PARAM_REGS, float_param_regs, FLOAT_PARAM_REGS, current_int_reg, current_float_reg);
}
static gboolean
allocate_return_register_for_valuetype_win64 (ArgInfo *arg_info, ArgumentClass arg_class, guint32 arg_size, guint32 *current_int_reg, guint32 *current_float_reg)
{
return allocate_register_for_valuetype_win64 (arg_info, arg_class, arg_size, return_regs, RETURN_REGS, float_return_regs, FLOAT_RETURN_REGS, current_int_reg, current_float_reg);
}
static void
allocate_storage_for_valuetype_win64 (ArgInfo *arg_info, MonoType *type, gboolean is_return, ArgumentClass arg_class,
guint32 arg_size, guint32 *current_int_reg, guint32 *current_float_reg, guint32 *stack_size)
{
/* Windows x64 value type ABI.
*
* Parameters: https://msdn.microsoft.com/en-us/library/zthk2dkh.aspx
*
 * Integer/Float types smaller than or equal to 8 bytes, or properly sized structs/unions (1,2,4,8)
 *    Try to pass in a register using ArgValuetypeInReg/(ArgInIReg|ArgInFloatSSEReg|ArgInDoubleSSEReg) as storage and size of parameter (1,2,4,8); if no more registers, pass on the stack using ArgOnStack as storage and size of parameter (1,2,4,8).
 * Integer/Float types bigger than 8 bytes, or structs/unions larger than 8 bytes or of size 3, 5, 6 or 7.
 *    Try to pass a pointer in a register using ArgValuetypeAddrInIReg; if no more registers, pass the pointer on the stack using ArgValuetypeAddrOnStack as storage and a parameter size of one register (8 bytes).
*
* Return values: https://msdn.microsoft.com/en-us/library/7572ztz4.aspx.
*
 * Integer/Float types smaller than or equal to 8 bytes
* Return in corresponding register RAX/XMM0 using ArgValuetypeInReg/(ArgInIReg|ArgInFloatSSEReg|ArgInDoubleSSEReg) as storage and size of parameter(1,2,4,8).
* Properly sized struct/unions (1,2,4,8)
* Return in register RAX using ArgValuetypeInReg as storage and size of parameter(1,2,4,8).
 * Types bigger than 8 bytes, or structs/unions larger than 8 bytes or of size 3, 5, 6 or 7.
* Return pointer to allocated stack space (allocated by caller) using ArgValuetypeAddrInIReg as storage and parameter size.
*/
assert (arg_info != NULL && type != NULL && current_int_reg != NULL && current_float_reg != NULL && stack_size != NULL);
if (!is_return) {
/* Parameter cases. */
if (arg_class != ARG_CLASS_MEMORY && MONO_WIN64_VALUE_TYPE_FITS_REG (arg_size)) {
assert (arg_size == 1 || arg_size == 2 || arg_size == 4 || arg_size == 8);
/* First, try to use registers for parameter. If type is struct it can only be passed by value in integer register. */
arg_info->storage = ArgValuetypeInReg;
if (!allocate_parameter_register_for_valuetype_win64 (arg_info, !MONO_TYPE_ISSTRUCT (type) ? arg_class : ARG_CLASS_INTEGER, arg_size, current_int_reg, current_float_reg)) {
/* No more registers, fallback passing parameter on stack as value. */
assert (arg_info->pair_storage [0] == ArgNone && arg_info->pair_storage [1] == ArgNone && arg_info->pair_size [0] == 0 && arg_info->pair_size [1] == 0 && arg_info->nregs == 0);
/* Passing value directly on stack, so use size of value. */
arg_info->storage = ArgOnStack;
arg_size = ALIGN_TO (arg_size, sizeof (target_mgreg_t));
arg_info->offset = *stack_size;
arg_info->arg_size = arg_size;
*stack_size += arg_size;
}
} else {
/* Fallback to stack, try to pass address to parameter in register. Always use integer register to represent stack address. */
arg_info->storage = ArgValuetypeAddrInIReg;
if (!allocate_parameter_register_for_valuetype_win64 (arg_info, ARG_CLASS_INTEGER, arg_size, current_int_reg, current_float_reg)) {
/* No more registers, fallback passing address to parameter on stack. */
assert (arg_info->pair_storage [0] == ArgNone && arg_info->pair_storage [1] == ArgNone && arg_info->pair_size [0] == 0 && arg_info->pair_size [1] == 0 && arg_info->nregs == 0);
/* Passing an address to value on stack, so use size of register as argument size. */
arg_info->storage = ArgValuetypeAddrOnStack;
arg_size = sizeof (target_mgreg_t);
arg_info->offset = *stack_size;
arg_info->arg_size = arg_size;
*stack_size += arg_size;
}
}
} else {
/* Return value cases. */
if (arg_class != ARG_CLASS_MEMORY && MONO_WIN64_VALUE_TYPE_FITS_REG (arg_size)) {
assert (arg_size == 1 || arg_size == 2 || arg_size == 4 || arg_size == 8);
/* Return value fits into return registers. If type is struct it can only be returned by value in integer register. */
arg_info->storage = ArgValuetypeInReg;
allocate_return_register_for_valuetype_win64 (arg_info, !MONO_TYPE_ISSTRUCT (type) ? arg_class : ARG_CLASS_INTEGER, arg_size, current_int_reg, current_float_reg);
/* Only RAX/XMM0 should be used to return valuetype. */
assert ((arg_info->pair_regs[0] == AMD64_RAX && arg_info->pair_regs[1] == ArgNone) || (arg_info->pair_regs[0] == AMD64_XMM0 && arg_info->pair_regs[1] == ArgNone));
} else {
/* Return value doesn't fit into return register, return address to allocated stack space (allocated by caller and passed as input). */
arg_info->storage = ArgValuetypeAddrInIReg;
allocate_return_register_for_valuetype_win64 (arg_info, ARG_CLASS_INTEGER, arg_size, current_int_reg, current_float_reg);
/* Only RAX should be used to return valuetype address. */
assert (arg_info->pair_regs[0] == AMD64_RAX && arg_info->pair_regs[1] == ArgNone);
arg_size = ALIGN_TO (arg_size, sizeof (target_mgreg_t));
arg_info->offset = *stack_size;
*stack_size += arg_size;
}
}
}
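/*
 * get_valuetype_size_win64:
 *
 *   Compute the ABI size and argument class of the value type KLASS. Sizes
 * of 1, 2, 4 or 8 bytes classify as INTEGER (passed by value in a register),
 * everything else as MEMORY (passed by reference). Empty structs are widened
 * to register size so they can still be represented in the call.
 */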
static void
get_valuetype_size_win64 (MonoClass *klass, gboolean pinvoke, ArgInfo *arg_info, MonoType *type, ArgumentClass *arg_class, guint32 *arg_size)
{
*arg_size = 0;
*arg_class = ARG_CLASS_NO_CLASS;
assert (klass != NULL && arg_info != NULL && type != NULL && arg_class != NULL && arg_size != NULL);
if (pinvoke) {
/* Calculate argument class type and size of marshalled type. */
MonoMarshalType *info = mono_marshal_load_type_info (klass);
*arg_size = info->native_size;
} else {
/* Calculate argument class type and size of managed type. */
*arg_size = mono_class_value_size (klass, NULL);
}
/* The Windows ABI only handles value types on the stack or passed in an integer register (if the value fits the register size). */
*arg_class = MONO_WIN64_VALUE_TYPE_FITS_REG (*arg_size) ? ARG_CLASS_INTEGER : ARG_CLASS_MEMORY;
if (*arg_class == ARG_CLASS_MEMORY) {
/* The value type has a size that doesn't fit a register according to the ABI. Use the full stack size of the type. */
*arg_size = mini_type_stack_size_full (m_class_get_byval_arg (klass), NULL, pinvoke);
}
/*
 * Standard C and C++ don't allow empty structs; an empty struct will always have a size of 1 byte.
 * GCC has an extension that allows empty structs, https://gcc.gnu.org/onlinedocs/gcc/Empty-Structures.html.
 * This causes a small dilemma, since a runtime built with a non-GCC compiler will not be compatible with
 * GCC-built C libraries, and the other way around. On platforms where empty structs have a size of 1 byte
 * they must be represented in the call and cannot be dropped.
*/
if (*arg_size == 0 && MONO_TYPE_ISSTRUCT (type)) {
arg_info->pass_empty_struct = TRUE;
*arg_size = SIZEOF_REGISTER;
*arg_class = ARG_CLASS_INTEGER;
}
assert (*arg_class != ARG_CLASS_NO_CLASS);
}
static void
add_valuetype_win64 (MonoMethodSignature *signature, ArgInfo *arg_info, MonoType *type,
gboolean is_return, guint32 *current_int_reg, guint32 *current_float_reg, guint32 *stack_size)
{
guint32 arg_size = SIZEOF_REGISTER;
MonoClass *klass = NULL;
ArgumentClass arg_class;
assert (signature != NULL && arg_info != NULL && type != NULL && current_int_reg != NULL && current_float_reg != NULL && stack_size != NULL);
klass = mono_class_from_mono_type_internal (type);
get_valuetype_size_win64 (klass, signature->pinvoke && !signature->marshalling_disabled, arg_info, type, &arg_class, &arg_size);
/* Only drop the value type if it's not an empty struct that must be represented in the call */
if ((arg_size == 0 && !arg_info->pass_empty_struct) || (arg_info->pass_empty_struct && is_return)) {
arg_info->storage = ArgValuetypeInReg;
arg_info->pair_storage [0] = arg_info->pair_storage [1] = ArgNone;
} else {
/* Allocate storage for the value type. */
allocate_storage_for_valuetype_win64 (arg_info, type, is_return, arg_class, arg_size, current_int_reg, current_float_reg, stack_size);
}
}
#endif /* TARGET_WIN32 */
static void
add_valuetype (MonoMethodSignature *sig, ArgInfo *ainfo, MonoType *type,
gboolean is_return,
guint32 *gr, guint32 *fr, guint32 *stack_size)
{
#ifdef TARGET_WIN32
add_valuetype_win64 (sig, ainfo, type, is_return, gr, fr, stack_size);
#else
guint32 size, quad, nquads, i, nfields;
/* Keep track of the size used in each quad so we can */
/* use the right size when copying args/return vars. */
guint32 quadsize [2] = {8, 8};
ArgumentClass args [2];
StructFieldInfo *fields = NULL;
GArray *fields_array;
MonoClass *klass;
gboolean pass_on_stack = FALSE;
int struct_size;
klass = mono_class_from_mono_type_internal (type);
size = mini_type_stack_size_full (m_class_get_byval_arg (klass), NULL, sig->pinvoke && !sig->marshalling_disabled);
if (!sig->pinvoke && ((is_return && (size == 8)) || (!is_return && (size <= 16)))) {
/* We pass and return vtypes of size 8 in a register */
} else if (!sig->pinvoke || (size == 0) || (size > 16)) {
pass_on_stack = TRUE;
}
/* If this struct can't be split up naturally into 8-byte */
/* chunks (registers), pass it on the stack. */
if (sig->pinvoke && !sig->marshalling_disabled) {
MonoMarshalType *info = mono_marshal_load_type_info (klass);
g_assert (info);
struct_size = info->native_size;
} else {
struct_size = mono_class_value_size (klass, NULL);
}
/*
* Collect field information recursively to be able to
* handle nested structures.
*/
fields_array = g_array_new (FALSE, TRUE, sizeof (StructFieldInfo));
collect_field_info_nested (klass, fields_array, 0, sig->pinvoke && !sig->marshalling_disabled, m_class_is_unicode (klass));
fields = (StructFieldInfo*)fields_array->data;
nfields = fields_array->len;
for (i = 0; i < nfields; ++i) {
if ((fields [i].offset < 8) && (fields [i].offset + fields [i].size) > 8) {
pass_on_stack = TRUE;
break;
}
}
if (size == 0) {
ainfo->storage = ArgValuetypeInReg;
ainfo->pair_storage [0] = ainfo->pair_storage [1] = ArgNone;
return;
}
if (pass_on_stack) {
/* Always pass in memory */
ainfo->offset = *stack_size;
*stack_size += ALIGN_TO (size, 8);
ainfo->storage = is_return ? ArgValuetypeAddrInIReg : ArgOnStack;
if (!is_return)
ainfo->arg_size = ALIGN_TO (size, 8);
g_array_free (fields_array, TRUE);
return;
}
if (size > 8)
nquads = 2;
else
nquads = 1;
if (!sig->pinvoke) {
int n = mono_class_value_size (klass, NULL);
quadsize [0] = n >= 8 ? 8 : n;
quadsize [1] = n >= 8 ? MAX (n - 8, 8) : 0;
/* Always pass in 1 or 2 integer registers */
args [0] = ARG_CLASS_INTEGER;
args [1] = ARG_CLASS_INTEGER;
/* Only the simplest cases are supported */
if (is_return && nquads != 1) {
args [0] = ARG_CLASS_MEMORY;
args [1] = ARG_CLASS_MEMORY;
}
} else {
/*
* Implement the algorithm from section 3.2.3 of the X86_64 ABI.
* The X87 and SSEUP stuff is left out since there are no such types in
* the CLR.
*/
if (!nfields) {
ainfo->storage = ArgValuetypeInReg;
ainfo->pair_storage [0] = ainfo->pair_storage [1] = ArgNone;
return;
}
if (struct_size > 16) {
ainfo->offset = *stack_size;
*stack_size += ALIGN_TO (struct_size, 8);
ainfo->storage = is_return ? ArgValuetypeAddrInIReg : ArgOnStack;
if (!is_return)
ainfo->arg_size = ALIGN_TO (struct_size, 8);
g_array_free (fields_array, TRUE);
return;
}
args [0] = ARG_CLASS_NO_CLASS;
args [1] = ARG_CLASS_NO_CLASS;
for (quad = 0; quad < nquads; ++quad) {
ArgumentClass class1;
if (nfields == 0)
class1 = ARG_CLASS_MEMORY;
else
class1 = ARG_CLASS_NO_CLASS;
for (i = 0; i < nfields; ++i) {
if ((fields [i].offset < 8) && (fields [i].offset + fields [i].size) > 8) {
/* Unaligned field */
NOT_IMPLEMENTED;
}
/* Skip fields in other quad */
if ((quad == 0) && (fields [i].offset >= 8))
continue;
if ((quad == 1) && (fields [i].offset < 8))
continue;
/* How far into this quad this data extends.*/
/* (8 is size of quad) */
quadsize [quad] = fields [i].offset + fields [i].size - (quad * 8);
class1 = merge_argument_class_from_type (fields [i].type, class1);
}
/* Empty structs have a nonzero size, causing this assert to be hit */
if (sig->pinvoke)
g_assert (class1 != ARG_CLASS_NO_CLASS);
args [quad] = class1;
}
}
g_array_free (fields_array, TRUE);
/* Post merger cleanup */
if ((args [0] == ARG_CLASS_MEMORY) || (args [1] == ARG_CLASS_MEMORY))
args [0] = args [1] = ARG_CLASS_MEMORY;
/* Allocate registers */
{
int orig_gr = *gr;
int orig_fr = *fr;
while (quadsize [0] != 1 && quadsize [0] != 2 && quadsize [0] != 4 && quadsize [0] != 8)
quadsize [0] ++;
while (quadsize [1] != 0 && quadsize [1] != 1 && quadsize [1] != 2 && quadsize [1] != 4 && quadsize [1] != 8)
quadsize [1] ++;
ainfo->storage = ArgValuetypeInReg;
ainfo->pair_storage [0] = ainfo->pair_storage [1] = ArgNone;
g_assert (quadsize [0] <= 8);
g_assert (quadsize [1] <= 8);
ainfo->pair_size [0] = quadsize [0];
ainfo->pair_size [1] = quadsize [1];
ainfo->nregs = nquads;
for (quad = 0; quad < nquads; ++quad) {
switch (args [quad]) {
case ARG_CLASS_INTEGER:
if (*gr >= PARAM_REGS)
args [quad] = ARG_CLASS_MEMORY;
else {
ainfo->pair_storage [quad] = ArgInIReg;
if (is_return)
ainfo->pair_regs [quad] = return_regs [*gr];
else
ainfo->pair_regs [quad] = param_regs [*gr];
(*gr) ++;
}
break;
case ARG_CLASS_SSE:
if (*fr >= FLOAT_PARAM_REGS)
args [quad] = ARG_CLASS_MEMORY;
else {
if (quadsize[quad] <= 4)
ainfo->pair_storage [quad] = ArgInFloatSSEReg;
else ainfo->pair_storage [quad] = ArgInDoubleSSEReg;
ainfo->pair_regs [quad] = *fr;
(*fr) ++;
}
break;
case ARG_CLASS_MEMORY:
break;
case ARG_CLASS_NO_CLASS:
break;
default:
g_assert_not_reached ();
}
}
if ((args [0] == ARG_CLASS_MEMORY) || (args [1] == ARG_CLASS_MEMORY)) {
int arg_size;
/* Revert possible register assignments */
*gr = orig_gr;
*fr = orig_fr;
ainfo->offset = *stack_size;
if (sig->pinvoke)
arg_size = ALIGN_TO (struct_size, 8);
else
arg_size = nquads * sizeof (target_mgreg_t);
*stack_size += arg_size;
ainfo->storage = is_return ? ArgValuetypeAddrInIReg : ArgOnStack;
if (!is_return)
ainfo->arg_size = arg_size;
}
}
#endif /* !TARGET_WIN32 */
}
/*
* get_call_info:
*
* Obtain information about a call according to the calling convention.
* For AMD64 System V, see the "System V ABI, x86-64 Architecture Processor Supplement
* Draft Version 0.23" document for more information.
* For AMD64 Windows, see "Overview of x64 Calling Conventions",
* https://msdn.microsoft.com/en-us/library/ms235286.aspx
*/
static CallInfo*
get_call_info (MonoMemPool *mp, MonoMethodSignature *sig)
{
guint32 i, gr, fr, pstart;
MonoType *ret_type;
int n = sig->hasthis + sig->param_count;
guint32 stack_size = 0;
CallInfo *cinfo;
gboolean is_pinvoke = sig->pinvoke;
if (mp)
cinfo = (CallInfo *)mono_mempool_alloc0 (mp, sizeof (CallInfo) + (sizeof (ArgInfo) * n));
else
cinfo = (CallInfo *)g_malloc0 (sizeof (CallInfo) + (sizeof (ArgInfo) * n));
cinfo->nargs = n;
cinfo->gsharedvt = mini_is_gsharedvt_variable_signature (sig);
gr = 0;
fr = 0;
#ifdef TARGET_WIN32
/* Reserve space where the callee can save the argument registers */
stack_size = 4 * sizeof (target_mgreg_t);
#endif
/* return value */
ret_type = mini_get_underlying_type (sig->ret);
switch (ret_type->type) {
case MONO_TYPE_I1:
case MONO_TYPE_U1:
case MONO_TYPE_I2:
case MONO_TYPE_U2:
case MONO_TYPE_I4:
case MONO_TYPE_U4:
case MONO_TYPE_I:
case MONO_TYPE_U:
case MONO_TYPE_PTR:
case MONO_TYPE_FNPTR:
case MONO_TYPE_OBJECT:
cinfo->ret.storage = ArgInIReg;
cinfo->ret.reg = AMD64_RAX;
break;
case MONO_TYPE_U8:
case MONO_TYPE_I8:
cinfo->ret.storage = ArgInIReg;
cinfo->ret.reg = AMD64_RAX;
break;
case MONO_TYPE_R4:
cinfo->ret.storage = ArgInFloatSSEReg;
cinfo->ret.reg = AMD64_XMM0;
break;
case MONO_TYPE_R8:
cinfo->ret.storage = ArgInDoubleSSEReg;
cinfo->ret.reg = AMD64_XMM0;
break;
case MONO_TYPE_GENERICINST:
if (!mono_type_generic_inst_is_valuetype (ret_type)) {
cinfo->ret.storage = ArgInIReg;
cinfo->ret.reg = AMD64_RAX;
break;
}
if (mini_is_gsharedvt_type (ret_type)) {
cinfo->ret.storage = ArgGsharedvtVariableInReg;
break;
}
/* fall through */
case MONO_TYPE_VALUETYPE:
case MONO_TYPE_TYPEDBYREF: {
guint32 tmp_gr = 0, tmp_fr = 0, tmp_stacksize = 0;
add_valuetype (sig, &cinfo->ret, ret_type, TRUE, &tmp_gr, &tmp_fr, &tmp_stacksize);
g_assert (cinfo->ret.storage != ArgInIReg);
break;
}
case MONO_TYPE_VAR:
case MONO_TYPE_MVAR:
g_assert (mini_is_gsharedvt_type (ret_type));
cinfo->ret.storage = ArgGsharedvtVariableInReg;
break;
case MONO_TYPE_VOID:
break;
default:
g_error ("Can't handle as return value 0x%x", ret_type->type);
}
pstart = 0;
/*
* To simplify get_this_arg_reg () and LLVM integration, emit the vret arg after
* the first argument, allowing 'this' to be always passed in the first arg reg.
* Also do this if the first argument is a reference type, since virtual calls
* are sometimes made using calli without sig->hasthis set, like in the delegate
* invoke wrappers.
*/
ArgStorage ret_storage = cinfo->ret.storage;
if ((ret_storage == ArgValuetypeAddrInIReg || ret_storage == ArgGsharedvtVariableInReg) && !is_pinvoke && (sig->hasthis || (sig->param_count > 0 && MONO_TYPE_IS_REFERENCE (mini_get_underlying_type (sig->params [0]))))) {
if (sig->hasthis) {
add_general (&gr, &stack_size, cinfo->args + 0);
} else {
add_general (&gr, &stack_size, &cinfo->args [sig->hasthis + 0]);
pstart = 1;
}
add_general (&gr, &stack_size, &cinfo->ret);
cinfo->ret.storage = ret_storage;
cinfo->vret_arg_index = 1;
} else {
/* this */
if (sig->hasthis)
add_general (&gr, &stack_size, cinfo->args + 0);
if (ret_storage == ArgValuetypeAddrInIReg || ret_storage == ArgGsharedvtVariableInReg) {
add_general (&gr, &stack_size, &cinfo->ret);
cinfo->ret.storage = ret_storage;
}
}
if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (n == 0)) {
gr = PARAM_REGS;
fr = FLOAT_PARAM_REGS;
/* Emit the signature cookie just before the implicit arguments */
add_general (&gr, &stack_size, &cinfo->sig_cookie);
}
for (i = pstart; i < sig->param_count; ++i) {
ArgInfo *ainfo = &cinfo->args [sig->hasthis + i];
MonoType *ptype;
#ifdef TARGET_WIN32
/* The float param registers and other param registers must be the same index on Windows x64.*/
if (gr > fr)
fr = gr;
else if (fr > gr)
gr = fr;
#endif
if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
/* We always pass the sig cookie on the stack for simplicity */
/*
* Prevent implicit arguments + the sig cookie from being passed
* in registers.
*/
gr = PARAM_REGS;
fr = FLOAT_PARAM_REGS;
/* Emit the signature cookie just before the implicit arguments */
add_general (&gr, &stack_size, &cinfo->sig_cookie);
}
ptype = mini_get_underlying_type (sig->params [i]);
switch (ptype->type) {
case MONO_TYPE_I1:
ainfo->is_signed = 1;
case MONO_TYPE_U1:
add_general (&gr, &stack_size, ainfo);
ainfo->byte_arg_size = 1;
break;
case MONO_TYPE_I2:
ainfo->is_signed = 1;
case MONO_TYPE_U2:
add_general (&gr, &stack_size, ainfo);
ainfo->byte_arg_size = 2;
break;
case MONO_TYPE_I4:
ainfo->is_signed = 1;
case MONO_TYPE_U4:
add_general (&gr, &stack_size, ainfo);
ainfo->byte_arg_size = 4;
break;
case MONO_TYPE_I:
case MONO_TYPE_U:
case MONO_TYPE_PTR:
case MONO_TYPE_FNPTR:
case MONO_TYPE_OBJECT:
add_general (&gr, &stack_size, ainfo);
break;
case MONO_TYPE_GENERICINST:
if (!mono_type_generic_inst_is_valuetype (ptype)) {
add_general (&gr, &stack_size, ainfo);
break;
}
if (mini_is_gsharedvt_variable_type (ptype)) {
/* gsharedvt arguments are passed by ref */
add_general (&gr, &stack_size, ainfo);
if (ainfo->storage == ArgInIReg)
ainfo->storage = ArgGSharedVtInReg;
else
ainfo->storage = ArgGSharedVtOnStack;
break;
}
/* fall through */
case MONO_TYPE_VALUETYPE:
case MONO_TYPE_TYPEDBYREF:
add_valuetype (sig, ainfo, ptype, FALSE, &gr, &fr, &stack_size);
break;
case MONO_TYPE_U8:
case MONO_TYPE_I8:
add_general (&gr, &stack_size, ainfo);
break;
case MONO_TYPE_R4:
add_float (&fr, &stack_size, ainfo, FALSE);
break;
case MONO_TYPE_R8:
add_float (&fr, &stack_size, ainfo, TRUE);
break;
case MONO_TYPE_VAR:
case MONO_TYPE_MVAR:
/* gsharedvt arguments are passed by ref */
g_assert (mini_is_gsharedvt_type (ptype));
add_general (&gr, &stack_size, ainfo);
if (ainfo->storage == ArgInIReg)
ainfo->storage = ArgGSharedVtInReg;
else
ainfo->storage = ArgGSharedVtOnStack;
break;
default:
g_assert_not_reached ();
}
}
if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (n > 0) && (sig->sentinelpos == sig->param_count)) {
gr = PARAM_REGS;
fr = FLOAT_PARAM_REGS;
/* Emit the signature cookie just before the implicit arguments */
add_general (&gr, &stack_size, &cinfo->sig_cookie);
}
cinfo->stack_usage = stack_size;
cinfo->reg_usage = gr;
cinfo->freg_usage = fr;
return cinfo;
}
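/*
 * The helpers below are used by the interpreter to move argument and return
 * values between a CallContext (register values plus a stack area) and the
 * interpreter frame. arg_need_temp () returns the size of the temporary
 * buffer needed for value types spread across multiple registers;
 * arg_get_val ()/arg_set_val () copy such values register by register.
 */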
static int
arg_need_temp (ArgInfo *ainfo)
{
// Value types using one register don't need a temp.
if (ainfo->storage == ArgValuetypeInReg && ainfo->nregs > 1)
return ainfo->nregs * sizeof (host_mgreg_t);
return 0;
}
static gpointer
arg_get_storage (CallContext *ccontext, ArgInfo *ainfo)
{
switch (ainfo->storage) {
case ArgInIReg:
return &ccontext->gregs [ainfo->reg];
case ArgInFloatSSEReg:
case ArgInDoubleSSEReg:
return &ccontext->fregs [ainfo->reg];
case ArgOnStack:
case ArgValuetypeAddrOnStack:
return ccontext->stack + ainfo->offset;
case ArgValuetypeInReg:
// Empty struct
if (ainfo->nregs == 0)
return NULL;
// Value type using one register can be stored
// directly in its context gregs/fregs slot.
g_assert (ainfo->nregs == 1);
switch (ainfo->pair_storage [0]) {
case ArgInIReg:
return &ccontext->gregs [ainfo->pair_regs [0]];
case ArgInFloatSSEReg:
case ArgInDoubleSSEReg:
return &ccontext->fregs [ainfo->pair_regs [0]];
default:
g_assert_not_reached ();
}
case ArgValuetypeAddrInIReg:
g_assert (ainfo->pair_storage [0] == ArgInIReg && ainfo->pair_storage [1] == ArgNone);
return &ccontext->gregs [ainfo->pair_regs [0]];
default:
g_error ("Arg storage type not yet supported");
}
}
static void
arg_get_val (CallContext *ccontext, ArgInfo *ainfo, gpointer dest)
{
g_assert (arg_need_temp (ainfo));
host_mgreg_t *dest_cast = (host_mgreg_t*)dest;
/* Reconstruct the value type */
for (int k = 0; k < ainfo->nregs; k++) {
int storage_type = ainfo->pair_storage [k];
int reg_storage = ainfo->pair_regs [k];
switch (storage_type) {
case ArgInIReg:
*dest_cast = ccontext->gregs [reg_storage];
break;
case ArgInFloatSSEReg:
case ArgInDoubleSSEReg:
*(double*)dest_cast = ccontext->fregs [reg_storage];
break;
default:
g_assert_not_reached ();
}
dest_cast++;
}
}
static void
arg_set_val (CallContext *ccontext, ArgInfo *ainfo, gpointer src)
{
g_assert (arg_need_temp (ainfo));
host_mgreg_t *src_cast = (host_mgreg_t*)src;
for (int k = 0; k < ainfo->nregs; k++) {
int storage_type = ainfo->pair_storage [k];
int reg_storage = ainfo->pair_regs [k];
switch (storage_type) {
case ArgInIReg:
ccontext->gregs [reg_storage] = *src_cast;
break;
case ArgInFloatSSEReg:
case ArgInDoubleSSEReg:
ccontext->fregs [reg_storage] = *(double*)src_cast;
break;
default:
g_assert_not_reached ();
}
src_cast++;
}
}
void
mono_arch_set_native_call_context_args (CallContext *ccontext, gpointer frame, MonoMethodSignature *sig)
{
CallInfo *cinfo = get_call_info (NULL, sig);
const MonoEECallbacks *interp_cb = mini_get_interp_callbacks ();
gpointer storage;
ArgInfo *ainfo;
memset (ccontext, 0, sizeof (CallContext));
ccontext->stack_size = ALIGN_TO (cinfo->stack_usage, MONO_ARCH_FRAME_ALIGNMENT);
if (ccontext->stack_size)
ccontext->stack = (guint8*)g_calloc (1, ccontext->stack_size);
if (sig->ret->type != MONO_TYPE_VOID) {
ainfo = &cinfo->ret;
if (ainfo->storage == ArgValuetypeAddrInIReg) {
storage = interp_cb->frame_arg_to_storage ((MonoInterpFrameHandle)frame, sig, -1);
ccontext->gregs [cinfo->ret.reg] = (host_mgreg_t)storage;
}
}
g_assert (!sig->hasthis);
for (int i = 0; i < sig->param_count; i++) {
ainfo = &cinfo->args [i];
if (ainfo->storage == ArgValuetypeAddrInIReg || ainfo->storage == ArgValuetypeAddrOnStack) {
storage = arg_get_storage (ccontext, ainfo);
*(gpointer *)storage = interp_cb->frame_arg_to_storage (frame, sig, i);
continue;
}
int temp_size = arg_need_temp (ainfo);
if (temp_size)
storage = alloca (temp_size); // FIXME? alloca in a loop
else
storage = arg_get_storage (ccontext, ainfo);
interp_cb->frame_arg_to_data ((MonoInterpFrameHandle)frame, sig, i, storage);
if (temp_size)
arg_set_val (ccontext, ainfo, storage);
}
g_free (cinfo);
}
void
mono_arch_set_native_call_context_ret (CallContext *ccontext, gpointer frame, MonoMethodSignature *sig, gpointer retp)
{
const MonoEECallbacks *interp_cb;
CallInfo *cinfo;
gpointer storage;
ArgInfo *ainfo;
if (sig->ret->type == MONO_TYPE_VOID)
return;
interp_cb = mini_get_interp_callbacks ();
cinfo = get_call_info (NULL, sig);
ainfo = &cinfo->ret;
if (retp) {
g_assert (cinfo->ret.storage == ArgValuetypeAddrInIReg);
interp_cb->frame_arg_to_data ((MonoInterpFrameHandle)frame, sig, -1, retp);
#ifdef TARGET_WIN32
// The Windows x64 ABI ainfo implementation includes info on how to return the value type address back to the caller.
storage = arg_get_storage (ccontext, ainfo);
*(gpointer *)storage = retp;
#endif
} else {
g_assert (cinfo->ret.storage != ArgValuetypeAddrInIReg);
int temp_size = arg_need_temp (ainfo);
if (temp_size)
storage = alloca (temp_size);
else
storage = arg_get_storage (ccontext, ainfo);
memset (ccontext, 0, sizeof (CallContext)); // FIXME
interp_cb->frame_arg_to_data ((MonoInterpFrameHandle)frame, sig, -1, storage);
if (temp_size)
arg_set_val (ccontext, ainfo, storage);
}
g_free (cinfo);
}
gpointer
mono_arch_get_native_call_context_args (CallContext *ccontext, gpointer frame, MonoMethodSignature *sig)
{
const MonoEECallbacks *interp_cb = mini_get_interp_callbacks ();
CallInfo *cinfo = get_call_info (NULL, sig);
gpointer storage;
ArgInfo *ainfo;
for (int i = 0; i < sig->param_count + sig->hasthis; i++) {
ainfo = &cinfo->args [i];
if (ainfo->storage == ArgValuetypeAddrInIReg || ainfo->storage == ArgValuetypeAddrOnStack) {
storage = arg_get_storage (ccontext, ainfo);
interp_cb->data_to_frame_arg ((MonoInterpFrameHandle)frame, sig, i, *(gpointer *)storage);
continue;
}
int temp_size = arg_need_temp (ainfo);
if (temp_size) {
storage = alloca (temp_size); // FIXME? alloca in a loop
arg_get_val (ccontext, ainfo, storage);
} else {
storage = arg_get_storage (ccontext, ainfo);
}
interp_cb->data_to_frame_arg ((MonoInterpFrameHandle)frame, sig, i, storage);
}
storage = NULL;
if (sig->ret->type != MONO_TYPE_VOID) {
ainfo = &cinfo->ret;
if (ainfo->storage == ArgValuetypeAddrInIReg)
storage = (gpointer) ccontext->gregs [cinfo->ret.reg];
}
g_free (cinfo);
return storage;
}
void
mono_arch_get_native_call_context_ret (CallContext *ccontext, gpointer frame, MonoMethodSignature *sig)
{
const MonoEECallbacks *interp_cb;
CallInfo *cinfo;
ArgInfo *ainfo;
gpointer storage;
/* No return value */
if (sig->ret->type == MONO_TYPE_VOID)
return;
interp_cb = mini_get_interp_callbacks ();
cinfo = get_call_info (NULL, sig);
ainfo = &cinfo->ret;
/* The return values were stored directly at address passed in reg */
if (cinfo->ret.storage != ArgValuetypeAddrInIReg) {
int temp_size = arg_need_temp (ainfo);
if (temp_size) {
storage = alloca (temp_size);
arg_get_val (ccontext, ainfo, storage);
} else {
storage = arg_get_storage (ccontext, ainfo);
}
interp_cb->data_to_frame_arg ((MonoInterpFrameHandle)frame, sig, -1, storage);
}
g_free (cinfo);
}
/*
* mono_arch_get_argument_info:
* @csig: a method signature
* @param_count: the number of parameters to consider
* @arg_info: an array to store the result infos
*
* Gathers information on parameters such as size, alignment and
 * padding. arg_info should be large enough to hold param_count + 1 entries.
*
* Returns the size of the argument area on the stack.
*/
int
mono_arch_get_argument_info (MonoMethodSignature *csig, int param_count, MonoJitArgumentInfo *arg_info)
{
int k;
CallInfo *cinfo = get_call_info (NULL, csig);
guint32 args_size = cinfo->stack_usage;
/* The arguments are saved to a stack area in mono_arch_instrument_prolog */
if (csig->hasthis) {
arg_info [0].offset = 0;
}
for (k = 0; k < param_count; k++) {
arg_info [k + 1].offset = ((k + csig->hasthis) * 8);
/* FIXME: */
arg_info [k + 1].size = 0;
}
g_free (cinfo);
return args_size;
}
#ifndef DISABLE_JIT
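/*
 * mono_arch_tailcall_supported:
 *
 *   A tailcall is only supported when the callee's stack usage fits within
 * the caller's, the return value storage matches, and no callee parameter is
 * passed as the address of a local (ArgValuetypeAddr*), since the caller's
 * frame is reused and such addresses would be invalidated.
 */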
gboolean
mono_arch_tailcall_supported (MonoCompile *cfg, MonoMethodSignature *caller_sig, MonoMethodSignature *callee_sig, gboolean virtual_)
{
CallInfo *caller_info = get_call_info (NULL, caller_sig);
CallInfo *callee_info = get_call_info (NULL, callee_sig);
gboolean res = IS_SUPPORTED_TAILCALL (callee_info->stack_usage <= caller_info->stack_usage)
&& IS_SUPPORTED_TAILCALL (callee_info->ret.storage == caller_info->ret.storage);
// Limit stack_usage to 1G. Assume 32bit limits when we move parameters.
res &= IS_SUPPORTED_TAILCALL (callee_info->stack_usage < (1 << 30));
res &= IS_SUPPORTED_TAILCALL (caller_info->stack_usage < (1 << 30));
// valuetype parameters are address of local
const ArgInfo *ainfo;
ainfo = callee_info->args + callee_sig->hasthis;
for (int i = 0; res && i < callee_sig->param_count; ++i) {
res = IS_SUPPORTED_TAILCALL (ainfo [i].storage != ArgValuetypeAddrInIReg)
&& IS_SUPPORTED_TAILCALL (ainfo [i].storage != ArgValuetypeAddrOnStack);
}
g_free (caller_info);
g_free (callee_info);
return res;
}
#endif /* DISABLE_JIT */
/*
* Initialize the cpu to execute managed code.
*/
void
mono_arch_cpu_init (void)
{
#ifndef _MSC_VER
guint16 fpcw;
/* spec compliance requires running with double precision */
__asm__ __volatile__ ("fnstcw %0\n": "=m" (fpcw));
fpcw &= ~X86_FPCW_PRECC_MASK;
fpcw |= X86_FPCW_PREC_DOUBLE;
__asm__ __volatile__ ("fldcw %0\n": : "m" (fpcw));
__asm__ __volatile__ ("fnstcw %0\n": "=m" (fpcw));
#else
/* TODO: This is crashing on Win64 right now.
* _control87 (_PC_53, MCW_PC);
*/
#endif
}
/*
* Initialize architecture specific code.
*/
void
mono_arch_init (void)
{
#ifndef DISABLE_JIT
if (!mono_aot_only)
bp_trampoline = mini_get_breakpoint_trampoline ();
#endif
}
/*
* Cleanup architecture specific code.
*/
void
mono_arch_cleanup (void)
{
}
/*
* This function returns the optimizations supported on this cpu.
*/
guint32
mono_arch_cpu_optimizations (guint32 *exclude_mask)
{
guint32 opts = 0;
*exclude_mask = 0;
if (mono_hwcap_x86_has_cmov) {
opts |= MONO_OPT_CMOV;
if (mono_hwcap_x86_has_fcmov)
opts |= MONO_OPT_FCMOV;
else
*exclude_mask |= MONO_OPT_FCMOV;
} else {
*exclude_mask |= MONO_OPT_CMOV;
}
return opts;
}
MonoCPUFeatures
mono_arch_get_cpu_features (void)
{
guint64 features = MONO_CPU_INITED;
if (mono_hwcap_x86_has_sse1)
features |= MONO_CPU_X86_SSE;
if (mono_hwcap_x86_has_sse2)
features |= MONO_CPU_X86_SSE2;
if (mono_hwcap_x86_has_sse3)
features |= MONO_CPU_X86_SSE3;
if (mono_hwcap_x86_has_ssse3)
features |= MONO_CPU_X86_SSSE3;
if (mono_hwcap_x86_has_sse41)
features |= MONO_CPU_X86_SSE41;
if (mono_hwcap_x86_has_sse42)
features |= MONO_CPU_X86_SSE42;
if (mono_hwcap_x86_has_popcnt)
features |= MONO_CPU_X86_POPCNT;
if (mono_hwcap_x86_has_lzcnt)
features |= MONO_CPU_X86_LZCNT;
return (MonoCPUFeatures)features;
}
#ifndef DISABLE_JIT
GList *
mono_arch_get_allocatable_int_vars (MonoCompile *cfg)
{
GList *vars = NULL;
int i;
for (i = 0; i < cfg->num_varinfo; i++) {
MonoInst *ins = cfg->varinfo [i];
MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
/* unused vars */
if (vmv->range.first_use.abs_pos >= vmv->range.last_use.abs_pos)
continue;
if ((ins->flags & (MONO_INST_IS_DEAD|MONO_INST_VOLATILE|MONO_INST_INDIRECT)) ||
(ins->opcode != OP_LOCAL && ins->opcode != OP_ARG))
continue;
if (mono_is_regsize_var (ins->inst_vtype)) {
g_assert (MONO_VARINFO (cfg, i)->reg == -1);
g_assert (i == vmv->idx);
vars = g_list_prepend (vars, vmv);
}
}
vars = mono_varlist_sort (cfg, vars, 0);
return vars;
}
/**
* mono_arch_compute_omit_fp:
* Determine whether the frame pointer can be eliminated.
*/
static void
mono_arch_compute_omit_fp (MonoCompile *cfg)
{
MonoMethodSignature *sig;
MonoMethodHeader *header;
int i, locals_size;
CallInfo *cinfo;
if (cfg->arch.omit_fp_computed)
return;
header = cfg->header;
sig = mono_method_signature_internal (cfg->method);
if (!cfg->arch.cinfo)
cfg->arch.cinfo = get_call_info (cfg->mempool, sig);
cinfo = cfg->arch.cinfo;
/*
* FIXME: Remove some of the restrictions.
*/
cfg->arch.omit_fp = TRUE;
cfg->arch.omit_fp_computed = TRUE;
if (cfg->disable_omit_fp)
cfg->arch.omit_fp = FALSE;
if (!debug_omit_fp ())
cfg->arch.omit_fp = FALSE;
/*
if (cfg->method->save_lmf)
cfg->arch.omit_fp = FALSE;
*/
if (cfg->flags & MONO_CFG_HAS_ALLOCA)
cfg->arch.omit_fp = FALSE;
if (header->num_clauses)
cfg->arch.omit_fp = FALSE;
if (cfg->param_area)
cfg->arch.omit_fp = FALSE;
if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG))
cfg->arch.omit_fp = FALSE;
for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
ArgInfo *ainfo = &cinfo->args [i];
if (ainfo->storage == ArgOnStack || ainfo->storage == ArgValuetypeAddrInIReg || ainfo->storage == ArgValuetypeAddrOnStack) {
/*
* The stack offset can only be determined when the frame
* size is known.
*/
cfg->arch.omit_fp = FALSE;
}
}
locals_size = 0;
for (i = cfg->locals_start; i < cfg->num_varinfo; i++) {
MonoInst *ins = cfg->varinfo [i];
int ialign;
locals_size += mono_type_size (ins->inst_vtype, &ialign);
}
}
GList *
mono_arch_get_global_int_regs (MonoCompile *cfg)
{
GList *regs = NULL;
mono_arch_compute_omit_fp (cfg);
if (cfg->arch.omit_fp)
regs = g_list_prepend (regs, (gpointer)AMD64_RBP);
/* We use the callee saved registers for global allocation */
regs = g_list_prepend (regs, (gpointer)AMD64_RBX);
regs = g_list_prepend (regs, (gpointer)AMD64_R12);
regs = g_list_prepend (regs, (gpointer)AMD64_R13);
regs = g_list_prepend (regs, (gpointer)AMD64_R14);
regs = g_list_prepend (regs, (gpointer)AMD64_R15);
#ifdef TARGET_WIN32
regs = g_list_prepend (regs, (gpointer)AMD64_RDI);
regs = g_list_prepend (regs, (gpointer)AMD64_RSI);
#endif
return regs;
}
/*
* mono_arch_regalloc_cost:
*
* Return the cost, in number of memory references, of the action of
* allocating the variable VMV into a register during global register
* allocation.
*/
guint32
mono_arch_regalloc_cost (MonoCompile *cfg, MonoMethodVar *vmv)
{
MonoInst *ins = cfg->varinfo [vmv->idx];
if (cfg->method->save_lmf)
/* The register is already saved */
/* subtract 1 for the invisible store in the prolog */
return (ins->opcode == OP_ARG) ? 0 : 1;
else
/* push+pop */
return (ins->opcode == OP_ARG) ? 1 : 2;
}
/*
* mono_arch_fill_argument_info:
*
* Populate cfg->args, cfg->ret and cfg->vret_addr with information about the arguments
* of the method.
*/
void
mono_arch_fill_argument_info (MonoCompile *cfg)
{
MonoMethodSignature *sig;
MonoInst *ins;
int i;
CallInfo *cinfo;
sig = mono_method_signature_internal (cfg->method);
cinfo = cfg->arch.cinfo;
/*
* Contrary to mono_arch_allocate_vars (), the information should describe
* where the arguments are at the beginning of the method, not where they can be
* accessed during the execution of the method. The latter makes no sense for the
* global register allocator, since a variable can be in more than one location.
*/
switch (cinfo->ret.storage) {
case ArgInIReg:
case ArgInFloatSSEReg:
case ArgInDoubleSSEReg:
cfg->ret->opcode = OP_REGVAR;
cfg->ret->inst_c0 = cinfo->ret.reg;
break;
case ArgValuetypeInReg:
cfg->ret->opcode = OP_REGOFFSET;
cfg->ret->inst_basereg = -1;
cfg->ret->inst_offset = -1;
break;
case ArgNone:
break;
default:
g_assert_not_reached ();
}
for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
ArgInfo *ainfo = &cinfo->args [i];
ins = cfg->args [i];
switch (ainfo->storage) {
case ArgInIReg:
case ArgInFloatSSEReg:
case ArgInDoubleSSEReg:
ins->opcode = OP_REGVAR;
ins->inst_c0 = ainfo->reg;
break;
case ArgOnStack:
ins->opcode = OP_REGOFFSET;
ins->inst_basereg = -1;
ins->inst_offset = -1;
break;
case ArgValuetypeInReg:
/* Dummy */
ins->opcode = OP_NOP;
break;
default:
g_assert_not_reached ();
}
}
}
void
mono_arch_allocate_vars (MonoCompile *cfg)
{
MonoType *sig_ret;
MonoMethodSignature *sig;
MonoInst *ins;
int i, offset;
guint32 locals_stack_size, locals_stack_align;
gint32 *offsets;
CallInfo *cinfo;
sig = mono_method_signature_internal (cfg->method);
cinfo = cfg->arch.cinfo;
sig_ret = mini_get_underlying_type (sig->ret);
mono_arch_compute_omit_fp (cfg);
/*
* We use the ABI calling conventions for managed code as well.
* Exception: valuetypes are only sometimes passed or returned in registers.
*/
/*
* The stack looks like this:
* <incoming arguments passed on the stack>
* <return value>
* <lmf/caller saved registers>
* <locals>
* <spill area>
* <localloc area> -> grows dynamically
* <params area>
*/
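/*
 * For example (illustrative only, with the frame pointer in use):
 *
 *   %rbp + 16, ... : incoming stack arguments
 *   %rbp + 8       : return address
 *   %rbp + 0       : saved %rbp
 *   %rbp - 8, ...  : callee saved registers, then locals and spill slots
 *   %rsp           : params area for outgoing calls
 *
 * The concrete offsets are computed below and vary per method.
 */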
if (cfg->arch.omit_fp) {
cfg->flags |= MONO_CFG_HAS_SPILLUP;
cfg->frame_reg = AMD64_RSP;
offset = 0;
} else {
/* Locals are allocated backwards from %fp */
cfg->frame_reg = AMD64_RBP;
offset = 0;
}
cfg->arch.saved_iregs = cfg->used_int_regs;
if (cfg->method->save_lmf) {
/* Save all callee-saved registers normally (except RBP, if not already used), and restore them when unwinding through an LMF */
guint32 iregs_to_save = AMD64_CALLEE_SAVED_REGS & ~(1<<AMD64_RBP);
cfg->arch.saved_iregs |= iregs_to_save;
}
if (cfg->arch.omit_fp)
cfg->arch.reg_save_area_offset = offset;
/* Reserve space for callee saved registers */
for (i = 0; i < AMD64_NREG; ++i)
if (AMD64_IS_CALLEE_SAVED_REG (i) && (cfg->arch.saved_iregs & (1 << i))) {
offset += sizeof (target_mgreg_t);
}
if (!cfg->arch.omit_fp)
cfg->arch.reg_save_area_offset = -offset;
if (sig_ret->type != MONO_TYPE_VOID) {
switch (cinfo->ret.storage) {
case ArgInIReg:
case ArgInFloatSSEReg:
case ArgInDoubleSSEReg:
cfg->ret->opcode = OP_REGVAR;
cfg->ret->inst_c0 = cinfo->ret.reg;
cfg->ret->dreg = cinfo->ret.reg;
break;
case ArgValuetypeAddrInIReg:
case ArgGsharedvtVariableInReg:
/* The register is volatile */
cfg->vret_addr->opcode = OP_REGOFFSET;
cfg->vret_addr->inst_basereg = cfg->frame_reg;
if (cfg->arch.omit_fp) {
cfg->vret_addr->inst_offset = offset;
offset += 8;
} else {
offset += 8;
cfg->vret_addr->inst_offset = -offset;
}
if (G_UNLIKELY (cfg->verbose_level > 1)) {
printf ("vret_addr =");
mono_print_ins (cfg->vret_addr);
}
break;
case ArgValuetypeInReg:
/* Allocate a local to hold the result, the epilog will copy it to the correct place */
cfg->ret->opcode = OP_REGOFFSET;
cfg->ret->inst_basereg = cfg->frame_reg;
if (cfg->arch.omit_fp) {
cfg->ret->inst_offset = offset;
offset += cinfo->ret.pair_storage [1] == ArgNone ? 8 : 16;
} else {
offset += cinfo->ret.pair_storage [1] == ArgNone ? 8 : 16;
cfg->ret->inst_offset = - offset;
}
break;
default:
g_assert_not_reached ();
}
}
/* Allocate locals */
offsets = mono_allocate_stack_slots (cfg, cfg->arch.omit_fp ? FALSE: TRUE, &locals_stack_size, &locals_stack_align);
if (locals_stack_align) {
offset += (locals_stack_align - 1);
offset &= ~(locals_stack_align - 1);
}
if (cfg->arch.omit_fp) {
cfg->locals_min_stack_offset = offset;
cfg->locals_max_stack_offset = offset + locals_stack_size;
} else {
cfg->locals_min_stack_offset = - (offset + locals_stack_size);
cfg->locals_max_stack_offset = - offset;
}
for (i = cfg->locals_start; i < cfg->num_varinfo; i++) {
if (offsets [i] != -1) {
MonoInst *ins = cfg->varinfo [i];
ins->opcode = OP_REGOFFSET;
ins->inst_basereg = cfg->frame_reg;
if (cfg->arch.omit_fp)
ins->inst_offset = (offset + offsets [i]);
else
ins->inst_offset = - (offset + offsets [i]);
//printf ("allocated local %d to ", i); mono_print_tree_nl (ins);
}
}
offset += locals_stack_size;
if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG)) {
g_assert (!cfg->arch.omit_fp);
g_assert (cinfo->sig_cookie.storage == ArgOnStack);
cfg->sig_cookie = cinfo->sig_cookie.offset + ARGS_OFFSET;
}
for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
ins = cfg->args [i];
if (ins->opcode != OP_REGVAR) {
ArgInfo *ainfo = &cinfo->args [i];
gboolean inreg = TRUE;
/* FIXME: Allocate volatile arguments to registers */
if (ins->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT))
inreg = FALSE;
/*
* Under AMD64, all registers used to pass arguments to functions
* are volatile across calls.
* FIXME: Optimize this.
*/
if ((ainfo->storage == ArgInIReg) || (ainfo->storage == ArgInFloatSSEReg) || (ainfo->storage == ArgInDoubleSSEReg) || (ainfo->storage == ArgValuetypeInReg) || (ainfo->storage == ArgGSharedVtInReg))
inreg = FALSE;
ins->opcode = OP_REGOFFSET;
switch (ainfo->storage) {
case ArgInIReg:
case ArgInFloatSSEReg:
case ArgInDoubleSSEReg:
case ArgGSharedVtInReg:
if (inreg) {
ins->opcode = OP_REGVAR;
ins->dreg = ainfo->reg;
}
break;
case ArgOnStack:
case ArgGSharedVtOnStack:
g_assert (!cfg->arch.omit_fp);
ins->opcode = OP_REGOFFSET;
ins->inst_basereg = cfg->frame_reg;
ins->inst_offset = ainfo->offset + ARGS_OFFSET;
break;
case ArgValuetypeInReg:
break;
case ArgValuetypeAddrInIReg:
case ArgValuetypeAddrOnStack: {
MonoInst *indir;
g_assert (!cfg->arch.omit_fp);
g_assert (ainfo->storage == ArgValuetypeAddrInIReg || (ainfo->storage == ArgValuetypeAddrOnStack && ainfo->pair_storage [0] == ArgNone));
MONO_INST_NEW (cfg, indir, 0);
indir->opcode = OP_REGOFFSET;
if (ainfo->pair_storage [0] == ArgInIReg) {
indir->inst_basereg = cfg->frame_reg;
offset = ALIGN_TO (offset, sizeof (target_mgreg_t));
offset += sizeof (target_mgreg_t);
indir->inst_offset = - offset;
}
else {
indir->inst_basereg = cfg->frame_reg;
indir->inst_offset = ainfo->offset + ARGS_OFFSET;
}
ins->opcode = OP_VTARG_ADDR;
ins->inst_left = indir;
break;
}
default:
NOT_IMPLEMENTED;
}
if (!inreg && (ainfo->storage != ArgOnStack) && (ainfo->storage != ArgValuetypeAddrInIReg) && (ainfo->storage != ArgValuetypeAddrOnStack) && (ainfo->storage != ArgGSharedVtOnStack)) {
ins->opcode = OP_REGOFFSET;
ins->inst_basereg = cfg->frame_reg;
/* These arguments are saved to the stack in the prolog */
offset = ALIGN_TO (offset, sizeof (target_mgreg_t));
if (cfg->arch.omit_fp) {
ins->inst_offset = offset;
offset += (ainfo->storage == ArgValuetypeInReg) ? ainfo->nregs * sizeof (target_mgreg_t) : sizeof (target_mgreg_t);
// Arguments are not yet supported by the stack map creation code
//cfg->locals_max_stack_offset = MAX (cfg->locals_max_stack_offset, offset);
} else {
offset += (ainfo->storage == ArgValuetypeInReg) ? ainfo->nregs * sizeof (target_mgreg_t) : sizeof (target_mgreg_t);
ins->inst_offset = - offset;
//cfg->locals_min_stack_offset = MIN (cfg->locals_min_stack_offset, offset);
}
}
}
}
cfg->stack_offset = offset;
}
void
mono_arch_create_vars (MonoCompile *cfg)
{
MonoMethodSignature *sig;
CallInfo *cinfo;
sig = mono_method_signature_internal (cfg->method);
if (!cfg->arch.cinfo)
cfg->arch.cinfo = get_call_info (cfg->mempool, sig);
cinfo = cfg->arch.cinfo;
if (cinfo->ret.storage == ArgValuetypeInReg)
cfg->ret_var_is_local = TRUE;
if (cinfo->ret.storage == ArgValuetypeAddrInIReg || cinfo->ret.storage == ArgGsharedvtVariableInReg) {
cfg->vret_addr = mono_compile_create_var (cfg, mono_get_int_type (), OP_ARG);
if (G_UNLIKELY (cfg->verbose_level > 1)) {
printf ("vret_addr = ");
mono_print_ins (cfg->vret_addr);
}
}
if (cfg->gen_sdb_seq_points) {
MonoInst *ins;
if (cfg->compile_aot) {
MonoInst *ins = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL);
ins->flags |= MONO_INST_VOLATILE;
cfg->arch.seq_point_info_var = ins;
}
ins = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL);
ins->flags |= MONO_INST_VOLATILE;
cfg->arch.ss_tramp_var = ins;
ins = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL);
ins->flags |= MONO_INST_VOLATILE;
cfg->arch.bp_tramp_var = ins;
}
if (cfg->method->save_lmf)
cfg->create_lmf_var = TRUE;
if (cfg->method->save_lmf) {
cfg->lmf_ir = TRUE;
}
}
static void
add_outarg_reg (MonoCompile *cfg, MonoCallInst *call, ArgStorage storage, int reg, MonoInst *tree)
{
MonoInst *ins;
switch (storage) {
case ArgInIReg:
MONO_INST_NEW (cfg, ins, OP_MOVE);
ins->dreg = mono_alloc_ireg_copy (cfg, tree->dreg);
ins->sreg1 = tree->dreg;
MONO_ADD_INS (cfg->cbb, ins);
mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, reg, FALSE);
break;
case ArgInFloatSSEReg:
MONO_INST_NEW (cfg, ins, OP_AMD64_SET_XMMREG_R4);
ins->dreg = mono_alloc_freg (cfg);
ins->sreg1 = tree->dreg;
MONO_ADD_INS (cfg->cbb, ins);
mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, reg, TRUE);
break;
case ArgInDoubleSSEReg:
MONO_INST_NEW (cfg, ins, OP_FMOVE);
ins->dreg = mono_alloc_freg (cfg);
ins->sreg1 = tree->dreg;
MONO_ADD_INS (cfg->cbb, ins);
mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, reg, TRUE);
break;
default:
g_assert_not_reached ();
}
}
static int
arg_storage_to_load_membase (ArgStorage storage)
{
switch (storage) {
case ArgInIReg:
#if defined(MONO_ARCH_ILP32)
return OP_LOADI8_MEMBASE;
#else
return OP_LOAD_MEMBASE;
#endif
case ArgInDoubleSSEReg:
return OP_LOADR8_MEMBASE;
case ArgInFloatSSEReg:
return OP_LOADR4_MEMBASE;
default:
g_assert_not_reached ();
}
return -1;
}
static void
emit_sig_cookie (MonoCompile *cfg, MonoCallInst *call, CallInfo *cinfo)
{
MonoMethodSignature *tmp_sig;
int sig_reg;
if (call->tailcall) // FIXME tailcall is not always yet initialized.
NOT_IMPLEMENTED;
g_assert (cinfo->sig_cookie.storage == ArgOnStack);
/*
* mono_ArgIterator_Setup assumes the signature cookie is
* passed first and all the arguments which were before it are
* passed on the stack after the signature. So compensate by
* passing a different signature.
*/
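/*
 * Example (illustrative): for a signature with sentinelpos == 2, tmp_sig
 * drops the two fixed parameters and describes only the variadic ones; a
 * constant pointing to this signature is then stored at the sig cookie's
 * stack slot, where the argument iterator expects to find it.
 */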
tmp_sig = mono_metadata_signature_dup_full (m_class_get_image (cfg->method->klass), call->signature);
tmp_sig->param_count -= call->signature->sentinelpos;
tmp_sig->sentinelpos = 0;
memcpy (tmp_sig->params, call->signature->params + call->signature->sentinelpos, tmp_sig->param_count * sizeof (MonoType*));
sig_reg = mono_alloc_ireg (cfg);
MONO_EMIT_NEW_SIGNATURECONST (cfg, sig_reg, tmp_sig);
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, AMD64_RSP, cinfo->sig_cookie.offset, sig_reg);
}
#ifdef ENABLE_LLVM
static LLVMArgStorage
arg_storage_to_llvm_arg_storage (MonoCompile *cfg, ArgStorage storage)
{
switch (storage) {
case ArgInIReg:
return LLVMArgInIReg;
case ArgNone:
return LLVMArgNone;
case ArgGSharedVtInReg:
case ArgGSharedVtOnStack:
return LLVMArgGSharedVt;
default:
g_assert_not_reached ();
return LLVMArgNone;
}
}
LLVMCallInfo*
mono_arch_get_llvm_call_info (MonoCompile *cfg, MonoMethodSignature *sig)
{
int i, n;
CallInfo *cinfo;
ArgInfo *ainfo;
int j;
LLVMCallInfo *linfo;
MonoType *t, *sig_ret;
n = sig->param_count + sig->hasthis;
sig_ret = mini_get_underlying_type (sig->ret);
cinfo = get_call_info (cfg->mempool, sig);
linfo = mono_mempool_alloc0 (cfg->mempool, sizeof (LLVMCallInfo) + (sizeof (LLVMArgInfo) * n));
/*
* LLVM always uses the native ABI while we use our own ABI; the
* only difference is the handling of vtypes:
* - we only pass/receive them in registers in some cases, and only
* in 1 or 2 integer registers.
*/
switch (cinfo->ret.storage) {
case ArgNone:
linfo->ret.storage = LLVMArgNone;
break;
case ArgInIReg:
case ArgInFloatSSEReg:
case ArgInDoubleSSEReg:
linfo->ret.storage = LLVMArgNormal;
break;
case ArgValuetypeInReg: {
ainfo = &cinfo->ret;
if (sig->pinvoke &&
(ainfo->pair_storage [0] == ArgInFloatSSEReg || ainfo->pair_storage [0] == ArgInDoubleSSEReg ||
ainfo->pair_storage [1] == ArgInFloatSSEReg || ainfo->pair_storage [1] == ArgInDoubleSSEReg)) {
cfg->exception_message = g_strdup ("pinvoke + vtype ret");
cfg->disable_llvm = TRUE;
return linfo;
}
linfo->ret.storage = LLVMArgVtypeInReg;
for (j = 0; j < 2; ++j)
linfo->ret.pair_storage [j] = arg_storage_to_llvm_arg_storage (cfg, ainfo->pair_storage [j]);
break;
}
case ArgValuetypeAddrInIReg:
case ArgGsharedvtVariableInReg:
/* Vtype returned using a hidden argument */
linfo->ret.storage = LLVMArgVtypeRetAddr;
linfo->vret_arg_index = cinfo->vret_arg_index;
break;
default:
g_assert_not_reached ();
break;
}
for (i = 0; i < n; ++i) {
ainfo = cinfo->args + i;
if (i >= sig->hasthis)
t = sig->params [i - sig->hasthis];
else
t = mono_get_int_type ();
t = mini_type_get_underlying_type (t);
linfo->args [i].storage = LLVMArgNone;
switch (ainfo->storage) {
case ArgInIReg:
linfo->args [i].storage = LLVMArgNormal;
break;
case ArgInDoubleSSEReg:
case ArgInFloatSSEReg:
linfo->args [i].storage = LLVMArgNormal;
break;
case ArgOnStack:
if (MONO_TYPE_ISSTRUCT (t))
linfo->args [i].storage = LLVMArgVtypeByVal;
else
linfo->args [i].storage = LLVMArgNormal;
break;
case ArgValuetypeInReg:
if (sig->pinvoke &&
(ainfo->pair_storage [0] == ArgInFloatSSEReg || ainfo->pair_storage [0] == ArgInDoubleSSEReg ||
ainfo->pair_storage [1] == ArgInFloatSSEReg || ainfo->pair_storage [1] == ArgInDoubleSSEReg)) {
cfg->exception_message = g_strdup ("pinvoke + vtypes");
cfg->disable_llvm = TRUE;
return linfo;
}
linfo->args [i].storage = LLVMArgVtypeInReg;
for (j = 0; j < 2; ++j)
linfo->args [i].pair_storage [j] = arg_storage_to_llvm_arg_storage (cfg, ainfo->pair_storage [j]);
break;
case ArgGSharedVtInReg:
case ArgGSharedVtOnStack:
linfo->args [i].storage = LLVMArgGSharedVt;
break;
case ArgValuetypeAddrInIReg:
case ArgValuetypeAddrOnStack:
linfo->args [i].storage = LLVMArgVtypeAddr;
break;
default:
cfg->exception_message = g_strdup ("ainfo->storage");
cfg->disable_llvm = TRUE;
break;
}
}
return linfo;
}
#endif
void
mono_arch_emit_call (MonoCompile *cfg, MonoCallInst *call)
{
MonoInst *arg, *in;
MonoMethodSignature *sig;
int i, n;
CallInfo *cinfo;
ArgInfo *ainfo;
sig = call->signature;
n = sig->param_count + sig->hasthis;
cinfo = get_call_info (cfg->mempool, sig);
if (COMPILE_LLVM (cfg)) {
/* We shouldn't be called in the llvm case */
cfg->disable_llvm = TRUE;
return;
}
/*
* Emit all arguments which are passed on the stack to prevent register
* allocation problems.
*/
for (i = 0; i < n; ++i) {
MonoType *t;
ainfo = cinfo->args + i;
in = call->args [i];
if (sig->hasthis && i == 0)
t = mono_get_object_type ();
else
t = sig->params [i - sig->hasthis];
t = mini_get_underlying_type (t);
//XXX what about ArgGSharedVtOnStack here?
if (ainfo->storage == ArgOnStack && !MONO_TYPE_ISSTRUCT (t)) {
if (!m_type_is_byref (t)) {
if (t->type == MONO_TYPE_R4)
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG, AMD64_RSP, ainfo->offset, in->dreg);
else if (t->type == MONO_TYPE_R8)
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, AMD64_RSP, ainfo->offset, in->dreg);
else
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, AMD64_RSP, ainfo->offset, in->dreg);
} else {
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, AMD64_RSP, ainfo->offset, in->dreg);
}
if (cfg->compute_gc_maps) {
MonoInst *def;
EMIT_NEW_GC_PARAM_SLOT_LIVENESS_DEF (cfg, def, ainfo->offset, t);
}
}
}
/*
* Emit all parameters passed in registers in non-reverse order for better readability
* and to help the optimization in emit_prolog ().
*/
for (i = 0; i < n; ++i) {
ainfo = cinfo->args + i;
in = call->args [i];
if (ainfo->storage == ArgInIReg)
add_outarg_reg (cfg, call, ainfo->storage, ainfo->reg, in);
}
for (i = n - 1; i >= 0; --i) {
MonoType *t;
ainfo = cinfo->args + i;
in = call->args [i];
if (sig->hasthis && i == 0)
t = mono_get_object_type ();
else
t = sig->params [i - sig->hasthis];
t = mini_get_underlying_type (t);
switch (ainfo->storage) {
case ArgInIReg:
/* Already done */
break;
case ArgInFloatSSEReg:
case ArgInDoubleSSEReg:
add_outarg_reg (cfg, call, ainfo->storage, ainfo->reg, in);
break;
case ArgOnStack:
case ArgValuetypeInReg:
case ArgValuetypeAddrInIReg:
case ArgValuetypeAddrOnStack:
case ArgGSharedVtInReg:
case ArgGSharedVtOnStack: {
if (ainfo->storage == ArgOnStack && !MONO_TYPE_ISSTRUCT (t))
/* Already emitted above */
break;
guint32 align;
guint32 size;
if (sig->pinvoke && !sig->marshalling_disabled)
size = mono_type_native_stack_size (t, &align);
else {
/*
* Other backends use mono_type_stack_size (), but that
* aligns the size to 8, which is larger than the size of
* the source, leading to reads of invalid memory if the
* source is at the end of address space.
*/
size = mono_class_value_size (mono_class_from_mono_type_internal (t), &align);
}
if (size >= 10000) {
/* Avoid asserts in emit_memcpy () */
mono_cfg_set_exception_invalid_program (cfg, g_strdup_printf ("Passing an argument of size '%d'.", size));
/* Continue normally */
}
if (size > 0 || ainfo->pass_empty_struct) {
MONO_INST_NEW (cfg, arg, OP_OUTARG_VT);
arg->sreg1 = in->dreg;
arg->klass = mono_class_from_mono_type_internal (t);
arg->backend.size = size;
arg->inst_p0 = call;
arg->inst_p1 = mono_mempool_alloc (cfg->mempool, sizeof (ArgInfo));
memcpy (arg->inst_p1, ainfo, sizeof (ArgInfo));
MONO_ADD_INS (cfg->cbb, arg);
}
break;
}
default:
g_assert_not_reached ();
}
if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos))
/* Emit the signature cookie just before the implicit arguments */
emit_sig_cookie (cfg, call, cinfo);
}
/* Handle the case where there are no implicit arguments */
if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (n == sig->sentinelpos))
emit_sig_cookie (cfg, call, cinfo);
switch (cinfo->ret.storage) {
case ArgValuetypeInReg:
if (cinfo->ret.pair_storage [0] == ArgInIReg && cinfo->ret.pair_storage [1] == ArgNone) {
/*
* Tell the JIT to use a more efficient calling convention: call using
* OP_CALL, compute the result location after the call, and save the
* result there.
*/
call->vret_in_reg = TRUE;
/*
* Nullify the instruction computing the vret addr to enable
* future optimizations.
*/
if (call->vret_var)
NULLIFY_INS (call->vret_var);
} else {
if (call->tailcall)
NOT_IMPLEMENTED;
/*
* The valuetype is in RAX:RDX after the call, need to be copied to
* the stack. Push the address here, so the call instruction can
* access it.
*/
if (!cfg->arch.vret_addr_loc) {
cfg->arch.vret_addr_loc = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL);
/* Prevent it from being register allocated or optimized away */
cfg->arch.vret_addr_loc->flags |= MONO_INST_VOLATILE;
}
MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->arch.vret_addr_loc->dreg, call->vret_var->dreg);
}
break;
case ArgValuetypeAddrInIReg:
case ArgGsharedvtVariableInReg: {
MonoInst *vtarg;
MONO_INST_NEW (cfg, vtarg, OP_MOVE);
vtarg->sreg1 = call->vret_var->dreg;
vtarg->dreg = mono_alloc_preg (cfg);
MONO_ADD_INS (cfg->cbb, vtarg);
mono_call_inst_add_outarg_reg (cfg, call, vtarg->dreg, cinfo->ret.reg, FALSE);
break;
}
default:
break;
}
if (cfg->method->save_lmf) {
MONO_INST_NEW (cfg, arg, OP_AMD64_SAVE_SP_TO_LMF);
MONO_ADD_INS (cfg->cbb, arg);
}
call->stack_usage = cinfo->stack_usage;
}
void
mono_arch_emit_outarg_vt (MonoCompile *cfg, MonoInst *ins, MonoInst *src)
{
MonoInst *arg;
MonoCallInst *call = (MonoCallInst*)ins->inst_p0;
ArgInfo *ainfo = (ArgInfo*)ins->inst_p1;
int size = ins->backend.size;
switch (ainfo->storage) {
case ArgValuetypeInReg: {
MonoInst *load;
int part;
for (part = 0; part < 2; ++part) {
if (ainfo->pair_storage [part] == ArgNone)
continue;
if (ainfo->pass_empty_struct) {
// Pass empty struct value as 0 on platforms representing empty structs as 1 byte.
NEW_ICONST (cfg, load, 0);
}
else {
MONO_INST_NEW (cfg, load, arg_storage_to_load_membase (ainfo->pair_storage [part]));
load->inst_basereg = src->dreg;
load->inst_offset = part * sizeof (target_mgreg_t);
switch (ainfo->pair_storage [part]) {
case ArgInIReg:
load->dreg = mono_alloc_ireg (cfg);
break;
case ArgInDoubleSSEReg:
case ArgInFloatSSEReg:
load->dreg = mono_alloc_freg (cfg);
break;
default:
g_assert_not_reached ();
}
}
MONO_ADD_INS (cfg->cbb, load);
add_outarg_reg (cfg, call, ainfo->pair_storage [part], ainfo->pair_regs [part], load);
}
break;
}
case ArgValuetypeAddrInIReg:
case ArgValuetypeAddrOnStack: {
MonoInst *vtaddr, *load;
g_assert (ainfo->storage == ArgValuetypeAddrInIReg || (ainfo->storage == ArgValuetypeAddrOnStack && ainfo->pair_storage [0] == ArgNone));
vtaddr = mono_compile_create_var (cfg, m_class_get_byval_arg (ins->klass), OP_LOCAL);
vtaddr->backend.is_pinvoke = call->signature->pinvoke && !call->signature->marshalling_disabled;
MONO_INST_NEW (cfg, load, OP_LDADDR);
cfg->has_indirection = TRUE;
load->inst_p0 = vtaddr;
vtaddr->flags |= MONO_INST_INDIRECT;
load->type = STACK_MP;
load->klass = vtaddr->klass;
load->dreg = mono_alloc_ireg (cfg);
MONO_ADD_INS (cfg->cbb, load);
mini_emit_memcpy (cfg, load->dreg, 0, src->dreg, 0, size, TARGET_SIZEOF_VOID_P);
if (ainfo->pair_storage [0] == ArgInIReg) {
MONO_INST_NEW (cfg, arg, OP_AMD64_LEA_MEMBASE);
arg->dreg = mono_alloc_ireg (cfg);
arg->sreg1 = load->dreg;
arg->inst_imm = 0;
MONO_ADD_INS (cfg->cbb, arg);
mono_call_inst_add_outarg_reg (cfg, call, arg->dreg, ainfo->pair_regs [0], FALSE);
} else {
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, AMD64_RSP, ainfo->offset, load->dreg);
}
break;
}
case ArgGSharedVtInReg:
/* Pass by addr */
mono_call_inst_add_outarg_reg (cfg, call, src->dreg, ainfo->reg, FALSE);
break;
case ArgGSharedVtOnStack:
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, AMD64_RSP, ainfo->offset, src->dreg);
break;
default:
if (size == 8) {
int dreg = mono_alloc_ireg (cfg);
MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, src->dreg, 0);
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, AMD64_RSP, ainfo->offset, dreg);
} else if (size <= 40) {
mini_emit_memcpy (cfg, AMD64_RSP, ainfo->offset, src->dreg, 0, size, TARGET_SIZEOF_VOID_P);
} else {
// FIXME: Code growth
mini_emit_memcpy (cfg, AMD64_RSP, ainfo->offset, src->dreg, 0, size, TARGET_SIZEOF_VOID_P);
}
if (cfg->compute_gc_maps) {
MonoInst *def;
EMIT_NEW_GC_PARAM_SLOT_LIVENESS_DEF (cfg, def, ainfo->offset, m_class_get_byval_arg (ins->klass));
}
}
}
void
mono_arch_emit_setret (MonoCompile *cfg, MonoMethod *method, MonoInst *val)
{
MonoType *ret = mini_get_underlying_type (mono_method_signature_internal (method)->ret);
if (ret->type == MONO_TYPE_R4) {
if (COMPILE_LLVM (cfg))
MONO_EMIT_NEW_UNALU (cfg, OP_FMOVE, cfg->ret->dreg, val->dreg);
else
MONO_EMIT_NEW_UNALU (cfg, OP_AMD64_SET_XMMREG_R4, cfg->ret->dreg, val->dreg);
return;
} else if (ret->type == MONO_TYPE_R8) {
MONO_EMIT_NEW_UNALU (cfg, OP_FMOVE, cfg->ret->dreg, val->dreg);
return;
}
MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
}
#endif /* DISABLE_JIT */
#define EMIT_COND_BRANCH(ins,cond,sign) \
if (ins->inst_true_bb->native_offset) { \
x86_branch (code, cond, cfg->native_code + ins->inst_true_bb->native_offset, sign); \
} else { \
mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_true_bb); \
if (optimize_branch_pred && \
x86_is_imm8 (ins->inst_true_bb->max_offset - offset)) \
x86_branch8 (code, cond, 0, sign); \
else \
x86_branch32 (code, cond, 0, sign); \
}
typedef struct {
MonoMethodSignature *sig;
CallInfo *cinfo;
/* Number of stack slots needed for stack-passed arguments */
int nstack_args;
/* Size in bytes of the temp area used to unbox Nullable<T> arguments */
int nullable_area;
} ArchDynCallInfo;
static gboolean
dyn_call_supported (MonoMethodSignature *sig, CallInfo *cinfo)
{
int i;
switch (cinfo->ret.storage) {
case ArgNone:
case ArgInIReg:
case ArgInFloatSSEReg:
case ArgInDoubleSSEReg:
case ArgValuetypeAddrInIReg:
case ArgValuetypeInReg:
break;
default:
return FALSE;
}
for (i = 0; i < cinfo->nargs; ++i) {
ArgInfo *ainfo = &cinfo->args [i];
switch (ainfo->storage) {
case ArgInIReg:
case ArgInFloatSSEReg:
case ArgInDoubleSSEReg:
case ArgValuetypeInReg:
case ArgValuetypeAddrInIReg:
case ArgValuetypeAddrOnStack:
case ArgOnStack:
break;
default:
return FALSE;
}
}
return TRUE;
}
/*
* mono_arch_dyn_call_prepare:
*
* Return a pointer to an arch-specific structure which contains information
* needed by mono_arch_get_dyn_call_args (). Return NULL if OP_DYN_CALL is not
* supported for SIG.
* This function is equivalent to ffi_prep_cif in libffi.
*/
MonoDynCallInfo*
mono_arch_dyn_call_prepare (MonoMethodSignature *sig)
{
ArchDynCallInfo *info;
CallInfo *cinfo;
int i, aindex;
cinfo = get_call_info (NULL, sig);
if (!dyn_call_supported (sig, cinfo)) {
g_free (cinfo);
return NULL;
}
info = g_new0 (ArchDynCallInfo, 1);
// FIXME: Preprocess the info to speed up get_dyn_call_args ().
info->sig = sig;
info->cinfo = cinfo;
info->nstack_args = 0;
for (i = 0; i < cinfo->nargs; ++i) {
ArgInfo *ainfo = &cinfo->args [i];
switch (ainfo->storage) {
case ArgOnStack:
case ArgValuetypeAddrOnStack:
info->nstack_args = MAX (info->nstack_args, (ainfo->offset / sizeof (target_mgreg_t)) + (ainfo->arg_size / sizeof (target_mgreg_t)));
break;
default:
break;
}
}
for (aindex = 0; aindex < sig->param_count; aindex++) {
MonoType *t = sig->params [aindex];
ArgInfo *ainfo = &cinfo->args [aindex + sig->hasthis];
if (m_type_is_byref (t))
continue;
switch (t->type) {
case MONO_TYPE_GENERICINST:
if (mono_class_is_nullable (mono_class_from_mono_type_internal (t))) {
MonoClass *klass = mono_class_from_mono_type_internal (t);
int size;
if (!(ainfo->storage == ArgValuetypeInReg || ainfo->storage == ArgOnStack)) {
/* Nullables need a temporary buffer; it's stored at the end of DynCallArgs.regs, after the stack args */
size = mono_class_value_size (klass, NULL);
info->nullable_area += size;
}
}
break;
default:
break;
}
}
info->nullable_area = ALIGN_TO (info->nullable_area, 16);
/* Keep the stack area 16 byte aligned: slots are 8 bytes, so round nstack_args up to an even count */
if (info->nstack_args & 1)
info->nstack_args ++;
return (MonoDynCallInfo*)info;
}
/*
* mono_arch_dyn_call_free:
*
* Free a MonoDynCallInfo structure.
*/
void
mono_arch_dyn_call_free (MonoDynCallInfo *info)
{
ArchDynCallInfo *ainfo = (ArchDynCallInfo*)info;
g_free (ainfo->cinfo);
g_free (ainfo);
}
int
mono_arch_dyn_call_get_buf_size (MonoDynCallInfo *info)
{
ArchDynCallInfo *ainfo = (ArchDynCallInfo*)info;
/* Extend the 'regs' field dynamically */
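/*
 * Resulting buffer layout (a sketch): the DynCallArgs header (res, ret,
 * fregs, has_fp, nstack_args, regs [PARAM_REGS]), then nstack_args extra
 * slots appended to 'regs' for stack arguments, then nullable_area bytes
 * of temp space used to unbox Nullable<T> arguments.
 */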
return sizeof (DynCallArgs) + (ainfo->nstack_args * sizeof (target_mgreg_t)) + ainfo->nullable_area;
}
#define PTR_TO_GREG(ptr) ((host_mgreg_t)(ptr))
#define GREG_TO_PTR(greg) ((gpointer)(greg))
/*
* mono_arch_get_start_dyn_call:
*
* Convert the arguments ARGS to a format which can be passed to OP_DYN_CALL, and
* store the result into BUF.
* ARGS should be an array of pointers pointing to the arguments.
* RET should point to a memory buffer large enough to hold the result of the
* call.
* This function should be as fast as possible; any work which does not depend
* on the actual values of the arguments should be done in
* mono_arch_dyn_call_prepare ().
* start_dyn_call + OP_DYN_CALL + finish_dyn_call is equivalent to ffi_call in
* libffi.
*/
void
mono_arch_start_dyn_call (MonoDynCallInfo *info, gpointer **args, guint8 *ret, guint8 *buf)
{
ArchDynCallInfo *dinfo = (ArchDynCallInfo*)info;
DynCallArgs *p = (DynCallArgs*)buf;
int arg_index, greg, i, pindex;
MonoMethodSignature *sig = dinfo->sig;
int buffer_offset = 0;
guint8 *nullable_buffer;
static int general_param_reg_to_index [MONO_MAX_IREGS];
static int float_param_reg_to_index [MONO_MAX_FREGS];
static gboolean param_reg_to_index_inited;
if (!param_reg_to_index_inited) {
for (i = 0; i < PARAM_REGS; ++i)
general_param_reg_to_index [param_regs[i]] = i;
for (i = 0; i < FLOAT_PARAM_REGS; ++i)
float_param_reg_to_index [float_param_regs[i]] = i;
mono_memory_barrier ();
param_reg_to_index_inited = 1;
} else {
mono_memory_barrier ();
}
p->res = 0;
p->ret = ret;
p->nstack_args = dinfo->nstack_args;
arg_index = 0;
greg = 0;
pindex = 0;
/* Stored after the stack arguments */
nullable_buffer = (guint8*)&(p->regs [PARAM_REGS + dinfo->nstack_args]);
if (sig->hasthis || dinfo->cinfo->vret_arg_index == 1) {
p->regs [greg ++] = PTR_TO_GREG(*(args [arg_index ++]));
if (!sig->hasthis)
pindex = 1;
}
if (dinfo->cinfo->ret.storage == ArgValuetypeAddrInIReg || dinfo->cinfo->ret.storage == ArgGsharedvtVariableInReg)
p->regs [greg ++] = PTR_TO_GREG (ret);
for (; pindex < sig->param_count; pindex++) {
MonoType *t = mini_get_underlying_type (sig->params [pindex]);
gpointer *arg = args [arg_index ++];
ArgInfo *ainfo = &dinfo->cinfo->args [pindex + sig->hasthis];
int slot;
if (ainfo->storage == ArgOnStack || ainfo->storage == ArgValuetypeAddrOnStack) {
slot = PARAM_REGS + (ainfo->offset / sizeof (target_mgreg_t));
} else if (ainfo->storage == ArgValuetypeAddrInIReg) {
g_assert (ainfo->pair_storage [0] == ArgInIReg && ainfo->pair_storage [1] == ArgNone);
slot = general_param_reg_to_index [ainfo->pair_regs [0]];
} else if (ainfo->storage == ArgInFloatSSEReg || ainfo->storage == ArgInDoubleSSEReg) {
slot = float_param_reg_to_index [ainfo->reg];
} else {
slot = general_param_reg_to_index [ainfo->reg];
}
if (m_type_is_byref (t)) {
p->regs [slot] = PTR_TO_GREG (*(arg));
continue;
}
switch (t->type) {
case MONO_TYPE_OBJECT:
case MONO_TYPE_PTR:
case MONO_TYPE_I:
case MONO_TYPE_U:
#if !defined(MONO_ARCH_ILP32)
case MONO_TYPE_I8:
case MONO_TYPE_U8:
#endif
p->regs [slot] = PTR_TO_GREG (*(arg));
break;
#if defined(MONO_ARCH_ILP32)
case MONO_TYPE_I8:
case MONO_TYPE_U8:
p->regs [slot] = *(guint64*)(arg);
break;
#endif
case MONO_TYPE_U1:
p->regs [slot] = *(guint8*)(arg);
break;
case MONO_TYPE_I1:
p->regs [slot] = *(gint8*)(arg);
break;
case MONO_TYPE_I2:
p->regs [slot] = *(gint16*)(arg);
break;
case MONO_TYPE_U2:
p->regs [slot] = *(guint16*)(arg);
break;
case MONO_TYPE_I4:
p->regs [slot] = *(gint32*)(arg);
break;
case MONO_TYPE_U4:
p->regs [slot] = *(guint32*)(arg);
break;
case MONO_TYPE_R4: {
double d;
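/* Copy the raw float bits into the low 32 bits of the 8 byte slot, without converting to double; the callee reads it back as a float. */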
*(float*)&d = *(float*)(arg);
if (ainfo->storage == ArgOnStack) {
*(double *)(p->regs + slot) = d;
} else {
p->has_fp = 1;
p->fregs [slot] = d;
}
break;
}
case MONO_TYPE_R8:
if (ainfo->storage == ArgOnStack) {
*(double *)(p->regs + slot) = *(double*)(arg);
} else {
p->has_fp = 1;
p->fregs [slot] = *(double*)(arg);
}
break;
case MONO_TYPE_GENERICINST:
if (MONO_TYPE_IS_REFERENCE (t)) {
p->regs [slot] = PTR_TO_GREG (*(arg));
break;
} else if (mono_class_is_nullable (mono_class_from_mono_type_internal (t))) {
MonoClass *klass = mono_class_from_mono_type_internal (t);
guint8 *nullable_buf;
int size;
size = mono_class_value_size (klass, NULL);
if (ainfo->storage == ArgValuetypeInReg || ainfo->storage == ArgOnStack) {
nullable_buf = g_alloca (size);
} else {
nullable_buf = nullable_buffer + buffer_offset;
buffer_offset += size;
g_assert (buffer_offset <= dinfo->nullable_area);
}
/* The argument pointed to by arg is either a boxed vtype or null */
mono_nullable_init (nullable_buf, (MonoObject*)arg, klass);
arg = (gpointer*)nullable_buf;
/* Fall through */
} else {
/* Fall through */
}
case MONO_TYPE_VALUETYPE: {
switch (ainfo->storage) {
case ArgValuetypeInReg:
for (i = 0; i < 2; ++i) {
switch (ainfo->pair_storage [i]) {
case ArgNone:
break;
case ArgInIReg:
slot = general_param_reg_to_index [ainfo->pair_regs [i]];
p->regs [slot] = ((target_mgreg_t*)(arg))[i];
break;
case ArgInFloatSSEReg: {
double d;
p->has_fp = 1;
slot = float_param_reg_to_index [ainfo->pair_regs [i]];
*(float*)&d = ((float*)(arg))[i];
p->fregs [slot] = d;
break;
}
case ArgInDoubleSSEReg:
p->has_fp = 1;
slot = float_param_reg_to_index [ainfo->pair_regs [i]];
p->fregs [slot] = ((double*)(arg))[i];
break;
default:
g_assert_not_reached ();
break;
}
}
break;
case ArgValuetypeAddrInIReg:
case ArgValuetypeAddrOnStack:
// In the dyn call case, value types are already copied when they are
// included in the parameter array, so there is currently no need to make
// an extra temporary copy on the stack.
p->regs [slot] = (target_mgreg_t)arg;
break;
case ArgOnStack:
for (i = 0; i < ainfo->arg_size / 8; ++i)
p->regs [slot + i] = ((target_mgreg_t*)(arg))[i];
break;
default:
g_assert_not_reached ();
break;
}
break;
}
default:
g_assert_not_reached ();
}
}
}
/*
* mono_arch_finish_dyn_call:
*
* Store the result of a dyn call into the return value buffer passed to
* start_dyn_call ().
* This function should be as fast as possible; any work which does not depend
* on the actual values of the arguments should be done in
* mono_arch_dyn_call_prepare ().
*/
void
mono_arch_finish_dyn_call (MonoDynCallInfo *info, guint8 *buf)
{
ArchDynCallInfo *dinfo = (ArchDynCallInfo*)info;
MonoMethodSignature *sig = dinfo->sig;
DynCallArgs *dargs = (DynCallArgs*)buf;
guint8 *ret = dargs->ret;
host_mgreg_t res = dargs->res;
MonoType *sig_ret = mini_get_underlying_type (sig->ret);
int i;
switch (sig_ret->type) {
case MONO_TYPE_VOID:
*(gpointer*)ret = NULL;
break;
case MONO_TYPE_OBJECT:
case MONO_TYPE_I:
case MONO_TYPE_U:
case MONO_TYPE_PTR:
*(gpointer*)ret = GREG_TO_PTR (res);
break;
case MONO_TYPE_I1:
*(gint8*)ret = res;
break;
case MONO_TYPE_U1:
*(guint8*)ret = res;
break;
case MONO_TYPE_I2:
*(gint16*)ret = res;
break;
case MONO_TYPE_U2:
*(guint16*)ret = res;
break;
case MONO_TYPE_I4:
*(gint32*)ret = res;
break;
case MONO_TYPE_U4:
*(guint32*)ret = res;
break;
case MONO_TYPE_I8:
*(gint64*)ret = res;
break;
case MONO_TYPE_U8:
*(guint64*)ret = res;
break;
case MONO_TYPE_R4:
*(float*)ret = *(float*)&(dargs->fregs [0]);
break;
case MONO_TYPE_R8:
*(double*)ret = dargs->fregs [0];
break;
case MONO_TYPE_GENERICINST:
if (MONO_TYPE_IS_REFERENCE (sig_ret)) {
*(gpointer*)ret = GREG_TO_PTR(res);
break;
} else {
/* Fall through */
}
case MONO_TYPE_VALUETYPE:
if (dinfo->cinfo->ret.storage == ArgValuetypeAddrInIReg || dinfo->cinfo->ret.storage == ArgGsharedvtVariableInReg) {
/* Nothing to do */
} else {
ArgInfo *ainfo = &dinfo->cinfo->ret;
g_assert (ainfo->storage == ArgValuetypeInReg);
for (i = 0; i < 2; ++i) {
switch (ainfo->pair_storage [0]) {
case ArgInIReg:
((host_mgreg_t*)ret)[i] = res;
break;
case ArgInDoubleSSEReg:
((double*)ret)[i] = dargs->fregs [i];
break;
case ArgNone:
break;
default:
g_assert_not_reached ();
break;
}
}
}
break;
default:
g_assert_not_reached ();
}
}
#undef PTR_TO_GREG
#undef GREG_TO_PTR
/* emit an exception if the condition fails */
#define EMIT_COND_SYSTEM_EXCEPTION(cond,signed,exc_name) \
do { \
MonoInst *tins = mono_branch_optimize_exception_target (cfg, bb, exc_name); \
if (tins == NULL) { \
mono_add_patch_info (cfg, code - cfg->native_code, \
MONO_PATCH_INFO_EXC, exc_name); \
x86_branch32 (code, cond, 0, signed); \
} else { \
EMIT_COND_BRANCH (tins, cond, signed); \
} \
} while (0);
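/*
 * EMIT_SSE2_FPFUNC: helper for fp operations which only exist as x87
 * instructions: spill the SSE register to the 8 bytes just below RSP, run
 * the x87 op there, then load the result back into an SSE register. (This
 * assumes those bytes are safe to use, e.g. the SysV red zone.)
 */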
#define EMIT_SSE2_FPFUNC(code, op, dreg, sreg1) do { \
amd64_movsd_membase_reg (code, AMD64_RSP, -8, (sreg1)); \
amd64_fld_membase (code, AMD64_RSP, -8, TRUE); \
amd64_ ##op (code); \
amd64_fst_membase (code, AMD64_RSP, -8, TRUE, TRUE); \
amd64_movsd_reg_membase (code, (dreg), AMD64_RSP, -8); \
} while (0);
#ifndef DISABLE_JIT
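/*
 * A note on the two call encodings used by emit_call () below (a sketch):
 * a "near" call is a 5 byte `call rel32`, so it requires the target to be
 * within a 32 bit displacement, and the displacement is kept 4-byte aligned
 * so it can be patched atomically; otherwise the full 64 bit target is
 * loaded into GP_SCRATCH_REG and an indirect `call reg` is emitted.
 */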
static guint8*
emit_call (MonoCompile *cfg, MonoCallInst *call, guint8 *code, MonoJitICallId jit_icall_id)
{
gboolean no_patch = FALSE;
MonoJumpInfoTarget patch;
// FIXME? This is similar to mono_call_to_patch, except it favors MONO_PATCH_INFO_ABS over call->jit_icall_id.
if (jit_icall_id) {
g_assert (!call);
patch.type = MONO_PATCH_INFO_JIT_ICALL_ID;
patch.target = GUINT_TO_POINTER (jit_icall_id);
} else if (call->inst.flags & MONO_INST_HAS_METHOD) {
patch.type = MONO_PATCH_INFO_METHOD;
patch.target = call->method;
} else {
patch.type = MONO_PATCH_INFO_ABS;
patch.target = call->fptr;
}
/*
* FIXME: Add support for thunks
*/
{
gboolean near_call = FALSE;
/*
* Indirect calls are expensive so try to make a near call if possible.
* The caller memory is allocated by the code manager so it is
* guaranteed to be at a 32 bit offset.
*/
if (patch.type != MONO_PATCH_INFO_ABS) {
/* The target is in memory allocated using the code manager */
near_call = TRUE;
if (patch.type == MONO_PATCH_INFO_METHOD) {
MonoMethod* const method = call->method;
if (m_class_get_image (method->klass)->aot_module)
/* The callee might be an AOT method */
near_call = FALSE;
if (method->dynamic)
/* The target is in malloc-ed memory */
near_call = FALSE;
} else {
/*
* The call might go directly to a native function without
* the wrapper.
*/
MonoJitICallInfo * const mi = mono_find_jit_icall_info (jit_icall_id);
gconstpointer target = mono_icall_get_wrapper (mi);
if ((((guint64)target) >> 32) != 0)
near_call = FALSE;
}
} else {
MonoJumpInfo *jinfo = NULL;
if (cfg->abs_patches)
jinfo = (MonoJumpInfo *)g_hash_table_lookup (cfg->abs_patches, call->fptr);
if (jinfo) {
if (jinfo->type == MONO_PATCH_INFO_JIT_ICALL_ADDR) {
MonoJitICallInfo *mi = mono_find_jit_icall_info (jinfo->data.jit_icall_id);
if (mi && (((guint64)mi->func) >> 32) == 0)
near_call = TRUE;
no_patch = TRUE;
} else {
/*
* This is not really an optimization, but required because the
* generic class init trampolines use R11 to pass the vtable.
*/
near_call = TRUE;
}
} else {
jit_icall_id = call->jit_icall_id;
if (jit_icall_id) {
MonoJitICallInfo const *info = mono_find_jit_icall_info (jit_icall_id);
// Change patch from MONO_PATCH_INFO_ABS to MONO_PATCH_INFO_JIT_ICALL_ID.
patch.type = MONO_PATCH_INFO_JIT_ICALL_ID;
patch.target = GUINT_TO_POINTER (jit_icall_id);
if (info->func == info->wrapper) {
/* No wrapper */
if ((((guint64)info->func) >> 32) == 0)
near_call = TRUE;
} else {
/* See the comment in mono_codegen () */
near_call = TRUE;
}
}
else if ((((guint64)patch.target) >> 32) == 0) {
near_call = TRUE;
no_patch = TRUE;
}
}
}
if (cfg->method->dynamic)
/* These methods are allocated using malloc */
near_call = FALSE;
#ifdef MONO_ARCH_NOMAP32BIT
near_call = FALSE;
#endif
/* The 64bit XEN kernel does not honour the MAP_32BIT flag. (#522894) */
if (optimize_for_xen)
near_call = FALSE;
if (cfg->compile_aot) {
near_call = TRUE;
no_patch = TRUE;
}
if (near_call) {
/*
* Align the call displacement to an address divisible by 4 so it does
* not span cache lines. This is required for code patching to work on SMP
* systems.
*/
if (!no_patch && ((guint32)(code + 1 - cfg->native_code) % 4) != 0) {
guint32 pad_size = 4 - ((guint32)(code + 1 - cfg->native_code) % 4);
amd64_padding (code, pad_size);
}
mono_add_patch_info (cfg, code - cfg->native_code, patch.type, patch.target);
amd64_call_code (code, 0);
}
else {
if (!no_patch && ((guint32)(code + 2 - cfg->native_code) % 8) != 0) {
guint32 pad_size = 8 - ((guint32)(code + 2 - cfg->native_code) % 8);
amd64_padding (code, pad_size);
g_assert ((guint64)(code + 2 - cfg->native_code) % 8 == 0);
}
mono_add_patch_info (cfg, code - cfg->native_code, patch.type, patch.target);
amd64_set_reg_template (code, GP_SCRATCH_REG);
amd64_call_reg (code, GP_SCRATCH_REG);
}
}
set_code_cursor (cfg, code);
return code;
}
static int
store_membase_imm_to_store_membase_reg (int opcode)
{
switch (opcode) {
case OP_STORE_MEMBASE_IMM:
return OP_STORE_MEMBASE_REG;
case OP_STOREI4_MEMBASE_IMM:
return OP_STOREI4_MEMBASE_REG;
case OP_STOREI8_MEMBASE_IMM:
return OP_STOREI8_MEMBASE_REG;
}
return -1;
}
#define INST_IGNORES_CFLAGS(opcode) (!(((opcode) == OP_ADC) || ((opcode) == OP_ADC_IMM) || ((opcode) == OP_IADC) || ((opcode) == OP_IADC_IMM) || ((opcode) == OP_SBB) || ((opcode) == OP_SBB_IMM) || ((opcode) == OP_ISBB) || ((opcode) == OP_ISBB_IMM)))
/*
* mono_arch_peephole_pass_1:
*
* Perform peephole opts which should/can be performed before local regalloc
*/
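/*
 * For example, in the initlocals bblock
 *   xor ireg, ireg          ; ireg = 0
 *   store [local], imm 0
 * the store-immediate is rewritten below to store the already-zeroed
 * register instead, which has a much shorter encoding.
 */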
void
mono_arch_peephole_pass_1 (MonoCompile *cfg, MonoBasicBlock *bb)
{
MonoInst *ins, *n;
MONO_BB_FOR_EACH_INS_SAFE (bb, n, ins) {
MonoInst *last_ins = mono_inst_prev (ins, FILTER_IL_SEQ_POINT);
switch (ins->opcode) {
case OP_ADD_IMM:
case OP_IADD_IMM:
case OP_LADD_IMM:
if ((ins->sreg1 < MONO_MAX_IREGS) && (ins->dreg >= MONO_MAX_IREGS) && (ins->inst_imm > 0)) {
/*
* X86_LEA is like ADD, but doesn't have the
* sreg1==dreg restriction. inst_imm > 0 is needed since LEA sign-extends
* its operand to 64 bit.
*/
ins->opcode = ins->opcode == OP_IADD_IMM ? OP_X86_LEA_MEMBASE : OP_AMD64_LEA_MEMBASE;
ins->inst_basereg = ins->sreg1;
}
break;
case OP_LXOR:
case OP_IXOR:
if ((ins->sreg1 == ins->sreg2) && (ins->sreg1 == ins->dreg)) {
MonoInst *ins2;
/*
* Replace STORE_MEMBASE_IMM 0 with STORE_MEMBASE_REG since
* the latter has length 2-3 instead of 6 (reverse constant
* propagation). These instruction sequences are very common
* in the initlocals bblock.
*/
for (ins2 = ins->next; ins2; ins2 = ins2->next) {
if (((ins2->opcode == OP_STORE_MEMBASE_IMM) || (ins2->opcode == OP_STOREI4_MEMBASE_IMM) || (ins2->opcode == OP_STOREI8_MEMBASE_IMM)) && (ins2->inst_imm == 0)) {
ins2->opcode = store_membase_imm_to_store_membase_reg (ins2->opcode);
ins2->sreg1 = ins->dreg;
} else if ((ins2->opcode == OP_STOREI1_MEMBASE_IMM) || (ins2->opcode == OP_STOREI2_MEMBASE_IMM) || (ins2->opcode == OP_STOREI8_MEMBASE_REG) || (ins2->opcode == OP_STORE_MEMBASE_REG)) {
/* Continue */
} else if (((ins2->opcode == OP_ICONST) || (ins2->opcode == OP_I8CONST)) && (ins2->dreg == ins->dreg) && (ins2->inst_c0 == 0)) {
NULLIFY_INS (ins2);
/* Continue */
} else if (ins2->opcode == OP_IL_SEQ_POINT) {
/* Continue */
} else {
break;
}
}
}
break;
case OP_COMPARE_IMM:
case OP_LCOMPARE_IMM:
/* OP_COMPARE_IMM (reg, 0)
* -->
* OP_AMD64_TEST_NULL (reg)
*/
if (!ins->inst_imm)
ins->opcode = OP_AMD64_TEST_NULL;
break;
case OP_ICOMPARE_IMM:
if (!ins->inst_imm)
ins->opcode = OP_X86_TEST_NULL;
break;
case OP_AMD64_ICOMPARE_MEMBASE_IMM:
/*
* OP_STORE_MEMBASE_REG reg, offset(basereg)
* OP_X86_COMPARE_MEMBASE_IMM offset(basereg), imm
* -->
* OP_STORE_MEMBASE_REG reg, offset(basereg)
* OP_COMPARE_IMM reg, imm
*
* Note: if imm = 0 then OP_COMPARE_IMM replaced with OP_X86_TEST_NULL
*/
if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_REG) &&
ins->inst_basereg == last_ins->inst_destbasereg &&
ins->inst_offset == last_ins->inst_offset) {
ins->opcode = OP_ICOMPARE_IMM;
ins->sreg1 = last_ins->sreg1;
/* check if we can remove cmp reg,0 with test null */
if (!ins->inst_imm)
ins->opcode = OP_X86_TEST_NULL;
}
break;
}
mono_peephole_ins (bb, ins);
}
}
void
mono_arch_peephole_pass_2 (MonoCompile *cfg, MonoBasicBlock *bb)
{
MonoInst *ins, *n;
MONO_BB_FOR_EACH_INS_SAFE (bb, n, ins) {
switch (ins->opcode) {
case OP_ICONST:
case OP_I8CONST: {
MonoInst *next = mono_inst_next (ins, FILTER_IL_SEQ_POINT);
/* reg = 0 -> XOR (reg, reg) */
/* XOR sets cflags on x86, so we can't always do it */
if (ins->inst_c0 == 0 && (!next || INST_IGNORES_CFLAGS (next->opcode))) {
ins->opcode = OP_LXOR;
ins->sreg1 = ins->dreg;
ins->sreg2 = ins->dreg;
/* Fall through */
} else {
break;
}
}
case OP_LXOR:
/*
* Use IXOR to avoid a rex prefix if possible. The cpu will zero extend the
* 32 bit result into 64 bits.
*/
if ((ins->sreg1 == ins->sreg2) && (ins->sreg1 == ins->dreg)) {
ins->opcode = OP_IXOR;
}
/* Fall through */
case OP_IXOR:
if ((ins->sreg1 == ins->sreg2) && (ins->sreg1 == ins->dreg)) {
MonoInst *ins2;
/*
* Replace STORE_MEMBASE_IMM 0 with STORE_MEMBASE_REG since
* the latter has length 2-3 instead of 6 (reverse constant
* propagation). These instruction sequences are very common
* in the initlocals bblock.
*/
for (ins2 = ins->next; ins2; ins2 = ins2->next) {
if (((ins2->opcode == OP_STORE_MEMBASE_IMM) || (ins2->opcode == OP_STOREI4_MEMBASE_IMM) || (ins2->opcode == OP_STOREI8_MEMBASE_IMM)) && (ins2->inst_imm == 0)) {
ins2->opcode = store_membase_imm_to_store_membase_reg (ins2->opcode);
ins2->sreg1 = ins->dreg;
} else if ((ins2->opcode == OP_STOREI1_MEMBASE_IMM) || (ins2->opcode == OP_STOREI2_MEMBASE_IMM) || (ins2->opcode == OP_STOREI4_MEMBASE_REG) || (ins2->opcode == OP_STOREI8_MEMBASE_REG) || (ins2->opcode == OP_STORE_MEMBASE_REG) || (ins2->opcode == OP_LIVERANGE_START) || (ins2->opcode == OP_GC_LIVENESS_DEF) || (ins2->opcode == OP_GC_LIVENESS_USE)) {
/* Continue */
} else if (((ins2->opcode == OP_ICONST) || (ins2->opcode == OP_I8CONST)) && (ins2->dreg == ins->dreg) && (ins2->inst_c0 == 0)) {
NULLIFY_INS (ins2);
/* Continue */
} else if (ins2->opcode == OP_IL_SEQ_POINT) {
/* Continue */
} else {
break;
}
}
}
break;
case OP_IADD_IMM:
if ((ins->inst_imm == 1) && (ins->dreg == ins->sreg1))
ins->opcode = OP_X86_INC_REG;
break;
case OP_ISUB_IMM:
if ((ins->inst_imm == 1) && (ins->dreg == ins->sreg1))
ins->opcode = OP_X86_DEC_REG;
break;
}
mono_peephole_ins (bb, ins);
}
}
#define NEW_INS(cfg,ins,dest,op) do { \
MONO_INST_NEW ((cfg), (dest), (op)); \
(dest)->cil_code = (ins)->cil_code; \
mono_bblock_insert_before_ins (bb, ins, (dest)); \
} while (0)
#define NEW_SIMD_INS(cfg,ins,dest,op,d,s1,s2) do { \
MONO_INST_NEW ((cfg), (dest), (op)); \
(dest)->cil_code = (ins)->cil_code; \
(dest)->dreg = d; \
(dest)->sreg1 = s1; \
(dest)->sreg2 = s2; \
(dest)->type = STACK_VTYPE; \
(dest)->klass = ins->klass; \
mono_bblock_insert_before_ins (bb, ins, (dest)); \
} while (0)
static int
simd_type_to_comp_op (int t)
{
switch (t) {
case MONO_TYPE_I1:
case MONO_TYPE_U1:
return OP_PCMPEQB;
case MONO_TYPE_I2:
case MONO_TYPE_U2:
return OP_PCMPEQW;
case MONO_TYPE_I4:
case MONO_TYPE_U4:
return OP_PCMPEQD;
case MONO_TYPE_I8:
case MONO_TYPE_U8:
return OP_PCMPEQQ; // SSE 4.1
case MONO_TYPE_I:
case MONO_TYPE_U:
#if TARGET_SIZEOF_VOID_P == 8
return OP_PCMPEQQ; // SSE 4.1
#else
return OP_PCMPEQD;
#endif
default:
g_assert_not_reached ();
return -1;
}
}
static int
simd_type_to_sub_op (int t)
{
switch (t) {
case MONO_TYPE_I1:
case MONO_TYPE_U1:
return OP_PSUBB;
case MONO_TYPE_I2:
case MONO_TYPE_U2:
return OP_PSUBW;
case MONO_TYPE_I4:
case MONO_TYPE_U4:
return OP_PSUBD;
case MONO_TYPE_I8:
case MONO_TYPE_U8:
return OP_PSUBQ;
case MONO_TYPE_I:
case MONO_TYPE_U:
#if TARGET_SIZEOF_VOID_P == 8
return OP_PSUBQ;
#else
return OP_PSUBD;
#endif
default:
g_assert_not_reached ();
return -1;
}
}
static int
simd_type_to_shl_op (int t)
{
switch (t) {
case MONO_TYPE_I2:
case MONO_TYPE_U2:
return OP_PSHLW;
case MONO_TYPE_I4:
case MONO_TYPE_U4:
return OP_PSHLD;
case MONO_TYPE_I8:
case MONO_TYPE_U8:
return OP_PSHLQ;
case MONO_TYPE_I:
case MONO_TYPE_U:
#if TARGET_SIZEOF_VOID_P == 8
return OP_PSHLQ;
#else
return OP_PSHLD;
#endif
default:
g_assert_not_reached ();
return -1;
}
}
static int
simd_type_to_gt_op (int t)
{
switch (t) {
case MONO_TYPE_I1:
case MONO_TYPE_U1:
return OP_PCMPGTB;
case MONO_TYPE_I2:
case MONO_TYPE_U2:
return OP_PCMPGTW;
case MONO_TYPE_I4:
case MONO_TYPE_U4:
return OP_PCMPGTD;
case MONO_TYPE_I8:
case MONO_TYPE_U8:
return OP_PCMPGTQ; // SSE 4.2
case MONO_TYPE_I:
case MONO_TYPE_U:
#if TARGET_SIZEOF_VOID_P == 8
return OP_PCMPGTQ; // SSE 4.2
#else
return OP_PCMPGTD;
#endif
default:
g_assert_not_reached ();
return -1;
}
}
static int
simd_type_to_max_un_op (int t)
{
switch (t) {
case MONO_TYPE_U1:
return OP_PMAXB_UN;
case MONO_TYPE_U2:
return OP_PMAXW_UN; // SSE 4.1
case MONO_TYPE_U4:
return OP_PMAXD_UN; // SSE 4.1
//case MONO_TYPE_U8:
// return OP_PMAXQ_UN; // AVX
#if TARGET_SIZEOF_VOID_P == 8
//case MONO_TYPE_U:
// return OP_PMAXQ_UN; // AVX
#else
case MONO_TYPE_U:
return OP_PMAXD_UN; // SSE 4.1
#endif
default:
g_assert_not_reached ();
return -1;
}
}
static int
simd_type_to_add_op (int t)
{
switch (t) {
case MONO_TYPE_I1:
case MONO_TYPE_U1:
return OP_PADDB;
case MONO_TYPE_I2:
case MONO_TYPE_U2:
return OP_PADDW;
case MONO_TYPE_I4:
case MONO_TYPE_U4:
return OP_PADDD;
case MONO_TYPE_I8:
case MONO_TYPE_U8:
return OP_PADDQ;
case MONO_TYPE_I:
case MONO_TYPE_U:
#if TARGET_SIZEOF_VOID_P == 8
return OP_PADDQ;
#else
return OP_PADDD;
#endif
default:
g_assert_not_reached ();
return -1;
}
}
static int
simd_type_to_min_op (int t)
{
switch (t) {
case MONO_TYPE_I1:
return OP_PMINB; // SSE 4.1
case MONO_TYPE_U1:
return OP_PMINB_UN; // SSE 4.1
case MONO_TYPE_I2:
return OP_PMINW;
case MONO_TYPE_U2:
return OP_PMINW_UN;
case MONO_TYPE_I4:
return OP_PMIND; // SSE 4.1
case MONO_TYPE_U4:
return OP_PMIND_UN; // SSE 4.1
// case MONO_TYPE_I8: // AVX
// case MONO_TYPE_U8:
#if TARGET_SIZEOF_VOID_P == 8
//case MONO_TYPE_I: // AVX
//case MONO_TYPE_U:
#else
case MONO_TYPE_I:
return OP_PMIND; // SSE 4.1
case MONO_TYPE_U:
return OP_PMIND_UN; // SSE 4.1
#endif
default:
g_assert_not_reached ();
return -1;
}
}
static int
simd_type_to_max_op (int t)
{
switch (t) {
case MONO_TYPE_I1:
return OP_PMAXB; // SSE 4.1
case MONO_TYPE_U1:
return OP_PMAXB_UN; // SSE 4.1
case MONO_TYPE_I2:
return OP_PMAXW;
case MONO_TYPE_U2:
return OP_PMAXW_UN;
case MONO_TYPE_I4:
return OP_PMAXD; // SSE 4.1
case MONO_TYPE_U4:
return OP_PMAXD_UN; // SSE 4.1
// case MONO_TYPE_I8: // AVX
// case MONO_TYPE_U8:
#if TARGET_SIZEOF_VOID_P == 8
//case MONO_TYPE_I: // AVX
//case MONO_TYPE_U:
#else
case MONO_TYPE_I:
return OP_PMAXD; // SSE 4.1
case MONO_TYPE_U:
return OP_PMAXD_UN; // SSE 4.1
#endif
default:
g_assert_not_reached ();
return -1;
}
}
static void
emit_simd_comp_op (MonoCompile *cfg, MonoBasicBlock *bb, MonoInst *ins, int type, int dreg, int sreg1, int sreg2)
{
MonoInst *temp;
gboolean is64BitNativeInt = FALSE;
#if TARGET_SIZEOF_VOID_P == 8
is64BitNativeInt = ins->inst_c1 == MONO_TYPE_I || ins->inst_c1 == MONO_TYPE_U;
#endif
if (!mono_hwcap_x86_has_sse42 && (ins->inst_c1 == MONO_TYPE_I8 || ins->inst_c1 == MONO_TYPE_U8 || is64BitNativeInt)) {
int temp_reg1 = mono_alloc_ireg (cfg);
int temp_reg2 = mono_alloc_ireg (cfg);
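/* No 64 bit PCMPEQQ available: compare dwords, then AND each dword's result
   with its neighbour's (the 0xB1 shuffle swaps adjacent dwords), so a qword
   lane is all-ones only if both of its dwords compared equal. */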
NEW_SIMD_INS (cfg, ins, temp, OP_PCMPEQD, temp_reg1, sreg1, sreg2);
NEW_SIMD_INS (cfg, ins, temp, OP_PSHUFLED, temp_reg2, temp_reg1, -1);
temp->inst_c0 = 0xB1;
NEW_SIMD_INS (cfg, ins, temp, OP_ANDPD, dreg, temp_reg1, temp_reg2);
} else {
NEW_SIMD_INS (cfg, ins, temp, simd_type_to_comp_op (type), dreg, sreg1, sreg2);
}
}
static void
emit_simd_gt_op (MonoCompile *cfg, MonoBasicBlock *bb, MonoInst *ins, int type, int dreg, int sreg1, int sreg2);
static void
emit_simd_gt_un_op (MonoCompile *cfg, MonoBasicBlock *bb, MonoInst *ins, int type, int dreg, int sreg1, int sreg2)
{
MonoInst *temp;
switch (type) {
case MONO_TYPE_U2:
case MONO_TYPE_U4:
if (mono_hwcap_x86_has_sse41)
goto USE_MAX;
goto USE_SIGNED_GT;
case MONO_TYPE_U1:
USE_MAX: {
// dreg = max(sreg1, sreg2) != sreg2
int temp_reg1 = mono_alloc_ireg (cfg);
int temp_reg2 = mono_alloc_ireg (cfg);
int temp_reg3 = mono_alloc_ireg (cfg);
NEW_SIMD_INS (cfg, ins, temp, simd_type_to_max_un_op (type), temp_reg1, sreg1, sreg2);
emit_simd_comp_op (cfg, bb, ins, ins->inst_c1, temp_reg2, temp_reg1, ins->sreg2);
NEW_SIMD_INS (cfg, ins, temp, OP_XONES, temp_reg3, -1, -1);
NEW_SIMD_INS (cfg, ins, temp, OP_XORPD, dreg, temp_reg2, temp_reg3);
break;
}
case MONO_TYPE_U8:
USE_SIGNED_GT: {
// convert to signed integer by subtracting (1 << (size - 1)) from each operand
// and then use signed comparison
int temp_c0 = mono_alloc_ireg (cfg);
int temp_c80 = mono_alloc_ireg (cfg);
int temp_s1 = mono_alloc_ireg (cfg);
int temp_s2 = mono_alloc_ireg (cfg);
NEW_SIMD_INS (cfg, ins, temp, OP_XONES, temp_c0, -1, -1);
NEW_SIMD_INS (cfg, ins, temp, simd_type_to_shl_op (type), temp_c80, temp_c0, -1);
temp->inst_imm = type == MONO_TYPE_U2 ? 15 : (type == MONO_TYPE_U4 ? 31 : 63);
NEW_SIMD_INS (cfg, ins, temp, simd_type_to_sub_op (type), temp_s1, sreg1, temp_c80);
NEW_SIMD_INS (cfg, ins, temp, simd_type_to_sub_op (type), temp_s2, sreg2, temp_c80);
emit_simd_gt_op (cfg, bb, ins, type, dreg, temp_s1, temp_s2);
break;
}
case MONO_TYPE_U:
#if TARGET_SIZEOF_VOID_P == 8
goto USE_SIGNED_GT;
#else
if (mono_hwcap_x86_has_sse41)
goto USE_MAX;
goto USE_SIGNED_GT;
#endif
}
}
static void
emit_simd_gt_op (MonoCompile *cfg, MonoBasicBlock *bb, MonoInst *ins, int type, int dreg, int sreg1, int sreg2)
{
MonoInst *temp;
gboolean is64BitNativeInt = FALSE;
#if TARGET_SIZEOF_VOID_P == 8
is64BitNativeInt = ins->inst_c1 == MONO_TYPE_I || ins->inst_c1 == MONO_TYPE_U;
#endif
if (!mono_hwcap_x86_has_sse42 && (type == MONO_TYPE_I8 || type == MONO_TYPE_U8 || is64BitNativeInt)) {
// Decompose 64-bit greater than to 32-bit
//
// t = (v1 > v2)
// u = (v1 == v2)
// v = (v1 > v2) unsigned
//
// z = shuffle(t, (3, 3, 1, 1))
// t1 = shuffle(v, (2, 2, 0, 0))
// u1 = shuffle(u, (3, 3, 1, 1))
// w = and(t1, u1)
// result = bitwise_or(z, w)
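// Intuition: a qword of v1 is greater if its high dword is greater (z), or
// the high dwords are equal and the low dword is greater unsigned (w); the
// shuffles broadcast each dword's result across its whole qword.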
int temp_t = mono_alloc_ireg (cfg);
int temp_u = mono_alloc_ireg (cfg);
int temp_v = mono_alloc_ireg (cfg);
int temp_z = temp_t;
int temp_t1 = temp_v;
int temp_u1 = temp_u;
int temp_w = temp_t1;
NEW_SIMD_INS (cfg, ins, temp, OP_PCMPGTD, temp_t, sreg1, sreg2);
NEW_SIMD_INS (cfg, ins, temp, OP_PCMPEQD, temp_u, sreg1, sreg2);
emit_simd_gt_un_op (cfg, bb, ins, MONO_TYPE_U4, temp_v, sreg1, sreg2);
NEW_SIMD_INS (cfg, ins, temp, OP_PSHUFLED, temp_z, temp_t, -1);
temp->inst_c0 = 0xF5;
NEW_SIMD_INS (cfg, ins, temp, OP_PSHUFLED, temp_t1, temp_v, -1);
temp->inst_c0 = 0xA0;
NEW_SIMD_INS (cfg, ins, temp, OP_PSHUFLED, temp_u1, temp_u, -1);
temp->inst_c0 = 0xF5;
NEW_SIMD_INS (cfg, ins, temp, OP_ANDPD, temp_w, temp_t1, temp_u1);
NEW_SIMD_INS (cfg, ins, temp, OP_ORPD, dreg, temp_z, temp_w);
} else {
NEW_SIMD_INS (cfg, ins, temp, simd_type_to_gt_op (type), dreg, sreg1, sreg2);
}
}
static void
emit_simd_min_op (MonoCompile *cfg, MonoBasicBlock *bb, MonoInst *ins, int type, int dreg, int sreg1, int sreg2)
{
MonoInst *temp;
gboolean is64BitNativeInt = FALSE;
#if TARGET_SIZEOF_VOID_P == 8
is64BitNativeInt = ins->inst_c1 == MONO_TYPE_I || ins->inst_c1 == MONO_TYPE_U;
#endif
if (type == MONO_TYPE_I2 || type == MONO_TYPE_U2) {
// SSE2, so always available
NEW_SIMD_INS (cfg, ins, temp, simd_type_to_min_op (type), dreg, sreg1, sreg2);
} else if (!mono_hwcap_x86_has_sse41 || type == MONO_TYPE_I8 || type == MONO_TYPE_U8 || is64BitNativeInt) {
// Decompose to t = (s1 > s2), d = (s1 & !t) | (s2 & t)
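// i.e. per lane: select s2 where s1 > s2 and s1 elsewhere (mask-and-blend)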
int temp_t = mono_alloc_ireg (cfg);
int temp_d1 = mono_alloc_ireg (cfg);
int temp_d2 = mono_alloc_ireg (cfg);
if (type == MONO_TYPE_U8 || type == MONO_TYPE_U4 || type == MONO_TYPE_U1)
emit_simd_gt_un_op (cfg, bb, ins, type, temp_t, sreg1, sreg2);
else
emit_simd_gt_op (cfg, bb, ins, type, temp_t, sreg1, sreg2);
NEW_SIMD_INS (cfg, ins, temp, OP_PANDN, temp_d1, temp_t, sreg1);
NEW_SIMD_INS (cfg, ins, temp, OP_PAND, temp_d2, temp_t, sreg2);
NEW_SIMD_INS (cfg, ins, temp, OP_POR, dreg, temp_d1, temp_d2);
} else {
// SSE 4.1 has byte- and dword- operations
NEW_SIMD_INS (cfg, ins, temp, simd_type_to_min_op (type), dreg, sreg1, sreg2);
}
}
static void
emit_simd_max_op (MonoCompile *cfg, MonoBasicBlock *bb, MonoInst *ins, int type, int dreg, int sreg1, int sreg2)
{
MonoInst *temp;
gboolean is64BitNativeInt = FALSE;
#if TARGET_SIZEOF_VOID_P == 8
is64BitNativeInt = ins->inst_c1 == MONO_TYPE_I || ins->inst_c1 == MONO_TYPE_U;
#endif
if (type == MONO_TYPE_I2 || type == MONO_TYPE_U2) {
// SSE2, so always available
NEW_SIMD_INS (cfg, ins, temp, simd_type_to_max_op (type), dreg, sreg1, sreg2);
} else if (!mono_hwcap_x86_has_sse41 || type == MONO_TYPE_I8 || type == MONO_TYPE_U8 || is64BitNativeInt) {
// Decompose to t = (s1 > s2), d = (s1 & t) | (s2 & !t)
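		// Same mask-and-blend as emit_simd_min_op, with the operand roles
		// swapped so the element from the greater side is kept.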
int temp_t = mono_alloc_ireg (cfg);
int temp_d1 = mono_alloc_ireg (cfg);
int temp_d2 = mono_alloc_ireg (cfg);
if (type == MONO_TYPE_U8 || type == MONO_TYPE_U4 || type == MONO_TYPE_U1)
emit_simd_gt_un_op (cfg, bb, ins, type, temp_t, sreg1, sreg2);
else
emit_simd_gt_op (cfg, bb, ins, type, temp_t, sreg1, sreg2);
NEW_SIMD_INS (cfg, ins, temp, OP_PAND, temp_d1, temp_t, sreg1);
NEW_SIMD_INS (cfg, ins, temp, OP_PANDN, temp_d2, temp_t, sreg2);
NEW_SIMD_INS (cfg, ins, temp, OP_POR, dreg, temp_d1, temp_d2);
} else {
		// SSE 4.1 has byte- and dword-sized operations
NEW_SIMD_INS (cfg, ins, temp, simd_type_to_max_op (type), dreg, sreg1, sreg2);
}
}
/*
* mono_arch_lowering_pass:
*
* Converts complex opcodes into simpler ones so that each IR instruction
* corresponds to one machine instruction.
*/
void
mono_arch_lowering_pass (MonoCompile *cfg, MonoBasicBlock *bb)
{
MonoInst *ins, *n, *temp;
/*
* FIXME: Need to add more instructions, but the current machine
* description can't model some parts of the composite instructions like
* cdq.
*/
MONO_BB_FOR_EACH_INS_SAFE (bb, n, ins) {
switch (ins->opcode) {
case OP_DIV_IMM:
case OP_REM_IMM:
case OP_IDIV_IMM:
case OP_IDIV_UN_IMM:
case OP_IREM_UN_IMM:
case OP_LREM_IMM:
case OP_IREM_IMM:
mono_decompose_op_imm (cfg, bb, ins);
break;
case OP_COMPARE_IMM:
case OP_LCOMPARE_IMM:
if (!amd64_use_imm32 (ins->inst_imm)) {
NEW_INS (cfg, ins, temp, OP_I8CONST);
temp->inst_c0 = ins->inst_imm;
temp->dreg = mono_alloc_ireg (cfg);
ins->opcode = OP_COMPARE;
ins->sreg2 = temp->dreg;
}
break;
#ifndef MONO_ARCH_ILP32
case OP_LOAD_MEMBASE:
#endif
case OP_LOADI8_MEMBASE:
/* Don't generate memindex opcodes (to simplify */
/* read sandboxing) */
if (!amd64_use_imm32 (ins->inst_offset)) {
NEW_INS (cfg, ins, temp, OP_I8CONST);
temp->inst_c0 = ins->inst_offset;
temp->dreg = mono_alloc_ireg (cfg);
ins->opcode = OP_AMD64_LOADI8_MEMINDEX;
ins->inst_indexreg = temp->dreg;
}
break;
#ifndef MONO_ARCH_ILP32
case OP_STORE_MEMBASE_IMM:
#endif
case OP_STOREI8_MEMBASE_IMM:
if (!amd64_use_imm32 (ins->inst_imm)) {
NEW_INS (cfg, ins, temp, OP_I8CONST);
temp->inst_c0 = ins->inst_imm;
temp->dreg = mono_alloc_ireg (cfg);
ins->opcode = OP_STOREI8_MEMBASE_REG;
ins->sreg1 = temp->dreg;
}
break;
#ifdef MONO_ARCH_SIMD_INTRINSICS
case OP_EXPAND_I1: {
int temp_reg1 = mono_alloc_ireg (cfg);
int temp_reg2 = mono_alloc_ireg (cfg);
int original_reg = ins->sreg1;
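			/* Duplicate the low byte into the upper byte ((b << 8) | b) so the
			   value can be broadcast via the 16-bit OP_EXPAND_I2 path below. */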
NEW_INS (cfg, ins, temp, OP_ICONV_TO_U1);
temp->sreg1 = original_reg;
temp->dreg = temp_reg1;
NEW_INS (cfg, ins, temp, OP_SHL_IMM);
temp->sreg1 = temp_reg1;
temp->dreg = temp_reg2;
temp->inst_imm = 8;
NEW_INS (cfg, ins, temp, OP_LOR);
temp->sreg1 = temp->dreg = temp_reg2;
temp->sreg2 = temp_reg1;
ins->opcode = OP_EXPAND_I2;
ins->sreg1 = temp_reg2;
break;
}
case OP_XEQUAL: {
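			/* Compare per dword, collect the per-byte top bits into a mask, and
			   the vectors are equal iff all 16 mask bits are set (0xFFFF). */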
int temp_reg1 = mono_alloc_ireg (cfg);
int temp_reg2 = mono_alloc_ireg (cfg);
NEW_SIMD_INS (cfg, ins, temp, OP_PCMPEQD, temp_reg1, ins->sreg1, ins->sreg2);
NEW_SIMD_INS (cfg, ins, temp, OP_EXTRACT_MASK, temp_reg2, temp_reg1, -1);
temp->type = STACK_I4;
NEW_INS (cfg, ins, temp, OP_COMPARE_IMM);
temp->sreg1 = temp_reg2;
temp->inst_imm = 0xFFFF;
temp->klass = ins->klass;
ins->opcode = OP_CEQ;
ins->sreg1 = -1;
ins->sreg2 = -1;
break;
}
case OP_XCOMPARE: {
int temp_reg;
gboolean is64BitNativeInt = FALSE;
switch (ins->inst_c0)
{
case CMP_EQ:
emit_simd_comp_op (cfg, bb, ins, ins->inst_c1, ins->dreg, ins->sreg1, ins->sreg2);
NULLIFY_INS (ins);
break;
case CMP_NE: {
int temp_reg1 = mono_alloc_ireg (cfg);
int temp_reg2 = mono_alloc_ireg (cfg);
emit_simd_comp_op (cfg, bb, ins, ins->inst_c1, temp_reg1, ins->sreg1, ins->sreg2);
NEW_SIMD_INS (cfg, ins, temp, OP_XONES, temp_reg2, -1, -1);
ins->opcode = OP_XORPD;
ins->sreg1 = temp_reg1;
				ins->sreg2 = temp_reg2;
break;
}
case CMP_LT:
temp_reg = ins->sreg1;
ins->sreg1 = ins->sreg2;
ins->sreg2 = temp_reg;
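			/* Swap the operands and fall through: a < b is b > a. */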
case CMP_GT:
emit_simd_gt_op (cfg, bb, ins, ins->inst_c1, ins->dreg, ins->sreg1, ins->sreg2);
NULLIFY_INS (ins);
break;
case CMP_LE:
temp_reg = ins->sreg1;
ins->sreg1 = ins->sreg2;
ins->sreg2 = temp_reg;
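			/* Swap the operands and fall through: a <= b is b >= a. */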
case CMP_GE: {
int temp_reg1 = mono_alloc_ireg (cfg);
int temp_reg2 = mono_alloc_ireg (cfg);
emit_simd_gt_op (cfg, bb, ins, ins->inst_c1, temp_reg1, ins->sreg1, ins->sreg2);
emit_simd_comp_op (cfg, bb, ins, ins->inst_c1, temp_reg2, ins->sreg1, ins->sreg2);
ins->opcode = OP_POR;
ins->sreg1 = temp_reg1;
ins->sreg2 = temp_reg2;
break;
}
case CMP_LE_UN:
temp_reg = ins->sreg1;
ins->sreg1 = ins->sreg2;
ins->sreg2 = temp_reg;
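			/* Swap the operands and fall through. */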
case CMP_GE_UN:
#if TARGET_SIZEOF_VOID_P == 8
is64BitNativeInt = ins->inst_c1 == MONO_TYPE_U;
#endif
if (mono_hwcap_x86_has_sse41 && ins->inst_c1 != MONO_TYPE_U8 && !is64BitNativeInt) {
int temp_reg1 = mono_alloc_ireg (cfg);
NEW_SIMD_INS (cfg, ins, temp, simd_type_to_max_un_op (ins->inst_c1), temp_reg1, ins->sreg1, ins->sreg2);
emit_simd_comp_op (cfg, bb, ins, ins->inst_c1, ins->dreg, temp_reg1, ins->sreg1);
NULLIFY_INS (ins);
} else {
int temp_reg1 = mono_alloc_ireg (cfg);
int temp_reg2 = mono_alloc_ireg (cfg);
emit_simd_gt_un_op (cfg, bb, ins, ins->inst_c1, temp_reg1, ins->sreg1, ins->sreg2);
emit_simd_comp_op (cfg, bb, ins, ins->inst_c1, temp_reg2, ins->sreg1, ins->sreg2);
ins->opcode = OP_POR;
ins->sreg1 = temp_reg1;
ins->sreg2 = temp_reg2;
}
break;
case CMP_LT_UN:
temp_reg = ins->sreg1;
ins->sreg1 = ins->sreg2;
ins->sreg2 = temp_reg;
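			/* Swap the operands and fall through. */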
case CMP_GT_UN: {
emit_simd_gt_un_op (cfg, bb, ins, ins->inst_c1, ins->dreg, ins->sreg1, ins->sreg2);
NULLIFY_INS (ins);
break;
}
default:
g_assert_not_reached();
break;
}
ins->type = STACK_VTYPE;
ins->inst_c0 = 0;
break;
}
case OP_XCOMPARE_FP: {
ins->opcode = ins->inst_c1 == MONO_TYPE_R4 ? OP_COMPPS : OP_COMPPD;
switch (ins->inst_c0)
{
case CMP_EQ: ins->inst_c0 = 0; break;
case CMP_NE: ins->inst_c0 = 4; break;
case CMP_LT: ins->inst_c0 = 1; break;
case CMP_LE: ins->inst_c0 = 2; break;
case CMP_GT: ins->inst_c0 = 6; break;
case CMP_GE: ins->inst_c0 = 5; break;
default:
g_assert_not_reached();
break;
}
break;
}
case OP_XCAST: {
ins->opcode = OP_XMOVE;
break;
}
case OP_XBINOP: {
switch (ins->inst_c0)
{
case OP_ISUB:
ins->opcode = simd_type_to_sub_op (ins->inst_c1);
break;
case OP_IADD:
ins->opcode = simd_type_to_add_op (ins->inst_c1);
break;
case OP_IAND:
ins->opcode = OP_ANDPD;
break;
case OP_IXOR:
ins->opcode = OP_XORPD;
break;
case OP_IOR:
ins->opcode = OP_ORPD;
break;
case OP_IMIN:
emit_simd_min_op (cfg, bb, ins, ins->inst_c1, ins->dreg, ins->sreg1, ins->sreg2);
NULLIFY_INS (ins);
break;
case OP_IMAX:
emit_simd_max_op (cfg, bb, ins, ins->inst_c1, ins->dreg, ins->sreg1, ins->sreg2);
NULLIFY_INS (ins);
break;
case OP_FSUB:
ins->opcode = ins->inst_c1 == MONO_TYPE_R8 ? OP_SUBPD : OP_SUBPS;
break;
case OP_FADD:
ins->opcode = ins->inst_c1 == MONO_TYPE_R8 ? OP_ADDPD : OP_ADDPS;
break;
case OP_FDIV:
ins->opcode = ins->inst_c1 == MONO_TYPE_R8 ? OP_DIVPD : OP_DIVPS;
break;
case OP_FMUL:
ins->opcode = ins->inst_c1 == MONO_TYPE_R8 ? OP_MULPD : OP_MULPS;
break;
case OP_FMIN:
ins->opcode = ins->inst_c1 == MONO_TYPE_R8 ? OP_MINPD : OP_MINPS;
break;
case OP_FMAX:
ins->opcode = ins->inst_c1 == MONO_TYPE_R8 ? OP_MAXPD : OP_MAXPS;
break;
default:
g_assert_not_reached();
break;
}
break;
}
case OP_XEXTRACT_R4:
case OP_XEXTRACT_R8:
case OP_XEXTRACT_I4:
case OP_XEXTRACT_I8: {
// TODO
g_assert_not_reached();
break;
}
#endif
default:
break;
}
}
bb->max_vreg = cfg->next_vreg;
}
static const int
branch_cc_table [] = {
X86_CC_EQ, X86_CC_GE, X86_CC_GT, X86_CC_LE, X86_CC_LT,
X86_CC_NE, X86_CC_GE, X86_CC_GT, X86_CC_LE, X86_CC_LT,
X86_CC_O, X86_CC_NO, X86_CC_C, X86_CC_NC
};
/* Maps CMP_... constants to X86_CC_... constants */
static const int
cc_table [] = {
X86_CC_EQ, X86_CC_NE, X86_CC_LE, X86_CC_GE, X86_CC_LT, X86_CC_GT,
X86_CC_LE, X86_CC_GE, X86_CC_LT, X86_CC_GT
};
static const int
cc_signed_table [] = {
TRUE, TRUE, TRUE, TRUE, TRUE, TRUE,
FALSE, FALSE, FALSE, FALSE
};
/*#include "cprop.c"*/
static unsigned char*
emit_float_to_int (MonoCompile *cfg, guchar *code, int dreg, int sreg, int size, gboolean is_signed)
{
	// Use 8 as register size to get NaN/Inf conversion to uint result truncated to 0
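	// (cvttsd2si writes the 64-bit integer-indefinite value 0x8000000000000000
	// on NaN/overflow; its low 32 bits are zero.)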
if (size == 8 || (!is_signed && size == 4))
amd64_sse_cvttsd2si_reg_reg (code, dreg, sreg);
else
amd64_sse_cvttsd2si_reg_reg_size (code, dreg, sreg, 4);
if (size == 1)
amd64_widen_reg (code, dreg, dreg, is_signed, FALSE);
else if (size == 2)
amd64_widen_reg (code, dreg, dreg, is_signed, TRUE);
return code;
}
static unsigned char*
mono_emit_stack_alloc (MonoCompile *cfg, guchar *code, MonoInst* tree)
{
int sreg = tree->sreg1;
int need_touch = FALSE;
#if defined(TARGET_WIN32)
need_touch = TRUE;
#elif defined(MONO_ARCH_SIGSEGV_ON_ALTSTACK)
if (!(tree->flags & MONO_INST_INIT))
need_touch = TRUE;
#endif
if (need_touch) {
guint8* br[5];
/*
* Under Windows:
* If requested stack size is larger than one page,
* perform stack-touch operation
*/
/*
* Generate stack probe code.
* Under Windows, it is necessary to allocate one page at a time,
* "touching" stack after each successful sub-allocation. This is
* because of the way stack growth is implemented - there is a
		 * guard page before the lowest stack page that is currently committed.
* Stack normally grows sequentially so OS traps access to the
* guard page and commits more pages when needed.
*/
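		/* Allocations below one page (no bits above the low 12 set) skip the
		   probe loop and fall through to a single SUB. */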
amd64_test_reg_imm (code, sreg, ~0xFFF);
br[0] = code; x86_branch8 (code, X86_CC_Z, 0, FALSE);
br[2] = code; /* loop */
amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 0x1000);
amd64_test_membase_reg (code, AMD64_RSP, 0, AMD64_RSP);
amd64_alu_reg_imm (code, X86_SUB, sreg, 0x1000);
amd64_alu_reg_imm (code, X86_CMP, sreg, 0x1000);
br[3] = code; x86_branch8 (code, X86_CC_AE, 0, FALSE);
amd64_patch (br[3], br[2]);
amd64_test_reg_reg (code, sreg, sreg);
br[4] = code; x86_branch8 (code, X86_CC_Z, 0, FALSE);
amd64_alu_reg_reg (code, X86_SUB, AMD64_RSP, sreg);
br[1] = code; x86_jump8 (code, 0);
amd64_patch (br[0], code);
amd64_alu_reg_reg (code, X86_SUB, AMD64_RSP, sreg);
amd64_patch (br[1], code);
amd64_patch (br[4], code);
}
else
amd64_alu_reg_reg (code, X86_SUB, AMD64_RSP, tree->sreg1);
if (tree->flags & MONO_INST_INIT) {
int offset = 0;
if (tree->dreg != AMD64_RAX && sreg != AMD64_RAX) {
amd64_push_reg (code, AMD64_RAX);
offset += 8;
}
if (tree->dreg != AMD64_RCX && sreg != AMD64_RCX) {
amd64_push_reg (code, AMD64_RCX);
offset += 8;
}
if (tree->dreg != AMD64_RDI && sreg != AMD64_RDI) {
amd64_push_reg (code, AMD64_RDI);
offset += 8;
}
amd64_shift_reg_imm (code, X86_SHR, sreg, 3);
if (sreg != AMD64_RCX)
amd64_mov_reg_reg (code, AMD64_RCX, sreg, 8);
amd64_alu_reg_reg (code, X86_XOR, AMD64_RAX, AMD64_RAX);
amd64_lea_membase (code, AMD64_RDI, AMD64_RSP, offset);
if (cfg->param_area)
amd64_alu_reg_imm (code, X86_ADD, AMD64_RDI, cfg->param_area);
amd64_cld (code);
amd64_prefix (code, X86_REP_PREFIX);
amd64_stosl (code);
if (tree->dreg != AMD64_RDI && sreg != AMD64_RDI)
amd64_pop_reg (code, AMD64_RDI);
if (tree->dreg != AMD64_RCX && sreg != AMD64_RCX)
amd64_pop_reg (code, AMD64_RCX);
if (tree->dreg != AMD64_RAX && sreg != AMD64_RAX)
amd64_pop_reg (code, AMD64_RAX);
}
return code;
}
static guint8*
emit_move_return_value (MonoCompile *cfg, MonoInst *ins, guint8 *code)
{
CallInfo *cinfo;
guint32 quad;
/* Move return value to the target register */
/* FIXME: do this in the local reg allocator */
switch (ins->opcode) {
case OP_CALL:
case OP_CALL_REG:
case OP_CALL_MEMBASE:
case OP_LCALL:
case OP_LCALL_REG:
case OP_LCALL_MEMBASE:
g_assert (ins->dreg == AMD64_RAX);
break;
case OP_FCALL:
case OP_FCALL_REG:
case OP_FCALL_MEMBASE: {
MonoType *rtype = mini_get_underlying_type (((MonoCallInst*)ins)->signature->ret);
if (rtype->type == MONO_TYPE_R4) {
amd64_sse_cvtss2sd_reg_reg (code, ins->dreg, AMD64_XMM0);
}
else {
if (ins->dreg != AMD64_XMM0)
amd64_sse_movsd_reg_reg (code, ins->dreg, AMD64_XMM0);
}
break;
}
case OP_RCALL:
case OP_RCALL_REG:
case OP_RCALL_MEMBASE:
if (ins->dreg != AMD64_XMM0)
amd64_sse_movss_reg_reg (code, ins->dreg, AMD64_XMM0);
break;
case OP_VCALL:
case OP_VCALL_REG:
case OP_VCALL_MEMBASE:
case OP_VCALL2:
case OP_VCALL2_REG:
case OP_VCALL2_MEMBASE:
cinfo = get_call_info (cfg->mempool, ((MonoCallInst*)ins)->signature);
if (cinfo->ret.storage == ArgValuetypeInReg) {
MonoInst *loc = cfg->arch.vret_addr_loc;
/* Load the destination address */
g_assert (loc->opcode == OP_REGOFFSET);
amd64_mov_reg_membase (code, AMD64_RCX, loc->inst_basereg, loc->inst_offset, sizeof(gpointer));
for (quad = 0; quad < 2; quad ++) {
switch (cinfo->ret.pair_storage [quad]) {
case ArgInIReg:
amd64_mov_membase_reg (code, AMD64_RCX, (quad * sizeof (target_mgreg_t)), cinfo->ret.pair_regs [quad], sizeof (target_mgreg_t));
break;
case ArgInFloatSSEReg:
amd64_movss_membase_reg (code, AMD64_RCX, (quad * 8), cinfo->ret.pair_regs [quad]);
break;
case ArgInDoubleSSEReg:
amd64_movsd_membase_reg (code, AMD64_RCX, (quad * 8), cinfo->ret.pair_regs [quad]);
break;
case ArgNone:
break;
default:
NOT_IMPLEMENTED;
}
}
}
break;
}
return code;
}
#endif /* DISABLE_JIT */
#ifdef TARGET_MACH
static int tls_gs_offset;
#endif
gboolean
mono_arch_have_fast_tls (void)
{
#ifdef TARGET_MACH
static gboolean have_fast_tls = FALSE;
static gboolean inited = FALSE;
guint8 *ins;
if (mini_debug_options.use_fallback_tls)
return FALSE;
if (inited)
return have_fast_tls;
ins = (guint8*)pthread_getspecific;
/*
* We're looking for these two instructions:
*
* mov %gs:[offset](,%rdi,8),%rax
* retq
*/
have_fast_tls = ins [0] == 0x65 &&
ins [1] == 0x48 &&
ins [2] == 0x8b &&
ins [3] == 0x04 &&
ins [4] == 0xfd &&
ins [6] == 0x00 &&
ins [7] == 0x00 &&
ins [8] == 0x00 &&
ins [9] == 0xc3;
tls_gs_offset = ins[5];
/*
* Apple now loads a different version of pthread_getspecific when launched from Xcode
* For that version we're looking for these instructions:
*
* pushq %rbp
* movq %rsp, %rbp
* mov %gs:[offset](,%rdi,8),%rax
* popq %rbp
* retq
*/
if (!have_fast_tls) {
have_fast_tls = ins [0] == 0x55 &&
ins [1] == 0x48 &&
ins [2] == 0x89 &&
ins [3] == 0xe5 &&
ins [4] == 0x65 &&
ins [5] == 0x48 &&
ins [6] == 0x8b &&
ins [7] == 0x04 &&
ins [8] == 0xfd &&
ins [10] == 0x00 &&
ins [11] == 0x00 &&
ins [12] == 0x00 &&
ins [13] == 0x5d &&
ins [14] == 0xc3;
tls_gs_offset = ins[9];
}
inited = TRUE;
return have_fast_tls;
#elif defined(TARGET_ANDROID)
return FALSE;
#else
if (mini_debug_options.use_fallback_tls)
return FALSE;
return TRUE;
#endif
}
int
mono_amd64_get_tls_gs_offset (void)
{
#ifdef TARGET_OSX
return tls_gs_offset;
#else
g_assert_not_reached ();
return -1;
#endif
}
/*
* \param code buffer to store code to
* \param dreg hard register where to place the result
* \param tls_offset offset info
* \return a pointer to the end of the stored code
*
* mono_amd64_emit_tls_get emits in \p code the native code that puts in
* the dreg register the item in the thread local storage identified
* by tls_offset.
*/
static guint8*
mono_amd64_emit_tls_get (guint8* code, int dreg, int tls_offset)
{
#ifdef TARGET_WIN32
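	/* x64 TEB layout: TlsSlots lives at gs:0x1480 (64 inline slots) and
	   TlsExpansionSlots at gs:0x1780; expansion indices are rebased by -64,
	   hence the -0x200 (64 * 8) below. */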
if (tls_offset < 64) {
x86_prefix (code, X86_GS_PREFIX);
amd64_mov_reg_mem (code, dreg, (tls_offset * 8) + 0x1480, 8);
} else {
guint8 *buf [16];
g_assert (tls_offset < 0x440);
/* Load TEB->TlsExpansionSlots */
x86_prefix (code, X86_GS_PREFIX);
amd64_mov_reg_mem (code, dreg, 0x1780, 8);
amd64_test_reg_reg (code, dreg, dreg);
buf [0] = code;
amd64_branch (code, X86_CC_EQ, code, TRUE);
amd64_mov_reg_membase (code, dreg, dreg, (tls_offset * 8) - 0x200, 8);
amd64_patch (buf [0], code);
}
#elif defined(TARGET_MACH)
x86_prefix (code, X86_GS_PREFIX);
amd64_mov_reg_mem (code, dreg, tls_gs_offset + (tls_offset * 8), 8);
#else
if (optimize_for_xen) {
x86_prefix (code, X86_FS_PREFIX);
amd64_mov_reg_mem (code, dreg, 0, 8);
amd64_mov_reg_membase (code, dreg, dreg, tls_offset, 8);
} else {
x86_prefix (code, X86_FS_PREFIX);
amd64_mov_reg_mem (code, dreg, tls_offset, 8);
}
#endif
return code;
}
static guint8*
mono_amd64_emit_tls_set (guint8 *code, int sreg, int tls_offset)
{
#ifdef TARGET_WIN32
g_assert_not_reached ();
#elif defined(TARGET_MACH)
x86_prefix (code, X86_GS_PREFIX);
amd64_mov_mem_reg (code, tls_gs_offset + (tls_offset * 8), sreg, 8);
#else
g_assert (!optimize_for_xen);
x86_prefix (code, X86_FS_PREFIX);
amd64_mov_mem_reg (code, tls_offset, sreg, 8);
#endif
return code;
}
/*
* emit_setup_lmf:
*
* Emit code to initialize an LMF structure at LMF_OFFSET.
*/
static guint8*
emit_setup_lmf (MonoCompile *cfg, guint8 *code, gint32 lmf_offset, int cfa_offset)
{
/*
* The ip field is not set, the exception handling code will obtain it from the stack location pointed to by the sp field.
*/
/*
* sp is saved right before calls but we need to save it here too so
* async stack walks would work.
*/
amd64_mov_membase_reg (code, cfg->frame_reg, lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, rsp), AMD64_RSP, 8);
/* Save rbp */
amd64_mov_membase_reg (code, cfg->frame_reg, lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, rbp), AMD64_RBP, 8);
if (cfg->arch.omit_fp && cfa_offset != -1)
mono_emit_unwind_op_offset (cfg, code, AMD64_RBP, - (cfa_offset - (lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, rbp))));
/* These can't contain refs */
mini_gc_set_slot_type_from_fp (cfg, lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), SLOT_NOREF);
mini_gc_set_slot_type_from_fp (cfg, lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, rsp), SLOT_NOREF);
/* These are handled automatically by the stack marking code */
mini_gc_set_slot_type_from_fp (cfg, lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, rbp), SLOT_NOREF);
return code;
}
#ifdef TARGET_WIN32
#define TEB_LAST_ERROR_OFFSET 0x68
static guint8*
emit_get_last_error (guint8* code, int dreg)
{
/* Threads last error value is located in TEB_LAST_ERROR_OFFSET. */
x86_prefix (code, X86_GS_PREFIX);
amd64_mov_reg_mem (code, dreg, TEB_LAST_ERROR_OFFSET, sizeof (guint32));
return code;
}
#else
static guint8*
emit_get_last_error (guint8* code, int dreg)
{
g_assert_not_reached ();
}
#endif
/* benchmark and set based on cpu */
#define LOOP_ALIGNMENT 8
#define bb_is_loop_start(bb) ((bb)->loop_body_start && (bb)->nesting)
#ifndef DISABLE_JIT
static guint8*
amd64_handle_varargs_nregs (guint8 *code, guint32 nregs)
{
#ifndef TARGET_WIN32
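	/* SysV AMD64 ABI: for varargs callees, %al must hold an upper bound on the
	   number of vector registers used to pass arguments. */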
if (nregs)
amd64_mov_reg_imm (code, AMD64_RAX, nregs);
else
amd64_alu_reg_reg (code, X86_XOR, AMD64_RAX, AMD64_RAX);
#endif
return code;
}
static guint8*
amd64_handle_varargs_call (MonoCompile *cfg, guint8 *code, MonoCallInst *call, gboolean free_rax)
{
#ifdef TARGET_WIN32
return code;
#else
/*
* The AMD64 ABI forces callers to know about varargs.
*/
guint32 nregs = 0;
if (call->signature->call_convention == MONO_CALL_VARARG && call->signature->pinvoke) {
		// deliberately nothing -- but nregs = 0 and do not return
} else if (cfg->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE && m_class_get_image (cfg->method->klass) != mono_defaults.corlib) {
/*
* Since the unmanaged calling convention doesn't contain a
* 'vararg' entry, we have to treat every pinvoke call as a
* potential vararg call.
*/
for (guint32 i = 0; i < AMD64_XMM_NREG; ++i)
nregs += (call->used_fregs & (1 << i)) != 0;
} else {
return code;
}
MonoInst *ins = (MonoInst*)call;
if (free_rax && ins->sreg1 == AMD64_RAX) {
amd64_mov_reg_reg (code, AMD64_R11, AMD64_RAX, 8);
ins->sreg1 = AMD64_R11;
}
return amd64_handle_varargs_nregs (code, nregs);
#endif
}
void
mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
{
MonoInst *ins;
MonoCallInst *call;
guint8 *code = cfg->native_code + cfg->code_len;
/* Fix max_offset estimate for each successor bb */
gboolean optimize_branch_pred = (cfg->opt & MONO_OPT_BRANCH) && (cfg->max_block_num < MAX_BBLOCKS_FOR_BRANCH_OPTS);
if (optimize_branch_pred) {
int current_offset = cfg->code_len;
MonoBasicBlock *current_bb;
for (current_bb = bb; current_bb != NULL; current_bb = current_bb->next_bb) {
current_bb->max_offset = current_offset;
current_offset += current_bb->max_length;
}
}
if (cfg->opt & MONO_OPT_LOOP) {
int pad, align = LOOP_ALIGNMENT;
/* set alignment depending on cpu */
if (bb_is_loop_start (bb) && (pad = (cfg->code_len & (align - 1)))) {
pad = align - pad;
/*g_print ("adding %d pad at %x to loop in %s\n", pad, cfg->code_len, cfg->method->name);*/
amd64_padding (code, pad);
cfg->code_len += pad;
bb->native_offset = cfg->code_len;
}
}
if (cfg->verbose_level > 2)
g_print ("Basic block %d starting at offset 0x%x\n", bb->block_num, bb->native_offset);
set_code_cursor (cfg, code);
mono_debug_open_block (cfg, bb, code - cfg->native_code);
if (mono_break_at_bb_method && mono_method_desc_full_match (mono_break_at_bb_method, cfg->method) && bb->block_num == mono_break_at_bb_bb_num)
x86_breakpoint (code);
MONO_BB_FOR_EACH_INS (bb, ins) {
const guint offset = code - cfg->native_code;
set_code_cursor (cfg, code);
int max_len = ins_get_size (ins->opcode);
code = realloc_code (cfg, max_len);
if (cfg->debug_info)
mono_debug_record_line_number (cfg, ins, offset);
switch (ins->opcode) {
case OP_BIGMUL:
amd64_mul_reg (code, ins->sreg2, TRUE);
break;
case OP_BIGMUL_UN:
amd64_mul_reg (code, ins->sreg2, FALSE);
break;
case OP_X86_SETEQ_MEMBASE:
amd64_set_membase (code, X86_CC_EQ, ins->inst_basereg, ins->inst_offset, TRUE);
break;
case OP_STOREI1_MEMBASE_IMM:
amd64_mov_membase_imm (code, ins->inst_destbasereg, ins->inst_offset, ins->inst_imm, 1);
break;
case OP_STOREI2_MEMBASE_IMM:
amd64_mov_membase_imm (code, ins->inst_destbasereg, ins->inst_offset, ins->inst_imm, 2);
break;
case OP_STOREI4_MEMBASE_IMM:
amd64_mov_membase_imm (code, ins->inst_destbasereg, ins->inst_offset, ins->inst_imm, 4);
break;
case OP_STOREI1_MEMBASE_REG:
amd64_mov_membase_reg (code, ins->inst_destbasereg, ins->inst_offset, ins->sreg1, 1);
break;
case OP_STOREI2_MEMBASE_REG:
amd64_mov_membase_reg (code, ins->inst_destbasereg, ins->inst_offset, ins->sreg1, 2);
break;
/* In AMD64 NaCl, pointers are 4 bytes, */
/* so STORE_* != STOREI8_*. Likewise below. */
case OP_STORE_MEMBASE_REG:
amd64_mov_membase_reg (code, ins->inst_destbasereg, ins->inst_offset, ins->sreg1, sizeof(gpointer));
break;
case OP_STOREI8_MEMBASE_REG:
amd64_mov_membase_reg (code, ins->inst_destbasereg, ins->inst_offset, ins->sreg1, 8);
break;
case OP_STOREI4_MEMBASE_REG:
amd64_mov_membase_reg (code, ins->inst_destbasereg, ins->inst_offset, ins->sreg1, 4);
break;
case OP_STORE_MEMBASE_IMM:
/* In NaCl, this could be a PCONST type, which could */
/* mean a pointer type was copied directly into the */
/* lower 32-bits of inst_imm, so for InvalidPtr==-1 */
/* the value would be 0x00000000FFFFFFFF which is */
/* not proper for an imm32 unless you cast it. */
g_assert (amd64_is_imm32 (ins->inst_imm));
amd64_mov_membase_imm (code, ins->inst_destbasereg, ins->inst_offset, (gint32)ins->inst_imm, sizeof(gpointer));
break;
case OP_STOREI8_MEMBASE_IMM:
g_assert (amd64_is_imm32 (ins->inst_imm));
amd64_mov_membase_imm (code, ins->inst_destbasereg, ins->inst_offset, ins->inst_imm, 8);
break;
case OP_LOAD_MEM:
#ifdef MONO_ARCH_ILP32
/* In ILP32, pointers are 4 bytes, so separate these */
/* cases, use literal 8 below where we really want 8 */
amd64_mov_reg_imm (code, ins->dreg, ins->inst_imm);
amd64_mov_reg_membase (code, ins->dreg, ins->dreg, 0, sizeof(gpointer));
break;
#endif
case OP_LOADI8_MEM:
// FIXME: Decompose this earlier
if (amd64_use_imm32 (ins->inst_imm))
amd64_mov_reg_mem (code, ins->dreg, ins->inst_imm, 8);
else {
amd64_mov_reg_imm_size (code, ins->dreg, ins->inst_imm, sizeof(gpointer));
amd64_mov_reg_membase (code, ins->dreg, ins->dreg, 0, 8);
}
break;
case OP_LOADI4_MEM:
amd64_mov_reg_imm (code, ins->dreg, ins->inst_imm);
amd64_movsxd_reg_membase (code, ins->dreg, ins->dreg, 0);
break;
case OP_LOADU4_MEM:
// FIXME: Decompose this earlier
if (amd64_use_imm32 (ins->inst_imm))
amd64_mov_reg_mem (code, ins->dreg, ins->inst_imm, 4);
else {
amd64_mov_reg_imm_size (code, ins->dreg, ins->inst_imm, sizeof(gpointer));
amd64_mov_reg_membase (code, ins->dreg, ins->dreg, 0, 4);
}
break;
case OP_LOADU1_MEM:
amd64_mov_reg_imm (code, ins->dreg, ins->inst_imm);
amd64_widen_membase (code, ins->dreg, ins->dreg, 0, FALSE, FALSE);
break;
case OP_LOADU2_MEM:
/* For NaCl, pointers are 4 bytes, so separate these */
/* cases, use literal 8 below where we really want 8 */
amd64_mov_reg_imm (code, ins->dreg, ins->inst_imm);
amd64_widen_membase (code, ins->dreg, ins->dreg, 0, FALSE, TRUE);
break;
case OP_LOAD_MEMBASE:
g_assert (amd64_is_imm32 (ins->inst_offset));
amd64_mov_reg_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset, sizeof(gpointer));
break;
case OP_LOADI8_MEMBASE:
/* Use literal 8 instead of sizeof pointer or */
/* register, we really want 8 for this opcode */
g_assert (amd64_is_imm32 (ins->inst_offset));
amd64_mov_reg_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset, 8);
break;
case OP_LOADI4_MEMBASE:
amd64_movsxd_reg_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
break;
case OP_LOADU4_MEMBASE:
amd64_mov_reg_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset, 4);
break;
case OP_LOADU1_MEMBASE:
/* The cpu zero extends the result into 64 bits */
amd64_widen_membase_size (code, ins->dreg, ins->inst_basereg, ins->inst_offset, FALSE, FALSE, 4);
break;
case OP_LOADI1_MEMBASE:
amd64_widen_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset, TRUE, FALSE);
break;
case OP_LOADU2_MEMBASE:
/* The cpu zero extends the result into 64 bits */
amd64_widen_membase_size (code, ins->dreg, ins->inst_basereg, ins->inst_offset, FALSE, TRUE, 4);
break;
case OP_LOADI2_MEMBASE:
amd64_widen_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset, TRUE, TRUE);
break;
case OP_AMD64_LOADI8_MEMINDEX:
amd64_mov_reg_memindex_size (code, ins->dreg, ins->inst_basereg, 0, ins->inst_indexreg, 0, 8);
break;
case OP_LCONV_TO_I1:
case OP_ICONV_TO_I1:
case OP_SEXT_I1:
amd64_widen_reg (code, ins->dreg, ins->sreg1, TRUE, FALSE);
break;
case OP_LCONV_TO_I2:
case OP_ICONV_TO_I2:
case OP_SEXT_I2:
amd64_widen_reg (code, ins->dreg, ins->sreg1, TRUE, TRUE);
break;
case OP_LCONV_TO_U1:
case OP_ICONV_TO_U1:
amd64_widen_reg (code, ins->dreg, ins->sreg1, FALSE, FALSE);
break;
case OP_LCONV_TO_U2:
case OP_ICONV_TO_U2:
amd64_widen_reg (code, ins->dreg, ins->sreg1, FALSE, TRUE);
break;
case OP_ZEXT_I4:
/* Clean out the upper word */
amd64_mov_reg_reg (code, ins->dreg, ins->sreg1, 4);
break;
case OP_SEXT_I4:
amd64_movsxd_reg_reg (code, ins->dreg, ins->sreg1);
break;
case OP_COMPARE:
case OP_LCOMPARE:
amd64_alu_reg_reg (code, X86_CMP, ins->sreg1, ins->sreg2);
break;
case OP_COMPARE_IMM:
#if defined(MONO_ARCH_ILP32)
/* Comparison of pointer immediates should be 4 bytes to avoid sign-extend problems */
g_assert (amd64_is_imm32 (ins->inst_imm));
amd64_alu_reg_imm_size (code, X86_CMP, ins->sreg1, ins->inst_imm, 4);
break;
#endif
case OP_LCOMPARE_IMM:
g_assert (amd64_is_imm32 (ins->inst_imm));
amd64_alu_reg_imm (code, X86_CMP, ins->sreg1, ins->inst_imm);
break;
case OP_X86_COMPARE_REG_MEMBASE:
amd64_alu_reg_membase (code, X86_CMP, ins->sreg1, ins->sreg2, ins->inst_offset);
break;
case OP_X86_TEST_NULL:
amd64_test_reg_reg_size (code, ins->sreg1, ins->sreg1, 4);
break;
case OP_AMD64_TEST_NULL:
amd64_test_reg_reg (code, ins->sreg1, ins->sreg1);
break;
case OP_X86_ADD_REG_MEMBASE:
amd64_alu_reg_membase_size (code, X86_ADD, ins->sreg1, ins->sreg2, ins->inst_offset, 4);
break;
case OP_X86_SUB_REG_MEMBASE:
amd64_alu_reg_membase_size (code, X86_SUB, ins->sreg1, ins->sreg2, ins->inst_offset, 4);
break;
case OP_X86_AND_REG_MEMBASE:
amd64_alu_reg_membase_size (code, X86_AND, ins->sreg1, ins->sreg2, ins->inst_offset, 4);
break;
case OP_X86_OR_REG_MEMBASE:
amd64_alu_reg_membase_size (code, X86_OR, ins->sreg1, ins->sreg2, ins->inst_offset, 4);
break;
case OP_X86_XOR_REG_MEMBASE:
amd64_alu_reg_membase_size (code, X86_XOR, ins->sreg1, ins->sreg2, ins->inst_offset, 4);
break;
case OP_X86_ADD_MEMBASE_IMM:
/* FIXME: Make a 64 version too */
amd64_alu_membase_imm_size (code, X86_ADD, ins->inst_basereg, ins->inst_offset, ins->inst_imm, 4);
break;
case OP_X86_SUB_MEMBASE_IMM:
g_assert (amd64_is_imm32 (ins->inst_imm));
amd64_alu_membase_imm_size (code, X86_SUB, ins->inst_basereg, ins->inst_offset, ins->inst_imm, 4);
break;
case OP_X86_AND_MEMBASE_IMM:
g_assert (amd64_is_imm32 (ins->inst_imm));
amd64_alu_membase_imm_size (code, X86_AND, ins->inst_basereg, ins->inst_offset, ins->inst_imm, 4);
break;
case OP_X86_OR_MEMBASE_IMM:
g_assert (amd64_is_imm32 (ins->inst_imm));
amd64_alu_membase_imm_size (code, X86_OR, ins->inst_basereg, ins->inst_offset, ins->inst_imm, 4);
break;
case OP_X86_XOR_MEMBASE_IMM:
g_assert (amd64_is_imm32 (ins->inst_imm));
amd64_alu_membase_imm_size (code, X86_XOR, ins->inst_basereg, ins->inst_offset, ins->inst_imm, 4);
break;
case OP_X86_ADD_MEMBASE_REG:
amd64_alu_membase_reg_size (code, X86_ADD, ins->inst_basereg, ins->inst_offset, ins->sreg2, 4);
break;
case OP_X86_SUB_MEMBASE_REG:
amd64_alu_membase_reg_size (code, X86_SUB, ins->inst_basereg, ins->inst_offset, ins->sreg2, 4);
break;
case OP_X86_AND_MEMBASE_REG:
amd64_alu_membase_reg_size (code, X86_AND, ins->inst_basereg, ins->inst_offset, ins->sreg2, 4);
break;
case OP_X86_OR_MEMBASE_REG:
amd64_alu_membase_reg_size (code, X86_OR, ins->inst_basereg, ins->inst_offset, ins->sreg2, 4);
break;
case OP_X86_XOR_MEMBASE_REG:
amd64_alu_membase_reg_size (code, X86_XOR, ins->inst_basereg, ins->inst_offset, ins->sreg2, 4);
break;
case OP_X86_INC_MEMBASE:
amd64_inc_membase_size (code, ins->inst_basereg, ins->inst_offset, 4);
break;
case OP_X86_INC_REG:
amd64_inc_reg_size (code, ins->dreg, 4);
break;
case OP_X86_DEC_MEMBASE:
amd64_dec_membase_size (code, ins->inst_basereg, ins->inst_offset, 4);
break;
case OP_X86_DEC_REG:
amd64_dec_reg_size (code, ins->dreg, 4);
break;
case OP_X86_MUL_REG_MEMBASE:
case OP_X86_MUL_MEMBASE_REG:
amd64_imul_reg_membase_size (code, ins->sreg1, ins->sreg2, ins->inst_offset, 4);
break;
case OP_AMD64_ICOMPARE_MEMBASE_REG:
amd64_alu_membase_reg_size (code, X86_CMP, ins->inst_basereg, ins->inst_offset, ins->sreg2, 4);
break;
case OP_AMD64_ICOMPARE_MEMBASE_IMM:
amd64_alu_membase_imm_size (code, X86_CMP, ins->inst_basereg, ins->inst_offset, ins->inst_imm, 4);
break;
case OP_AMD64_COMPARE_MEMBASE_REG:
amd64_alu_membase_reg_size (code, X86_CMP, ins->inst_basereg, ins->inst_offset, ins->sreg2, 8);
break;
case OP_AMD64_COMPARE_MEMBASE_IMM:
g_assert (amd64_is_imm32 (ins->inst_imm));
amd64_alu_membase_imm_size (code, X86_CMP, ins->inst_basereg, ins->inst_offset, ins->inst_imm, 8);
break;
case OP_X86_COMPARE_MEMBASE8_IMM:
amd64_alu_membase8_imm_size (code, X86_CMP, ins->inst_basereg, ins->inst_offset, ins->inst_imm, 4);
break;
case OP_AMD64_ICOMPARE_REG_MEMBASE:
amd64_alu_reg_membase_size (code, X86_CMP, ins->sreg1, ins->sreg2, ins->inst_offset, 4);
break;
case OP_AMD64_COMPARE_REG_MEMBASE:
amd64_alu_reg_membase_size (code, X86_CMP, ins->sreg1, ins->sreg2, ins->inst_offset, 8);
break;
case OP_AMD64_ADD_REG_MEMBASE:
amd64_alu_reg_membase_size (code, X86_ADD, ins->sreg1, ins->sreg2, ins->inst_offset, 8);
break;
case OP_AMD64_SUB_REG_MEMBASE:
amd64_alu_reg_membase_size (code, X86_SUB, ins->sreg1, ins->sreg2, ins->inst_offset, 8);
break;
case OP_AMD64_AND_REG_MEMBASE:
amd64_alu_reg_membase_size (code, X86_AND, ins->sreg1, ins->sreg2, ins->inst_offset, 8);
break;
case OP_AMD64_OR_REG_MEMBASE:
amd64_alu_reg_membase_size (code, X86_OR, ins->sreg1, ins->sreg2, ins->inst_offset, 8);
break;
case OP_AMD64_XOR_REG_MEMBASE:
amd64_alu_reg_membase_size (code, X86_XOR, ins->sreg1, ins->sreg2, ins->inst_offset, 8);
break;
case OP_AMD64_ADD_MEMBASE_REG:
amd64_alu_membase_reg_size (code, X86_ADD, ins->inst_basereg, ins->inst_offset, ins->sreg2, 8);
break;
case OP_AMD64_SUB_MEMBASE_REG:
amd64_alu_membase_reg_size (code, X86_SUB, ins->inst_basereg, ins->inst_offset, ins->sreg2, 8);
break;
case OP_AMD64_AND_MEMBASE_REG:
amd64_alu_membase_reg_size (code, X86_AND, ins->inst_basereg, ins->inst_offset, ins->sreg2, 8);
break;
case OP_AMD64_OR_MEMBASE_REG:
amd64_alu_membase_reg_size (code, X86_OR, ins->inst_basereg, ins->inst_offset, ins->sreg2, 8);
break;
case OP_AMD64_XOR_MEMBASE_REG:
amd64_alu_membase_reg_size (code, X86_XOR, ins->inst_basereg, ins->inst_offset, ins->sreg2, 8);
break;
case OP_AMD64_ADD_MEMBASE_IMM:
g_assert (amd64_is_imm32 (ins->inst_imm));
amd64_alu_membase_imm_size (code, X86_ADD, ins->inst_basereg, ins->inst_offset, ins->inst_imm, 8);
break;
case OP_AMD64_SUB_MEMBASE_IMM:
g_assert (amd64_is_imm32 (ins->inst_imm));
amd64_alu_membase_imm_size (code, X86_SUB, ins->inst_basereg, ins->inst_offset, ins->inst_imm, 8);
break;
case OP_AMD64_AND_MEMBASE_IMM:
g_assert (amd64_is_imm32 (ins->inst_imm));
amd64_alu_membase_imm_size (code, X86_AND, ins->inst_basereg, ins->inst_offset, ins->inst_imm, 8);
break;
case OP_AMD64_OR_MEMBASE_IMM:
g_assert (amd64_is_imm32 (ins->inst_imm));
amd64_alu_membase_imm_size (code, X86_OR, ins->inst_basereg, ins->inst_offset, ins->inst_imm, 8);
break;
case OP_AMD64_XOR_MEMBASE_IMM:
g_assert (amd64_is_imm32 (ins->inst_imm));
amd64_alu_membase_imm_size (code, X86_XOR, ins->inst_basereg, ins->inst_offset, ins->inst_imm, 8);
break;
case OP_BREAK:
amd64_breakpoint (code);
break;
case OP_RELAXED_NOP:
x86_prefix (code, X86_REP_PREFIX);
x86_nop (code);
break;
case OP_HARD_NOP:
x86_nop (code);
break;
case OP_NOP:
case OP_DUMMY_USE:
case OP_DUMMY_ICONST:
case OP_DUMMY_I8CONST:
case OP_DUMMY_R8CONST:
case OP_DUMMY_R4CONST:
case OP_NOT_REACHED:
case OP_NOT_NULL:
break;
case OP_IL_SEQ_POINT:
mono_add_seq_point (cfg, bb, ins, code - cfg->native_code);
break;
case OP_SEQ_POINT: {
if (ins->flags & MONO_INST_SINGLE_STEP_LOC) {
MonoInst *var = cfg->arch.ss_tramp_var;
guint8 *label;
/* Load ss_tramp_var */
/* This is equal to &ss_trampoline */
amd64_mov_reg_membase (code, AMD64_R11, var->inst_basereg, var->inst_offset, 8);
/* Load the trampoline address */
amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, 0, 8);
/* Call it if it is non-null */
amd64_test_reg_reg (code, AMD64_R11, AMD64_R11);
label = code;
amd64_branch8 (code, X86_CC_Z, 0, FALSE);
amd64_call_reg (code, AMD64_R11);
amd64_patch (label, code);
}
/*
			 * This is the address which is saved in seq points.
*/
mono_add_seq_point (cfg, bb, ins, code - cfg->native_code);
if (cfg->compile_aot) {
const guint32 offset = code - cfg->native_code;
guint32 val;
MonoInst *info_var = cfg->arch.seq_point_info_var;
guint8 *label;
/* Load info var */
amd64_mov_reg_membase (code, AMD64_R11, info_var->inst_basereg, info_var->inst_offset, 8);
val = ((offset) * sizeof (target_mgreg_t)) + MONO_STRUCT_OFFSET (SeqPointInfo, bp_addrs);
/* Load the info->bp_addrs [offset], which is either NULL or the address of the breakpoint trampoline */
amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, val, 8);
amd64_test_reg_reg (code, AMD64_R11, AMD64_R11);
label = code;
amd64_branch8 (code, X86_CC_Z, 0, FALSE);
/* Call the trampoline */
amd64_call_reg (code, AMD64_R11);
amd64_patch (label, code);
} else {
MonoInst *var = cfg->arch.bp_tramp_var;
guint8 *label;
/*
			 * Emit a test+branch against a constant; the constant will be overwritten
* by mono_arch_set_breakpoint () to cause the test to fail.
*/
amd64_mov_reg_imm (code, AMD64_R11, 0);
amd64_test_reg_reg (code, AMD64_R11, AMD64_R11);
label = code;
amd64_branch8 (code, X86_CC_Z, 0, FALSE);
g_assert (var);
g_assert (var->opcode == OP_REGOFFSET);
/* Load bp_tramp_var */
/* This is equal to &bp_trampoline */
amd64_mov_reg_membase (code, AMD64_R11, var->inst_basereg, var->inst_offset, 8);
/* Call the trampoline */
amd64_call_membase (code, AMD64_R11, 0);
amd64_patch (label, code);
}
/*
* Add an additional nop so skipping the bp doesn't cause the ip to point
* to another IL offset.
*/
x86_nop (code);
break;
}
case OP_ADDCC:
case OP_LADDCC:
case OP_LADD:
amd64_alu_reg_reg (code, X86_ADD, ins->sreg1, ins->sreg2);
break;
case OP_ADC:
amd64_alu_reg_reg (code, X86_ADC, ins->sreg1, ins->sreg2);
break;
case OP_ADD_IMM:
case OP_LADD_IMM:
g_assert (amd64_is_imm32 (ins->inst_imm));
amd64_alu_reg_imm (code, X86_ADD, ins->dreg, ins->inst_imm);
break;
case OP_ADC_IMM:
g_assert (amd64_is_imm32 (ins->inst_imm));
amd64_alu_reg_imm (code, X86_ADC, ins->dreg, ins->inst_imm);
break;
case OP_SUBCC:
case OP_LSUBCC:
case OP_LSUB:
amd64_alu_reg_reg (code, X86_SUB, ins->sreg1, ins->sreg2);
break;
case OP_SBB:
amd64_alu_reg_reg (code, X86_SBB, ins->sreg1, ins->sreg2);
break;
case OP_SUB_IMM:
case OP_LSUB_IMM:
g_assert (amd64_is_imm32 (ins->inst_imm));
amd64_alu_reg_imm (code, X86_SUB, ins->dreg, ins->inst_imm);
break;
case OP_SBB_IMM:
g_assert (amd64_is_imm32 (ins->inst_imm));
amd64_alu_reg_imm (code, X86_SBB, ins->dreg, ins->inst_imm);
break;
case OP_LAND:
amd64_alu_reg_reg (code, X86_AND, ins->sreg1, ins->sreg2);
break;
case OP_AND_IMM:
case OP_LAND_IMM:
g_assert (amd64_is_imm32 (ins->inst_imm));
amd64_alu_reg_imm (code, X86_AND, ins->sreg1, ins->inst_imm);
break;
case OP_LMUL:
amd64_imul_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_MUL_IMM:
case OP_LMUL_IMM:
case OP_IMUL_IMM: {
guint32 size = (ins->opcode == OP_IMUL_IMM) ? 4 : 8;
switch (ins->inst_imm) {
case 2:
/* MOV r1, r2 */
/* ADD r1, r1 */
if (ins->dreg != ins->sreg1)
amd64_mov_reg_reg (code, ins->dreg, ins->sreg1, size);
amd64_alu_reg_reg (code, X86_ADD, ins->dreg, ins->dreg);
break;
case 3:
/* LEA r1, [r2 + r2*2] */
amd64_lea_memindex (code, ins->dreg, ins->sreg1, 0, ins->sreg1, 1);
break;
case 5:
/* LEA r1, [r2 + r2*4] */
amd64_lea_memindex (code, ins->dreg, ins->sreg1, 0, ins->sreg1, 2);
break;
case 6:
/* LEA r1, [r2 + r2*2] */
/* ADD r1, r1 */
amd64_lea_memindex (code, ins->dreg, ins->sreg1, 0, ins->sreg1, 1);
amd64_alu_reg_reg (code, X86_ADD, ins->dreg, ins->dreg);
break;
case 9:
/* LEA r1, [r2 + r2*8] */
amd64_lea_memindex (code, ins->dreg, ins->sreg1, 0, ins->sreg1, 3);
break;
case 10:
/* LEA r1, [r2 + r2*4] */
/* ADD r1, r1 */
amd64_lea_memindex (code, ins->dreg, ins->sreg1, 0, ins->sreg1, 2);
amd64_alu_reg_reg (code, X86_ADD, ins->dreg, ins->dreg);
break;
case 12:
/* LEA r1, [r2 + r2*2] */
/* SHL r1, 2 */
amd64_lea_memindex (code, ins->dreg, ins->sreg1, 0, ins->sreg1, 1);
amd64_shift_reg_imm (code, X86_SHL, ins->dreg, 2);
break;
case 25:
/* LEA r1, [r2 + r2*4] */
/* LEA r1, [r1 + r1*4] */
amd64_lea_memindex (code, ins->dreg, ins->sreg1, 0, ins->sreg1, 2);
amd64_lea_memindex (code, ins->dreg, ins->dreg, 0, ins->dreg, 2);
break;
case 100:
/* LEA r1, [r2 + r2*4] */
/* SHL r1, 2 */
/* LEA r1, [r1 + r1*4] */
amd64_lea_memindex (code, ins->dreg, ins->sreg1, 0, ins->sreg1, 2);
amd64_shift_reg_imm (code, X86_SHL, ins->dreg, 2);
amd64_lea_memindex (code, ins->dreg, ins->dreg, 0, ins->dreg, 2);
break;
default:
amd64_imul_reg_reg_imm_size (code, ins->dreg, ins->sreg1, ins->inst_imm, size);
break;
}
break;
}
case OP_LDIV:
case OP_LREM:
/* Regalloc magic makes the div/rem cases the same */
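			/* cdq sign-extends RAX into RDX, so a divisor living in RDX must
			   first be spilled below RSP and divided from memory. */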
if (ins->sreg2 == AMD64_RDX) {
amd64_mov_membase_reg (code, AMD64_RSP, -8, AMD64_RDX, 8);
amd64_cdq (code);
amd64_div_membase (code, AMD64_RSP, -8, TRUE);
} else {
amd64_cdq (code);
amd64_div_reg (code, ins->sreg2, TRUE);
}
break;
case OP_LDIV_UN:
case OP_LREM_UN:
if (ins->sreg2 == AMD64_RDX) {
amd64_mov_membase_reg (code, AMD64_RSP, -8, AMD64_RDX, 8);
amd64_alu_reg_reg (code, X86_XOR, AMD64_RDX, AMD64_RDX);
amd64_div_membase (code, AMD64_RSP, -8, FALSE);
} else {
amd64_alu_reg_reg (code, X86_XOR, AMD64_RDX, AMD64_RDX);
amd64_div_reg (code, ins->sreg2, FALSE);
}
break;
case OP_IDIV:
case OP_IREM:
if (ins->sreg2 == AMD64_RDX) {
amd64_mov_membase_reg (code, AMD64_RSP, -8, AMD64_RDX, 8);
amd64_cdq_size (code, 4);
amd64_div_membase_size (code, AMD64_RSP, -8, TRUE, 4);
} else {
amd64_cdq_size (code, 4);
amd64_div_reg_size (code, ins->sreg2, TRUE, 4);
}
break;
case OP_IDIV_UN:
case OP_IREM_UN:
if (ins->sreg2 == AMD64_RDX) {
amd64_mov_membase_reg (code, AMD64_RSP, -8, AMD64_RDX, 8);
amd64_alu_reg_reg (code, X86_XOR, AMD64_RDX, AMD64_RDX);
amd64_div_membase_size (code, AMD64_RSP, -8, FALSE, 4);
} else {
amd64_alu_reg_reg (code, X86_XOR, AMD64_RDX, AMD64_RDX);
amd64_div_reg_size (code, ins->sreg2, FALSE, 4);
}
break;
case OP_LMUL_OVF:
amd64_imul_reg_reg (code, ins->sreg1, ins->sreg2);
EMIT_COND_SYSTEM_EXCEPTION (X86_CC_O, FALSE, "OverflowException");
break;
case OP_LOR:
amd64_alu_reg_reg (code, X86_OR, ins->sreg1, ins->sreg2);
break;
case OP_OR_IMM:
case OP_LOR_IMM:
g_assert (amd64_is_imm32 (ins->inst_imm));
amd64_alu_reg_imm (code, X86_OR, ins->sreg1, ins->inst_imm);
break;
case OP_LXOR:
amd64_alu_reg_reg (code, X86_XOR, ins->sreg1, ins->sreg2);
break;
case OP_XOR_IMM:
case OP_LXOR_IMM:
g_assert (amd64_is_imm32 (ins->inst_imm));
amd64_alu_reg_imm (code, X86_XOR, ins->sreg1, ins->inst_imm);
break;
case OP_LSHL:
g_assert (ins->sreg2 == AMD64_RCX);
amd64_shift_reg (code, X86_SHL, ins->dreg);
break;
case OP_LSHR:
g_assert (ins->sreg2 == AMD64_RCX);
amd64_shift_reg (code, X86_SAR, ins->dreg);
break;
case OP_SHR_IMM:
case OP_LSHR_IMM:
g_assert (amd64_is_imm32 (ins->inst_imm));
amd64_shift_reg_imm (code, X86_SAR, ins->dreg, ins->inst_imm);
break;
case OP_SHR_UN_IMM:
g_assert (amd64_is_imm32 (ins->inst_imm));
amd64_shift_reg_imm_size (code, X86_SHR, ins->dreg, ins->inst_imm, 4);
break;
case OP_LSHR_UN_IMM:
g_assert (amd64_is_imm32 (ins->inst_imm));
amd64_shift_reg_imm (code, X86_SHR, ins->dreg, ins->inst_imm);
break;
case OP_LSHR_UN:
g_assert (ins->sreg2 == AMD64_RCX);
amd64_shift_reg (code, X86_SHR, ins->dreg);
break;
case OP_SHL_IMM:
case OP_LSHL_IMM:
g_assert (amd64_is_imm32 (ins->inst_imm));
amd64_shift_reg_imm (code, X86_SHL, ins->dreg, ins->inst_imm);
break;
case OP_IADDCC:
case OP_IADD:
amd64_alu_reg_reg_size (code, X86_ADD, ins->sreg1, ins->sreg2, 4);
break;
case OP_IADC:
amd64_alu_reg_reg_size (code, X86_ADC, ins->sreg1, ins->sreg2, 4);
break;
case OP_IADD_IMM:
amd64_alu_reg_imm_size (code, X86_ADD, ins->dreg, ins->inst_imm, 4);
break;
case OP_IADC_IMM:
amd64_alu_reg_imm_size (code, X86_ADC, ins->dreg, ins->inst_imm, 4);
break;
case OP_ISUBCC:
case OP_ISUB:
amd64_alu_reg_reg_size (code, X86_SUB, ins->sreg1, ins->sreg2, 4);
break;
case OP_ISBB:
amd64_alu_reg_reg_size (code, X86_SBB, ins->sreg1, ins->sreg2, 4);
break;
case OP_ISUB_IMM:
amd64_alu_reg_imm_size (code, X86_SUB, ins->dreg, ins->inst_imm, 4);
break;
case OP_ISBB_IMM:
amd64_alu_reg_imm_size (code, X86_SBB, ins->dreg, ins->inst_imm, 4);
break;
case OP_IAND:
amd64_alu_reg_reg_size (code, X86_AND, ins->sreg1, ins->sreg2, 4);
break;
case OP_IAND_IMM:
amd64_alu_reg_imm_size (code, X86_AND, ins->sreg1, ins->inst_imm, 4);
break;
case OP_IOR:
amd64_alu_reg_reg_size (code, X86_OR, ins->sreg1, ins->sreg2, 4);
break;
case OP_IOR_IMM:
amd64_alu_reg_imm_size (code, X86_OR, ins->sreg1, ins->inst_imm, 4);
break;
case OP_IXOR:
amd64_alu_reg_reg_size (code, X86_XOR, ins->sreg1, ins->sreg2, 4);
break;
case OP_IXOR_IMM:
amd64_alu_reg_imm_size (code, X86_XOR, ins->sreg1, ins->inst_imm, 4);
break;
case OP_INEG:
amd64_neg_reg_size (code, ins->sreg1, 4);
break;
case OP_INOT:
amd64_not_reg_size (code, ins->sreg1, 4);
break;
case OP_ISHL:
g_assert (ins->sreg2 == AMD64_RCX);
amd64_shift_reg_size (code, X86_SHL, ins->dreg, 4);
break;
case OP_ISHR:
g_assert (ins->sreg2 == AMD64_RCX);
amd64_shift_reg_size (code, X86_SAR, ins->dreg, 4);
break;
case OP_ISHR_IMM:
amd64_shift_reg_imm_size (code, X86_SAR, ins->dreg, ins->inst_imm, 4);
break;
case OP_ISHR_UN_IMM:
amd64_shift_reg_imm_size (code, X86_SHR, ins->dreg, ins->inst_imm, 4);
break;
case OP_ISHR_UN:
g_assert (ins->sreg2 == AMD64_RCX);
amd64_shift_reg_size (code, X86_SHR, ins->dreg, 4);
break;
case OP_ISHL_IMM:
amd64_shift_reg_imm_size (code, X86_SHL, ins->dreg, ins->inst_imm, 4);
break;
case OP_IMUL:
amd64_imul_reg_reg_size (code, ins->sreg1, ins->sreg2, 4);
break;
case OP_IMUL_OVF:
amd64_imul_reg_reg_size (code, ins->sreg1, ins->sreg2, 4);
EMIT_COND_SYSTEM_EXCEPTION (X86_CC_O, FALSE, "OverflowException");
break;
case OP_IMUL_OVF_UN:
case OP_LMUL_OVF_UN: {
/* the mul operation and the exception check should most likely be split */
int non_eax_reg, saved_eax = FALSE, saved_edx = FALSE;
int size = (ins->opcode == OP_IMUL_OVF_UN) ? 4 : 8;
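			/* Unsigned MUL has fixed operands (RDX:RAX = RAX * reg), so move an
			   operand into RAX and preserve whatever gets clobbered. */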
/*g_assert (ins->sreg2 == X86_EAX);
g_assert (ins->dreg == X86_EAX);*/
if (ins->sreg2 == X86_EAX) {
non_eax_reg = ins->sreg1;
} else if (ins->sreg1 == X86_EAX) {
non_eax_reg = ins->sreg2;
} else {
/* no need to save since we're going to store to it anyway */
if (ins->dreg != X86_EAX) {
saved_eax = TRUE;
amd64_push_reg (code, X86_EAX);
}
amd64_mov_reg_reg (code, X86_EAX, ins->sreg1, size);
non_eax_reg = ins->sreg2;
}
if (ins->dreg == X86_EDX) {
if (!saved_eax) {
saved_eax = TRUE;
amd64_push_reg (code, X86_EAX);
}
} else {
saved_edx = TRUE;
amd64_push_reg (code, X86_EDX);
}
amd64_mul_reg_size (code, non_eax_reg, FALSE, size);
/* save before the check since pop and mov don't change the flags */
if (ins->dreg != X86_EAX)
amd64_mov_reg_reg (code, ins->dreg, X86_EAX, size);
if (saved_edx)
amd64_pop_reg (code, X86_EDX);
if (saved_eax)
amd64_pop_reg (code, X86_EAX);
EMIT_COND_SYSTEM_EXCEPTION (X86_CC_O, FALSE, "OverflowException");
break;
}
case OP_ICOMPARE:
amd64_alu_reg_reg_size (code, X86_CMP, ins->sreg1, ins->sreg2, 4);
break;
case OP_ICOMPARE_IMM:
amd64_alu_reg_imm_size (code, X86_CMP, ins->sreg1, ins->inst_imm, 4);
break;
case OP_IBEQ:
case OP_IBLT:
case OP_IBGT:
case OP_IBGE:
case OP_IBLE:
case OP_LBEQ:
case OP_LBLT:
case OP_LBGT:
case OP_LBGE:
case OP_LBLE:
case OP_IBNE_UN:
case OP_IBLT_UN:
case OP_IBGT_UN:
case OP_IBGE_UN:
case OP_IBLE_UN:
case OP_LBNE_UN:
case OP_LBLT_UN:
case OP_LBGT_UN:
case OP_LBGE_UN:
case OP_LBLE_UN:
EMIT_COND_BRANCH (ins, cc_table [mono_opcode_to_cond (ins->opcode)], cc_signed_table [mono_opcode_to_cond (ins->opcode)]);
break;
case OP_CMOV_IEQ:
case OP_CMOV_IGE:
case OP_CMOV_IGT:
case OP_CMOV_ILE:
case OP_CMOV_ILT:
case OP_CMOV_INE_UN:
case OP_CMOV_IGE_UN:
case OP_CMOV_IGT_UN:
case OP_CMOV_ILE_UN:
case OP_CMOV_ILT_UN:
case OP_CMOV_LEQ:
case OP_CMOV_LGE:
case OP_CMOV_LGT:
case OP_CMOV_LLE:
case OP_CMOV_LLT:
case OP_CMOV_LNE_UN:
case OP_CMOV_LGE_UN:
case OP_CMOV_LGT_UN:
case OP_CMOV_LLE_UN:
case OP_CMOV_LLT_UN:
g_assert (ins->dreg == ins->sreg1);
/* This needs to operate on 64 bit values */
amd64_cmov_reg (code, cc_table [mono_opcode_to_cond (ins->opcode)], cc_signed_table [mono_opcode_to_cond (ins->opcode)], ins->dreg, ins->sreg2);
break;
case OP_LNOT:
amd64_not_reg (code, ins->sreg1);
break;
case OP_LNEG:
amd64_neg_reg (code, ins->sreg1);
break;
case OP_ICONST:
case OP_I8CONST:
if ((((guint64)ins->inst_c0) >> 32) == 0 && !mini_debug_options.single_imm_size)
amd64_mov_reg_imm_size (code, ins->dreg, ins->inst_c0, 4);
else
amd64_mov_reg_imm_size (code, ins->dreg, ins->inst_c0, 8);
break;
case OP_AOTCONST:
mono_add_patch_info (cfg, offset, (MonoJumpInfoType)(gsize)ins->inst_i1, ins->inst_p0);
amd64_mov_reg_membase (code, ins->dreg, AMD64_RIP, 0, sizeof(gpointer));
break;
case OP_JUMP_TABLE:
mono_add_patch_info (cfg, offset, (MonoJumpInfoType)(gsize)ins->inst_i1, ins->inst_p0);
amd64_mov_reg_imm_size (code, ins->dreg, 0, 8);
break;
case OP_MOVE:
if (ins->dreg != ins->sreg1)
amd64_mov_reg_reg (code, ins->dreg, ins->sreg1, sizeof (target_mgreg_t));
break;
case OP_AMD64_SET_XMMREG_R4: {
if (cfg->r4fp) {
if (ins->dreg != ins->sreg1)
amd64_sse_movss_reg_reg (code, ins->dreg, ins->sreg1);
} else {
amd64_sse_cvtsd2ss_reg_reg (code, ins->dreg, ins->sreg1);
}
break;
}
case OP_AMD64_SET_XMMREG_R8: {
if (ins->dreg != ins->sreg1)
amd64_sse_movsd_reg_reg (code, ins->dreg, ins->sreg1);
break;
}
case OP_TAILCALL_PARAMETER:
			// This opcode only helps compute the size of the subsequent
			// OP_TAILCALL; it contributes no code.
g_assert (ins->next);
break;
case OP_TAILCALL:
case OP_TAILCALL_REG:
case OP_TAILCALL_MEMBASE: {
call = (MonoCallInst*)ins;
int i, save_area_offset;
gboolean tailcall_membase = (ins->opcode == OP_TAILCALL_MEMBASE);
gboolean tailcall_reg = (ins->opcode == OP_TAILCALL_REG);
g_assert (!cfg->method->save_lmf);
max_len += AMD64_NREG * 4;
max_len += call->stack_usage / sizeof (target_mgreg_t) * ins_get_size (OP_TAILCALL_PARAMETER);
code = realloc_code (cfg, max_len);
// FIXME hardcoding RAX here is not ideal.
if (tailcall_reg) {
int const reg = ins->sreg1;
g_assert (reg > -1);
if (reg != AMD64_RAX)
amd64_mov_reg_reg (code, AMD64_RAX, reg, 8);
} else if (tailcall_membase) {
int const reg = ins->sreg1;
g_assert (reg > -1);
amd64_mov_reg_membase (code, AMD64_RAX, reg, ins->inst_offset, 8);
} else {
if (cfg->compile_aot) {
mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_METHOD_JUMP, call->method);
amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RIP, 0, 8);
} else {
// FIXME Patch data instead of code.
guint32 pad_size = (guint32)((code + 2 - cfg->native_code) % 8);
if (pad_size)
amd64_padding (code, 8 - pad_size);
mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_METHOD_JUMP, call->method);
amd64_set_reg_template (code, AMD64_RAX);
}
}
/* Restore callee saved registers */
save_area_offset = cfg->arch.reg_save_area_offset;
for (i = 0; i < AMD64_NREG; ++i)
if (AMD64_IS_CALLEE_SAVED_REG (i) && (cfg->used_int_regs & ((regmask_t)1 << i))) {
amd64_mov_reg_membase (code, i, cfg->frame_reg, save_area_offset, 8);
save_area_offset += 8;
}
if (cfg->arch.omit_fp) {
if (cfg->arch.stack_alloc_size)
amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, cfg->arch.stack_alloc_size);
// FIXME:
if (call->stack_usage)
NOT_IMPLEMENTED;
} else {
amd64_push_reg (code, AMD64_RAX);
/* Copy arguments on the stack to our argument area */
// FIXME use rep mov for constant code size, before nonvolatiles
// restored, first saving rsi, rdi into volatiles
for (i = 0; i < call->stack_usage; i += sizeof (target_mgreg_t)) {
amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RSP, i + 8, sizeof (target_mgreg_t));
amd64_mov_membase_reg (code, AMD64_RBP, ARGS_OFFSET + i, AMD64_RAX, sizeof (target_mgreg_t));
}
amd64_pop_reg (code, AMD64_RAX);
#ifdef TARGET_WIN32
amd64_lea_membase (code, AMD64_RSP, AMD64_RBP, 0);
amd64_pop_reg (code, AMD64_RBP);
mono_emit_unwind_op_same_value (cfg, code, AMD64_RBP);
#else
amd64_leave (code);
#endif
}
#ifdef TARGET_WIN32
// Redundant REX byte indicates a tailcall to the native unwinder. It means nothing to the processor.
// https://github.com/dotnet/coreclr/blob/966dabb5bb3c4bf1ea885e1e8dc6528e8c64dc4f/src/unwinder/amd64/unwinder_amd64.cpp#L1394
// FIXME This should be jmp rip+32 for AOT direct to same assembly.
// FIXME This should be jmp [rip+32] for AOT direct to not-same assembly (through data).
// FIXME This should be jmp [rip+32] for JIT direct -- patch data instead of code.
// This is only close to ideal for tailcall_membase, and even then it should
// have a more dynamic register allocation.
x86_imm_emit8 (code, 0x48);
amd64_jump_reg (code, AMD64_RAX);
#else
			// NT does not have the varargs rax use, and the NT ABI does not have a red zone.
			// Use red-zone mov/jmp instead of push/ret to preserve call/ret speculation stack.
			// FIXME Just like NT, the direct cases are not ideal.
amd64_mov_membase_reg (code, AMD64_RSP, -8, AMD64_RAX, 8);
code = amd64_handle_varargs_call (cfg, code, call, FALSE);
amd64_jump_membase (code, AMD64_RSP, -8);
#endif
ins->flags |= MONO_INST_GC_CALLSITE;
ins->backend.pc_offset = code - cfg->native_code;
break;
}
case OP_CHECK_THIS:
/* ensure ins->sreg1 is not NULL */
amd64_alu_membase_imm_size (code, X86_CMP, ins->sreg1, 0, 0, 4);
break;
case OP_ARGLIST: {
amd64_lea_membase (code, AMD64_R11, cfg->frame_reg, cfg->sig_cookie);
amd64_mov_membase_reg (code, ins->sreg1, 0, AMD64_R11, sizeof(gpointer));
break;
}
case OP_CALL:
case OP_FCALL:
case OP_RCALL:
case OP_LCALL:
case OP_VCALL:
case OP_VCALL2:
case OP_VOIDCALL:
call = (MonoCallInst*)ins;
code = amd64_handle_varargs_call (cfg, code, call, FALSE);
code = emit_call (cfg, call, code, MONO_JIT_ICALL_ZeroIsReserved);
ins->flags |= MONO_INST_GC_CALLSITE;
ins->backend.pc_offset = code - cfg->native_code;
code = emit_move_return_value (cfg, ins, code);
break;
case OP_FCALL_REG:
case OP_RCALL_REG:
case OP_LCALL_REG:
case OP_VCALL_REG:
case OP_VCALL2_REG:
case OP_VOIDCALL_REG:
case OP_CALL_REG:
call = (MonoCallInst*)ins;
if (AMD64_IS_ARGUMENT_REG (ins->sreg1)) {
amd64_mov_reg_reg (code, AMD64_R11, ins->sreg1, 8);
ins->sreg1 = AMD64_R11;
}
code = amd64_handle_varargs_call (cfg, code, call, TRUE);
amd64_call_reg (code, ins->sreg1);
ins->flags |= MONO_INST_GC_CALLSITE;
ins->backend.pc_offset = code - cfg->native_code;
code = emit_move_return_value (cfg, ins, code);
break;
case OP_FCALL_MEMBASE:
case OP_RCALL_MEMBASE:
case OP_LCALL_MEMBASE:
case OP_VCALL_MEMBASE:
case OP_VCALL2_MEMBASE:
case OP_VOIDCALL_MEMBASE:
case OP_CALL_MEMBASE:
call = (MonoCallInst*)ins;
amd64_call_membase (code, ins->sreg1, ins->inst_offset);
ins->flags |= MONO_INST_GC_CALLSITE;
ins->backend.pc_offset = code - cfg->native_code;
code = emit_move_return_value (cfg, ins, code);
break;
case OP_DYN_CALL: {
int i, limit_reg, index_reg, src_reg, dst_reg;
MonoInst *var = cfg->dyn_call_var;
guint8 *label;
guint8 *buf [16];
g_assert (var->opcode == OP_REGOFFSET);
/* r11 = args buffer filled by mono_arch_get_dyn_call_args () */
amd64_mov_reg_reg (code, AMD64_R11, ins->sreg1, 8);
/* r10 = ftn */
amd64_mov_reg_reg (code, AMD64_R10, ins->sreg2, 8);
/* Save args buffer */
amd64_mov_membase_reg (code, var->inst_basereg, var->inst_offset, AMD64_R11, 8);
/* Set fp arg regs */
amd64_mov_reg_membase (code, AMD64_RAX, AMD64_R11, MONO_STRUCT_OFFSET (DynCallArgs, has_fp), sizeof (target_mgreg_t));
amd64_test_reg_reg (code, AMD64_RAX, AMD64_RAX);
label = code;
amd64_branch8 (code, X86_CC_Z, -1, 1);
for (i = 0; i < FLOAT_PARAM_REGS; ++i)
amd64_sse_movsd_reg_membase (code, i, AMD64_R11, MONO_STRUCT_OFFSET (DynCallArgs, fregs) + (i * sizeof (double)));
amd64_patch (label, code);
/* Allocate param area */
/* This doesn't need to be freed since OP_DYN_CALL is never called in a loop */
amd64_mov_reg_membase (code, AMD64_RAX, AMD64_R11, MONO_STRUCT_OFFSET (DynCallArgs, nstack_args), 8);
amd64_shift_reg_imm (code, X86_SHL, AMD64_RAX, 3);
amd64_alu_reg_reg (code, X86_SUB, AMD64_RSP, AMD64_RAX);
/* Set stack args */
/* rax/rcx/rdx/r8/r9 is scratch */
limit_reg = AMD64_RAX;
index_reg = AMD64_RCX;
src_reg = AMD64_R8;
dst_reg = AMD64_R9;
amd64_mov_reg_membase (code, limit_reg, AMD64_R11, MONO_STRUCT_OFFSET (DynCallArgs, nstack_args), 8);
amd64_mov_reg_imm (code, index_reg, 0);
amd64_lea_membase (code, src_reg, AMD64_R11, MONO_STRUCT_OFFSET (DynCallArgs, regs) + ((PARAM_REGS) * sizeof (target_mgreg_t)));
amd64_mov_reg_reg (code, dst_reg, AMD64_RSP, 8);
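			/* Copy nstack_args machine words from the args buffer (past the
			   register arguments) into the freshly allocated param area. */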
buf [0] = code;
x86_jump8 (code, 0);
buf [1] = code;
amd64_mov_reg_membase (code, AMD64_RDX, src_reg, 0, 8);
amd64_mov_membase_reg (code, dst_reg, 0, AMD64_RDX, 8);
amd64_alu_reg_imm (code, X86_ADD, index_reg, 1);
amd64_alu_reg_imm (code, X86_ADD, src_reg, 8);
amd64_alu_reg_imm (code, X86_ADD, dst_reg, 8);
amd64_patch (buf [0], code);
amd64_alu_reg_reg (code, X86_CMP, index_reg, limit_reg);
buf [2] = code;
x86_branch8 (code, X86_CC_LT, 0, FALSE);
amd64_patch (buf [2], buf [1]);
/* Set argument registers */
for (i = 0; i < PARAM_REGS; ++i)
amd64_mov_reg_membase (code, param_regs [i], AMD64_R11, MONO_STRUCT_OFFSET (DynCallArgs, regs) + (i * sizeof (target_mgreg_t)), sizeof (target_mgreg_t));
/* Make the call */
amd64_call_reg (code, AMD64_R10);
ins->flags |= MONO_INST_GC_CALLSITE;
ins->backend.pc_offset = code - cfg->native_code;
/* Save result */
amd64_mov_reg_membase (code, AMD64_R11, var->inst_basereg, var->inst_offset, 8);
amd64_mov_membase_reg (code, AMD64_R11, MONO_STRUCT_OFFSET (DynCallArgs, res), AMD64_RAX, 8);
amd64_sse_movsd_membase_reg (code, AMD64_R11, MONO_STRUCT_OFFSET (DynCallArgs, fregs), AMD64_XMM0);
amd64_sse_movsd_membase_reg (code, AMD64_R11, MONO_STRUCT_OFFSET (DynCallArgs, fregs) + sizeof (double), AMD64_XMM1);
break;
}
case OP_AMD64_SAVE_SP_TO_LMF: {
MonoInst *lmf_var = cfg->lmf_var;
amd64_mov_membase_reg (code, lmf_var->inst_basereg, lmf_var->inst_offset + MONO_STRUCT_OFFSET (MonoLMF, rsp), AMD64_RSP, 8);
break;
}
case OP_X86_PUSH:
g_assert_not_reached ();
amd64_push_reg (code, ins->sreg1);
break;
case OP_X86_PUSH_IMM:
g_assert_not_reached ();
g_assert (amd64_is_imm32 (ins->inst_imm));
amd64_push_imm (code, ins->inst_imm);
break;
case OP_X86_PUSH_MEMBASE:
g_assert_not_reached ();
amd64_push_membase (code, ins->inst_basereg, ins->inst_offset);
break;
case OP_X86_PUSH_OBJ: {
int size = ALIGN_TO (ins->inst_imm, 8);
g_assert_not_reached ();
amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, size);
amd64_push_reg (code, AMD64_RDI);
amd64_push_reg (code, AMD64_RSI);
amd64_push_reg (code, AMD64_RCX);
if (ins->inst_offset)
amd64_lea_membase (code, AMD64_RSI, ins->inst_basereg, ins->inst_offset);
else
amd64_mov_reg_reg (code, AMD64_RSI, ins->inst_basereg, 8);
amd64_lea_membase (code, AMD64_RDI, AMD64_RSP, (3 * 8));
amd64_mov_reg_imm (code, AMD64_RCX, (size >> 3));
amd64_cld (code);
amd64_prefix (code, X86_REP_PREFIX);
amd64_movsd (code);
amd64_pop_reg (code, AMD64_RCX);
amd64_pop_reg (code, AMD64_RSI);
amd64_pop_reg (code, AMD64_RDI);
break;
}
case OP_GENERIC_CLASS_INIT: {
guint8 *jump;
g_assert (ins->sreg1 == MONO_AMD64_ARG_REG1);
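			/* Fast path: skip the init icall when the vtable is already initialized. */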
amd64_test_membase_imm_size (code, ins->sreg1, MONO_STRUCT_OFFSET (MonoVTable, initialized), 1, 1);
jump = code;
amd64_branch8 (code, X86_CC_NZ, -1, 1);
code = emit_call (cfg, NULL, code, MONO_JIT_ICALL_mono_generic_class_init);
ins->flags |= MONO_INST_GC_CALLSITE;
ins->backend.pc_offset = code - cfg->native_code;
x86_patch (jump, code);
break;
}
case OP_X86_LEA:
amd64_lea_memindex (code, ins->dreg, ins->sreg1, ins->inst_imm, ins->sreg2, ins->backend.shift_amount);
break;
case OP_X86_LEA_MEMBASE:
amd64_lea4_membase (code, ins->dreg, ins->sreg1, ins->inst_imm);
break;
case OP_AMD64_LEA_MEMBASE:
amd64_lea_membase (code, ins->dreg, ins->sreg1, ins->inst_imm);
break;
case OP_X86_XCHG:
amd64_xchg_reg_reg (code, ins->sreg1, ins->sreg2, 4);
break;
case OP_LOCALLOC:
/* keep alignment */
amd64_alu_reg_imm (code, X86_ADD, ins->sreg1, MONO_ARCH_FRAME_ALIGNMENT - 1);
amd64_alu_reg_imm (code, X86_AND, ins->sreg1, ~(MONO_ARCH_FRAME_ALIGNMENT - 1));
code = mono_emit_stack_alloc (cfg, code, ins);
amd64_mov_reg_reg (code, ins->dreg, AMD64_RSP, 8);
if (cfg->param_area)
amd64_alu_reg_imm (code, X86_ADD, ins->dreg, cfg->param_area);
break;
case OP_LOCALLOC_IMM: {
guint32 size = ins->inst_imm;
size = (size + (MONO_ARCH_FRAME_ALIGNMENT - 1)) & ~ (MONO_ARCH_FRAME_ALIGNMENT - 1);
if (ins->flags & MONO_INST_INIT) {
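/* For small frames, zero the memory inline; larger ones go through mono_emit_stack_alloc */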
if (size < 64) {
int i;
amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, size);
amd64_alu_reg_reg (code, X86_XOR, ins->dreg, ins->dreg);
for (i = 0; i < size; i += 8)
amd64_mov_membase_reg (code, AMD64_RSP, i, ins->dreg, 8);
amd64_mov_reg_reg (code, ins->dreg, AMD64_RSP, 8);
} else {
amd64_mov_reg_imm (code, ins->dreg, size);
ins->sreg1 = ins->dreg;
code = mono_emit_stack_alloc (cfg, code, ins);
amd64_mov_reg_reg (code, ins->dreg, AMD64_RSP, 8);
}
} else {
amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, size);
amd64_mov_reg_reg (code, ins->dreg, AMD64_RSP, 8);
}
if (cfg->param_area)
amd64_alu_reg_imm (code, X86_ADD, ins->dreg, cfg->param_area);
break;
}
case OP_THROW: {
amd64_mov_reg_reg (code, AMD64_ARG_REG1, ins->sreg1, 8);
code = emit_call (cfg, NULL, code, MONO_JIT_ICALL_mono_arch_throw_exception);
ins->flags |= MONO_INST_GC_CALLSITE;
ins->backend.pc_offset = code - cfg->native_code;
break;
}
case OP_RETHROW: {
amd64_mov_reg_reg (code, AMD64_ARG_REG1, ins->sreg1, 8);
code = emit_call (cfg, NULL, code, MONO_JIT_ICALL_mono_arch_rethrow_exception);
ins->flags |= MONO_INST_GC_CALLSITE;
ins->backend.pc_offset = code - cfg->native_code;
break;
}
case OP_CALL_HANDLER:
/* Align stack */
amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);
mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_target_bb);
amd64_call_imm (code, 0);
/*
* ins->inst_eh_blocks and bb->clause_holes are part of the same GList.
* Holes from bb->clause_holes will be added separately for the entire
* basic block. Add only the rest of them.
*/
for (GList *tmp = ins->inst_eh_blocks; tmp != bb->clause_holes; tmp = tmp->prev)
mono_cfg_add_try_hole (cfg, ((MonoLeaveClause *) tmp->data)->clause, code, bb);
/* Restore stack alignment */
amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);
break;
case OP_START_HANDLER: {
/*
 * Even though we're saving RSP, use sizeof (gpointer)
 * because spvar is of type IntPtr.
 * See mono_create_spvar_for_region.
 */
MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
amd64_mov_membase_reg (code, spvar->inst_basereg, spvar->inst_offset, AMD64_RSP, sizeof(gpointer));
if ((MONO_BBLOCK_IS_IN_REGION (bb, MONO_REGION_FINALLY) ||
MONO_BBLOCK_IS_IN_REGION (bb, MONO_REGION_FILTER) ||
MONO_BBLOCK_IS_IN_REGION (bb, MONO_REGION_FAULT)) &&
cfg->param_area) {
amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, ALIGN_TO (cfg->param_area, MONO_ARCH_FRAME_ALIGNMENT));
}
break;
}
case OP_ENDFINALLY: {
MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
amd64_mov_reg_membase (code, AMD64_RSP, spvar->inst_basereg, spvar->inst_offset, sizeof(gpointer));
amd64_ret (code);
break;
}
case OP_ENDFILTER: {
MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
amd64_mov_reg_membase (code, AMD64_RSP, spvar->inst_basereg, spvar->inst_offset, sizeof(gpointer));
/* The local allocator will put the result into RAX */
amd64_ret (code);
break;
}
case OP_GET_EX_OBJ:
if (ins->dreg != AMD64_RAX)
amd64_mov_reg_reg (code, ins->dreg, AMD64_RAX, sizeof (target_mgreg_t));
break;
case OP_LABEL:
ins->inst_c0 = code - cfg->native_code;
break;
case OP_BR:
//g_print ("target: %p, next: %p, curr: %p, last: %p\n", ins->inst_target_bb, bb->next_bb, ins, bb->last_ins);
//if ((ins->inst_target_bb == bb->next_bb) && ins == bb->last_ins)
//break;
if (ins->inst_target_bb->native_offset) {
amd64_jump_code (code, cfg->native_code + ins->inst_target_bb->native_offset);
} else {
mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_target_bb);
if (optimize_branch_pred &&
x86_is_imm8 (ins->inst_target_bb->max_offset - offset))
x86_jump8 (code, 0);
else
x86_jump32 (code, 0);
}
break;
case OP_BR_REG:
amd64_jump_reg (code, ins->sreg1);
break;
case OP_ICNEQ:
case OP_ICGE:
case OP_ICLE:
case OP_ICGE_UN:
case OP_ICLE_UN:
case OP_CEQ:
case OP_LCEQ:
case OP_ICEQ:
case OP_CLT:
case OP_LCLT:
case OP_ICLT:
case OP_CGT:
case OP_ICGT:
case OP_LCGT:
case OP_CLT_UN:
case OP_LCLT_UN:
case OP_ICLT_UN:
case OP_CGT_UN:
case OP_LCGT_UN:
case OP_ICGT_UN:
amd64_set_reg (code, cc_table [mono_opcode_to_cond (ins->opcode)], ins->dreg, cc_signed_table [mono_opcode_to_cond (ins->opcode)]);
amd64_widen_reg (code, ins->dreg, ins->dreg, FALSE, FALSE);
break;
case OP_COND_EXC_EQ:
case OP_COND_EXC_NE_UN:
case OP_COND_EXC_LT:
case OP_COND_EXC_LT_UN:
case OP_COND_EXC_GT:
case OP_COND_EXC_GT_UN:
case OP_COND_EXC_GE:
case OP_COND_EXC_GE_UN:
case OP_COND_EXC_LE:
case OP_COND_EXC_LE_UN:
case OP_COND_EXC_IEQ:
case OP_COND_EXC_INE_UN:
case OP_COND_EXC_ILT:
case OP_COND_EXC_ILT_UN:
case OP_COND_EXC_IGT:
case OP_COND_EXC_IGT_UN:
case OP_COND_EXC_IGE:
case OP_COND_EXC_IGE_UN:
case OP_COND_EXC_ILE:
case OP_COND_EXC_ILE_UN:
EMIT_COND_SYSTEM_EXCEPTION (cc_table [mono_opcode_to_cond (ins->opcode)], cc_signed_table [mono_opcode_to_cond (ins->opcode)], (const char *)ins->inst_p1);
break;
case OP_COND_EXC_OV:
case OP_COND_EXC_NO:
case OP_COND_EXC_C:
case OP_COND_EXC_NC:
EMIT_COND_SYSTEM_EXCEPTION (branch_cc_table [ins->opcode - OP_COND_EXC_EQ],
(ins->opcode < OP_COND_EXC_NE_UN), (const char *)ins->inst_p1);
break;
case OP_COND_EXC_IOV:
case OP_COND_EXC_INO:
case OP_COND_EXC_IC:
case OP_COND_EXC_INC:
EMIT_COND_SYSTEM_EXCEPTION (branch_cc_table [ins->opcode - OP_COND_EXC_IEQ],
(ins->opcode < OP_COND_EXC_INE_UN), (const char *)ins->inst_p1);
break;
/* floating point opcodes */
case OP_R8CONST: {
double d = *(double *)ins->inst_p0;
if ((d == 0.0) && (mono_signbit (d) == 0)) {
amd64_sse_xorpd_reg_reg (code, ins->dreg, ins->dreg);
} else if (cfg->compile_aot && cfg->code_exec_only) {
mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_R8_GOT, ins->inst_p0);
amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, sizeof(gpointer));
amd64_sse_movsd_reg_membase (code, ins->dreg, AMD64_R11, 0);
} else {
mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_R8, ins->inst_p0);
amd64_sse_movsd_reg_membase (code, ins->dreg, AMD64_RIP, 0);
}
break;
}
case OP_R4CONST: {
float f = *(float *)ins->inst_p0;
if ((f == 0.0) && (mono_signbit (f) == 0)) {
if (cfg->r4fp)
amd64_sse_xorps_reg_reg (code, ins->dreg, ins->dreg);
else
amd64_sse_xorpd_reg_reg (code, ins->dreg, ins->dreg);
} else {
if (cfg->compile_aot && cfg->code_exec_only) {
mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_R4_GOT, ins->inst_p0);
amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, sizeof(gpointer));
amd64_sse_movss_reg_membase (code, ins->dreg, AMD64_R11, 0);
} else {
mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_R4, ins->inst_p0);
amd64_sse_movss_reg_membase (code, ins->dreg, AMD64_RIP, 0);
}
if (!cfg->r4fp)
amd64_sse_cvtss2sd_reg_reg (code, ins->dreg, ins->dreg);
}
break;
}
case OP_STORER8_MEMBASE_REG:
amd64_sse_movsd_membase_reg (code, ins->inst_destbasereg, ins->inst_offset, ins->sreg1);
break;
case OP_LOADR8_MEMBASE:
amd64_sse_movsd_reg_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
break;
case OP_STORER4_MEMBASE_REG:
if (cfg->r4fp) {
amd64_sse_movss_membase_reg (code, ins->inst_destbasereg, ins->inst_offset, ins->sreg1);
} else {
/* This requires a double->single conversion */
amd64_sse_cvtsd2ss_reg_reg (code, MONO_ARCH_FP_SCRATCH_REG, ins->sreg1);
amd64_sse_movss_membase_reg (code, ins->inst_destbasereg, ins->inst_offset, MONO_ARCH_FP_SCRATCH_REG);
}
break;
case OP_LOADR4_MEMBASE:
if (cfg->r4fp) {
amd64_sse_movss_reg_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
} else {
amd64_sse_movss_reg_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
amd64_sse_cvtss2sd_reg_reg (code, ins->dreg, ins->dreg);
}
break;
case OP_ICONV_TO_R4:
if (cfg->r4fp) {
amd64_sse_cvtsi2ss_reg_reg_size (code, ins->dreg, ins->sreg1, 4);
} else {
amd64_sse_cvtsi2ss_reg_reg_size (code, ins->dreg, ins->sreg1, 4);
amd64_sse_cvtss2sd_reg_reg (code, ins->dreg, ins->dreg);
}
break;
case OP_ICONV_TO_R8:
amd64_sse_cvtsi2sd_reg_reg_size (code, ins->dreg, ins->sreg1, 4);
break;
case OP_LCONV_TO_R4:
if (cfg->r4fp) {
amd64_sse_cvtsi2ss_reg_reg (code, ins->dreg, ins->sreg1);
} else {
amd64_sse_cvtsi2ss_reg_reg (code, ins->dreg, ins->sreg1);
amd64_sse_cvtss2sd_reg_reg (code, ins->dreg, ins->dreg);
}
break;
case OP_LCONV_TO_R8:
amd64_sse_cvtsi2sd_reg_reg (code, ins->dreg, ins->sreg1);
break;
case OP_FCONV_TO_R4:
if (cfg->r4fp) {
amd64_sse_cvtsd2ss_reg_reg (code, ins->dreg, ins->sreg1);
} else {
amd64_sse_cvtsd2ss_reg_reg (code, ins->dreg, ins->sreg1);
amd64_sse_cvtss2sd_reg_reg (code, ins->dreg, ins->dreg);
}
break;
case OP_FCONV_TO_I1:
code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 1, TRUE);
break;
case OP_FCONV_TO_U1:
code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 1, FALSE);
break;
case OP_FCONV_TO_I2:
code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 2, TRUE);
break;
case OP_FCONV_TO_U2:
code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 2, FALSE);
break;
case OP_FCONV_TO_U4:
code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 4, FALSE);
break;
case OP_FCONV_TO_I4:
code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 4, TRUE);
break;
case OP_FCONV_TO_I:
case OP_FCONV_TO_I8:
code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 8, TRUE);
break;
case OP_RCONV_TO_I1:
amd64_sse_cvtss2si_reg_reg_size (code, ins->dreg, ins->sreg1, 4);
amd64_widen_reg (code, ins->dreg, ins->dreg, TRUE, FALSE);
break;
case OP_RCONV_TO_U1:
amd64_sse_cvtss2si_reg_reg_size (code, ins->dreg, ins->sreg1, 4);
amd64_widen_reg (code, ins->dreg, ins->dreg, FALSE, FALSE);
break;
case OP_RCONV_TO_I2:
amd64_sse_cvtss2si_reg_reg_size (code, ins->dreg, ins->sreg1, 4);
amd64_widen_reg (code, ins->dreg, ins->dreg, TRUE, TRUE);
break;
case OP_RCONV_TO_U2:
amd64_sse_cvtss2si_reg_reg_size (code, ins->dreg, ins->sreg1, 4);
amd64_widen_reg (code, ins->dreg, ins->dreg, FALSE, TRUE);
break;
case OP_RCONV_TO_I4:
amd64_sse_cvtss2si_reg_reg_size (code, ins->dreg, ins->sreg1, 4);
break;
case OP_RCONV_TO_U4:
// Use 8 as the register size so the NaN/Inf conversion result is truncated to 0
amd64_sse_cvtss2si_reg_reg (code, ins->dreg, ins->sreg1);
break;
case OP_RCONV_TO_I8:
case OP_RCONV_TO_I:
amd64_sse_cvtss2si_reg_reg_size (code, ins->dreg, ins->sreg1, 8);
break;
case OP_RCONV_TO_R8:
amd64_sse_cvtss2sd_reg_reg (code, ins->dreg, ins->sreg1);
break;
case OP_RCONV_TO_R4:
if (ins->dreg != ins->sreg1)
amd64_sse_movss_reg_reg (code, ins->dreg, ins->sreg1);
break;
case OP_LCONV_TO_R_UN: {
guint8 *br [2];
/* Based on gcc code */
amd64_test_reg_reg (code, ins->sreg1, ins->sreg1);
br [0] = code; x86_branch8 (code, X86_CC_S, 0, TRUE);
/* Positive case */
amd64_sse_cvtsi2sd_reg_reg (code, ins->dreg, ins->sreg1);
br [1] = code; x86_jump8 (code, 0);
amd64_patch (br [0], code);
/* Negative case */
/* Save to the red zone */
amd64_mov_membase_reg (code, AMD64_RSP, -8, AMD64_RAX, 8);
amd64_mov_membase_reg (code, AMD64_RSP, -16, AMD64_RCX, 8);
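/* Compute (x >> 1) | (x & 1) (round to odd), convert as signed, then double the
 * result; this converts values with the sign bit set correctly */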
amd64_mov_reg_reg (code, AMD64_RCX, ins->sreg1, 8);
amd64_mov_reg_reg (code, AMD64_RAX, ins->sreg1, 8);
amd64_alu_reg_imm (code, X86_AND, AMD64_RCX, 1);
amd64_shift_reg_imm (code, X86_SHR, AMD64_RAX, 1);
amd64_alu_reg_reg (code, X86_OR, AMD64_RAX, AMD64_RCX);
amd64_sse_cvtsi2sd_reg_reg (code, ins->dreg, AMD64_RAX);
amd64_sse_addsd_reg_reg (code, ins->dreg, ins->dreg);
/* Restore */
amd64_mov_reg_membase (code, AMD64_RCX, AMD64_RSP, -16, 8);
amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RSP, -8, 8);
amd64_patch (br [1], code);
break;
}
case OP_LCONV_TO_OVF_U4:
amd64_alu_reg_imm (code, X86_CMP, ins->sreg1, 0);
EMIT_COND_SYSTEM_EXCEPTION (X86_CC_LT, TRUE, "OverflowException");
amd64_mov_reg_reg (code, ins->dreg, ins->sreg1, 8);
break;
case OP_LCONV_TO_OVF_I4_UN:
amd64_alu_reg_imm (code, X86_CMP, ins->sreg1, 0x7fffffff);
EMIT_COND_SYSTEM_EXCEPTION (X86_CC_GT, FALSE, "OverflowException");
amd64_mov_reg_reg (code, ins->dreg, ins->sreg1, 8);
break;
case OP_FMOVE:
if (ins->dreg != ins->sreg1)
amd64_sse_movsd_reg_reg (code, ins->dreg, ins->sreg1);
break;
case OP_RMOVE:
if (ins->dreg != ins->sreg1)
amd64_sse_movss_reg_reg (code, ins->dreg, ins->sreg1);
break;
case OP_MOVE_F_TO_I4:
if (cfg->r4fp) {
amd64_movd_reg_xreg_size (code, ins->dreg, ins->sreg1, 8);
} else {
amd64_sse_cvtsd2ss_reg_reg (code, MONO_ARCH_FP_SCRATCH_REG, ins->sreg1);
amd64_movd_reg_xreg_size (code, ins->dreg, MONO_ARCH_FP_SCRATCH_REG, 8);
}
break;
case OP_MOVE_I4_TO_F:
amd64_movd_xreg_reg_size (code, ins->dreg, ins->sreg1, 8);
if (!cfg->r4fp)
amd64_sse_cvtss2sd_reg_reg (code, ins->dreg, ins->dreg);
break;
case OP_MOVE_F_TO_I8:
amd64_movd_reg_xreg_size (code, ins->dreg, ins->sreg1, 8);
break;
case OP_MOVE_I8_TO_F:
amd64_movd_xreg_reg_size (code, ins->dreg, ins->sreg1, 8);
break;
case OP_FADD:
amd64_sse_addsd_reg_reg (code, ins->dreg, ins->sreg2);
break;
case OP_FSUB:
amd64_sse_subsd_reg_reg (code, ins->dreg, ins->sreg2);
break;
case OP_FMUL:
amd64_sse_mulsd_reg_reg (code, ins->dreg, ins->sreg2);
break;
case OP_FDIV:
amd64_sse_divsd_reg_reg (code, ins->dreg, ins->sreg2);
break;
case OP_FNEG: {
static double r8_0 = -0.0;
g_assert (ins->sreg1 == ins->dreg);
if (cfg->compile_aot && cfg->code_exec_only) {
mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_R8_GOT, &r8_0);
amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, sizeof (target_mgreg_t));
amd64_sse_movsd_reg_membase (code, MONO_ARCH_FP_SCRATCH_REG, AMD64_R11, 0);
amd64_sse_xorpd_reg_reg (code, ins->dreg, MONO_ARCH_FP_SCRATCH_REG);
} else {
mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_R8, &r8_0);
amd64_sse_xorpd_reg_membase (code, ins->dreg, AMD64_RIP, 0);
}
break;
}
case OP_ABS: {
static guint64 d = 0x7fffffffffffffffUL;
g_assert (ins->sreg1 == ins->dreg);
if (cfg->compile_aot && cfg->code_exec_only) {
mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_R8_GOT, &d);
amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, sizeof (target_mgreg_t));
amd64_sse_movsd_reg_membase (code, MONO_ARCH_FP_SCRATCH_REG, AMD64_R11, 0);
amd64_sse_andpd_reg_reg (code, ins->dreg, MONO_ARCH_FP_SCRATCH_REG);
} else {
mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_R8, &d);
amd64_sse_andpd_reg_membase (code, ins->dreg, AMD64_RIP, 0);
}
break;
}
case OP_SQRT:
EMIT_SSE2_FPFUNC (code, fsqrt, ins->dreg, ins->sreg1);
break;
case OP_RADD:
amd64_sse_addss_reg_reg (code, ins->dreg, ins->sreg2);
break;
case OP_RSUB:
amd64_sse_subss_reg_reg (code, ins->dreg, ins->sreg2);
break;
case OP_RMUL:
amd64_sse_mulss_reg_reg (code, ins->dreg, ins->sreg2);
break;
case OP_RDIV:
amd64_sse_divss_reg_reg (code, ins->dreg, ins->sreg2);
break;
case OP_RNEG: {
static float r4_0 = -0.0;
g_assert (ins->sreg1 == ins->dreg);
if (cfg->compile_aot && cfg->code_exec_only) {
mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_R4_GOT, &r4_0);
amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, sizeof (target_mgreg_t));
amd64_sse_movss_reg_membase (code, MONO_ARCH_FP_SCRATCH_REG, AMD64_R11, 0);
} else {
mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_R4, &r4_0);
amd64_sse_movss_reg_membase (code, MONO_ARCH_FP_SCRATCH_REG, AMD64_RIP, 0);
}
amd64_sse_xorps_reg_reg (code, ins->dreg, MONO_ARCH_FP_SCRATCH_REG);
break;
}
case OP_IMIN:
g_assert (cfg->opt & MONO_OPT_CMOV);
g_assert (ins->dreg == ins->sreg1);
amd64_alu_reg_reg_size (code, X86_CMP, ins->sreg1, ins->sreg2, 4);
amd64_cmov_reg_size (code, X86_CC_GT, TRUE, ins->dreg, ins->sreg2, 4);
break;
case OP_IMIN_UN:
g_assert (cfg->opt & MONO_OPT_CMOV);
g_assert (ins->dreg == ins->sreg1);
amd64_alu_reg_reg_size (code, X86_CMP, ins->sreg1, ins->sreg2, 4);
amd64_cmov_reg_size (code, X86_CC_GT, FALSE, ins->dreg, ins->sreg2, 4);
break;
case OP_IMAX:
g_assert (cfg->opt & MONO_OPT_CMOV);
g_assert (ins->dreg == ins->sreg1);
amd64_alu_reg_reg_size (code, X86_CMP, ins->sreg1, ins->sreg2, 4);
amd64_cmov_reg_size (code, X86_CC_LT, TRUE, ins->dreg, ins->sreg2, 4);
break;
case OP_IMAX_UN:
g_assert (cfg->opt & MONO_OPT_CMOV);
g_assert (ins->dreg == ins->sreg1);
amd64_alu_reg_reg_size (code, X86_CMP, ins->sreg1, ins->sreg2, 4);
amd64_cmov_reg_size (code, X86_CC_LT, FALSE, ins->dreg, ins->sreg2, 4);
break;
case OP_LMIN:
g_assert (cfg->opt & MONO_OPT_CMOV);
g_assert (ins->dreg == ins->sreg1);
amd64_alu_reg_reg (code, X86_CMP, ins->sreg1, ins->sreg2);
amd64_cmov_reg (code, X86_CC_GT, TRUE, ins->dreg, ins->sreg2);
break;
case OP_LMIN_UN:
g_assert (cfg->opt & MONO_OPT_CMOV);
g_assert (ins->dreg == ins->sreg1);
amd64_alu_reg_reg (code, X86_CMP, ins->sreg1, ins->sreg2);
amd64_cmov_reg (code, X86_CC_GT, FALSE, ins->dreg, ins->sreg2);
break;
case OP_LMAX:
g_assert (cfg->opt & MONO_OPT_CMOV);
g_assert (ins->dreg == ins->sreg1);
amd64_alu_reg_reg (code, X86_CMP, ins->sreg1, ins->sreg2);
amd64_cmov_reg (code, X86_CC_LT, TRUE, ins->dreg, ins->sreg2);
break;
case OP_LMAX_UN:
g_assert (cfg->opt & MONO_OPT_CMOV);
g_assert (ins->dreg == ins->sreg1);
amd64_alu_reg_reg (code, X86_CMP, ins->sreg1, ins->sreg2);
amd64_cmov_reg (code, X86_CC_LT, FALSE, ins->dreg, ins->sreg2);
break;
case OP_X86_FPOP:
break;
case OP_FCOMPARE:
/*
* The two arguments are swapped because the fbranch instructions
* depend on this for the non-sse case to work.
*/
amd64_sse_comisd_reg_reg (code, ins->sreg2, ins->sreg1);
break;
case OP_RCOMPARE:
/*
* FIXME: Get rid of this.
* The two arguments are swapped because the fbranch instructions
* depend on this for the non-sse case to work.
*/
amd64_sse_comiss_reg_reg (code, ins->sreg2, ins->sreg1);
break;
case OP_FCNEQ:
case OP_FCEQ: {
/* zeroing the register at the start results in
* shorter and faster code (we can also remove the widening op)
*/
guchar *unordered_check;
amd64_alu_reg_reg (code, X86_XOR, ins->dreg, ins->dreg);
amd64_sse_comisd_reg_reg (code, ins->sreg1, ins->sreg2);
unordered_check = code;
x86_branch8 (code, X86_CC_P, 0, FALSE);
if (ins->opcode == OP_FCEQ) {
amd64_set_reg (code, X86_CC_EQ, ins->dreg, FALSE);
amd64_patch (unordered_check, code);
} else {
guchar *jump_to_end;
amd64_set_reg (code, X86_CC_NE, ins->dreg, FALSE);
jump_to_end = code;
x86_jump8 (code, 0);
amd64_patch (unordered_check, code);
amd64_inc_reg (code, ins->dreg);
amd64_patch (jump_to_end, code);
}
break;
}
case OP_FCLT:
case OP_FCLT_UN: {
/* zeroing the register at the start results in
* shorter and faster code (we can also remove the widening op)
*/
amd64_alu_reg_reg (code, X86_XOR, ins->dreg, ins->dreg);
amd64_sse_comisd_reg_reg (code, ins->sreg2, ins->sreg1);
if (ins->opcode == OP_FCLT_UN) {
guchar *unordered_check = code;
guchar *jump_to_end;
x86_branch8 (code, X86_CC_P, 0, FALSE);
amd64_set_reg (code, X86_CC_GT, ins->dreg, FALSE);
jump_to_end = code;
x86_jump8 (code, 0);
amd64_patch (unordered_check, code);
amd64_inc_reg (code, ins->dreg);
amd64_patch (jump_to_end, code);
} else {
amd64_set_reg (code, X86_CC_GT, ins->dreg, FALSE);
}
break;
}
case OP_FCLE: {
guchar *unordered_check;
amd64_alu_reg_reg (code, X86_XOR, ins->dreg, ins->dreg);
amd64_sse_comisd_reg_reg (code, ins->sreg2, ins->sreg1);
unordered_check = code;
x86_branch8 (code, X86_CC_P, 0, FALSE);
amd64_set_reg (code, X86_CC_NB, ins->dreg, FALSE);
amd64_patch (unordered_check, code);
break;
}
case OP_FCGT:
case OP_FCGT_UN: {
/* zeroing the register at the start results in
* shorter and faster code (we can also remove the widening op)
*/
guchar *unordered_check;
amd64_alu_reg_reg (code, X86_XOR, ins->dreg, ins->dreg);
amd64_sse_comisd_reg_reg (code, ins->sreg2, ins->sreg1);
if (ins->opcode == OP_FCGT) {
unordered_check = code;
x86_branch8 (code, X86_CC_P, 0, FALSE);
amd64_set_reg (code, X86_CC_LT, ins->dreg, FALSE);
amd64_patch (unordered_check, code);
} else {
amd64_set_reg (code, X86_CC_LT, ins->dreg, FALSE);
}
break;
}
case OP_FCGE: {
guchar *unordered_check;
amd64_alu_reg_reg (code, X86_XOR, ins->dreg, ins->dreg);
amd64_sse_comisd_reg_reg (code, ins->sreg2, ins->sreg1);
unordered_check = code;
x86_branch8 (code, X86_CC_P, 0, FALSE);
amd64_set_reg (code, X86_CC_NA, ins->dreg, FALSE);
amd64_patch (unordered_check, code);
break;
}
case OP_RCEQ:
case OP_RCGT:
case OP_RCLT:
case OP_RCLT_UN:
case OP_RCGT_UN: {
int x86_cond;
amd64_alu_reg_reg (code, X86_XOR, ins->dreg, ins->dreg);
amd64_sse_comiss_reg_reg (code, ins->sreg2, ins->sreg1);
switch (ins->opcode) {
case OP_RCEQ:
x86_cond = X86_CC_EQ;
break;
case OP_RCGT:
x86_cond = X86_CC_LT;
break;
case OP_RCLT:
x86_cond = X86_CC_GT;
break;
case OP_RCLT_UN:
x86_cond = X86_CC_GT;
break;
case OP_RCGT_UN:
x86_cond = X86_CC_LT;
break;
default:
g_assert_not_reached ();
break;
}
guchar *unordered_check;
switch (ins->opcode) {
case OP_RCEQ:
case OP_RCGT:
unordered_check = code;
x86_branch8 (code, X86_CC_P, 0, FALSE);
amd64_set_reg (code, x86_cond, ins->dreg, FALSE);
amd64_patch (unordered_check, code);
break;
case OP_RCLT_UN:
case OP_RCGT_UN: {
guchar *jump_to_end;
unordered_check = code;
x86_branch8 (code, X86_CC_P, 0, FALSE);
amd64_set_reg (code, x86_cond, ins->dreg, FALSE);
jump_to_end = code;
x86_jump8 (code, 0);
amd64_patch (unordered_check, code);
amd64_inc_reg (code, ins->dreg);
amd64_patch (jump_to_end, code);
break;
}
case OP_RCLT:
amd64_set_reg (code, x86_cond, ins->dreg, FALSE);
break;
default:
g_assert_not_reached ();
break;
}
break;
}
case OP_FCLT_MEMBASE:
case OP_FCGT_MEMBASE:
case OP_FCLT_UN_MEMBASE:
case OP_FCGT_UN_MEMBASE:
case OP_FCEQ_MEMBASE: {
guchar *unordered_check, *jump_to_end;
int x86_cond;
amd64_alu_reg_reg (code, X86_XOR, ins->dreg, ins->dreg);
amd64_sse_comisd_reg_membase (code, ins->sreg1, ins->sreg2, ins->inst_offset);
switch (ins->opcode) {
case OP_FCEQ_MEMBASE:
x86_cond = X86_CC_EQ;
break;
case OP_FCLT_MEMBASE:
case OP_FCLT_UN_MEMBASE:
x86_cond = X86_CC_LT;
break;
case OP_FCGT_MEMBASE:
case OP_FCGT_UN_MEMBASE:
x86_cond = X86_CC_GT;
break;
default:
g_assert_not_reached ();
}
unordered_check = code;
x86_branch8 (code, X86_CC_P, 0, FALSE);
amd64_set_reg (code, x86_cond, ins->dreg, FALSE);
switch (ins->opcode) {
case OP_FCEQ_MEMBASE:
case OP_FCLT_MEMBASE:
case OP_FCGT_MEMBASE:
amd64_patch (unordered_check, code);
break;
case OP_FCLT_UN_MEMBASE:
case OP_FCGT_UN_MEMBASE:
jump_to_end = code;
x86_jump8 (code, 0);
amd64_patch (unordered_check, code);
amd64_inc_reg (code, ins->dreg);
amd64_patch (jump_to_end, code);
break;
default:
break;
}
break;
}
case OP_FBEQ: {
guchar *jump = code;
x86_branch8 (code, X86_CC_P, 0, TRUE);
EMIT_COND_BRANCH (ins, X86_CC_EQ, FALSE);
amd64_patch (jump, code);
break;
}
case OP_FBNE_UN:
/* Branch if C013 != 100 */
/* branch if !ZF or (PF|CF) */
EMIT_COND_BRANCH (ins, X86_CC_NE, FALSE);
EMIT_COND_BRANCH (ins, X86_CC_P, FALSE);
EMIT_COND_BRANCH (ins, X86_CC_B, FALSE);
break;
case OP_FBLT:
EMIT_COND_BRANCH (ins, X86_CC_GT, FALSE);
break;
case OP_FBLT_UN:
EMIT_COND_BRANCH (ins, X86_CC_P, FALSE);
EMIT_COND_BRANCH (ins, X86_CC_GT, FALSE);
break;
case OP_FBGT:
case OP_FBGT_UN:
if (ins->opcode == OP_FBGT) {
guchar *br1;
/* skip branch if C1=1 */
br1 = code;
x86_branch8 (code, X86_CC_P, 0, FALSE);
/* branch if (C0 | C3) = 1 */
EMIT_COND_BRANCH (ins, X86_CC_LT, FALSE);
amd64_patch (br1, code);
break;
} else {
EMIT_COND_BRANCH (ins, X86_CC_LT, FALSE);
}
break;
case OP_FBGE: {
/* Branch if C013 == 100 or 001 */
guchar *br1;
/* skip branch if C1=1 */
br1 = code;
x86_branch8 (code, X86_CC_P, 0, FALSE);
/* branch if (C0 | C3) = 1 */
EMIT_COND_BRANCH (ins, X86_CC_BE, FALSE);
amd64_patch (br1, code);
break;
}
case OP_FBGE_UN:
/* Branch if C013 == 000 */
EMIT_COND_BRANCH (ins, X86_CC_LE, FALSE);
break;
case OP_FBLE: {
/* Branch if C013=000 or 100 */
guchar *br1;
/* skip branch if C1=1 */
br1 = code;
x86_branch8 (code, X86_CC_P, 0, FALSE);
/* branch if C0=0 */
EMIT_COND_BRANCH (ins, X86_CC_NB, FALSE);
amd64_patch (br1, code);
break;
}
case OP_FBLE_UN:
/* Branch if C013 != 001 */
EMIT_COND_BRANCH (ins, X86_CC_P, FALSE);
EMIT_COND_BRANCH (ins, X86_CC_GE, FALSE);
break;
case OP_CKFINITE:
/* Transfer value to the fp stack */
amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 16);
amd64_movsd_membase_reg (code, AMD64_RSP, 0, ins->sreg1);
amd64_fld_membase (code, AMD64_RSP, 0, TRUE);
amd64_push_reg (code, AMD64_RAX);
amd64_fxam (code);
amd64_fnstsw (code);
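/* fxam sets C0 for NaN and infinity; mask C3|C0 out of the status word and compare
 * against C0 alone to detect a non-finite value */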
amd64_alu_reg_imm (code, X86_AND, AMD64_RAX, 0x4100);
amd64_alu_reg_imm (code, X86_CMP, AMD64_RAX, X86_FP_C0);
amd64_pop_reg (code, AMD64_RAX);
amd64_fstp (code, 0);
EMIT_COND_SYSTEM_EXCEPTION (X86_CC_EQ, FALSE, "OverflowException");
amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 16);
break;
case OP_TLS_GET: {
code = mono_amd64_emit_tls_get (code, ins->dreg, ins->inst_offset);
break;
}
case OP_TLS_SET: {
code = mono_amd64_emit_tls_set (code, ins->sreg1, ins->inst_offset);
break;
}
case OP_MEMORY_BARRIER: {
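/* Only a sequentially consistent barrier needs a fence on amd64; ordinary loads and
 * stores already have acquire/release semantics under the hardware memory model */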
if (ins->backend.memory_barrier_kind == MONO_MEMORY_BARRIER_SEQ)
x86_mfence (code);
break;
}
case OP_ATOMIC_ADD_I4:
case OP_ATOMIC_ADD_I8: {
int dreg = ins->dreg;
guint32 size = (ins->opcode == OP_ATOMIC_ADD_I4) ? 4 : 8;
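/* lock xadd leaves the old value in dreg, to which sreg2 is then added to produce the
 * result; use R11 when dreg aliases sreg2 or the base register */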
if ((dreg == ins->sreg2) || (dreg == ins->inst_basereg))
dreg = AMD64_R11;
amd64_mov_reg_reg (code, dreg, ins->sreg2, size);
amd64_prefix (code, X86_LOCK_PREFIX);
amd64_xadd_membase_reg (code, ins->inst_basereg, ins->inst_offset, dreg, size);
/* dreg contains the old value, add with sreg2 value */
amd64_alu_reg_reg_size (code, X86_ADD, dreg, ins->sreg2, size);
if (ins->dreg != dreg)
amd64_mov_reg_reg (code, ins->dreg, dreg, size);
break;
}
case OP_ATOMIC_EXCHANGE_I4:
case OP_ATOMIC_EXCHANGE_I8: {
guint32 size = ins->opcode == OP_ATOMIC_EXCHANGE_I4 ? 4 : 8;
/* LOCK prefix is implied. */
amd64_mov_reg_reg (code, GP_SCRATCH_REG, ins->sreg2, size);
amd64_xchg_membase_reg_size (code, ins->sreg1, ins->inst_offset, GP_SCRATCH_REG, size);
amd64_mov_reg_reg (code, ins->dreg, GP_SCRATCH_REG, size);
break;
}
case OP_ATOMIC_CAS_I4:
case OP_ATOMIC_CAS_I8: {
guint32 size;
if (ins->opcode == OP_ATOMIC_CAS_I8)
size = 8;
else
size = 4;
/*
* See http://msdn.microsoft.com/en-us/magazine/cc302329.aspx for
* an explanation of how this works.
*/
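/* cmpxchg compares [sreg1 + offset] with RAX (sreg3); on a match it stores sreg2,
 * and the old value is left in RAX either way */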
g_assert (ins->sreg3 == AMD64_RAX);
g_assert (ins->sreg1 != AMD64_RAX);
g_assert (ins->sreg1 != ins->sreg2);
amd64_prefix (code, X86_LOCK_PREFIX);
amd64_cmpxchg_membase_reg_size (code, ins->sreg1, ins->inst_offset, ins->sreg2, size);
if (ins->dreg != AMD64_RAX)
amd64_mov_reg_reg (code, ins->dreg, AMD64_RAX, size);
break;
}
case OP_ATOMIC_LOAD_I1: {
amd64_widen_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset, TRUE, FALSE);
break;
}
case OP_ATOMIC_LOAD_U1: {
amd64_widen_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset, FALSE, FALSE);
break;
}
case OP_ATOMIC_LOAD_I2: {
amd64_widen_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset, TRUE, TRUE);
break;
}
case OP_ATOMIC_LOAD_U2: {
amd64_widen_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset, FALSE, TRUE);
break;
}
case OP_ATOMIC_LOAD_I4: {
amd64_movsxd_reg_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
break;
}
case OP_ATOMIC_LOAD_U4:
case OP_ATOMIC_LOAD_I8:
case OP_ATOMIC_LOAD_U8: {
amd64_mov_reg_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset, ins->opcode == OP_ATOMIC_LOAD_U4 ? 4 : 8);
break;
}
case OP_ATOMIC_LOAD_R4: {
if (cfg->r4fp) {
amd64_sse_movss_reg_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
} else {
amd64_sse_movss_reg_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
amd64_sse_cvtss2sd_reg_reg (code, ins->dreg, ins->dreg);
}
break;
}
case OP_ATOMIC_LOAD_R8: {
amd64_sse_movsd_reg_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
break;
}
case OP_ATOMIC_STORE_I1:
case OP_ATOMIC_STORE_U1:
case OP_ATOMIC_STORE_I2:
case OP_ATOMIC_STORE_U2:
case OP_ATOMIC_STORE_I4:
case OP_ATOMIC_STORE_U4:
case OP_ATOMIC_STORE_I8:
case OP_ATOMIC_STORE_U8: {
int size;
switch (ins->opcode) {
case OP_ATOMIC_STORE_I1:
case OP_ATOMIC_STORE_U1:
size = 1;
break;
case OP_ATOMIC_STORE_I2:
case OP_ATOMIC_STORE_U2:
size = 2;
break;
case OP_ATOMIC_STORE_I4:
case OP_ATOMIC_STORE_U4:
size = 4;
break;
case OP_ATOMIC_STORE_I8:
case OP_ATOMIC_STORE_U8:
size = 8;
break;
}
amd64_mov_membase_reg (code, ins->inst_destbasereg, ins->inst_offset, ins->sreg1, size);
if (ins->backend.memory_barrier_kind == MONO_MEMORY_BARRIER_SEQ)
x86_mfence (code);
break;
}
case OP_ATOMIC_STORE_R4: {
if (cfg->r4fp) {
amd64_sse_movss_membase_reg (code, ins->inst_destbasereg, ins->inst_offset, ins->sreg1);
} else {
amd64_sse_cvtsd2ss_reg_reg (code, MONO_ARCH_FP_SCRATCH_REG, ins->sreg1);
amd64_sse_movss_membase_reg (code, ins->inst_destbasereg, ins->inst_offset, MONO_ARCH_FP_SCRATCH_REG);
}
if (ins->backend.memory_barrier_kind == MONO_MEMORY_BARRIER_SEQ)
x86_mfence (code);
break;
}
case OP_ATOMIC_STORE_R8: {
x86_nop (code);
x86_nop (code);
amd64_sse_movsd_membase_reg (code, ins->inst_destbasereg, ins->inst_offset, ins->sreg1);
x86_nop (code);
x86_nop (code);
if (ins->backend.memory_barrier_kind == MONO_MEMORY_BARRIER_SEQ)
x86_mfence (code);
break;
}
case OP_CARD_TABLE_WBARRIER: {
int ptr = ins->sreg1;
int value = ins->sreg2;
guchar *br = 0;
int nursery_shift, card_table_shift;
gpointer card_table_mask;
size_t nursery_size;
gpointer card_table = mono_gc_get_card_table (&card_table_shift, &card_table_mask);
guint64 nursery_start = (guint64)mono_gc_get_nursery (&nursery_shift, &nursery_size);
guint64 shifted_nursery_start = nursery_start >> nursery_shift;
/* If either points to the stack we can simply avoid the WB. This happens due to
 * optimizations revealing a stack store that was not visible when op_cardtable was emitted.
 */
if (ins->sreg1 == AMD64_RSP || ins->sreg2 == AMD64_RSP)
continue;
/*
 * We need one register we can clobber: we choose EDX, and sreg1 is
 * fixed to EAX to work around limitations in the local register allocator.
 * sreg2 might get allocated to EDX, but that is not a problem since
 * we use it before clobbering EDX.
 */
g_assert (ins->sreg1 == AMD64_RAX);
/*
* This is the code we produce:
*
* edx = value
* edx >>= nursery_shift
* cmp edx, (nursery_start >> nursery_shift)
* jne done
* edx = ptr
* edx >>= card_table_shift
* edx += cardtable
* [edx] = 1
* done:
*/
if (mono_gc_card_table_nursery_check ()) {
if (value != AMD64_RDX)
amd64_mov_reg_reg (code, AMD64_RDX, value, 8);
amd64_shift_reg_imm (code, X86_SHR, AMD64_RDX, nursery_shift);
if (shifted_nursery_start >> 31) {
/*
 * The value we need to compare against does not fit in a sign-extended
 * 32-bit immediate, so we need another spare register. We use RBX,
 * which we save and restore.
 */
amd64_mov_membase_reg (code, AMD64_RSP, -8, AMD64_RBX, 8);
amd64_mov_reg_imm (code, AMD64_RBX, shifted_nursery_start);
amd64_alu_reg_reg (code, X86_CMP, AMD64_RDX, AMD64_RBX);
amd64_mov_reg_membase (code, AMD64_RBX, AMD64_RSP, -8, 8);
} else {
amd64_alu_reg_imm (code, X86_CMP, AMD64_RDX, shifted_nursery_start);
}
br = code; x86_branch8 (code, X86_CC_NE, -1, FALSE);
}
amd64_mov_reg_reg (code, AMD64_RDX, ptr, 8);
amd64_shift_reg_imm (code, X86_SHR, AMD64_RDX, card_table_shift);
if (card_table_mask)
amd64_alu_reg_imm (code, X86_AND, AMD64_RDX, (guint32)(guint64)card_table_mask);
mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR, card_table);
amd64_alu_reg_membase (code, X86_ADD, AMD64_RDX, AMD64_RIP, 0);
amd64_mov_membase_imm (code, AMD64_RDX, 0, 1, 1);
if (mono_gc_card_table_nursery_check ())
x86_patch (br, code);
break;
}
#ifdef MONO_ARCH_SIMD_INTRINSICS
/* TODO: Some of these IR opcodes are marked as no-clobber when they in fact do clobber. */
case OP_ADDPS:
amd64_sse_addps_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_DIVPS:
amd64_sse_divps_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_MULPS:
amd64_sse_mulps_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_SUBPS:
amd64_sse_subps_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_MAXPS:
amd64_sse_maxps_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_MINPS:
amd64_sse_minps_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_COMPPS:
g_assert (ins->inst_c0 >= 0 && ins->inst_c0 <= 7);
amd64_sse_cmpps_reg_reg_imm (code, ins->sreg1, ins->sreg2, ins->inst_c0);
break;
case OP_ANDPS:
amd64_sse_andps_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_ANDNPS:
amd64_sse_andnps_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_ORPS:
amd64_sse_orps_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_XORPS:
amd64_sse_xorps_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_SQRTPS:
amd64_sse_sqrtps_reg_reg (code, ins->dreg, ins->sreg1);
break;
case OP_RSQRTPS:
amd64_sse_rsqrtps_reg_reg (code, ins->dreg, ins->sreg1);
break;
case OP_RCPPS:
amd64_sse_rcpps_reg_reg (code, ins->dreg, ins->sreg1);
break;
case OP_ADDSUBPS:
amd64_sse_addsubps_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_HADDPS:
amd64_sse_haddps_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_HSUBPS:
amd64_sse_hsubps_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_DUPPS_HIGH:
amd64_sse_movshdup_reg_reg (code, ins->dreg, ins->sreg1);
break;
case OP_DUPPS_LOW:
amd64_sse_movsldup_reg_reg (code, ins->dreg, ins->sreg1);
break;
case OP_PSHUFLEW_HIGH:
g_assert (ins->inst_c0 >= 0 && ins->inst_c0 <= 0xFF);
amd64_sse_pshufhw_reg_reg_imm (code, ins->dreg, ins->sreg1, ins->inst_c0);
break;
case OP_PSHUFLEW_LOW:
g_assert (ins->inst_c0 >= 0 && ins->inst_c0 <= 0xFF);
amd64_sse_pshuflw_reg_reg_imm (code, ins->dreg, ins->sreg1, ins->inst_c0);
break;
case OP_PSHUFLED:
g_assert (ins->inst_c0 >= 0 && ins->inst_c0 <= 0xFF);
amd64_sse_pshufd_reg_reg_imm (code, ins->dreg, ins->sreg1, ins->inst_c0);
break;
case OP_SHUFPS:
g_assert (ins->inst_c0 >= 0 && ins->inst_c0 <= 0xFF);
amd64_sse_shufps_reg_reg_imm (code, ins->sreg1, ins->sreg2, ins->inst_c0);
break;
case OP_SHUFPD:
g_assert (ins->inst_c0 >= 0 && ins->inst_c0 <= 0x3);
amd64_sse_shufpd_reg_reg_imm (code, ins->sreg1, ins->sreg2, ins->inst_c0);
break;
case OP_ADDPD:
amd64_sse_addpd_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_DIVPD:
amd64_sse_divpd_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_MULPD:
amd64_sse_mulpd_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_SUBPD:
amd64_sse_subpd_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_MAXPD:
amd64_sse_maxpd_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_MINPD:
amd64_sse_minpd_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_COMPPD:
g_assert (ins->inst_c0 >= 0 && ins->inst_c0 <= 7);
amd64_sse_cmppd_reg_reg_imm (code, ins->sreg1, ins->sreg2, ins->inst_c0);
break;
case OP_ANDPD:
amd64_sse_andpd_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_ANDNPD:
amd64_sse_andnpd_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_ORPD:
amd64_sse_orpd_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_XORPD:
amd64_sse_xorpd_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_SQRTPD:
amd64_sse_sqrtpd_reg_reg (code, ins->dreg, ins->sreg1);
break;
case OP_ADDSUBPD:
amd64_sse_addsubpd_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_HADDPD:
amd64_sse_haddpd_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_HSUBPD:
amd64_sse_hsubpd_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_DUPPD:
amd64_sse_movddup_reg_reg (code, ins->dreg, ins->sreg1);
break;
case OP_EXTRACT_MASK:
amd64_sse_pmovmskb_reg_reg (code, ins->dreg, ins->sreg1);
break;
case OP_PAND:
amd64_sse_pand_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_PANDN:
amd64_sse_pandn_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_POR:
amd64_sse_por_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_PXOR:
amd64_sse_pxor_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_PADDB:
amd64_sse_paddb_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_PADDW:
amd64_sse_paddw_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_PADDD:
amd64_sse_paddd_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_PADDQ:
amd64_sse_paddq_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_PSUBB:
amd64_sse_psubb_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_PSUBW:
amd64_sse_psubw_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_PSUBD:
amd64_sse_psubd_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_PSUBQ:
amd64_sse_psubq_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_PMAXB_UN:
amd64_sse_pmaxub_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_PMAXW_UN:
amd64_sse_pmaxuw_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_PMAXD_UN:
amd64_sse_pmaxud_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_PMAXB:
amd64_sse_pmaxsb_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_PMAXW:
amd64_sse_pmaxsw_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_PMAXD:
amd64_sse_pmaxsd_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_PAVGB_UN:
amd64_sse_pavgb_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_PAVGW_UN:
amd64_sse_pavgw_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_PMINB_UN:
amd64_sse_pminub_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_PMINW_UN:
amd64_sse_pminuw_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_PMIND_UN:
amd64_sse_pminud_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_PMINB:
amd64_sse_pminsb_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_PMINW:
amd64_sse_pminsw_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_PMIND:
amd64_sse_pminsd_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_PCMPEQB:
amd64_sse_pcmpeqb_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_PCMPEQW:
amd64_sse_pcmpeqw_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_PCMPEQD:
amd64_sse_pcmpeqd_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_PCMPEQQ:
amd64_sse_pcmpeqq_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_PCMPGTB:
amd64_sse_pcmpgtb_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_PCMPGTW:
amd64_sse_pcmpgtw_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_PCMPGTD:
amd64_sse_pcmpgtd_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_PCMPGTQ:
amd64_sse_pcmpgtq_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_PSUM_ABS_DIFF:
amd64_sse_psadbw_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_UNPACK_LOWB:
amd64_sse_punpcklbw_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_UNPACK_LOWW:
amd64_sse_punpcklwd_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_UNPACK_LOWD:
amd64_sse_punpckldq_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_UNPACK_LOWQ:
amd64_sse_punpcklqdq_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_UNPACK_LOWPS:
amd64_sse_unpcklps_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_UNPACK_LOWPD:
amd64_sse_unpcklpd_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_UNPACK_HIGHB:
amd64_sse_punpckhbw_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_UNPACK_HIGHW:
amd64_sse_punpckhwd_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_UNPACK_HIGHD:
amd64_sse_punpckhdq_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_UNPACK_HIGHQ:
amd64_sse_punpckhqdq_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_UNPACK_HIGHPS:
amd64_sse_unpckhps_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_UNPACK_HIGHPD:
amd64_sse_unpckhpd_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_PACKW:
amd64_sse_packsswb_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_PACKD:
amd64_sse_packssdw_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_PACKW_UN:
amd64_sse_packuswb_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_PACKD_UN:
amd64_sse_packusdw_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_PADDB_SAT_UN:
amd64_sse_paddusb_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_PSUBB_SAT_UN:
amd64_sse_psubusb_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_PADDW_SAT_UN:
amd64_sse_paddusw_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_PSUBW_SAT_UN:
amd64_sse_psubusw_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_PADDB_SAT:
amd64_sse_paddsb_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_PSUBB_SAT:
amd64_sse_psubsb_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_PADDW_SAT:
amd64_sse_paddsw_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_PSUBW_SAT:
amd64_sse_psubsw_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_PMULW:
amd64_sse_pmullw_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_PMULD:
amd64_sse_pmulld_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_PMULQ:
amd64_sse_pmuludq_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_PMULW_HIGH_UN:
amd64_sse_pmulhuw_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_PMULW_HIGH:
amd64_sse_pmulhw_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_PSHRW:
amd64_sse_psrlw_reg_imm (code, ins->dreg, ins->inst_imm);
break;
case OP_PSHRW_REG:
amd64_sse_psrlw_reg_reg (code, ins->dreg, ins->sreg2);
break;
case OP_PSARW:
amd64_sse_psraw_reg_imm (code, ins->dreg, ins->inst_imm);
break;
case OP_PSARW_REG:
amd64_sse_psraw_reg_reg (code, ins->dreg, ins->sreg2);
break;
case OP_PSHLW:
amd64_sse_psllw_reg_imm (code, ins->dreg, ins->inst_imm);
break;
case OP_PSHLW_REG:
amd64_sse_psllw_reg_reg (code, ins->dreg, ins->sreg2);
break;
case OP_PSHRD:
amd64_sse_psrld_reg_imm (code, ins->dreg, ins->inst_imm);
break;
case OP_PSHRD_REG:
amd64_sse_psrld_reg_reg (code, ins->dreg, ins->sreg2);
break;
case OP_PSARD:
amd64_sse_psrad_reg_imm (code, ins->dreg, ins->inst_imm);
break;
case OP_PSARD_REG:
amd64_sse_psrad_reg_reg (code, ins->dreg, ins->sreg2);
break;
case OP_PSHLD:
amd64_sse_pslld_reg_imm (code, ins->dreg, ins->inst_imm);
break;
case OP_PSHLD_REG:
amd64_sse_pslld_reg_reg (code, ins->dreg, ins->sreg2);
break;
case OP_PSHRQ:
amd64_sse_psrlq_reg_imm (code, ins->dreg, ins->inst_imm);
break;
case OP_PSHRQ_REG:
amd64_sse_psrlq_reg_reg (code, ins->dreg, ins->sreg2);
break;
/* TODO: This is a part of the SSE spec but not added
case OP_PSARQ:
amd64_sse_psraq_reg_imm (code, ins->dreg, ins->inst_imm);
break;
case OP_PSARQ_REG:
amd64_sse_psraq_reg_reg (code, ins->dreg, ins->sreg2);
break;
*/
case OP_PSHLQ:
amd64_sse_psllq_reg_imm (code, ins->dreg, ins->inst_imm);
break;
case OP_PSHLQ_REG:
amd64_sse_psllq_reg_reg (code, ins->dreg, ins->sreg2);
break;
case OP_CVTDQ2PD:
amd64_sse_cvtdq2pd_reg_reg (code, ins->dreg, ins->sreg1);
break;
case OP_CVTDQ2PS:
amd64_sse_cvtdq2ps_reg_reg (code, ins->dreg, ins->sreg1);
break;
case OP_CVTPD2DQ:
amd64_sse_cvtpd2dq_reg_reg (code, ins->dreg, ins->sreg1);
break;
case OP_CVTPD2PS:
amd64_sse_cvtpd2ps_reg_reg (code, ins->dreg, ins->sreg1);
break;
case OP_CVTPS2DQ:
amd64_sse_cvtps2dq_reg_reg (code, ins->dreg, ins->sreg1);
break;
case OP_CVTPS2PD:
amd64_sse_cvtps2pd_reg_reg (code, ins->dreg, ins->sreg1);
break;
case OP_CVTTPD2DQ:
amd64_sse_cvttpd2dq_reg_reg (code, ins->dreg, ins->sreg1);
break;
case OP_CVTTPS2DQ:
amd64_sse_cvttps2dq_reg_reg (code, ins->dreg, ins->sreg1);
break;
case OP_ICONV_TO_X:
amd64_movd_xreg_reg_size (code, ins->dreg, ins->sreg1, 4);
break;
case OP_EXTRACT_I4:
amd64_movd_reg_xreg_size (code, ins->dreg, ins->sreg1, 4);
break;
case OP_EXTRACT_I8:
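/* inst_c0 selects the qword lane; movhlps brings the high qword down into the scratch register first */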
if (ins->inst_c0) {
amd64_movhlps_reg_reg (code, MONO_ARCH_FP_SCRATCH_REG, ins->sreg1);
amd64_movd_reg_xreg_size (code, ins->dreg, MONO_ARCH_FP_SCRATCH_REG, 8);
} else {
amd64_movd_reg_xreg_size (code, ins->dreg, ins->sreg1, 8);
}
break;
case OP_EXTRACT_I1:
amd64_movd_reg_xreg_size (code, ins->dreg, ins->sreg1, 4);
if (ins->inst_c0)
amd64_shift_reg_imm (code, X86_SHR, ins->dreg, ins->inst_c0 * 8);
amd64_widen_reg (code, ins->dreg, ins->dreg, ins->inst_c1 == MONO_TYPE_I1, FALSE);
break;
case OP_EXTRACT_I2:
/*amd64_movd_reg_xreg_size (code, ins->dreg, ins->sreg1, 4);
if (ins->inst_c0)
amd64_shift_reg_imm_size (code, X86_SHR, ins->dreg, 16, 4);*/
amd64_sse_pextrw_reg_reg_imm (code, ins->dreg, ins->sreg1, ins->inst_c0);
amd64_widen_reg_size (code, ins->dreg, ins->dreg, ins->inst_c1 == MONO_TYPE_I2, TRUE, 4);
break;
case OP_EXTRACT_R8:
if (ins->inst_c0)
amd64_movhlps_reg_reg (code, ins->dreg, ins->sreg1);
else
amd64_sse_movsd_reg_reg (code, ins->dreg, ins->sreg1);
break;
case OP_INSERT_I2:
amd64_sse_pinsrw_reg_reg_imm (code, ins->sreg1, ins->sreg2, ins->inst_c0);
break;
case OP_EXTRACTX_U2:
amd64_sse_pextrw_reg_reg_imm (code, ins->dreg, ins->sreg1, ins->inst_c0);
break;
case OP_INSERTX_U1_SLOW:
/* sreg1 is the extracted ireg (scratch),
 * sreg2 is the ireg to be inserted (scratch),
 * dreg is the xreg to receive the value */
/*clear the bits from the extracted word*/
amd64_alu_reg_imm (code, X86_AND, ins->sreg1, ins->inst_c0 & 1 ? 0x00FF : 0xFF00);
/*shift the value to insert if needed*/
if (ins->inst_c0 & 1)
amd64_shift_reg_imm_size (code, X86_SHL, ins->sreg2, 8, 4);
/*join them together*/
amd64_alu_reg_reg (code, X86_OR, ins->sreg1, ins->sreg2);
amd64_sse_pinsrw_reg_reg_imm (code, ins->dreg, ins->sreg1, ins->inst_c0 / 2);
break;
case OP_INSERTX_I4_SLOW:
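/* Slow path: insert the 32-bit value as two 16-bit pinsrw stores, likely to avoid requiring the SSE4.1-only pinsrd */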
amd64_sse_pinsrw_reg_reg_imm (code, ins->dreg, ins->sreg2, ins->inst_c0 * 2);
amd64_shift_reg_imm (code, X86_SHR, ins->sreg2, 16);
amd64_sse_pinsrw_reg_reg_imm (code, ins->dreg, ins->sreg2, ins->inst_c0 * 2 + 1);
break;
case OP_INSERTX_I8_SLOW:
amd64_movd_xreg_reg_size(code, MONO_ARCH_FP_SCRATCH_REG, ins->sreg2, 8);
if (ins->inst_c0)
amd64_movlhps_reg_reg (code, ins->dreg, MONO_ARCH_FP_SCRATCH_REG);
else
amd64_sse_movsd_reg_reg (code, ins->dreg, MONO_ARCH_FP_SCRATCH_REG);
break;
case OP_INSERTX_R4_SLOW:
switch (ins->inst_c0) {
case 0:
if (cfg->r4fp)
amd64_sse_movss_reg_reg (code, ins->dreg, ins->sreg2);
else
amd64_sse_cvtsd2ss_reg_reg (code, ins->dreg, ins->sreg2);
break;
case 1:
amd64_sse_pshufd_reg_reg_imm (code, ins->dreg, ins->dreg, mono_simd_shuffle_mask(1, 0, 2, 3));
if (cfg->r4fp)
amd64_sse_movss_reg_reg (code, ins->dreg, ins->sreg2);
else
amd64_sse_cvtsd2ss_reg_reg (code, ins->dreg, ins->sreg2);
amd64_sse_pshufd_reg_reg_imm (code, ins->dreg, ins->dreg, mono_simd_shuffle_mask(1, 0, 2, 3));
break;
case 2:
amd64_sse_pshufd_reg_reg_imm (code, ins->dreg, ins->dreg, mono_simd_shuffle_mask(2, 1, 0, 3));
if (cfg->r4fp)
amd64_sse_movss_reg_reg (code, ins->dreg, ins->sreg2);
else
amd64_sse_cvtsd2ss_reg_reg (code, ins->dreg, ins->sreg2);
amd64_sse_pshufd_reg_reg_imm (code, ins->dreg, ins->dreg, mono_simd_shuffle_mask(2, 1, 0, 3));
break;
case 3:
amd64_sse_pshufd_reg_reg_imm (code, ins->dreg, ins->dreg, mono_simd_shuffle_mask(3, 1, 2, 0));
if (cfg->r4fp)
amd64_sse_movss_reg_reg (code, ins->dreg, ins->sreg2);
else
amd64_sse_cvtsd2ss_reg_reg (code, ins->dreg, ins->sreg2);
amd64_sse_pshufd_reg_reg_imm (code, ins->dreg, ins->dreg, mono_simd_shuffle_mask(3, 1, 2, 0));
break;
}
break;
case OP_INSERTX_R8_SLOW:
if (ins->inst_c0)
amd64_movlhps_reg_reg (code, ins->dreg, ins->sreg2);
else
amd64_sse_movsd_reg_reg (code, ins->dreg, ins->sreg2);
break;
case OP_STOREX_MEMBASE_REG:
case OP_STOREX_MEMBASE:
amd64_sse_movups_membase_reg (code, ins->dreg, ins->inst_offset, ins->sreg1);
break;
case OP_LOADX_MEMBASE:
amd64_sse_movups_reg_membase (code, ins->dreg, ins->sreg1, ins->inst_offset);
break;
case OP_LOADX_ALIGNED_MEMBASE:
amd64_sse_movaps_reg_membase (code, ins->dreg, ins->sreg1, ins->inst_offset);
break;
case OP_STOREX_ALIGNED_MEMBASE_REG:
amd64_sse_movaps_membase_reg (code, ins->dreg, ins->inst_offset, ins->sreg1);
break;
case OP_STOREX_NTA_MEMBASE_REG:
amd64_sse_movntps_reg_membase (code, ins->dreg, ins->sreg1, ins->inst_offset);
break;
case OP_PREFETCH_MEMBASE:
amd64_sse_prefetch_reg_membase (code, ins->backend.arg_info, ins->sreg1, ins->inst_offset);
break;
case OP_XMOVE:
/*FIXME the peephole pass should have killed this*/
if (ins->dreg != ins->sreg1)
amd64_sse_movaps_reg_reg (code, ins->dreg, ins->sreg1);
break;
case OP_XZERO:
amd64_sse_pxor_reg_reg (code, ins->dreg, ins->dreg);
break;
case OP_XONES:
amd64_sse_pcmpeqb_reg_reg (code, ins->dreg, ins->dreg);
break;
case OP_ICONV_TO_R4_RAW:
amd64_movd_xreg_reg_size (code, ins->dreg, ins->sreg1, 4);
if (!cfg->r4fp)
amd64_sse_cvtss2sd_reg_reg (code, ins->dreg, ins->dreg);
break;
case OP_FCONV_TO_R8_X:
amd64_sse_movsd_reg_reg (code, ins->dreg, ins->sreg1);
break;
case OP_XCONV_R8_TO_I4:
amd64_sse_cvttsd2si_reg_xreg_size (code, ins->dreg, ins->sreg1, 4);
switch (ins->backend.source_opcode) {
case OP_FCONV_TO_I1:
amd64_widen_reg (code, ins->dreg, ins->dreg, TRUE, FALSE);
break;
case OP_FCONV_TO_U1:
amd64_widen_reg (code, ins->dreg, ins->dreg, FALSE, FALSE);
break;
case OP_FCONV_TO_I2:
amd64_widen_reg (code, ins->dreg, ins->dreg, TRUE, TRUE);
break;
case OP_FCONV_TO_U2:
amd64_widen_reg (code, ins->dreg, ins->dreg, FALSE, TRUE);
break;
}
break;
case OP_EXPAND_I2:
amd64_sse_pinsrw_reg_reg_imm (code, ins->dreg, ins->sreg1, 0);
amd64_sse_pinsrw_reg_reg_imm (code, ins->dreg, ins->sreg1, 1);
amd64_sse_pshufd_reg_reg_imm (code, ins->dreg, ins->dreg, 0);
break;
case OP_EXPAND_I4:
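/* Move the scalar into the low lane, then broadcast it to all four lanes with a zero shuffle mask */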
amd64_movd_xreg_reg_size (code, ins->dreg, ins->sreg1, 4);
amd64_sse_pshufd_reg_reg_imm (code, ins->dreg, ins->dreg, 0);
break;
case OP_EXPAND_I8:
amd64_movd_xreg_reg_size (code, ins->dreg, ins->sreg1, 8);
amd64_sse_pshufd_reg_reg_imm (code, ins->dreg, ins->dreg, 0x44);
break;
case OP_EXPAND_R4:
if (cfg->r4fp) {
amd64_sse_movsd_reg_reg (code, ins->dreg, ins->sreg1);
} else {
amd64_sse_movsd_reg_reg (code, ins->dreg, ins->sreg1);
amd64_sse_cvtsd2ss_reg_reg (code, ins->dreg, ins->dreg);
}
amd64_sse_pshufd_reg_reg_imm (code, ins->dreg, ins->dreg, 0);
break;
case OP_EXPAND_R8:
amd64_sse_movsd_reg_reg (code, ins->dreg, ins->sreg1);
amd64_sse_pshufd_reg_reg_imm (code, ins->dreg, ins->dreg, 0x44);
break;
case OP_SSE41_ROUNDP: {
if (ins->inst_c1 == MONO_TYPE_R8)
amd64_sse_roundpd_reg_reg_imm (code, ins->dreg, ins->sreg1, ins->inst_c0);
else
g_assert_not_reached (); // roundps, but it's not used anywhere for non-llvm back-end yet.
break;
}
#endif
case OP_LZCNT32:
amd64_sse_lzcnt_reg_reg_size (code, ins->dreg, ins->sreg1, 4);
break;
case OP_LZCNT64:
amd64_sse_lzcnt_reg_reg_size (code, ins->dreg, ins->sreg1, 8);
break;
case OP_POPCNT32:
amd64_sse_popcnt_reg_reg_size (code, ins->dreg, ins->sreg1, 4);
break;
case OP_POPCNT64:
amd64_sse_popcnt_reg_reg_size (code, ins->dreg, ins->sreg1, 8);
break;
case OP_LIVERANGE_START: {
if (cfg->verbose_level > 1)
printf ("R%d START=0x%x\n", MONO_VARINFO (cfg, ins->inst_c0)->vreg, (int)(code - cfg->native_code));
MONO_VARINFO (cfg, ins->inst_c0)->live_range_start = code - cfg->native_code;
break;
}
case OP_LIVERANGE_END: {
if (cfg->verbose_level > 1)
printf ("R%d END=0x%x\n", MONO_VARINFO (cfg, ins->inst_c0)->vreg, (int)(code - cfg->native_code));
MONO_VARINFO (cfg, ins->inst_c0)->live_range_end = code - cfg->native_code;
break;
}
case OP_GC_SAFE_POINT: {
guint8 *br [1];
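/* Test the thread's polling flag; when it is set, call mono_threads_state_poll */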
amd64_test_membase_imm_size (code, ins->sreg1, 0, 1, 4);
br[0] = code; x86_branch8 (code, X86_CC_EQ, 0, FALSE);
code = emit_call (cfg, NULL, code, MONO_JIT_ICALL_mono_threads_state_poll);
amd64_patch (br[0], code);
break;
}
case OP_GC_LIVENESS_DEF:
case OP_GC_LIVENESS_USE:
case OP_GC_PARAM_SLOT_LIVENESS_DEF:
ins->backend.pc_offset = code - cfg->native_code;
break;
case OP_GC_SPILL_SLOT_LIVENESS_DEF:
ins->backend.pc_offset = code - cfg->native_code;
bb->spill_slot_defs = g_slist_prepend_mempool (cfg->mempool, bb->spill_slot_defs, ins);
break;
case OP_GET_LAST_ERROR:
code = emit_get_last_error(code, ins->dreg);
break;
case OP_FILL_PROF_CALL_CTX:
for (int i = 0; i < AMD64_NREG; i++)
if (AMD64_IS_CALLEE_SAVED_REG (i) || i == AMD64_RSP)
amd64_mov_membase_reg (code, ins->sreg1, MONO_STRUCT_OFFSET (MonoContext, gregs) + i * sizeof (target_mgreg_t), i, sizeof (target_mgreg_t));
break;
default:
g_warning ("unknown opcode %s in %s()\n", mono_inst_name (ins->opcode), __FUNCTION__);
g_assert_not_reached ();
}
g_assertf ((code - cfg->native_code - offset) <= max_len,
"wrong maximal instruction length of instruction %s (expected %d, got %d)",
mono_inst_name (ins->opcode), max_len, (int)(code - cfg->native_code - offset));
}
set_code_cursor (cfg, code);
}
#endif /* DISABLE_JIT */
G_BEGIN_DECLS
void __chkstk (void);
void ___chkstk_ms (void);
G_END_DECLS
void
mono_arch_register_lowlevel_calls (void)
{
/* The signature doesn't matter */
mono_register_jit_icall (mono_amd64_throw_exception, mono_icall_sig_void, TRUE);
#if defined(TARGET_WIN32) || defined(HOST_WIN32)
#if _MSC_VER
mono_register_jit_icall_info (&mono_get_jit_icall_info ()->mono_chkstk_win64, __chkstk, "mono_chkstk_win64", NULL, TRUE, "__chkstk");
#else
mono_register_jit_icall_info (&mono_get_jit_icall_info ()->mono_chkstk_win64, ___chkstk_ms, "mono_chkstk_win64", NULL, TRUE, "___chkstk_ms");
#endif
#endif
}
void
mono_arch_patch_code_new (MonoCompile *cfg, guint8 *code, MonoJumpInfo *ji, gpointer target)
{
unsigned char *ip = ji->ip.i + code;
/*
 * Debug code to help track down problems where the target of a near call
 * is not valid.
 */
if (amd64_is_near_call (ip)) {
gint64 disp = (guint8*)target - (guint8*)ip;
if (!amd64_is_imm32 (disp)) {
printf ("TYPE: %d\n", ji->type);
switch (ji->type) {
case MONO_PATCH_INFO_JIT_ICALL_ID:
printf ("V: %s\n", mono_find_jit_icall_info (ji->data.jit_icall_id)->name);
break;
case MONO_PATCH_INFO_METHOD_JUMP:
case MONO_PATCH_INFO_METHOD:
printf ("V: %s\n", ji->data.method->name);
break;
default:
break;
}
}
}
amd64_patch (ip, (gpointer)target);
}
#ifndef DISABLE_JIT
static int
get_max_epilog_size (MonoCompile *cfg)
{
int max_epilog_size = 16;
if (cfg->method->save_lmf)
max_epilog_size += 256;
max_epilog_size += (AMD64_NREG * 2);
return max_epilog_size;
}
/*
* This macro is used for testing whether the unwinder works correctly at every point
* where an async exception can happen.
*/
/* This will generate a SIGSEGV at the given point in the code */
#define async_exc_point(code) do { \
if (mono_inject_async_exc_method && mono_method_desc_full_match (mono_inject_async_exc_method, cfg->method)) { \
if (cfg->arch.async_point_count == mono_inject_async_exc_pos) \
amd64_mov_reg_mem (code, AMD64_RAX, 0, 4); \
cfg->arch.async_point_count ++; \
} \
} while (0)
#ifdef TARGET_WIN32
static guint8 *
emit_prolog_setup_sp_win64 (MonoCompile *cfg, guint8 *code, int alloc_size, int *cfa_offset_input)
{
int cfa_offset = *cfa_offset_input;
/* Allocate windows stack frame using stack probing method */
if (alloc_size) {
if (alloc_size >= 0x1000) {
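/* Allocations of a page or more must be probed so Windows can commit stack pages one guard page at a time */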
amd64_mov_reg_imm (code, AMD64_RAX, alloc_size);
code = emit_call (cfg, NULL, code, MONO_JIT_ICALL_mono_chkstk_win64);
}
amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, alloc_size);
if (cfg->arch.omit_fp) {
cfa_offset += alloc_size;
mono_emit_unwind_op_def_cfa_offset (cfg, code, cfa_offset);
async_exc_point (code);
}
// NOTE: in a standard win64 prolog the alloc unwind info is always emitted, but since mono
// uses a frame pointer with negative offsets while a standard win64 prolog assumes positive offsets,
// we can't emit sp alloc unwind metadata: the native OS unwinder would incorrectly restore sp.
// Excluding the alloc metadata instead leaves the OS with no sp adjustment to apply, so it simply
// restores the frame pointer to sp, which yields the expected result.
if (cfg->arch.omit_fp)
mono_emit_unwind_op_sp_alloc (cfg, code, alloc_size);
}
*cfa_offset_input = cfa_offset;
set_code_cursor (cfg, code);
return code;
}
#endif /* TARGET_WIN32 */
guint8 *
mono_arch_emit_prolog (MonoCompile *cfg)
{
MonoMethod *method = cfg->method;
MonoBasicBlock *bb;
MonoMethodSignature *sig;
MonoInst *ins;
int alloc_size, pos, i, cfa_offset, quad, max_epilog_size, save_area_offset;
guint8 *code;
CallInfo *cinfo;
MonoInst *lmf_var = cfg->lmf_var;
gboolean args_clobbered = FALSE;
cfg->code_size = MAX (cfg->header->code_size * 4, 1024);
code = cfg->native_code = (unsigned char *)g_malloc (cfg->code_size);
/* Amount of stack space allocated by register saving code */
pos = 0;
/* Offset between RSP and the CFA */
cfa_offset = 0;
/*
* The prolog consists of the following parts:
* FP present:
* - push rbp
* - mov rbp, rsp
* - save callee saved regs using moves
* - allocate frame
* - save rgctx if needed
* - save lmf if needed
* FP not present:
* - allocate frame
* - save rgctx if needed
* - save lmf if needed
* - save callee saved regs using moves
*/
// CFA = sp + 8
cfa_offset = 8;
mono_emit_unwind_op_def_cfa (cfg, code, AMD64_RSP, 8);
// IP saved at CFA - 8
mono_emit_unwind_op_offset (cfg, code, AMD64_RIP, -cfa_offset);
async_exc_point (code);
mini_gc_set_slot_type_from_cfa (cfg, -cfa_offset, SLOT_NOREF);
if (!cfg->arch.omit_fp) {
amd64_push_reg (code, AMD64_RBP);
cfa_offset += 8;
mono_emit_unwind_op_def_cfa_offset (cfg, code, cfa_offset);
mono_emit_unwind_op_offset (cfg, code, AMD64_RBP, - cfa_offset);
async_exc_point (code);
/* These are handled automatically by the stack marking code */
mini_gc_set_slot_type_from_cfa (cfg, -cfa_offset, SLOT_NOREF);
amd64_mov_reg_reg (code, AMD64_RBP, AMD64_RSP, sizeof (target_mgreg_t));
mono_emit_unwind_op_def_cfa_reg (cfg, code, AMD64_RBP);
mono_emit_unwind_op_fp_alloc (cfg, code, AMD64_RBP, 0);
async_exc_point (code);
}
/* The param area is always at offset 0 from sp */
/* This needs to be allocated here, since it has to come after the spill area */
if (cfg->param_area) {
if (cfg->arch.omit_fp)
// FIXME:
g_assert_not_reached ();
cfg->stack_offset += ALIGN_TO (cfg->param_area, sizeof (target_mgreg_t));
}
if (cfg->arch.omit_fp) {
/*
* On enter, the stack is misaligned by the pushing of the return
* address. It is either made aligned by the pushing of %rbp, or by
* this.
*/
alloc_size = ALIGN_TO (cfg->stack_offset, 8);
if ((alloc_size % 16) == 0) {
alloc_size += 8;
/* Mark the padding slot as NOREF */
mini_gc_set_slot_type_from_cfa (cfg, -cfa_offset - sizeof (target_mgreg_t), SLOT_NOREF);
}
} else {
alloc_size = ALIGN_TO (cfg->stack_offset, MONO_ARCH_FRAME_ALIGNMENT);
if (cfg->stack_offset != alloc_size) {
/* Mark the padding slot as NOREF */
mini_gc_set_slot_type_from_fp (cfg, -alloc_size + cfg->param_area, SLOT_NOREF);
}
cfg->arch.sp_fp_offset = alloc_size;
alloc_size -= pos;
}
cfg->arch.stack_alloc_size = alloc_size;
set_code_cursor (cfg, code);
/* Allocate stack frame */
#ifdef TARGET_WIN32
code = emit_prolog_setup_sp_win64 (cfg, code, alloc_size, &cfa_offset);
#else
if (alloc_size) {
/* See mono_emit_stack_alloc */
#if defined(MONO_ARCH_SIGSEGV_ON_ALTSTACK)
guint32 remaining_size = alloc_size;
/* Use a loop for large sizes */
if (remaining_size > 10 * 0x1000) {
amd64_mov_reg_imm (code, X86_EAX, remaining_size / 0x1000);
guint8 *label = code;
amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 0x1000);
amd64_test_membase_reg (code, AMD64_RSP, 0, AMD64_RSP);
amd64_alu_reg_imm (code, X86_SUB, AMD64_RAX, 1);
amd64_alu_reg_imm (code, X86_CMP, AMD64_RAX, 0);
guint8 *label2 = code;
x86_branch8 (code, X86_CC_NE, 0, FALSE);
amd64_patch (label2, label);
if (cfg->arch.omit_fp) {
cfa_offset += (remaining_size / 0x1000) * 0x1000;
mono_emit_unwind_op_def_cfa_offset (cfg, code, cfa_offset);
}
remaining_size = remaining_size % 0x1000;
set_code_cursor (cfg, code);
}
guint32 required_code_size = ((remaining_size / 0x1000) + 1) * 11; /*11 is the max size of amd64_alu_reg_imm + amd64_test_membase_reg*/
code = realloc_code (cfg, required_code_size);
while (remaining_size >= 0x1000) {
amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 0x1000);
if (cfg->arch.omit_fp) {
cfa_offset += 0x1000;
mono_emit_unwind_op_def_cfa_offset (cfg, code, cfa_offset);
}
async_exc_point (code);
amd64_test_membase_reg (code, AMD64_RSP, 0, AMD64_RSP);
remaining_size -= 0x1000;
}
if (remaining_size) {
amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, remaining_size);
if (cfg->arch.omit_fp) {
cfa_offset += remaining_size;
mono_emit_unwind_op_def_cfa_offset (cfg, code, cfa_offset);
async_exc_point (code);
}
}
#else
amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, alloc_size);
if (cfg->arch.omit_fp) {
cfa_offset += alloc_size;
mono_emit_unwind_op_def_cfa_offset (cfg, code, cfa_offset);
async_exc_point (code);
}
#endif
}
#endif
/* Stack alignment check */
#if 0
{
guint8 *buf;
amd64_mov_reg_reg (code, AMD64_RAX, AMD64_RSP, 8);
amd64_alu_reg_imm (code, X86_AND, AMD64_RAX, 0xf);
amd64_alu_reg_imm (code, X86_CMP, AMD64_RAX, 0);
buf = code;
x86_branch8 (code, X86_CC_EQ, 1, FALSE);
amd64_breakpoint (code);
amd64_patch (buf, code);
}
#endif
if (mini_debug_options.init_stacks) {
/* Fill the stack frame with a dummy value to force deterministic behavior */
/* Save registers to the red zone */
amd64_mov_membase_reg (code, AMD64_RSP, -8, AMD64_RDI, 8);
amd64_mov_membase_reg (code, AMD64_RSP, -16, AMD64_RCX, 8);
MONO_DISABLE_WARNING (4310) // cast truncates constant value
amd64_mov_reg_imm (code, AMD64_RAX, 0x2a2a2a2a2a2a2a2a);
MONO_RESTORE_WARNING
amd64_mov_reg_imm (code, AMD64_RCX, alloc_size / 8);
amd64_mov_reg_reg (code, AMD64_RDI, AMD64_RSP, 8);
amd64_cld (code);
amd64_prefix (code, X86_REP_PREFIX);
amd64_stosl (code);
amd64_mov_reg_membase (code, AMD64_RDI, AMD64_RSP, -8, 8);
amd64_mov_reg_membase (code, AMD64_RCX, AMD64_RSP, -16, 8);
}
/* Save LMF */
if (method->save_lmf)
code = emit_setup_lmf (cfg, code, lmf_var->inst_offset, cfa_offset);
/* Save callee saved registers */
if (cfg->arch.omit_fp) {
save_area_offset = cfg->arch.reg_save_area_offset;
/* Save callee saved registers after sp is adjusted */
/* The registers are saved at the bottom of the frame */
/* FIXME: Optimize this so the regs are saved at the end of the frame in increasing order */
} else {
/* The registers are saved just below the saved rbp */
save_area_offset = cfg->arch.reg_save_area_offset;
}
for (i = 0; i < AMD64_NREG; ++i) {
if (AMD64_IS_CALLEE_SAVED_REG (i) && (cfg->arch.saved_iregs & (1 << i))) {
amd64_mov_membase_reg (code, cfg->frame_reg, save_area_offset, i, 8);
if (cfg->arch.omit_fp) {
mono_emit_unwind_op_offset (cfg, code, i, - (cfa_offset - save_area_offset));
/* These are handled automatically by the stack marking code */
mini_gc_set_slot_type_from_cfa (cfg, - (cfa_offset - save_area_offset), SLOT_NOREF);
} else {
mono_emit_unwind_op_offset (cfg, code, i, - (-save_area_offset + (2 * 8)));
// FIXME: GC
}
save_area_offset += 8;
async_exc_point (code);
}
}
/* store runtime generic context */
if (cfg->rgctx_var) {
g_assert (cfg->rgctx_var->opcode == OP_REGOFFSET &&
(cfg->rgctx_var->inst_basereg == AMD64_RBP || cfg->rgctx_var->inst_basereg == AMD64_RSP));
amd64_mov_membase_reg (code, cfg->rgctx_var->inst_basereg, cfg->rgctx_var->inst_offset, MONO_ARCH_RGCTX_REG, sizeof(gpointer));
mono_add_var_location (cfg, cfg->rgctx_var, TRUE, MONO_ARCH_RGCTX_REG, 0, 0, code - cfg->native_code);
mono_add_var_location (cfg, cfg->rgctx_var, FALSE, cfg->rgctx_var->inst_basereg, cfg->rgctx_var->inst_offset, code - cfg->native_code, 0);
}
/* compute max_length in order to use short forward jumps */
max_epilog_size = get_max_epilog_size (cfg);
if (cfg->opt & MONO_OPT_BRANCH && cfg->max_block_num < MAX_BBLOCKS_FOR_BRANCH_OPTS) {
for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
MonoInst *ins;
int max_length = 0;
/* max alignment for loops */
if ((cfg->opt & MONO_OPT_LOOP) && bb_is_loop_start (bb))
max_length += LOOP_ALIGNMENT;
MONO_BB_FOR_EACH_INS (bb, ins) {
max_length += ins_get_size (ins->opcode);
}
/* Take prolog and epilog instrumentation into account */
if (bb == cfg->bb_entry || bb == cfg->bb_exit)
max_length += max_epilog_size;
bb->max_length = max_length;
}
}
sig = mono_method_signature_internal (method);
pos = 0;
cinfo = cfg->arch.cinfo;
if (sig->ret->type != MONO_TYPE_VOID) {
/* Save volatile arguments to the stack */
if (cfg->vret_addr && (cfg->vret_addr->opcode != OP_REGVAR))
amd64_mov_membase_reg (code, cfg->vret_addr->inst_basereg, cfg->vret_addr->inst_offset, cinfo->ret.reg, 8);
}
/* Keep this in sync with emit_load_volatile_arguments */
for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
ArgInfo *ainfo = cinfo->args + i;
ins = cfg->args [i];
if (ins->flags & MONO_INST_IS_DEAD && !MONO_CFG_PROFILE (cfg, ENTER_CONTEXT))
/* Unused arguments */
continue;
/* Save volatile arguments to the stack */
if (ins->opcode != OP_REGVAR) {
switch (ainfo->storage) {
case ArgInIReg: {
guint32 size = 8;
/* FIXME: I1 etc */
/*
if (stack_offset & 0x1)
size = 1;
else if (stack_offset & 0x2)
size = 2;
else if (stack_offset & 0x4)
size = 4;
else
size = 8;
*/
amd64_mov_membase_reg (code, ins->inst_basereg, ins->inst_offset, ainfo->reg, size);
/*
* Save the original location of 'this',
* mono_get_generic_info_from_stack_frame () needs this to properly look up
* the argument value during the handling of async exceptions.
*/
if (i == 0 && sig->hasthis) {
mono_add_var_location (cfg, ins, TRUE, ainfo->reg, 0, 0, code - cfg->native_code);
mono_add_var_location (cfg, ins, FALSE, ins->inst_basereg, ins->inst_offset, code - cfg->native_code, 0);
}
break;
}
case ArgInFloatSSEReg:
amd64_movss_membase_reg (code, ins->inst_basereg, ins->inst_offset, ainfo->reg);
break;
case ArgInDoubleSSEReg:
amd64_movsd_membase_reg (code, ins->inst_basereg, ins->inst_offset, ainfo->reg);
break;
case ArgValuetypeInReg:
for (quad = 0; quad < 2; quad ++) {
switch (ainfo->pair_storage [quad]) {
case ArgInIReg:
amd64_mov_membase_reg (code, ins->inst_basereg, ins->inst_offset + (quad * sizeof (target_mgreg_t)), ainfo->pair_regs [quad], sizeof (target_mgreg_t));
break;
case ArgInFloatSSEReg:
amd64_movss_membase_reg (code, ins->inst_basereg, ins->inst_offset + (quad * sizeof (target_mgreg_t)), ainfo->pair_regs [quad]);
break;
case ArgInDoubleSSEReg:
amd64_movsd_membase_reg (code, ins->inst_basereg, ins->inst_offset + (quad * sizeof (target_mgreg_t)), ainfo->pair_regs [quad]);
break;
case ArgNone:
break;
default:
g_assert_not_reached ();
}
}
break;
case ArgValuetypeAddrInIReg:
if (ainfo->pair_storage [0] == ArgInIReg)
amd64_mov_membase_reg (code, ins->inst_left->inst_basereg, ins->inst_left->inst_offset, ainfo->pair_regs [0], sizeof (target_mgreg_t));
break;
case ArgValuetypeAddrOnStack:
break;
case ArgGSharedVtInReg:
amd64_mov_membase_reg (code, ins->inst_basereg, ins->inst_offset, ainfo->reg, 8);
break;
default:
break;
}
} else {
/* Argument allocated to (non-volatile) register */
switch (ainfo->storage) {
case ArgInIReg:
amd64_mov_reg_reg (code, ins->dreg, ainfo->reg, 8);
break;
case ArgOnStack:
amd64_mov_reg_membase (code, ins->dreg, AMD64_RBP, ARGS_OFFSET + ainfo->offset, 8);
break;
default:
g_assert_not_reached ();
}
if (i == 0 && sig->hasthis) {
g_assert (ainfo->storage == ArgInIReg);
mono_add_var_location (cfg, ins, TRUE, ainfo->reg, 0, 0, code - cfg->native_code);
mono_add_var_location (cfg, ins, TRUE, ins->dreg, 0, code - cfg->native_code, 0);
}
}
}
if (cfg->method->save_lmf)
args_clobbered = TRUE;
/*
* Optimize the common case of the first bblock making a call with the same
* arguments as the method. This works because the arguments are still in their
* original argument registers.
* FIXME: Generalize this
*/
if (!args_clobbered) {
MonoBasicBlock *first_bb = cfg->bb_entry;
MonoInst *next;
int filter = FILTER_IL_SEQ_POINT;
next = mono_bb_first_inst (first_bb, filter);
if (!next && first_bb->next_bb) {
first_bb = first_bb->next_bb;
next = mono_bb_first_inst (first_bb, filter);
}
if (first_bb->in_count > 1)
next = NULL;
for (i = 0; next && i < sig->param_count + sig->hasthis; ++i) {
ArgInfo *ainfo = cinfo->args + i;
gboolean match = FALSE;
ins = cfg->args [i];
if (ins->opcode != OP_REGVAR) {
switch (ainfo->storage) {
case ArgInIReg: {
if (((next->opcode == OP_LOAD_MEMBASE) || (next->opcode == OP_LOADI4_MEMBASE)) && next->inst_basereg == ins->inst_basereg && next->inst_offset == ins->inst_offset) {
if (next->dreg == ainfo->reg) {
NULLIFY_INS (next);
match = TRUE;
} else {
next->opcode = OP_MOVE;
next->sreg1 = ainfo->reg;
/* Only continue if the instruction doesn't change argument regs */
if (next->dreg == ainfo->reg || next->dreg == AMD64_RAX)
match = TRUE;
}
}
break;
}
default:
break;
}
} else {
/* Argument allocated to (non-volatile) register */
switch (ainfo->storage) {
case ArgInIReg:
if (next->opcode == OP_MOVE && next->sreg1 == ins->dreg && next->dreg == ainfo->reg) {
NULLIFY_INS (next);
match = TRUE;
}
break;
default:
break;
}
}
if (match) {
next = mono_inst_next (next, filter);
//next = mono_inst_list_next (&next->node, &first_bb->ins_list);
if (!next)
break;
}
}
}
if (cfg->gen_sdb_seq_points) {
MonoInst *info_var = cfg->arch.seq_point_info_var;
/* Initialize seq_point_info_var */
if (cfg->compile_aot) {
/* Initialize the variable from a GOT slot */
/* Same as OP_AOTCONST */
mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_SEQ_POINT_INFO, cfg->method);
amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, sizeof(gpointer));
g_assert (info_var->opcode == OP_REGOFFSET);
amd64_mov_membase_reg (code, info_var->inst_basereg, info_var->inst_offset, AMD64_R11, 8);
}
if (cfg->compile_aot) {
/* Initialize ss_tramp_var */
ins = cfg->arch.ss_tramp_var;
g_assert (ins->opcode == OP_REGOFFSET);
amd64_mov_reg_membase (code, AMD64_R11, info_var->inst_basereg, info_var->inst_offset, 8);
amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, MONO_STRUCT_OFFSET (SeqPointInfo, ss_tramp_addr), 8);
amd64_mov_membase_reg (code, ins->inst_basereg, ins->inst_offset, AMD64_R11, 8);
} else {
/* Initialize ss_tramp_var */
ins = cfg->arch.ss_tramp_var;
g_assert (ins->opcode == OP_REGOFFSET);
amd64_mov_reg_imm (code, AMD64_R11, (guint64)&ss_trampoline);
amd64_mov_membase_reg (code, ins->inst_basereg, ins->inst_offset, AMD64_R11, 8);
/* Initialize bp_tramp_var */
ins = cfg->arch.bp_tramp_var;
g_assert (ins->opcode == OP_REGOFFSET);
amd64_mov_reg_imm (code, AMD64_R11, (guint64)&bp_trampoline);
amd64_mov_membase_reg (code, ins->inst_basereg, ins->inst_offset, AMD64_R11, 8);
}
}
set_code_cursor (cfg, code);
return code;
}
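/*
 * mono_arch_emit_epilog:
 *
 *   Emit the method epilog: restore the callee saved registers, load vtype
 * return values into the return registers, free the stack frame and return,
 * emitting unwind ops so the epilog itself remains unwindable.
 */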
void
mono_arch_emit_epilog (MonoCompile *cfg)
{
MonoMethod *method = cfg->method;
int quad, i;
guint8 *code;
int max_epilog_size;
CallInfo *cinfo;
gint32 lmf_offset = cfg->lmf_var ? cfg->lmf_var->inst_offset : -1;
gint32 save_area_offset = cfg->arch.reg_save_area_offset;
max_epilog_size = get_max_epilog_size (cfg);
code = realloc_code (cfg, max_epilog_size);
cfg->has_unwind_info_for_epilog = TRUE;
/* Mark the start of the epilog */
mono_emit_unwind_op_mark_loc (cfg, code, 0);
/* Save the uwind state which is needed by the out-of-line code */
mono_emit_unwind_op_remember_state (cfg, code);
/* the code restoring the registers must be kept in sync with OP_TAILCALL */
if (method->save_lmf) {
if (cfg->used_int_regs & (1 << AMD64_RBP))
amd64_mov_reg_membase (code, AMD64_RBP, cfg->frame_reg, lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, rbp), 8);
if (cfg->arch.omit_fp)
/*
* emit_setup_lmf () marks RBP as saved, we have to mark it as same value here before clearing up the stack
* since its stack slot will become invalid.
*/
mono_emit_unwind_op_same_value (cfg, code, AMD64_RBP);
}
/* Restore callee saved regs */
for (i = 0; i < AMD64_NREG; ++i) {
if (AMD64_IS_CALLEE_SAVED_REG (i) && (cfg->arch.saved_iregs & (1 << i))) {
/* Restore only used_int_regs, not arch.saved_iregs */
#if defined(MONO_SUPPORT_TASKLETS)
int restore_reg = 1;
#else
int restore_reg = (cfg->used_int_regs & (1 << i));
#endif
if (restore_reg) {
amd64_mov_reg_membase (code, i, cfg->frame_reg, save_area_offset, 8);
mono_emit_unwind_op_same_value (cfg, code, i);
async_exc_point (code);
}
save_area_offset += 8;
}
}
/* Load returned vtypes into registers if needed */
cinfo = cfg->arch.cinfo;
if (cinfo->ret.storage == ArgValuetypeInReg) {
ArgInfo *ainfo = &cinfo->ret;
MonoInst *inst = cfg->ret;
for (quad = 0; quad < 2; quad ++) {
switch (ainfo->pair_storage [quad]) {
case ArgInIReg:
amd64_mov_reg_membase (code, ainfo->pair_regs [quad], inst->inst_basereg, inst->inst_offset + (quad * sizeof (target_mgreg_t)), ainfo->pair_size [quad]);
break;
case ArgInFloatSSEReg:
amd64_movss_reg_membase (code, ainfo->pair_regs [quad], inst->inst_basereg, inst->inst_offset + (quad * sizeof (target_mgreg_t)));
break;
case ArgInDoubleSSEReg:
amd64_movsd_reg_membase (code, ainfo->pair_regs [quad], inst->inst_basereg, inst->inst_offset + (quad * sizeof (target_mgreg_t)));
break;
case ArgNone:
break;
default:
g_assert_not_reached ();
}
}
}
if (cfg->arch.omit_fp) {
if (cfg->arch.stack_alloc_size) {
amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, cfg->arch.stack_alloc_size);
}
} else {
#ifdef TARGET_WIN32
amd64_lea_membase (code, AMD64_RSP, AMD64_RBP, 0);
amd64_pop_reg (code, AMD64_RBP);
mono_emit_unwind_op_same_value (cfg, code, AMD64_RBP);
#else
amd64_leave (code);
mono_emit_unwind_op_same_value (cfg, code, AMD64_RBP);
#endif
}
mono_emit_unwind_op_def_cfa (cfg, code, AMD64_RSP, 8);
async_exc_point (code);
amd64_ret (code);
/* Restore the unwind state to be the same as before the epilog */
mono_emit_unwind_op_restore_state (cfg, code);
set_code_cursor (cfg, code);
}
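/*
 * mono_arch_emit_exceptions:
 *
 *   Emit the out-of-line code which throws exceptions, reusing the throw
 * sequence for repeated exception classes, and resolve the RIP relative
 * relocations used for R4/R8 constants and the GC card table address.
 */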
void
mono_arch_emit_exceptions (MonoCompile *cfg)
{
MonoJumpInfo *patch_info;
int nthrows, i;
guint8 *code;
MonoClass *exc_classes [16];
guint8 *exc_throw_start [16], *exc_throw_end [16];
guint32 code_size = 0;
/* Compute needed space */
for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
if (patch_info->type == MONO_PATCH_INFO_EXC)
code_size += 40;
if (patch_info->type == MONO_PATCH_INFO_R8)
code_size += 8 + 15; /* sizeof (double) + alignment */
if (patch_info->type == MONO_PATCH_INFO_R4)
code_size += 4 + 15; /* sizeof (float) + alignment */
if (patch_info->type == MONO_PATCH_INFO_GC_CARD_TABLE_ADDR)
code_size += 8 + 7; /*sizeof (void*) + alignment */
}
code = realloc_code (cfg, code_size);
/* add code to raise exceptions */
nthrows = 0;
for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
switch (patch_info->type) {
case MONO_PATCH_INFO_EXC: {
MonoClass *exc_class;
guint8 *buf, *buf2;
guint32 throw_ip;
amd64_patch (patch_info->ip.i + cfg->native_code, code);
exc_class = mono_class_load_from_name (mono_defaults.corlib, "System", patch_info->data.name);
throw_ip = patch_info->ip.i;
//x86_breakpoint (code);
/* Find a throw sequence for the same exception class */
for (i = 0; i < nthrows; ++i)
if (exc_classes [i] == exc_class)
break;
if (i < nthrows) {
amd64_mov_reg_imm (code, AMD64_ARG_REG2, (exc_throw_end [i] - cfg->native_code) - throw_ip);
x86_jump_code (code, exc_throw_start [i]);
patch_info->type = MONO_PATCH_INFO_NONE;
}
else {
buf = code;
amd64_mov_reg_imm_size (code, AMD64_ARG_REG2, 0xf0f0f0f0, 4);
buf2 = code;
if (nthrows < 16) {
exc_classes [nthrows] = exc_class;
exc_throw_start [nthrows] = code;
}
amd64_mov_reg_imm (code, AMD64_ARG_REG1, m_class_get_type_token (exc_class) - MONO_TOKEN_TYPE_DEF);
patch_info->type = MONO_PATCH_INFO_NONE;
code = emit_call (cfg, NULL, code, MONO_JIT_ICALL_mono_arch_throw_corlib_exception);
amd64_mov_reg_imm (buf, AMD64_ARG_REG2, (code - cfg->native_code) - throw_ip);
while (buf < buf2)
x86_nop (buf);
if (nthrows < 16) {
exc_throw_end [nthrows] = code;
nthrows ++;
}
}
break;
}
default:
/* do nothing */
break;
}
set_code_cursor (cfg, code);
}
/* Handle relocations with RIP relative addressing */
for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
gboolean remove = FALSE;
guint8 *orig_code = code;
switch (patch_info->type) {
case MONO_PATCH_INFO_R8:
case MONO_PATCH_INFO_R4: {
guint8 *pos, *patch_pos;
guint32 target_pos;
/* The SSE opcodes require a 16 byte alignment */
code = (guint8*)ALIGN_TO (code, 16);
pos = cfg->native_code + patch_info->ip.i;
if (IS_REX (pos [1])) {
patch_pos = pos + 5;
target_pos = code - pos - 9;
}
else {
patch_pos = pos + 4;
target_pos = code - pos - 8;
}
if (patch_info->type == MONO_PATCH_INFO_R8) {
*(double*)code = *(double*)patch_info->data.target;
code += sizeof (double);
} else {
*(float*)code = *(float*)patch_info->data.target;
code += sizeof (float);
}
*(guint32*)(patch_pos) = target_pos;
remove = TRUE;
break;
}
case MONO_PATCH_INFO_GC_CARD_TABLE_ADDR: {
guint8 *pos;
if (cfg->compile_aot)
continue;
/*loading is faster against aligned addresses.*/
code = (guint8*)ALIGN_TO (code, 8);
memset (orig_code, 0, code - orig_code);
pos = cfg->native_code + patch_info->ip.i;
/*alu_op [rex] modr/m imm32 - 7 or 8 bytes */
if (IS_REX (pos [1]))
*(guint32*)(pos + 4) = (guint8*)code - pos - 8;
else
*(guint32*)(pos + 3) = (guint8*)code - pos - 7;
*(gpointer*)code = (gpointer)patch_info->data.target;
code += sizeof (gpointer);
remove = TRUE;
break;
}
default:
break;
}
if (remove) {
if (patch_info == cfg->patch_info)
cfg->patch_info = patch_info->next;
else {
MonoJumpInfo *tmp;
for (tmp = cfg->patch_info; tmp->next != patch_info; tmp = tmp->next)
;
tmp->next = patch_info->next;
}
}
set_code_cursor (cfg, code);
}
set_code_cursor (cfg, code);
}
#endif /* DISABLE_JIT */
MONO_NEVER_INLINE
void
mono_arch_flush_icache (guint8 *code, gint size)
{
/* call/ret required (or likely other control transfer) */
}
void
mono_arch_flush_register_windows (void)
{
}
gboolean
mono_arch_is_inst_imm (int opcode, int imm_opcode, gint64 imm)
{
return amd64_use_imm32 (imm);
}
/*
 * Determine whether the trap whose info is in SIGINFO is caused by
* integer overflow.
*/
gboolean
mono_arch_is_int_overflow (void *sigctx, void *info)
{
MonoContext ctx;
guint8* rip;
int reg;
gint64 value;
mono_sigctx_to_monoctx (sigctx, &ctx);
rip = (guint8*)ctx.gregs [AMD64_RIP];
if (IS_REX (rip [0])) {
reg = amd64_rex_b (rip [0]);
rip ++;
}
else
reg = 0;
if ((rip [0] == 0xf7) && (x86_modrm_mod (rip [1]) == 0x3) && (x86_modrm_reg (rip [1]) == 0x7)) {
/* idiv REG */
reg += x86_modrm_rm (rip [1]);
value = ctx.gregs [reg];
if (value == -1)
return TRUE;
}
return FALSE;
}
guint32
mono_arch_get_patch_offset (guint8 *code)
{
return 3;
}
/**
* \return TRUE if no sw breakpoint was present (always).
*
* Copy \p size bytes from \p code - \p offset to the buffer \p buf. If the debugger inserted software
* breakpoints in the original code, they are removed in the copy.
*/
gboolean
mono_breakpoint_clean_code (guint8 *method_start, guint8 *code, int offset, guint8 *buf, int size)
{
/*
* If method_start is non-NULL we need to perform bounds checks, since accessing memory
* at code - offset could go before the start of the method and end up in a different
* page of memory which is not mapped, or read incorrect data. We zero-fill the
* out-of-range bytes instead.
*/
if (!method_start || code - offset >= method_start) {
memcpy (buf, code - offset, size);
} else {
int diff = code - method_start;
memset (buf, 0, size);
memcpy (buf + offset - diff, method_start, diff + size - offset);
}
return TRUE;
}
int
mono_arch_get_this_arg_reg (guint8 *code)
{
return AMD64_ARG_REG1;
}
gpointer
mono_arch_get_this_arg_from_call (host_mgreg_t *regs, guint8 *code)
{
return (gpointer)regs [mono_arch_get_this_arg_reg (code)];
}
#define MAX_ARCH_DELEGATE_PARAMS 10
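/*
 * get_delegate_invoke_impl:
 *
 *   Create a trampoline implementing delegate invocation. For the
 * has_target case the 'this' argument is replaced with the delegate target;
 * otherwise the arguments are shifted left by one position before jumping
 * to the method address stored in the delegate.
 */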
static gpointer
get_delegate_invoke_impl (MonoTrampInfo **info, gboolean has_target, guint32 param_count)
{
guint8 *code, *start;
GSList *unwind_ops = NULL;
int i;
unwind_ops = mono_arch_get_cie_program ();
const int size = 64;
start = code = (guint8 *)mono_global_codeman_reserve (size + MONO_TRAMPOLINE_UNWINDINFO_SIZE(0));
if (has_target) {
/* Replace the this argument with the target */
amd64_mov_reg_reg (code, AMD64_RAX, AMD64_ARG_REG1, 8);
amd64_mov_reg_membase (code, AMD64_ARG_REG1, AMD64_RAX, MONO_STRUCT_OFFSET (MonoDelegate, target), 8);
amd64_jump_membase (code, AMD64_RAX, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr));
} else {
if (param_count == 0) {
amd64_jump_membase (code, AMD64_ARG_REG1, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr));
} else {
/* We have to shift the arguments left */
amd64_mov_reg_reg (code, AMD64_RAX, AMD64_ARG_REG1, 8);
for (i = 0; i < param_count; ++i) {
#ifdef TARGET_WIN32
if (i < 3)
amd64_mov_reg_reg (code, param_regs [i], param_regs [i + 1], 8);
else
amd64_mov_reg_membase (code, param_regs [i], AMD64_RSP, 0x28, 8);
#else
amd64_mov_reg_reg (code, param_regs [i], param_regs [i + 1], 8);
#endif
}
amd64_jump_membase (code, AMD64_RAX, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr));
}
}
g_assertf ((code - start) <= size, "%d %d", (int)(code - start), size);
g_assert_checked (mono_arch_unwindinfo_validate_size (unwind_ops, MONO_TRAMPOLINE_UNWINDINFO_SIZE(0)));
mono_arch_flush_icache (start, code - start);
if (has_target) {
*info = mono_tramp_info_create ("delegate_invoke_impl_has_target", start, code - start, NULL, unwind_ops);
} else {
char *name = g_strdup_printf ("delegate_invoke_impl_target_%d", param_count);
*info = mono_tramp_info_create (name, start, code - start, NULL, unwind_ops);
g_free (name);
}
if (mono_jit_map_is_enabled ()) {
char *buff;
if (has_target)
buff = (char*)"delegate_invoke_has_target";
else
buff = g_strdup_printf ("delegate_invoke_no_target_%d", param_count);
mono_emit_jit_tramp (start, code - start, buff);
if (!has_target)
g_free (buff);
}
MONO_PROFILER_RAISE (jit_code_buffer, (start, code - start, MONO_PROFILER_CODE_BUFFER_DELEGATE_INVOKE, NULL));
return start;
}
#define MAX_VIRTUAL_DELEGATE_OFFSET 32
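/*
 * get_delegate_virtual_invoke_impl:
 *
 *   Create a trampoline for invoking a virtual method through a delegate:
 * replace 'this' with the delegate target, optionally load the IMT register,
 * then jump through the target's vtable slot at OFFSET.
 */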
static gpointer
get_delegate_virtual_invoke_impl (MonoTrampInfo **info, gboolean load_imt_reg, int offset)
{
guint8 *code, *start;
const int size = 20;
char *tramp_name;
GSList *unwind_ops;
if (offset / (int)sizeof (target_mgreg_t) > MAX_VIRTUAL_DELEGATE_OFFSET)
return NULL;
start = code = (guint8 *)mono_global_codeman_reserve (size + MONO_TRAMPOLINE_UNWINDINFO_SIZE(0));
unwind_ops = mono_arch_get_cie_program ();
/* Replace the this argument with the target */
amd64_mov_reg_reg (code, AMD64_RAX, AMD64_ARG_REG1, 8);
amd64_mov_reg_membase (code, AMD64_ARG_REG1, AMD64_RAX, MONO_STRUCT_OFFSET (MonoDelegate, target), 8);
if (load_imt_reg) {
/* Load the IMT reg */
amd64_mov_reg_membase (code, MONO_ARCH_IMT_REG, AMD64_RAX, MONO_STRUCT_OFFSET (MonoDelegate, method), 8);
}
/* Load the vtable */
amd64_mov_reg_membase (code, AMD64_RAX, AMD64_ARG_REG1, MONO_STRUCT_OFFSET (MonoObject, vtable), 8);
amd64_jump_membase (code, AMD64_RAX, offset);
g_assertf ((code - start) <= size, "%d %d", (int)(code - start), size);
MONO_PROFILER_RAISE (jit_code_buffer, (start, code - start, MONO_PROFILER_CODE_BUFFER_DELEGATE_INVOKE, NULL));
tramp_name = mono_get_delegate_virtual_invoke_impl_name (load_imt_reg, offset);
*info = mono_tramp_info_create (tramp_name, start, code - start, NULL, unwind_ops);
g_free (tramp_name);
return start;
}
/*
* mono_arch_get_delegate_invoke_impls:
*
* Return a list of MonoTrampInfo structures for the delegate invoke impl
* trampolines.
*/
GSList*
mono_arch_get_delegate_invoke_impls (void)
{
GSList *res = NULL;
MonoTrampInfo *info;
int i;
get_delegate_invoke_impl (&info, TRUE, 0);
res = g_slist_prepend (res, info);
for (i = 0; i <= MAX_ARCH_DELEGATE_PARAMS; ++i) {
get_delegate_invoke_impl (&info, FALSE, i);
res = g_slist_prepend (res, info);
}
for (i = 1; i <= MONO_IMT_SIZE; ++i) {
get_delegate_virtual_invoke_impl (&info, TRUE, - i * TARGET_SIZEOF_VOID_P);
res = g_slist_prepend (res, info);
}
for (i = 0; i <= MAX_VIRTUAL_DELEGATE_OFFSET; ++i) {
get_delegate_virtual_invoke_impl (&info, FALSE, i * TARGET_SIZEOF_VOID_P);
res = g_slist_prepend (res, info);
get_delegate_virtual_invoke_impl (&info, TRUE, i * TARGET_SIZEOF_VOID_P);
res = g_slist_prepend (res, info);
}
return res;
}
gpointer
mono_arch_get_delegate_invoke_impl (MonoMethodSignature *sig, gboolean has_target)
{
guint8 *code, *start;
int i;
if (sig->param_count > MAX_ARCH_DELEGATE_PARAMS)
return NULL;
/* FIXME: Support more cases */
if (MONO_TYPE_ISSTRUCT (mini_get_underlying_type (sig->ret)))
return NULL;
if (has_target) {
static guint8* cached = NULL;
if (cached)
return cached;
if (mono_ee_features.use_aot_trampolines) {
start = (guint8 *)mono_aot_get_trampoline ("delegate_invoke_impl_has_target");
} else {
MonoTrampInfo *info;
start = (guint8 *)get_delegate_invoke_impl (&info, TRUE, 0);
mono_tramp_info_register (info, NULL);
}
mono_memory_barrier ();
cached = start;
} else {
static guint8* cache [MAX_ARCH_DELEGATE_PARAMS + 1] = {NULL};
for (i = 0; i < sig->param_count; ++i)
if (!mono_is_regsize_var (sig->params [i]))
return NULL;
if (sig->param_count > 4)
return NULL;
code = cache [sig->param_count];
if (code)
return code;
if (mono_ee_features.use_aot_trampolines) {
char *name = g_strdup_printf ("delegate_invoke_impl_target_%d", sig->param_count);
start = (guint8 *)mono_aot_get_trampoline (name);
g_free (name);
} else {
MonoTrampInfo *info;
start = (guint8 *)get_delegate_invoke_impl (&info, FALSE, sig->param_count);
mono_tramp_info_register (info, NULL);
}
mono_memory_barrier ();
cache [sig->param_count] = start;
}
return start;
}
gpointer
mono_arch_get_delegate_virtual_invoke_impl (MonoMethodSignature *sig, MonoMethod *method, int offset, gboolean load_imt_reg)
{
MonoTrampInfo *info;
gpointer code;
code = get_delegate_virtual_invoke_impl (&info, load_imt_reg, offset);
if (code)
mono_tramp_info_register (info, NULL);
return code;
}
void
mono_arch_finish_init (void)
{
#if !defined(HOST_WIN32) && defined(MONO_XEN_OPT)
optimize_for_xen = access ("/proc/xen", F_OK) == 0;
#endif
}
#define CMP_SIZE (6 + 1)
#define CMP_REG_REG_SIZE (4 + 1)
#define BR_SMALL_SIZE 2
#define BR_LARGE_SIZE 6
#define MOV_REG_IMM_SIZE 10
#define MOV_REG_IMM_32BIT_SIZE 6
#define JUMP_REG_SIZE (2 + 1)
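/* Sum the chunk sizes of the IMT entries in [start, target), used to choose between 8 bit and 32 bit branches */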
static int
imt_branch_distance (MonoIMTCheckItem **imt_entries, int start, int target)
{
int i, distance = 0;
for (i = start; i < target; ++i)
distance += imt_entries [i]->chunk_size;
return distance;
}
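/*
 * mono_arch_build_imt_trampoline:
 *
 *   Build the native dispatch code for an IMT slot. A first pass computes the
 * chunk size of every entry, then a second pass emits compare-and-branch
 * sequences keyed on the value in MONO_ARCH_IMT_REG; FAIL_TRAMP, if given, is
 * the fallback target for keys which don't match any entry.
 */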
/*
* LOCKING: called with the domain lock held
*/
gpointer
mono_arch_build_imt_trampoline (MonoVTable *vtable, MonoIMTCheckItem **imt_entries, int count,
gpointer fail_tramp)
{
int i;
int size = 0;
guint8 *code, *start;
gboolean vtable_is_32bit = ((gsize)(vtable) == (gsize)(int)(gsize)(vtable));
GSList *unwind_ops;
MonoMemoryManager *mem_manager = m_class_get_mem_manager (vtable->klass);
for (i = 0; i < count; ++i) {
MonoIMTCheckItem *item = imt_entries [i];
if (item->is_equals) {
if (item->check_target_idx) {
if (!item->compare_done) {
if (amd64_use_imm32 ((gint64)item->key))
item->chunk_size += CMP_SIZE;
else
item->chunk_size += MOV_REG_IMM_SIZE + CMP_REG_REG_SIZE;
}
if (item->has_target_code) {
item->chunk_size += MOV_REG_IMM_SIZE;
} else {
if (vtable_is_32bit)
item->chunk_size += MOV_REG_IMM_32BIT_SIZE;
else
item->chunk_size += MOV_REG_IMM_SIZE;
}
item->chunk_size += BR_SMALL_SIZE + JUMP_REG_SIZE;
} else {
if (fail_tramp) {
item->chunk_size += MOV_REG_IMM_SIZE * 3 + CMP_REG_REG_SIZE +
BR_SMALL_SIZE + JUMP_REG_SIZE * 2;
} else {
if (vtable_is_32bit)
item->chunk_size += MOV_REG_IMM_32BIT_SIZE;
else
item->chunk_size += MOV_REG_IMM_SIZE;
item->chunk_size += JUMP_REG_SIZE;
/* with assert below:
* item->chunk_size += CMP_SIZE + BR_SMALL_SIZE + 1;
*/
}
}
} else {
if (amd64_use_imm32 ((gint64)item->key))
item->chunk_size += CMP_SIZE;
else
item->chunk_size += MOV_REG_IMM_SIZE + CMP_REG_REG_SIZE;
item->chunk_size += BR_LARGE_SIZE;
imt_entries [item->check_target_idx]->compare_done = TRUE;
}
size += item->chunk_size;
}
if (fail_tramp) {
code = (guint8 *)mini_alloc_generic_virtual_trampoline (vtable, size + MONO_TRAMPOLINE_UNWINDINFO_SIZE(0));
} else {
code = (guint8 *)mono_mem_manager_code_reserve (mem_manager, size + MONO_TRAMPOLINE_UNWINDINFO_SIZE(0));
}
start = code;
unwind_ops = mono_arch_get_cie_program ();
for (i = 0; i < count; ++i) {
MonoIMTCheckItem *item = imt_entries [i];
item->code_target = code;
if (item->is_equals) {
gboolean fail_case = !item->check_target_idx && fail_tramp;
if (item->check_target_idx || fail_case) {
if (!item->compare_done || fail_case) {
if (amd64_use_imm32 ((gint64)item->key))
amd64_alu_reg_imm_size (code, X86_CMP, MONO_ARCH_IMT_REG, (guint32)(gssize)item->key, sizeof(gpointer));
else {
amd64_mov_reg_imm_size (code, MONO_ARCH_IMT_SCRATCH_REG, item->key, sizeof(gpointer));
amd64_alu_reg_reg (code, X86_CMP, MONO_ARCH_IMT_REG, MONO_ARCH_IMT_SCRATCH_REG);
}
}
item->jmp_code = code;
amd64_branch8 (code, X86_CC_NE, 0, FALSE);
if (item->has_target_code) {
amd64_mov_reg_imm (code, MONO_ARCH_IMT_SCRATCH_REG, item->value.target_code);
amd64_jump_reg (code, MONO_ARCH_IMT_SCRATCH_REG);
} else {
amd64_mov_reg_imm (code, MONO_ARCH_IMT_SCRATCH_REG, & (vtable->vtable [item->value.vtable_slot]));
amd64_jump_membase (code, MONO_ARCH_IMT_SCRATCH_REG, 0);
}
if (fail_case) {
amd64_patch (item->jmp_code, code);
amd64_mov_reg_imm (code, MONO_ARCH_IMT_SCRATCH_REG, fail_tramp);
amd64_jump_reg (code, MONO_ARCH_IMT_SCRATCH_REG);
item->jmp_code = NULL;
}
} else {
/* enable the commented code to assert on wrong method */
#if 0
if (amd64_is_imm32 (item->key))
amd64_alu_reg_imm_size (code, X86_CMP, MONO_ARCH_IMT_REG, (guint32)(gssize)item->key, sizeof(gpointer));
else {
amd64_mov_reg_imm (code, MONO_ARCH_IMT_SCRATCH_REG, item->key);
amd64_alu_reg_reg (code, X86_CMP, MONO_ARCH_IMT_REG, MONO_ARCH_IMT_SCRATCH_REG);
}
item->jmp_code = code;
amd64_branch8 (code, X86_CC_NE, 0, FALSE);
/* See the comment below about R10 */
amd64_mov_reg_imm (code, MONO_ARCH_IMT_SCRATCH_REG, & (vtable->vtable [item->value.vtable_slot]));
amd64_jump_membase (code, MONO_ARCH_IMT_SCRATCH_REG, 0);
amd64_patch (item->jmp_code, code);
amd64_breakpoint (code);
item->jmp_code = NULL;
#else
/* We're using R11 (MONO_ARCH_IMT_SCRATCH_REG) here because R10 (MONO_ARCH_IMT_REG)
needs to be preserved: R10 needs to be preserved for calls which require a
runtime generic context, but interface calls don't. */
amd64_mov_reg_imm (code, MONO_ARCH_IMT_SCRATCH_REG, & (vtable->vtable [item->value.vtable_slot]));
amd64_jump_membase (code, MONO_ARCH_IMT_SCRATCH_REG, 0);
#endif
}
} else {
if (amd64_use_imm32 ((gint64)item->key))
amd64_alu_reg_imm_size (code, X86_CMP, MONO_ARCH_IMT_REG, (guint32)(gssize)item->key, sizeof (target_mgreg_t));
else {
amd64_mov_reg_imm_size (code, MONO_ARCH_IMT_SCRATCH_REG, item->key, sizeof (target_mgreg_t));
amd64_alu_reg_reg (code, X86_CMP, MONO_ARCH_IMT_REG, MONO_ARCH_IMT_SCRATCH_REG);
}
item->jmp_code = code;
if (x86_is_imm8 (imt_branch_distance (imt_entries, i, item->check_target_idx)))
x86_branch8 (code, X86_CC_GE, 0, FALSE);
else
x86_branch32 (code, X86_CC_GE, 0, FALSE);
}
g_assertf (code - item->code_target <= item->chunk_size, "%X %X", (guint)(code - item->code_target), (guint)item->chunk_size);
}
/* patch the branches to get to the target items */
for (i = 0; i < count; ++i) {
MonoIMTCheckItem *item = imt_entries [i];
if (item->jmp_code) {
if (item->check_target_idx) {
amd64_patch (item->jmp_code, imt_entries [item->check_target_idx]->code_target);
}
}
}
if (!fail_tramp)
UnlockedAdd (&mono_stats.imt_trampolines_size, code - start);
g_assert (code - start <= size);
g_assert_checked (mono_arch_unwindinfo_validate_size (unwind_ops, MONO_TRAMPOLINE_UNWINDINFO_SIZE(0)));
MONO_PROFILER_RAISE (jit_code_buffer, (start, code - start, MONO_PROFILER_CODE_BUFFER_IMT_TRAMPOLINE, NULL));
mono_tramp_info_register (mono_tramp_info_create (NULL, start, code - start, NULL, unwind_ops), mem_manager);
return start;
}
MonoMethod*
mono_arch_find_imt_method (host_mgreg_t *regs, guint8 *code)
{
return (MonoMethod*)regs [MONO_ARCH_IMT_REG];
}
MonoVTable*
mono_arch_find_static_call_vtable (host_mgreg_t *regs, guint8 *code)
{
return (MonoVTable*) regs [MONO_ARCH_RGCTX_REG];
}
GSList*
mono_arch_get_cie_program (void)
{
GSList *l = NULL;
mono_add_unwind_op_def_cfa (l, (guint8*)NULL, (guint8*)NULL, AMD64_RSP, 8);
mono_add_unwind_op_offset (l, (guint8*)NULL, (guint8*)NULL, AMD64_RIP, -8);
return l;
}
#ifndef DISABLE_JIT
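/*
 * mono_arch_emit_inst_for_method:
 *
 *   Emit an arch specific intrinsic instruction for certain System.Math
 * methods (Sqrt, Abs, Min/Max, Round/Floor/Ceiling) instead of a call.
 * Returns NULL if the method is not handled here.
 */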
MonoInst*
mono_arch_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
{
MonoInst *ins = NULL;
int opcode = 0;
if (cmethod->klass == mono_class_try_get_math_class ()) {
if (strcmp (cmethod->name, "Sqrt") == 0) {
opcode = OP_SQRT;
} else if (strcmp (cmethod->name, "Abs") == 0 && fsig->params [0]->type == MONO_TYPE_R8) {
opcode = OP_ABS;
}
if (opcode && fsig->param_count == 1) {
MONO_INST_NEW (cfg, ins, opcode);
ins->type = STACK_R8;
ins->dreg = mono_alloc_freg (cfg);
ins->sreg1 = args [0]->dreg;
MONO_ADD_INS (cfg->cbb, ins);
}
opcode = 0;
if (cfg->opt & MONO_OPT_CMOV) {
if (strcmp (cmethod->name, "Min") == 0) {
if (fsig->params [0]->type == MONO_TYPE_I4)
opcode = OP_IMIN;
if (fsig->params [0]->type == MONO_TYPE_U4)
opcode = OP_IMIN_UN;
else if (fsig->params [0]->type == MONO_TYPE_I8)
opcode = OP_LMIN;
else if (fsig->params [0]->type == MONO_TYPE_U8)
opcode = OP_LMIN_UN;
} else if (strcmp (cmethod->name, "Max") == 0) {
if (fsig->params [0]->type == MONO_TYPE_I4)
opcode = OP_IMAX;
if (fsig->params [0]->type == MONO_TYPE_U4)
opcode = OP_IMAX_UN;
else if (fsig->params [0]->type == MONO_TYPE_I8)
opcode = OP_LMAX;
else if (fsig->params [0]->type == MONO_TYPE_U8)
opcode = OP_LMAX_UN;
}
}
if (opcode && fsig->param_count == 2) {
MONO_INST_NEW (cfg, ins, opcode);
ins->type = fsig->params [0]->type == MONO_TYPE_I4 ? STACK_I4 : STACK_I8;
ins->dreg = mono_alloc_ireg (cfg);
ins->sreg1 = args [0]->dreg;
ins->sreg2 = args [1]->dreg;
MONO_ADD_INS (cfg->cbb, ins);
}
#if 0
/* OP_FREM is not IEEE compatible */
else if (strcmp (cmethod->name, "IEEERemainder") == 0 && fsig->param_count == 2) {
MONO_INST_NEW (cfg, ins, OP_FREM);
ins->inst_i0 = args [0];
ins->inst_i1 = args [1];
}
#endif
if ((mini_get_cpu_features (cfg) & MONO_CPU_X86_SSE41) != 0 && fsig->param_count == 1 && fsig->params [0]->type == MONO_TYPE_R8) {
int mode = -1;
if (!strcmp (cmethod->name, "Round"))
mode = 0;
else if (!strcmp (cmethod->name, "Floor"))
mode = 1;
else if (!strcmp (cmethod->name, "Ceiling"))
mode = 2;
if (mode != -1) {
int xreg = alloc_xreg (cfg);
EMIT_NEW_UNALU (cfg, ins, OP_FCONV_TO_R8_X, xreg, args [0]->dreg);
EMIT_NEW_UNALU (cfg, ins, OP_SSE41_ROUNDP, xreg, xreg);
ins->inst_c0 = mode;
ins->inst_c1 = MONO_TYPE_R8;
int dreg = alloc_freg (cfg);
EMIT_NEW_UNALU (cfg, ins, OP_EXTRACT_R8, dreg, xreg);
ins->inst_c0 = 0;
ins->inst_c1 = MONO_TYPE_R8;
return ins;
}
}
}
return ins;
}
#endif
host_mgreg_t
mono_arch_context_get_int_reg (MonoContext *ctx, int reg)
{
return ctx->gregs [reg];
}
host_mgreg_t *
mono_arch_context_get_int_reg_address (MonoContext *ctx, int reg)
{
return &ctx->gregs [reg];
}
void
mono_arch_context_set_int_reg (MonoContext *ctx, int reg, host_mgreg_t val)
{
ctx->gregs [reg] = val;
}
/*
* mono_arch_emit_load_aotconst:
*
* Emit code to load the contents of the GOT slot identified by TRAMP_TYPE and
* TARGET from the mscorlib GOT in full-aot code.
* On AMD64, the result is placed into R11.
*/
guint8*
mono_arch_emit_load_aotconst (guint8 *start, guint8 *code, MonoJumpInfo **ji, MonoJumpInfoType tramp_type, gconstpointer target)
{
*ji = mono_patch_info_list_prepend (*ji, code - start, tramp_type, target);
amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
return code;
}
/*
* mono_arch_get_trampolines:
*
* Return a list of MonoTrampInfo structures describing arch specific trampolines
* for AOT.
*/
GSList *
mono_arch_get_trampolines (gboolean aot)
{
return mono_amd64_get_exception_trampolines (aot);
}
/* Soft Debug support */
#ifdef MONO_ARCH_SOFT_DEBUG_SUPPORTED
/*
* mono_arch_set_breakpoint:
*
* Set a breakpoint at the native code corresponding to JI at NATIVE_OFFSET.
* The location should contain code emitted by OP_SEQ_POINT.
*/
void
mono_arch_set_breakpoint (MonoJitInfo *ji, guint8 *ip)
{
guint8 *code = ip;
if (ji->from_aot) {
guint32 native_offset = ip - (guint8*)ji->code_start;
SeqPointInfo *info = mono_arch_get_seq_point_info ((guint8 *)ji->code_start);
g_assert (info->bp_addrs [native_offset] == 0);
info->bp_addrs [native_offset] = mini_get_breakpoint_trampoline ();
} else {
/* ip points to a mov r11, 0 */
g_assert (code [0] == 0x41);
g_assert (code [1] == 0xbb);
amd64_mov_reg_imm (code, AMD64_R11, 1);
}
}
/*
* mono_arch_clear_breakpoint:
*
* Clear the breakpoint at IP.
*/
void
mono_arch_clear_breakpoint (MonoJitInfo *ji, guint8 *ip)
{
guint8 *code = ip;
if (ji->from_aot) {
guint32 native_offset = ip - (guint8*)ji->code_start;
SeqPointInfo *info = mono_arch_get_seq_point_info ((guint8 *)ji->code_start);
info->bp_addrs [native_offset] = NULL;
} else {
amd64_mov_reg_imm (code, AMD64_R11, 0);
}
}
gboolean
mono_arch_is_breakpoint_event (void *info, void *sigctx)
{
/* We use soft breakpoints on amd64 */
return FALSE;
}
/*
* mono_arch_skip_breakpoint:
*
* Modify CTX so the ip is placed after the breakpoint instruction, so when
* we resume, the instruction is not executed again.
*/
void
mono_arch_skip_breakpoint (MonoContext *ctx, MonoJitInfo *ji)
{
g_assert_not_reached ();
}
/*
* mono_arch_start_single_stepping:
*
* Start single stepping.
*/
void
mono_arch_start_single_stepping (void)
{
ss_trampoline = mini_get_single_step_trampoline ();
}
/*
* mono_arch_stop_single_stepping:
*
* Stop single stepping.
*/
void
mono_arch_stop_single_stepping (void)
{
ss_trampoline = NULL;
}
/*
* mono_arch_is_single_step_event:
*
 * Return whether the machine state in SIGCTX corresponds to a single
 * step event.
*/
gboolean
mono_arch_is_single_step_event (void *info, void *sigctx)
{
/* We use soft breakpoints on amd64 */
return FALSE;
}
/*
* mono_arch_skip_single_step:
*
 * Modify CTX so the ip is placed after the single step trigger instruction, so when
 * we resume, the instruction is not executed again.
*/
void
mono_arch_skip_single_step (MonoContext *ctx)
{
g_assert_not_reached ();
}
/*
 * mono_arch_get_seq_point_info:
*
* Return a pointer to a data structure which is used by the sequence
* point implementation in AOTed code.
*/
SeqPointInfo*
mono_arch_get_seq_point_info (guint8 *code)
{
SeqPointInfo *info;
MonoJitInfo *ji;
MonoJitMemoryManager *jit_mm;
/*
* We don't have access to the method etc. so use the global
* memory manager for now.
*/
jit_mm = get_default_jit_mm ();
// FIXME: Add a free function
jit_mm_lock (jit_mm);
info = (SeqPointInfo *)g_hash_table_lookup (jit_mm->arch_seq_points, code);
jit_mm_unlock (jit_mm);
if (!info) {
ji = mini_jit_info_table_find (code);
g_assert (ji);
// FIXME: Optimize the size
info = (SeqPointInfo *)g_malloc0 (sizeof (SeqPointInfo) + (ji->code_size * sizeof (gpointer)));
info->ss_tramp_addr = &ss_trampoline;
jit_mm_lock (jit_mm);
g_hash_table_insert (jit_mm->arch_seq_points, code, info);
jit_mm_unlock (jit_mm);
}
return info;
}
#endif
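/* Return whether the given atomic opcode can be implemented natively on amd64 */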
gboolean
mono_arch_opcode_supported (int opcode)
{
switch (opcode) {
case OP_ATOMIC_ADD_I4:
case OP_ATOMIC_ADD_I8:
case OP_ATOMIC_EXCHANGE_I4:
case OP_ATOMIC_EXCHANGE_I8:
case OP_ATOMIC_CAS_I4:
case OP_ATOMIC_CAS_I8:
case OP_ATOMIC_LOAD_I1:
case OP_ATOMIC_LOAD_I2:
case OP_ATOMIC_LOAD_I4:
case OP_ATOMIC_LOAD_I8:
case OP_ATOMIC_LOAD_U1:
case OP_ATOMIC_LOAD_U2:
case OP_ATOMIC_LOAD_U4:
case OP_ATOMIC_LOAD_U8:
case OP_ATOMIC_LOAD_R4:
case OP_ATOMIC_LOAD_R8:
case OP_ATOMIC_STORE_I1:
case OP_ATOMIC_STORE_I2:
case OP_ATOMIC_STORE_I4:
case OP_ATOMIC_STORE_I8:
case OP_ATOMIC_STORE_U1:
case OP_ATOMIC_STORE_U2:
case OP_ATOMIC_STORE_U4:
case OP_ATOMIC_STORE_U8:
case OP_ATOMIC_STORE_R4:
case OP_ATOMIC_STORE_R8:
return TRUE;
default:
return FALSE;
}
}
CallInfo*
mono_arch_get_call_info (MonoMemPool *mp, MonoMethodSignature *sig)
{
return get_call_info (mp, sig);
}
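/*
 * mono_arch_load_function:
 *
 *   Map an amd64 specific JIT icall id to the address of its C
 * implementation.
 */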
gpointer
mono_arch_load_function (MonoJitICallId jit_icall_id)
{
gpointer target = NULL;
switch (jit_icall_id) {
#undef MONO_AOT_ICALL
#define MONO_AOT_ICALL(x) case MONO_JIT_ICALL_ ## x: target = (gpointer)x; break;
MONO_AOT_ICALL (mono_amd64_resume_unwind)
MONO_AOT_ICALL (mono_amd64_start_gsharedvt_call)
MONO_AOT_ICALL (mono_amd64_throw_corlib_exception)
MONO_AOT_ICALL (mono_amd64_throw_exception)
default:
break;
}
return target;
}
/**
* \file
*/
#ifndef __MONO_MINI_AMD64_H__
#define __MONO_MINI_AMD64_H__
#include <mono/arch/amd64/amd64-codegen.h>
#include <mono/utils/mono-sigcontext.h>
#include <mono/utils/mono-context.h>
#include <glib.h>
#ifdef HOST_WIN32
#include <windows.h>
#include <signal.h>
#if !defined(_MSC_VER)
/* sigcontext surrogate */
struct sigcontext {
guint64 eax;
guint64 ebx;
guint64 ecx;
guint64 edx;
guint64 ebp;
guint64 esp;
guint64 esi;
guint64 edi;
guint64 eip;
};
#endif
typedef void MONO_SIG_HANDLER_SIGNATURE ((*MonoW32ExceptionHandler));
void win32_seh_init(void);
void win32_seh_cleanup(void);
void win32_seh_set_handler(int type, MonoW32ExceptionHandler handler);
#ifndef SIGFPE
#define SIGFPE 4
#endif
#ifndef SIGILL
#define SIGILL 8
#endif
#ifndef SIGSEGV
#define SIGSEGV 11
#endif
LONG CALLBACK seh_handler(EXCEPTION_POINTERS* ep);
typedef struct {
SRWLOCK lock;
PVOID handle;
gsize begin_range;
gsize end_range;
PRUNTIME_FUNCTION rt_funcs;
DWORD rt_funcs_current_count;
DWORD rt_funcs_max_count;
} DynamicFunctionTableEntry;
#define MONO_UNWIND_INFO_RT_FUNC_SIZE 128
typedef BOOLEAN (WINAPI* RtlInstallFunctionTableCallbackPtr)(
DWORD64 TableIdentifier,
DWORD64 BaseAddress,
DWORD Length,
PGET_RUNTIME_FUNCTION_CALLBACK Callback,
PVOID Context,
PCWSTR OutOfProcessCallbackDll);
typedef BOOLEAN (WINAPI* RtlDeleteFunctionTablePtr)(
PRUNTIME_FUNCTION FunctionTable);
// On Win8/Win2012Server and later we can use dynamic growable function tables
// instead of RtlInstallFunctionTableCallback. This gives us the benefit of
// including all needed unwind info upon registration.
typedef DWORD (NTAPI* RtlAddGrowableFunctionTablePtr)(
PVOID * DynamicTable,
PRUNTIME_FUNCTION FunctionTable,
DWORD EntryCount,
DWORD MaximumEntryCount,
ULONG_PTR RangeBase,
ULONG_PTR RangeEnd);
typedef VOID (NTAPI* RtlGrowFunctionTablePtr)(
PVOID DynamicTable,
DWORD NewEntryCount);
typedef VOID (NTAPI* RtlDeleteGrowableFunctionTablePtr)(
PVOID DynamicTable);
#endif /* HOST_WIN32 */
#ifdef sun // Solaris x86
# undef SIGSEGV_ON_ALTSTACK
# define MONO_ARCH_NOMAP32BIT
struct sigcontext {
unsigned short gs, __gsh;
unsigned short fs, __fsh;
unsigned short es, __esh;
unsigned short ds, __dsh;
unsigned long edi;
unsigned long esi;
unsigned long ebp;
unsigned long esp;
unsigned long ebx;
unsigned long edx;
unsigned long ecx;
unsigned long eax;
unsigned long trapno;
unsigned long err;
unsigned long eip;
unsigned short cs, __csh;
unsigned long eflags;
unsigned long esp_at_signal;
unsigned short ss, __ssh;
unsigned long fpstate[95];
unsigned long filler[5];
};
#endif // sun, Solaris x86
#ifndef DISABLE_SIMD
#define MONO_ARCH_SIMD_INTRINSICS 1
#define MONO_ARCH_NEED_SIMD_BANK 1
#define MONO_ARCH_USE_SHARED_FP_SIMD_BANK 1
#endif
#if defined(__APPLE__)
#define MONO_ARCH_SIGNAL_STACK_SIZE MINSIGSTKSZ
#else
#define MONO_ARCH_SIGNAL_STACK_SIZE (16 * 1024)
#endif
#define MONO_ARCH_CPU_SPEC mono_amd64_desc
#define MONO_MAX_IREGS 16
#define MONO_MAX_FREGS AMD64_XMM_NREG
#define MONO_ARCH_FP_RETURN_REG AMD64_XMM0
#ifdef TARGET_WIN32
/* xmm5 is used as a scratch register */
#define MONO_ARCH_CALLEE_FREGS 0x1f
/* xmm6:xmm15 */
#define MONO_ARCH_CALLEE_SAVED_FREGS (0xffff - 0x3f)
#define MONO_ARCH_FP_SCRATCH_REG AMD64_XMM5
#else
/* xmm15 is used as a scratch register */
#define MONO_ARCH_CALLEE_FREGS 0x7fff
#define MONO_ARCH_CALLEE_SAVED_FREGS 0
#define MONO_ARCH_FP_SCRATCH_REG AMD64_XMM15
#endif
#define MONO_MAX_XREGS MONO_MAX_FREGS
#define MONO_ARCH_CALLEE_XREGS MONO_ARCH_CALLEE_FREGS
#define MONO_ARCH_CALLEE_SAVED_XREGS MONO_ARCH_CALLEE_SAVED_FREGS
#define MONO_ARCH_CALLEE_REGS AMD64_CALLEE_REGS
#define MONO_ARCH_CALLEE_SAVED_REGS AMD64_CALLEE_SAVED_REGS
#define MONO_ARCH_USE_FPSTACK FALSE
#define MONO_ARCH_INST_FIXED_REG(desc) ((desc == '\0') ? -1 : ((desc == 'i' ? -1 : ((desc == 'a') ? AMD64_RAX : ((desc == 's') ? AMD64_RCX : ((desc == 'd') ? AMD64_RDX : ((desc == 'A') ? MONO_AMD64_ARG_REG1 : -1)))))))
/* RDX is clobbered by the opcode implementation before accessing sreg2 */
#define MONO_ARCH_INST_SREG2_MASK(ins) (((ins [MONO_INST_CLOB] == 'a') || (ins [MONO_INST_CLOB] == 'd')) ? (1 << AMD64_RDX) : 0)
#define MONO_ARCH_INST_IS_REGPAIR(desc) FALSE
#define MONO_ARCH_INST_REGPAIR_REG2(desc,hreg1) (-1)
#define MONO_ARCH_FRAME_ALIGNMENT 16
/* FIXME: align to 16 bytes instead of 32 bytes (we align to 32 bytes to get
 * reproducible results for benchmarks) */
#define MONO_ARCH_CODE_ALIGNMENT 32
struct MonoLMF {
/*
* The rsp field points to the stack location where the caller ip is saved.
* If the second lowest bit is set, then this is a MonoLMFExt structure, and
* the other fields are not valid.
* If the third lowest bit is set, then this is a MonoLMFTramp structure, and
* the 'rbp' field is not valid.
*/
gpointer previous_lmf;
guint64 rbp;
guint64 rsp;
};
/* LMF structure used by the JIT trampolines */
typedef struct {
struct MonoLMF lmf;
MonoContext *ctx;
gpointer lmf_addr;
} MonoLMFTramp;
typedef struct MonoCompileArch {
gint32 localloc_offset;
gint32 reg_save_area_offset;
gint32 stack_alloc_size;
gint32 sp_fp_offset;
guint32 saved_iregs;
gboolean omit_fp;
gboolean omit_fp_computed;
CallInfo *cinfo;
gint32 async_point_count;
MonoInst *vret_addr_loc;
MonoInst *seq_point_info_var;
MonoInst *ss_tramp_var;
MonoInst *bp_tramp_var;
MonoInst *lmf_var;
#ifdef HOST_WIN32
struct _UNWIND_INFO* unwindinfo;
#endif
} MonoCompileArch;
#ifdef TARGET_WIN32
static const AMD64_Reg_No param_regs [] = { AMD64_RCX, AMD64_RDX, AMD64_R8, AMD64_R9 };
static const AMD64_XMM_Reg_No float_param_regs [] = { AMD64_XMM0, AMD64_XMM1, AMD64_XMM2, AMD64_XMM3 };
static const AMD64_Reg_No return_regs [] = { AMD64_RAX };
static const AMD64_XMM_Reg_No float_return_regs [] = { AMD64_XMM0 };
#define PARAM_REGS G_N_ELEMENTS(param_regs)
#define FLOAT_PARAM_REGS G_N_ELEMENTS(float_param_regs)
#define RETURN_REGS G_N_ELEMENTS(return_regs)
#define FLOAT_RETURN_REGS G_N_ELEMENTS(float_return_regs)
#else
#define PARAM_REGS 6
#define FLOAT_PARAM_REGS 8
#define RETURN_REGS 2
#define FLOAT_RETURN_REGS 2
static const AMD64_Reg_No param_regs [] = {AMD64_RDI, AMD64_RSI, AMD64_RDX,
AMD64_RCX, AMD64_R8, AMD64_R9};
static const AMD64_XMM_Reg_No float_param_regs[] = {AMD64_XMM0, AMD64_XMM1, AMD64_XMM2,
AMD64_XMM3, AMD64_XMM4, AMD64_XMM5,
AMD64_XMM6, AMD64_XMM7};
static const AMD64_Reg_No return_regs [] = {AMD64_RAX, AMD64_RDX};
#endif
typedef struct {
/* Method address to call */
gpointer addr;
/* The trampoline reads this, so keep the size explicit */
int ret_marshal;
/* If ret_marshal != NONE, this is the reg of the vret arg, else -1 (used in the out case) */
/* Equivalent of vret_arg_slot in the x86 implementation. */
int vret_arg_reg;
/* The stack slot where the return value will be stored (used in the in case) */
int vret_slot;
int stack_usage, map_count;
/* If not -1, then make a virtual call using this vtable offset */
int vcall_offset;
/* If 1, make an indirect call to the address in the rgctx reg */
int calli;
/* Whether this is an in or an out call */
int gsharedvt_in;
/* Maps stack slots/registers in the caller to the stack slots/registers in the callee */
int map [MONO_ZERO_LEN_ARRAY];
} GSharedVtCallInfo;
/* Structure used by the sequence points in AOTed code */
struct SeqPointInfo {
gpointer ss_tramp_addr;
gpointer bp_addrs [MONO_ZERO_LEN_ARRAY];
};
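/* Argument and return value buffer used by the dyn call support */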
typedef struct {
host_mgreg_t res;
guint8 *ret;
double fregs [8];
host_mgreg_t has_fp;
host_mgreg_t nstack_args;
/* This should come last as the structure is dynamically extended */
host_mgreg_t regs [PARAM_REGS];
} DynCallArgs;
typedef enum {
ArgInIReg,
ArgInFloatSSEReg,
ArgInDoubleSSEReg,
ArgOnStack,
ArgValuetypeInReg,
ArgValuetypeAddrInIReg,
ArgValuetypeAddrOnStack,
/* gsharedvt argument passed by addr */
ArgGSharedVtInReg,
ArgGSharedVtOnStack,
/* Variable sized gsharedvt argument passed/returned by addr */
ArgGsharedvtVariableInReg,
ArgNone /* only in pair_storage */
} ArgStorage;
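/* Describes how a single argument or return value is passed */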
typedef struct {
gint16 offset;
guint8 reg;
ArgStorage storage : 8;
/* Only if storage == ArgValuetypeInReg */
ArgStorage pair_storage [2];
guint8 pair_regs [2];
/* The size of each pair (bytes) */
int pair_size [2];
int nregs;
/* Only if storage == ArgOnStack */
int arg_size; // Bytes, will always be rounded up/aligned to 8 byte boundary
// Size in bytes for small arguments
int byte_arg_size;
guint8 pass_empty_struct : 1; // Set when an empty struct needs to be represented as an argument.
guint8 is_signed : 1;
} ArgInfo;
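/* Describes how the arguments and return value of a signature are passed (computed by get_call_info ()) */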
struct CallInfo {
int nargs;
guint32 stack_usage;
guint32 reg_usage;
guint32 freg_usage;
gboolean need_stack_align;
gboolean gsharedvt;
/* The index of the vret arg in the argument list */
int vret_arg_index;
ArgInfo ret;
ArgInfo sig_cookie;
ArgInfo args [1];
};
typedef struct {
/* General registers */
host_mgreg_t gregs [AMD64_NREG];
/* Floating registers */
double fregs [AMD64_XMM_NREG];
/* Stack usage, used for passing params on stack */
guint32 stack_size;
guint8 *stack;
} CallContext;
#define MONO_CONTEXT_SET_LLVM_EXC_REG(ctx, exc) do { (ctx)->gregs [AMD64_RAX] = (gsize)exc; } while (0)
#define MONO_CONTEXT_SET_LLVM_EH_SELECTOR_REG(ctx, sel) do { (ctx)->gregs [AMD64_RDX] = (gsize)(sel); } while (0)
#define MONO_ARCH_INIT_TOP_LMF_ENTRY(lmf)
#ifdef _MSC_VER
#define MONO_INIT_CONTEXT_FROM_FUNC(ctx, start_func) do { \
guint64 stackptr; \
stackptr = ((guint64)_AddressOfReturnAddress () - sizeof (void*));\
MONO_CONTEXT_SET_IP ((ctx), (start_func)); \
MONO_CONTEXT_SET_BP ((ctx), stackptr); \
MONO_CONTEXT_SET_SP ((ctx), stackptr); \
} while (0)
#else
/*
* __builtin_frame_address () is broken on some older gcc versions in the presence of
* frame pointer elimination, see bug #82095.
*/
#define MONO_INIT_CONTEXT_FROM_FUNC(ctx,start_func) do { \
int tmp; \
guint64 stackptr = (guint64)&tmp; \
MONO_CONTEXT_SET_IP ((ctx), (start_func)); \
MONO_CONTEXT_SET_BP ((ctx), stackptr); \
MONO_CONTEXT_SET_SP ((ctx), stackptr); \
} while (0)
#endif
#if !defined( HOST_WIN32 ) && !defined(__HAIKU__) && defined (HAVE_SIGACTION)
#define MONO_ARCH_USE_SIGACTION 1
#ifdef HAVE_WORKING_SIGALTSTACK
#define MONO_ARCH_SIGSEGV_ON_ALTSTACK
#endif
#endif /* !HOST_WIN32 */
#if !defined(__linux__) && !defined(__sun)
#define MONO_ARCH_NOMAP32BIT 1
#endif
#ifdef TARGET_WIN32
#define MONO_AMD64_ARG_REG1 AMD64_RCX
#define MONO_AMD64_ARG_REG2 AMD64_RDX
#define MONO_AMD64_ARG_REG3 AMD64_R8
#define MONO_AMD64_ARG_REG4 AMD64_R9
#else
#define MONO_AMD64_ARG_REG1 AMD64_RDI
#define MONO_AMD64_ARG_REG2 AMD64_RSI
#define MONO_AMD64_ARG_REG3 AMD64_RDX
#define MONO_AMD64_ARG_REG4 AMD64_RCX
#endif
#define MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS
#define MONO_ARCH_NO_EMULATE_LONG_MUL_OPTS
#define MONO_ARCH_EMULATE_CONV_R8_UN 1
#define MONO_ARCH_EMULATE_FCONV_TO_U8 1
// x64 FullAOT+LLVM fails to pass the basic-float tests without this.
#define MONO_ARCH_EMULATE_FCONV_TO_U4 1
#define MONO_ARCH_EMULATE_FREM 1
#define MONO_ARCH_HAVE_IS_INT_OVERFLOW 1
#define MONO_ARCH_HAVE_INVALIDATE_METHOD 1
#define MONO_ARCH_HAVE_FULL_AOT_TRAMPOLINES 1
#define MONO_ARCH_IMT_REG AMD64_R10
#define MONO_ARCH_IMT_SCRATCH_REG AMD64_R11
#define MONO_ARCH_VTABLE_REG MONO_AMD64_ARG_REG1
/*
* We use r10 for the imt/rgctx register rather than r11 because r11 is
* used by the trampoline as a scratch register and hence might be
* clobbered across method call boundaries.
*/
#define MONO_ARCH_RGCTX_REG MONO_ARCH_IMT_REG
#define MONO_ARCH_HAVE_CMOV_OPS 1
#define MONO_ARCH_HAVE_EXCEPTIONS_INIT 1
#define MONO_ARCH_HAVE_GENERALIZED_IMT_TRAMPOLINE 1
#define MONO_ARCH_HAVE_GET_TRAMPOLINES 1
#define MONO_ARCH_INTERPRETER_SUPPORTED 1
#define MONO_ARCH_AOT_SUPPORTED 1
#define MONO_ARCH_SOFT_DEBUG_SUPPORTED 1
#define MONO_ARCH_SUPPORT_TASKLETS 1
#define MONO_ARCH_GSHARED_SUPPORTED 1
#define MONO_ARCH_DYN_CALL_SUPPORTED 1
#define MONO_ARCH_DYN_CALL_PARAM_AREA 0
#define MONO_ARCH_LLVM_SUPPORTED 1
#if defined(HOST_WIN32) && defined(TARGET_WIN32) && !defined(_MSC_VER)
// Only supported for Windows cross compiler builds (host == Win32, target != Win32)
// and only when using MSVC for non-cross-compiler builds.
#undef MONO_ARCH_LLVM_SUPPORTED
#endif
#define MONO_ARCH_HAVE_CARD_TABLE_WBARRIER 1
#define MONO_ARCH_HAVE_SETUP_RESUME_FROM_SIGNAL_HANDLER_CTX 1
#define MONO_ARCH_GC_MAPS_SUPPORTED 1
#define MONO_ARCH_HAVE_CONTEXT_SET_INT_REG 1
#define MONO_ARCH_HAVE_SETUP_ASYNC_CALLBACK 1
#define MONO_ARCH_HAVE_CREATE_LLVM_NATIVE_THUNK 1
#define MONO_ARCH_HAVE_OP_TAILCALL_MEMBASE 1
#define MONO_ARCH_HAVE_OP_TAILCALL_REG 1
#define MONO_ARCH_HAVE_SDB_TRAMPOLINES 1
#define MONO_ARCH_HAVE_OP_GENERIC_CLASS_INIT 1
#define MONO_ARCH_HAVE_GENERAL_RGCTX_LAZY_FETCH_TRAMPOLINE 1
#define MONO_ARCH_LLVM_TARGET_LAYOUT "e-i64:64-i128:128-n8:16:32:64-S128"
#define MONO_ARCH_HAVE_INTERP_PINVOKE_TRAMP
#define MONO_ARCH_HAVE_INTERP_ENTRY_TRAMPOLINE 1
#define MONO_ARCH_HAVE_INTERP_NATIVE_TO_MANAGED 1
#if defined(TARGET_OSX) || defined(__linux__)
#define MONO_ARCH_HAVE_UNWIND_BACKTRACE 1
#endif
#define MONO_ARCH_GSHAREDVT_SUPPORTED 1
#if defined(HOST_TVOS) || defined(HOST_WATCHOS)
/* Neither tvOS nor watchOS give signal handlers access to a ucontext_t, so we
* can't use signals to translate SIGFPE into a .NET-level exception. */
#define MONO_ARCH_NEED_DIV_CHECK 1
#endif
#if defined(TARGET_TVOS) || defined(TARGET_WATCHOS)
#define MONO_ARCH_EXPLICIT_NULL_CHECKS 1
#endif
/* Used for optimization, not complete */
#define MONO_ARCH_IS_OP_MEMBASE(opcode) ((opcode) == OP_X86_PUSH_MEMBASE)
#define MONO_ARCH_EMIT_BOUNDS_CHECK(cfg, array_reg, offset, index_reg, ex_name) do { \
MonoInst *inst; \
MONO_INST_NEW ((cfg), inst, OP_AMD64_ICOMPARE_MEMBASE_REG); \
inst->inst_basereg = array_reg; \
inst->inst_offset = offset; \
inst->sreg2 = index_reg; \
MONO_ADD_INS ((cfg)->cbb, inst); \
MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, ex_name); \
} while (0)
// Does the ABI have a volatile non-parameter register, so tailcall
// can pass context to generics or interfaces?
#define MONO_ARCH_HAVE_VOLATILE_NON_PARAM_REGISTER 1
void
mono_amd64_patch (unsigned char* code, gpointer target);
void
mono_amd64_throw_exception (guint64 dummy1, guint64 dummy2, guint64 dummy3, guint64 dummy4,
guint64 dummy5, guint64 dummy6,
MonoContext *mctx, MonoObject *exc, gboolean rethrow, gboolean preserve_ips);
void
mono_amd64_throw_corlib_exception (guint64 dummy1, guint64 dummy2, guint64 dummy3, guint64 dummy4,
guint64 dummy5, guint64 dummy6,
MonoContext *mctx, guint32 ex_token_index, gint64 pc_offset);
void
mono_amd64_resume_unwind (guint64 dummy1, guint64 dummy2, guint64 dummy3, guint64 dummy4,
guint64 dummy5, guint64 dummy6,
MonoContext *mctx, guint32 dummy7, gint64 dummy8);
gpointer
mono_amd64_start_gsharedvt_call (GSharedVtCallInfo *info, gpointer *caller, gpointer *callee, gpointer mrgctx_reg);
GSList*
mono_amd64_get_exception_trampolines (gboolean aot);
int
mono_amd64_get_tls_gs_offset (void);
#if defined(TARGET_WIN32) && !defined(DISABLE_JIT)
#define MONO_ARCH_HAVE_UNWIND_TABLE 1
#define MONO_ARCH_HAVE_CODE_CHUNK_TRACKING 1
#ifdef ENABLE_CHECKED_BUILD
#define ENABLE_CHECKED_BUILD_UNWINDINFO
#endif
#define MONO_MAX_UNWIND_CODES 22
typedef enum _UNWIND_OP_CODES {
UWOP_PUSH_NONVOL = 0, /* info == register number */
UWOP_ALLOC_LARGE, /* no info, alloc size in next 2 slots */
UWOP_ALLOC_SMALL, /* info == size of allocation / 8 - 1 */
UWOP_SET_FPREG, /* no info, FP = RSP + UNWIND_INFO.FPRegOffset*16 */
UWOP_SAVE_NONVOL, /* info == register number, offset in next slot */
UWOP_SAVE_NONVOL_FAR, /* info == register number, offset in next 2 slots */
UWOP_SAVE_XMM128, /* info == XMM reg number, offset in next slot */
UWOP_SAVE_XMM128_FAR, /* info == XMM reg number, offset in next 2 slots */
UWOP_PUSH_MACHFRAME /* info == 0: no error-code, 1: error-code */
} UNWIND_CODE_OPS;
typedef union _UNWIND_CODE {
struct {
guchar CodeOffset;
guchar UnwindOp : 4;
guchar OpInfo : 4;
};
gushort FrameOffset;
} UNWIND_CODE, *PUNWIND_CODE;
typedef struct _UNWIND_INFO {
guchar Version : 3;
guchar Flags : 5;
guchar SizeOfProlog;
guchar CountOfCodes;
guchar FrameRegister : 4;
guchar FrameOffset : 4;
UNWIND_CODE UnwindCode[MONO_MAX_UNWIND_CODES];
/* UNWIND_CODE MoreUnwindCode[((CountOfCodes + 1) & ~1) - 1];
* union {
* OPTIONAL ULONG ExceptionHandler;
* OPTIONAL ULONG FunctionEntry;
* };
* OPTIONAL ULONG ExceptionData[]; */
} UNWIND_INFO, *PUNWIND_INFO;
static inline guint
mono_arch_unwindinfo_get_size (guchar code_count)
{
// The returned size will be used as the allocated size for unwind data trailing the memory used by the compiled method.
// The Windows x64 ABI has some requirements on the data written into this memory. Both the RUNTIME_FUNCTION
// and UNWIND_INFO structs need to be DWORD aligned, and the unwind codes array
// should have an even number of entries, while the count stored in the UNWIND_INFO struct should hold the real number
// of unwind codes. Adding extra bytes to the total size makes sure we can properly align the RUNTIME_FUNCTION
// struct. Since our UNWIND_INFO follows the RUNTIME_FUNCTION struct in memory, it will automatically be DWORD aligned
// as well. Also make sure to allocate room for a padding UNWIND_CODE, if needed.
return (sizeof (target_mgreg_t) + sizeof (UNWIND_INFO)) -
(sizeof (UNWIND_CODE) * ((MONO_MAX_UNWIND_CODES - ((code_count + 1) & ~1))));
/* FIXME Something simpler should work:
return sizeof (UNWIND_INFO) + sizeof (UNWIND_CODE) * (code_count + (code_count & 1));
*/
}
guchar
mono_arch_unwindinfo_get_code_count (GSList *unwind_ops);
PUNWIND_INFO
mono_arch_unwindinfo_alloc_unwind_info (GSList *unwind_ops);
void
mono_arch_unwindinfo_free_unwind_info (PUNWIND_INFO unwind_info);
guint
mono_arch_unwindinfo_init_method_unwind_info (gpointer cfg);
void
mono_arch_unwindinfo_install_method_unwind_info (PUNWIND_INFO *monoui, gpointer code, guint code_size);
void
mono_arch_unwindinfo_install_tramp_unwind_info (GSList *unwind_ops, gpointer code, guint code_size);
void
mono_arch_code_chunk_new (void *chunk, int size);
void
mono_arch_code_chunk_destroy (void *chunk);
#endif /* defined(TARGET_WIN32) && !defined(DISABLE_JIT) */
#ifdef MONO_ARCH_HAVE_UNWIND_TABLE
// Allocate additional space for at most 3 unwind ops (push + fp or sp small|large) plus the unwind info struct trailing the code buffer.
#define MONO_TRAMPOLINE_UNWINDINFO_SIZE(max_code_count) (mono_arch_unwindinfo_get_size (max_code_count))
#define MONO_MAX_TRAMPOLINE_UNWINDINFO_SIZE (MONO_TRAMPOLINE_UNWINDINFO_SIZE(3))
static inline gboolean
mono_arch_unwindinfo_validate_size (GSList *unwind_ops, guint max_size)
{
guint current_size = mono_arch_unwindinfo_get_size (mono_arch_unwindinfo_get_code_count (unwind_ops));
return current_size <= max_size;
}
#else
#define MONO_TRAMPOLINE_UNWINDINFO_SIZE(max_code_count) 0
#define MONO_MAX_TRAMPOLINE_UNWINDINFO_SIZE 0
static inline gboolean
mono_arch_unwindinfo_validate_size (GSList *unwind_ops, guint max_size)
{
return TRUE;
}
#endif
CallInfo* mono_arch_get_call_info (MonoMemPool *mp, MonoMethodSignature *sig);
#endif /* __MONO_MINI_AMD64_H__ */
| /**
* \file
*/
#ifndef __MONO_MINI_AMD64_H__
#define __MONO_MINI_AMD64_H__
#include <mono/arch/amd64/amd64-codegen.h>
#include <mono/utils/mono-sigcontext.h>
#include <mono/utils/mono-context.h>
#include <glib.h>
#ifdef HOST_WIN32
#include <windows.h>
#include <signal.h>
#if !defined(_MSC_VER)
/* sigcontext surrogate */
struct sigcontext {
guint64 eax;
guint64 ebx;
guint64 ecx;
guint64 edx;
guint64 ebp;
guint64 esp;
guint64 esi;
guint64 edi;
guint64 eip;
};
#endif
typedef void MONO_SIG_HANDLER_SIGNATURE ((*MonoW32ExceptionHandler));
void win32_seh_init(void);
void win32_seh_cleanup(void);
void win32_seh_set_handler(int type, MonoW32ExceptionHandler handler);
#ifndef SIGFPE
#define SIGFPE 4
#endif
#ifndef SIGILL
#define SIGILL 8
#endif
#ifndef SIGSEGV
#define SIGSEGV 11
#endif
LONG CALLBACK seh_handler(EXCEPTION_POINTERS* ep);
typedef struct {
SRWLOCK lock;
PVOID handle;
gsize begin_range;
gsize end_range;
PRUNTIME_FUNCTION rt_funcs;
DWORD rt_funcs_current_count;
DWORD rt_funcs_max_count;
} DynamicFunctionTableEntry;
#define MONO_UNWIND_INFO_RT_FUNC_SIZE 128
typedef BOOLEAN (WINAPI* RtlInstallFunctionTableCallbackPtr)(
DWORD64 TableIdentifier,
DWORD64 BaseAddress,
DWORD Length,
PGET_RUNTIME_FUNCTION_CALLBACK Callback,
PVOID Context,
PCWSTR OutOfProcessCallbackDll);
typedef BOOLEAN (WINAPI* RtlDeleteFunctionTablePtr)(
PRUNTIME_FUNCTION FunctionTable);
// On Win8/Win2012Server and later we can use dynamic growable function tables
// instead of RtlInstallFunctionTableCallback. This gives us the benefit of
// including all needed unwind information upon registration.
typedef DWORD (NTAPI* RtlAddGrowableFunctionTablePtr)(
PVOID * DynamicTable,
PRUNTIME_FUNCTION FunctionTable,
DWORD EntryCount,
DWORD MaximumEntryCount,
ULONG_PTR RangeBase,
ULONG_PTR RangeEnd);
typedef VOID (NTAPI* RtlGrowFunctionTablePtr)(
PVOID DynamicTable,
DWORD NewEntryCount);
typedef VOID (NTAPI* RtlDeleteGrowableFunctionTablePtr)(
PVOID DynamicTable);
#endif /* HOST_WIN32 */
#ifdef sun // Solaris x86
# undef SIGSEGV_ON_ALTSTACK
# define MONO_ARCH_NOMAP32BIT
struct sigcontext {
unsigned short gs, __gsh;
unsigned short fs, __fsh;
unsigned short es, __esh;
unsigned short ds, __dsh;
unsigned long edi;
unsigned long esi;
unsigned long ebp;
unsigned long esp;
unsigned long ebx;
unsigned long edx;
unsigned long ecx;
unsigned long eax;
unsigned long trapno;
unsigned long err;
unsigned long eip;
unsigned short cs, __csh;
unsigned long eflags;
unsigned long esp_at_signal;
unsigned short ss, __ssh;
unsigned long fpstate[95];
unsigned long filler[5];
};
#endif // sun, Solaris x86
#ifndef DISABLE_SIMD
#define MONO_ARCH_SIMD_INTRINSICS 1
#define MONO_ARCH_NEED_SIMD_BANK 1
#define MONO_ARCH_USE_SHARED_FP_SIMD_BANK 1
#endif
#if defined(__APPLE__)
#define MONO_ARCH_SIGNAL_STACK_SIZE MINSIGSTKSZ
#else
#define MONO_ARCH_SIGNAL_STACK_SIZE (16 * 1024)
#endif
#define MONO_ARCH_CPU_SPEC mono_amd64_desc
#define MONO_MAX_IREGS 16
#define MONO_MAX_FREGS AMD64_XMM_NREG
#define MONO_ARCH_FP_RETURN_REG AMD64_XMM0
#ifdef TARGET_WIN32
/* xmm5 is used as a scratch register */
#define MONO_ARCH_CALLEE_FREGS 0x1f
/* xmm6:xmm15 */
#define MONO_ARCH_CALLEE_SAVED_FREGS (0xffff - 0x3f)
#define MONO_ARCH_FP_SCRATCH_REG AMD64_XMM5
#else
/* xmm15 is used as a scratch register */
#define MONO_ARCH_CALLEE_FREGS 0x7fff
#define MONO_ARCH_CALLEE_SAVED_FREGS 0
#define MONO_ARCH_FP_SCRATCH_REG AMD64_XMM15
#endif
#define MONO_MAX_XREGS MONO_MAX_FREGS
#define MONO_ARCH_CALLEE_XREGS MONO_ARCH_CALLEE_FREGS
#define MONO_ARCH_CALLEE_SAVED_XREGS MONO_ARCH_CALLEE_SAVED_FREGS
#define MONO_ARCH_CALLEE_REGS AMD64_CALLEE_REGS
#define MONO_ARCH_CALLEE_SAVED_REGS AMD64_CALLEE_SAVED_REGS
#define MONO_ARCH_USE_FPSTACK FALSE
#define MONO_ARCH_INST_FIXED_REG(desc) ((desc == '\0') ? -1 : ((desc == 'i' ? -1 : ((desc == 'a') ? AMD64_RAX : ((desc == 's') ? AMD64_RCX : ((desc == 'd') ? AMD64_RDX : ((desc == 'A') ? MONO_AMD64_ARG_REG1 : -1)))))))
/* RDX is clobbered by the opcode implementation before accessing sreg2 */
#define MONO_ARCH_INST_SREG2_MASK(ins) (((ins [MONO_INST_CLOB] == 'a') || (ins [MONO_INST_CLOB] == 'd')) ? (1 << AMD64_RDX) : 0)
#define MONO_ARCH_INST_IS_REGPAIR(desc) FALSE
#define MONO_ARCH_INST_REGPAIR_REG2(desc,hreg1) (-1)
#define MONO_ARCH_FRAME_ALIGNMENT 16
/* FIXME: align to 16 bytes instead of 32 bytes (we align to 32 bytes to get
* reproducible results for benchmarks) */
#define MONO_ARCH_CODE_ALIGNMENT 32
struct MonoLMF {
/*
* The rsp field points to the stack location where the caller ip is saved.
* If the second lowest bit is set, then this is a MonoLMFExt structure, and
* the other fields are not valid.
* If the third lowest bit is set, then this is a MonoLMFTramp structure, and
* the 'rbp' field is not valid.
*/
gpointer previous_lmf;
guint64 rbp;
guint64 rsp;
};
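/*
* A minimal sketch (not part of the original header) of how the tag bits
* described above could be tested; the masks follow from the second lowest
* bit being 2 and the third lowest bit being 4:
*
*   gboolean is_ext = ((gsize)lmf->previous_lmf & 2) != 0;   // MonoLMFExt
*   gboolean is_tramp = ((gsize)lmf->previous_lmf & 4) != 0; // MonoLMFTramp
*/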
/* LMF structure used by the JIT trampolines */
typedef struct {
struct MonoLMF lmf;
MonoContext *ctx;
gpointer lmf_addr;
} MonoLMFTramp;
typedef struct MonoCompileArch {
gint32 localloc_offset;
gint32 reg_save_area_offset;
gint32 stack_alloc_size;
gint32 sp_fp_offset;
guint32 saved_iregs;
gboolean omit_fp;
gboolean omit_fp_computed;
CallInfo *cinfo;
gint32 async_point_count;
MonoInst *vret_addr_loc;
MonoInst *seq_point_info_var;
MonoInst *ss_tramp_var;
MonoInst *bp_tramp_var;
MonoInst *lmf_var;
#ifdef HOST_WIN32
struct _UNWIND_INFO* unwindinfo;
#endif
} MonoCompileArch;
#ifdef TARGET_WIN32
static const AMD64_Reg_No param_regs [] = { AMD64_RCX, AMD64_RDX, AMD64_R8, AMD64_R9 };
static const AMD64_XMM_Reg_No float_param_regs [] = { AMD64_XMM0, AMD64_XMM1, AMD64_XMM2, AMD64_XMM3 };
static const AMD64_Reg_No return_regs [] = { AMD64_RAX };
static const AMD64_XMM_Reg_No float_return_regs [] = { AMD64_XMM0 };
#define PARAM_REGS G_N_ELEMENTS(param_regs)
#define FLOAT_PARAM_REGS G_N_ELEMENTS(float_param_regs)
#define RETURN_REGS G_N_ELEMENTS(return_regs)
#define FLOAT_RETURN_REGS G_N_ELEMENTS(float_return_regs)
#else
#define PARAM_REGS 6
#define FLOAT_PARAM_REGS 8
#define RETURN_REGS 2
#define FLOAT_RETURN_REGS 2
static const AMD64_Reg_No param_regs [] = {AMD64_RDI, AMD64_RSI, AMD64_RDX,
AMD64_RCX, AMD64_R8, AMD64_R9};
static const AMD64_XMM_Reg_No float_param_regs[] = {AMD64_XMM0, AMD64_XMM1, AMD64_XMM2,
AMD64_XMM3, AMD64_XMM4, AMD64_XMM5,
AMD64_XMM6, AMD64_XMM7};
static const AMD64_Reg_No return_regs [] = {AMD64_RAX, AMD64_RDX};
#endif
typedef struct {
/* Method address to call */
gpointer addr;
/* The trampoline reads this, so keep the size explicit */
int ret_marshal;
/* If ret_marshal != NONE, this is the reg of the vret arg, else -1 (used in out case) */
/* Equivalent of vret_arg_slot in the x86 implementation. */
int vret_arg_reg;
/* The stack slot where the return value will be stored (used in in case) */
int vret_slot;
int stack_usage, map_count;
/* If not -1, then make a virtual call using this vtable offset */
int vcall_offset;
/* If 1, make an indirect call to the address in the rgctx reg */
int calli;
/* Whether this is an in or an out call */
int gsharedvt_in;
/* Maps stack slots/registers in the caller to the stack slots/registers in the callee */
int map [MONO_ZERO_LEN_ARRAY];
} GSharedVtCallInfo;
/* Structure used by the sequence points in AOTed code */
struct SeqPointInfo {
gpointer ss_tramp_addr;
gpointer bp_addrs [MONO_ZERO_LEN_ARRAY];
};
typedef struct {
host_mgreg_t res;
guint8 *ret;
double fregs [8];
host_mgreg_t has_fp;
host_mgreg_t nstack_args;
/* This should come last as the structure is dynamically extended */
host_mgreg_t regs [PARAM_REGS];
} DynCallArgs;
typedef enum {
ArgInIReg,
ArgInFloatSSEReg,
ArgInDoubleSSEReg,
ArgOnStack,
ArgValuetypeInReg,
ArgValuetypeAddrInIReg,
ArgValuetypeAddrOnStack,
/* gsharedvt argument passed by addr */
ArgGSharedVtInReg,
ArgGSharedVtOnStack,
/* Variable sized gsharedvt argument passed/returned by addr */
ArgGsharedvtVariableInReg,
ArgNone /* only in pair_storage */
} ArgStorage;
typedef struct {
gint16 offset;
guint8 reg;
ArgStorage storage : 8;
/* Only if storage == ArgValuetypeInReg */
ArgStorage pair_storage [2];
guint8 pair_regs [2];
/* The size of each pair (bytes) */
int pair_size [2];
int nregs;
/* Only if storage == ArgOnStack */
int arg_size; // Bytes, will always be rounded up/aligned to 8 byte boundary
// Size in bytes for small arguments
int byte_arg_size;
guint8 pass_empty_struct : 1; // Set in scenarios where an empty struct needs to be represented as an argument.
guint8 is_signed : 1;
} ArgInfo;
struct CallInfo {
int nargs;
guint32 stack_usage;
guint32 reg_usage;
guint32 freg_usage;
gboolean need_stack_align;
gboolean gsharedvt;
/* The index of the vret arg in the argument list */
int vret_arg_index;
ArgInfo ret;
ArgInfo sig_cookie;
ArgInfo args [1];
};
typedef struct {
/* General registers */
host_mgreg_t gregs [AMD64_NREG];
/* Floating registers */
double fregs [AMD64_XMM_NREG];
/* Stack usage, used for passing params on stack */
guint32 stack_size;
guint8 *stack;
} CallContext;
#define MONO_CONTEXT_SET_LLVM_EXC_REG(ctx, exc) do { (ctx)->gregs [AMD64_RAX] = (gsize)exc; } while (0)
#define MONO_CONTEXT_SET_LLVM_EH_SELECTOR_REG(ctx, sel) do { (ctx)->gregs [AMD64_RDX] = (gsize)(sel); } while (0)
#define MONO_ARCH_INIT_TOP_LMF_ENTRY(lmf)
#ifdef _MSC_VER
#define MONO_INIT_CONTEXT_FROM_FUNC(ctx, start_func) do { \
guint64 stackptr; \
stackptr = ((guint64)_AddressOfReturnAddress () - sizeof (void*));\
MONO_CONTEXT_SET_IP ((ctx), (start_func)); \
MONO_CONTEXT_SET_BP ((ctx), stackptr); \
MONO_CONTEXT_SET_SP ((ctx), stackptr); \
} while (0)
#else
/*
* __builtin_frame_address () is broken on some older gcc versions in the presence of
* frame pointer elimination, see bug #82095.
*/
#define MONO_INIT_CONTEXT_FROM_FUNC(ctx,start_func) do { \
int tmp; \
guint64 stackptr = (guint64)&tmp; \
MONO_CONTEXT_SET_IP ((ctx), (start_func)); \
MONO_CONTEXT_SET_BP ((ctx), stackptr); \
MONO_CONTEXT_SET_SP ((ctx), stackptr); \
} while (0)
#endif
#if !defined( HOST_WIN32 ) && !defined(__HAIKU__) && defined (HAVE_SIGACTION)
#define MONO_ARCH_USE_SIGACTION 1
#ifdef HAVE_WORKING_SIGALTSTACK
#define MONO_ARCH_SIGSEGV_ON_ALTSTACK
#endif
#endif /* !HOST_WIN32 */
#if !defined(__linux__) && !defined(__sun)
#define MONO_ARCH_NOMAP32BIT 1
#endif
#ifdef TARGET_WIN32
#define MONO_AMD64_ARG_REG1 AMD64_RCX
#define MONO_AMD64_ARG_REG2 AMD64_RDX
#define MONO_AMD64_ARG_REG3 AMD64_R8
#define MONO_AMD64_ARG_REG4 AMD64_R9
#else
#define MONO_AMD64_ARG_REG1 AMD64_RDI
#define MONO_AMD64_ARG_REG2 AMD64_RSI
#define MONO_AMD64_ARG_REG3 AMD64_RDX
#define MONO_AMD64_ARG_REG4 AMD64_RCX
#endif
#define MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS
#define MONO_ARCH_NO_EMULATE_LONG_MUL_OPTS
#define MONO_ARCH_EMULATE_CONV_R8_UN 1
#define MONO_ARCH_EMULATE_FCONV_TO_U8 1
// x64 FullAOT+LLVM fails to pass the basic-float tests without this.
#define MONO_ARCH_EMULATE_FCONV_TO_U4 1
#define MONO_ARCH_EMULATE_FREM 1
#define MONO_ARCH_HAVE_IS_INT_OVERFLOW 1
#define MONO_ARCH_HAVE_INVALIDATE_METHOD 1
#define MONO_ARCH_HAVE_FULL_AOT_TRAMPOLINES 1
#define MONO_ARCH_IMT_REG AMD64_R10
#define MONO_ARCH_IMT_SCRATCH_REG AMD64_R11
#define MONO_ARCH_VTABLE_REG MONO_AMD64_ARG_REG1
/*
* We use r10 for the imt/rgctx register rather than r11 because r11 is
* used by the trampoline as a scratch register and hence might be
* clobbered across method call boundaries.
*/
#define MONO_ARCH_RGCTX_REG MONO_ARCH_IMT_REG
#define MONO_ARCH_HAVE_CMOV_OPS 1
#define MONO_ARCH_HAVE_EXCEPTIONS_INIT 1
#define MONO_ARCH_HAVE_GENERALIZED_IMT_TRAMPOLINE 1
#define MONO_ARCH_HAVE_GET_TRAMPOLINES 1
#define MONO_ARCH_INTERPRETER_SUPPORTED 1
#define MONO_ARCH_AOT_SUPPORTED 1
#define MONO_ARCH_SOFT_DEBUG_SUPPORTED 1
#define MONO_ARCH_SUPPORT_TASKLETS 1
#define MONO_ARCH_GSHARED_SUPPORTED 1
#define MONO_ARCH_DYN_CALL_SUPPORTED 1
#define MONO_ARCH_DYN_CALL_PARAM_AREA 0
#define MONO_ARCH_LLVM_SUPPORTED 1
#if defined(HOST_WIN32) && defined(TARGET_WIN32) && !defined(_MSC_VER)
// Only supported for Windows cross compiler builds, host == Win32, target != Win32
// and only using MSVC for non-cross-compiler builds.
#undef MONO_ARCH_LLVM_SUPPORTED
#endif
#define MONO_ARCH_HAVE_CARD_TABLE_WBARRIER 1
#define MONO_ARCH_HAVE_SETUP_RESUME_FROM_SIGNAL_HANDLER_CTX 1
#define MONO_ARCH_GC_MAPS_SUPPORTED 1
#define MONO_ARCH_HAVE_CONTEXT_SET_INT_REG 1
#define MONO_ARCH_HAVE_SETUP_ASYNC_CALLBACK 1
#define MONO_ARCH_HAVE_CREATE_LLVM_NATIVE_THUNK 1
#define MONO_ARCH_HAVE_OP_TAILCALL_MEMBASE 1
#define MONO_ARCH_HAVE_OP_TAILCALL_REG 1
#define MONO_ARCH_HAVE_SDB_TRAMPOLINES 1
#define MONO_ARCH_HAVE_OP_GENERIC_CLASS_INIT 1
#define MONO_ARCH_HAVE_GENERAL_RGCTX_LAZY_FETCH_TRAMPOLINE 1
#define MONO_ARCH_FLOAT32_SUPPORTED 1
#define MONO_ARCH_LLVM_TARGET_LAYOUT "e-i64:64-i128:128-n8:16:32:64-S128"
#define MONO_ARCH_HAVE_INTERP_PINVOKE_TRAMP
#define MONO_ARCH_HAVE_INTERP_ENTRY_TRAMPOLINE 1
#define MONO_ARCH_HAVE_INTERP_NATIVE_TO_MANAGED 1
#if defined(TARGET_OSX) || defined(__linux__)
#define MONO_ARCH_HAVE_UNWIND_BACKTRACE 1
#endif
#define MONO_ARCH_GSHAREDVT_SUPPORTED 1
#if defined(HOST_TVOS) || defined(HOST_WATCHOS)
/* Neither tvOS nor watchOS gives signal handlers access to a ucontext_t, so we
* can't use signals to translate SIGFPE into a .NET-level exception. */
#define MONO_ARCH_NEED_DIV_CHECK 1
#endif
#if defined(TARGET_TVOS) || defined(TARGET_WATCHOS)
#define MONO_ARCH_EXPLICIT_NULL_CHECKS 1
#endif
/* Used for optimization, not complete */
#define MONO_ARCH_IS_OP_MEMBASE(opcode) ((opcode) == OP_X86_PUSH_MEMBASE)
#define MONO_ARCH_EMIT_BOUNDS_CHECK(cfg, array_reg, offset, index_reg, ex_name) do { \
MonoInst *inst; \
MONO_INST_NEW ((cfg), inst, OP_AMD64_ICOMPARE_MEMBASE_REG); \
inst->inst_basereg = array_reg; \
inst->inst_offset = offset; \
inst->sreg2 = index_reg; \
MONO_ADD_INS ((cfg)->cbb, inst); \
MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, ex_name); \
} while (0)
// Does the ABI have a volatile non-parameter register, so tailcall
// can pass context to generics or interfaces?
#define MONO_ARCH_HAVE_VOLATILE_NON_PARAM_REGISTER 1
void
mono_amd64_patch (unsigned char* code, gpointer target);
void
mono_amd64_throw_exception (guint64 dummy1, guint64 dummy2, guint64 dummy3, guint64 dummy4,
guint64 dummy5, guint64 dummy6,
MonoContext *mctx, MonoObject *exc, gboolean rethrow, gboolean preserve_ips);
void
mono_amd64_throw_corlib_exception (guint64 dummy1, guint64 dummy2, guint64 dummy3, guint64 dummy4,
guint64 dummy5, guint64 dummy6,
MonoContext *mctx, guint32 ex_token_index, gint64 pc_offset);
void
mono_amd64_resume_unwind (guint64 dummy1, guint64 dummy2, guint64 dummy3, guint64 dummy4,
guint64 dummy5, guint64 dummy6,
MonoContext *mctx, guint32 dummy7, gint64 dummy8);
gpointer
mono_amd64_start_gsharedvt_call (GSharedVtCallInfo *info, gpointer *caller, gpointer *callee, gpointer mrgctx_reg);
GSList*
mono_amd64_get_exception_trampolines (gboolean aot);
int
mono_amd64_get_tls_gs_offset (void);
#if defined(TARGET_WIN32) && !defined(DISABLE_JIT)
#define MONO_ARCH_HAVE_UNWIND_TABLE 1
#define MONO_ARCH_HAVE_CODE_CHUNK_TRACKING 1
#ifdef ENABLE_CHECKED_BUILD
#define ENABLE_CHECKED_BUILD_UNWINDINFO
#endif
#define MONO_MAX_UNWIND_CODES 22
typedef enum _UNWIND_OP_CODES {
UWOP_PUSH_NONVOL = 0, /* info == register number */
UWOP_ALLOC_LARGE, /* no info, alloc size in next 2 slots */
UWOP_ALLOC_SMALL, /* info == size of allocation / 8 - 1 */
UWOP_SET_FPREG, /* no info, FP = RSP + UNWIND_INFO.FPRegOffset*16 */
UWOP_SAVE_NONVOL, /* info == register number, offset in next slot */
UWOP_SAVE_NONVOL_FAR, /* info == register number, offset in next 2 slots */
UWOP_SAVE_XMM128, /* info == XMM reg number, offset in next slot */
UWOP_SAVE_XMM128_FAR, /* info == XMM reg number, offset in next 2 slots */
UWOP_PUSH_MACHFRAME /* info == 0: no error-code, 1: error-code */
} UNWIND_CODE_OPS;
typedef union _UNWIND_CODE {
struct {
guchar CodeOffset;
guchar UnwindOp : 4;
guchar OpInfo : 4;
};
gushort FrameOffset;
} UNWIND_CODE, *PUNWIND_CODE;
typedef struct _UNWIND_INFO {
guchar Version : 3;
guchar Flags : 5;
guchar SizeOfProlog;
guchar CountOfCodes;
guchar FrameRegister : 4;
guchar FrameOffset : 4;
UNWIND_CODE UnwindCode[MONO_MAX_UNWIND_CODES];
/* UNWIND_CODE MoreUnwindCode[((CountOfCodes + 1) & ~1) - 1];
* union {
* OPTIONAL ULONG ExceptionHandler;
* OPTIONAL ULONG FunctionEntry;
* };
* OPTIONAL ULONG ExceptionData[]; */
} UNWIND_INFO, *PUNWIND_INFO;
static inline guint
mono_arch_unwindinfo_get_size (guchar code_count)
{
// The returned size will be used as the allocated size for unwind data trailing the memory used by the compiled method.
// The Windows x64 ABI has some requirements on the data written into this memory. Both the RUNTIME_FUNCTION
// and UNWIND_INFO structs need to be DWORD aligned, and the unwind codes array
// should have an even number of entries, while the count stored in the UNWIND_INFO struct should hold the real number
// of unwind codes. Adding extra bytes to the total size makes sure we can properly align the RUNTIME_FUNCTION
// struct. Since our UNWIND_INFO follows the RUNTIME_FUNCTION struct in memory, it will automatically be DWORD aligned
// as well. Also make sure to allocate room for a padding UNWIND_CODE, if needed.
return (sizeof (target_mgreg_t) + sizeof (UNWIND_INFO)) -
(sizeof (UNWIND_CODE) * ((MONO_MAX_UNWIND_CODES - ((code_count + 1) & ~1))));
/* FIXME Something simpler should work:
return sizeof (UNWIND_INFO) + sizeof (UNWIND_CODE) * (code_count + (code_count & 1));
*/
}
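/*
* Illustrative arithmetic (not part of the original source), assuming a
* 2-byte UNWIND_CODE, a 48-byte UNWIND_INFO (4-byte header + 22 codes) and
* an 8-byte target_mgreg_t: for code_count == 3 the padded count is
* (3 + 1) & ~1 == 4, so the function returns 8 + 48 - 2 * (22 - 4) == 20
* bytes: the 4-byte header, 4 (padded) unwind codes and 8 bytes of slack
* used to align the preceding RUNTIME_FUNCTION.
*/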
guchar
mono_arch_unwindinfo_get_code_count (GSList *unwind_ops);
PUNWIND_INFO
mono_arch_unwindinfo_alloc_unwind_info (GSList *unwind_ops);
void
mono_arch_unwindinfo_free_unwind_info (PUNWIND_INFO unwind_info);
guint
mono_arch_unwindinfo_init_method_unwind_info (gpointer cfg);
void
mono_arch_unwindinfo_install_method_unwind_info (PUNWIND_INFO *monoui, gpointer code, guint code_size);
void
mono_arch_unwindinfo_install_tramp_unwind_info (GSList *unwind_ops, gpointer code, guint code_size);
void
mono_arch_code_chunk_new (void *chunk, int size);
void
mono_arch_code_chunk_destroy (void *chunk);
#endif /* defined(TARGET_WIN32) && !defined(DISABLE_JIT) */
#ifdef MONO_ARCH_HAVE_UNWIND_TABLE
// Allocate additional space for at most 3 unwind ops (push + fp or sp small|large) plus the unwind info struct trailing the code buffer.
#define MONO_TRAMPOLINE_UNWINDINFO_SIZE(max_code_count) (mono_arch_unwindinfo_get_size (max_code_count))
#define MONO_MAX_TRAMPOLINE_UNWINDINFO_SIZE (MONO_TRAMPOLINE_UNWINDINFO_SIZE(3))
static inline gboolean
mono_arch_unwindinfo_validate_size (GSList *unwind_ops, guint max_size)
{
guint current_size = mono_arch_unwindinfo_get_size (mono_arch_unwindinfo_get_code_count (unwind_ops));
return current_size <= max_size;
}
#else
#define MONO_TRAMPOLINE_UNWINDINFO_SIZE(max_code_count) 0
#define MONO_MAX_TRAMPOLINE_UNWINDINFO_SIZE 0
static inline gboolean
mono_arch_unwindinfo_validate_size (GSList *unwind_ops, guint max_size)
{
return TRUE;
}
#endif
CallInfo* mono_arch_get_call_info (MonoMemPool *mp, MonoMethodSignature *sig);
#endif /* __MONO_MINI_AMD64_H__ */
| 1 |
dotnet/runtime | 66,006 | Revert "[mono][jit] Remove support for -O=-float32, i.e. treating r4 values a…" | Reverts this as x86 still uses it.
| vargaz | 2022-03-01T15:15:53Z | 2022-03-01T20:09:47Z | af71404d9a3b38e63408af47cc847ac1a1fcf4e1 | 2dd232a53c38ac874b15fe504df275b660988294 | Revert "[mono][jit] Remove support for -O=-float32, i.e. treating r4 values a…". Reverts this as x86 still uses it.
| ./src/mono/mono/mini/mini-arm.c | /**
* \file
* ARM backend for the Mono code generator
*
* Authors:
* Paolo Molaro ([email protected])
* Dietmar Maurer ([email protected])
*
* (C) 2003 Ximian, Inc.
* Copyright 2003-2011 Novell, Inc (http://www.novell.com)
* Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
* Licensed under the MIT license. See LICENSE file in the project root for full license information.
*/
#include "mini.h"
#include <string.h>
#include <mono/metadata/abi-details.h>
#include <mono/metadata/appdomain.h>
#include <mono/metadata/profiler-private.h>
#include <mono/metadata/debug-helpers.h>
#include <mono/metadata/tokentype.h>
#include <mono/utils/mono-mmap.h>
#include <mono/utils/mono-hwcap.h>
#include <mono/utils/mono-memory-model.h>
#include <mono/utils/mono-threads-coop.h>
#include <mono/utils/unlocked.h>
#include "interp/interp.h"
#include "mini-arm.h"
#include "cpu-arm.h"
#include "ir-emit.h"
#include "mini-gc.h"
#include "mini-runtime.h"
#include "aot-runtime.h"
#include "mono/arch/arm/arm-vfp-codegen.h"
#include "mono/utils/mono-tls-inline.h"
/* Sanity check: This makes no sense */
#if defined(ARM_FPU_NONE) && (defined(ARM_FPU_VFP) || defined(ARM_FPU_VFP_HARD))
#error "ARM_FPU_NONE is defined while one of ARM_FPU_VFP/ARM_FPU_VFP_HARD is defined"
#endif
/*
* IS_SOFT_FLOAT: Is full software floating point used?
* IS_HARD_FLOAT: Is full hardware floating point used?
* IS_VFP: Is hardware floating point with software ABI used?
*
* These are not necessarily constants, e.g. IS_SOFT_FLOAT and
* IS_VFP may delegate to mono_arch_is_soft_float ().
*/
#if defined(ARM_FPU_VFP_HARD)
#define IS_SOFT_FLOAT (FALSE)
#define IS_HARD_FLOAT (TRUE)
#define IS_VFP (TRUE)
#elif defined(ARM_FPU_NONE)
#define IS_SOFT_FLOAT (mono_arch_is_soft_float ())
#define IS_HARD_FLOAT (FALSE)
#define IS_VFP (!mono_arch_is_soft_float ())
#else
#define IS_SOFT_FLOAT (FALSE)
#define IS_HARD_FLOAT (FALSE)
#define IS_VFP (TRUE)
#endif
#define THUNK_SIZE (3 * 4)
#if __APPLE__
G_BEGIN_DECLS
void sys_icache_invalidate (void *start, size_t len);
G_END_DECLS
#endif
/* This mutex protects architecture specific caches */
#define mono_mini_arch_lock() mono_os_mutex_lock (&mini_arch_mutex)
#define mono_mini_arch_unlock() mono_os_mutex_unlock (&mini_arch_mutex)
static mono_mutex_t mini_arch_mutex;
static gboolean v5_supported = FALSE;
static gboolean v6_supported = FALSE;
static gboolean v7_supported = FALSE;
static gboolean v7s_supported = FALSE;
static gboolean v7k_supported = FALSE;
static gboolean thumb_supported = FALSE;
static gboolean thumb2_supported = FALSE;
/*
* Whether to use the ARM EABI
*/
static gboolean eabi_supported = FALSE;
/*
* Whether to use the iPhone ABI extensions:
* http://developer.apple.com/library/ios/documentation/Xcode/Conceptual/iPhoneOSABIReference/index.html
* Basically, r7 is used as a frame pointer and it should point to the saved r7 + lr.
* This is required for debugging/profiling tools to work, but it has some overhead so it should
* only be turned on in debug builds.
*/
static gboolean iphone_abi = FALSE;
/*
* The FPU we are generating code for. This is NOT runtime configurable right now,
* since some things like MONO_ARCH_CALLEE_FREGS still depend on defines.
*/
static MonoArmFPU arm_fpu;
#if defined(ARM_FPU_VFP_HARD)
/*
* On armhf, d0-d7 are used for argument passing and d8-d15
* must be preserved across calls, which leaves us no room
* for scratch registers. So we use d14-d15 but back up their
* previous contents to a stack slot before using them - see
* mono_arm_emit_vfp_scratch_save/_restore ().
*/
static int vfp_scratch1 = ARM_VFP_D14;
static int vfp_scratch2 = ARM_VFP_D15;
#else
/*
* On armel, d0-d7 do not need to be preserved, so we can
* freely make use of them as scratch registers.
*/
static int vfp_scratch1 = ARM_VFP_D0;
static int vfp_scratch2 = ARM_VFP_D1;
#endif
static int i8_align;
static gpointer single_step_tramp, breakpoint_tramp;
/*
* The code generated for sequence points reads from this location, which is
* made read-only when single stepping is enabled.
*/
static gpointer ss_trigger_page;
/* Enabled breakpoints read from this trigger page */
static gpointer bp_trigger_page;
/*
* TODO:
* floating point support: on ARM it is a mess, there are at least 3
* different setups, each of which is binary-incompatible with the others.
* 1) FPA: old and ugly, but unfortunately what current distros use;
* the double binary format has the two words swapped. 8 double registers.
* Implemented usually by kernel emulation.
* 2) softfloat: the compiler emulates all the fp ops. Usually uses the
* ugly swapped double format (I guess a softfloat-vfp exists, too, though).
* 3) VFP: the new and actually sensible and useful FP support. Implemented
* in HW or kernel-emulated, requires new tools. I think this is what symbian uses.
*
* We do not care about FPA. We will support soft float and VFP.
*/
#define arm_is_imm12(v) ((v) > -4096 && (v) < 4096)
#define arm_is_imm8(v) ((v) > -256 && (v) < 256)
#define arm_is_fpimm8(v) ((v) >= -1020 && (v) <= 1020)
#define LDR_MASK ((0xf << ARMCOND_SHIFT) | (3 << 26) | (1 << 22) | (1 << 20) | (15 << 12))
#define LDR_PC_VAL ((ARMCOND_AL << ARMCOND_SHIFT) | (1 << 26) | (0 << 22) | (1 << 20) | (15 << 12))
#define IS_LDR_PC(val) (((val) & LDR_MASK) == LDR_PC_VAL)
//#define DEBUG_IMT 0
#ifndef DISABLE_JIT
static void mono_arch_compute_omit_fp (MonoCompile *cfg);
#endif
static guint8*
emit_aotconst (MonoCompile *cfg, guint8 *code, int dreg, int patch_type, gpointer data);
const char*
mono_arch_regname (int reg)
{
static const char * rnames[] = {
"arm_r0", "arm_r1", "arm_r2", "arm_r3", "arm_v1",
"arm_v2", "arm_v3", "arm_v4", "arm_v5", "arm_v6",
"arm_v7", "arm_fp", "arm_ip", "arm_sp", "arm_lr",
"arm_pc"
};
if (reg >= 0 && reg < 16)
return rnames [reg];
return "unknown";
}
const char*
mono_arch_fregname (int reg)
{
static const char * rnames[] = {
"arm_f0", "arm_f1", "arm_f2", "arm_f3", "arm_f4",
"arm_f5", "arm_f6", "arm_f7", "arm_f8", "arm_f9",
"arm_f10", "arm_f11", "arm_f12", "arm_f13", "arm_f14",
"arm_f15", "arm_f16", "arm_f17", "arm_f18", "arm_f19",
"arm_f20", "arm_f21", "arm_f22", "arm_f23", "arm_f24",
"arm_f25", "arm_f26", "arm_f27", "arm_f28", "arm_f29",
"arm_f30", "arm_f31"
};
if (reg >= 0 && reg < 32)
return rnames [reg];
return "unknown";
}
#ifndef DISABLE_JIT
static guint8*
emit_big_add_temp (guint8 *code, int dreg, int sreg, int imm, int temp)
{
int imm8, rot_amount;
g_assert (temp == ARMREG_IP || temp == ARMREG_LR);
if (imm == 0) {
if (sreg != dreg)
ARM_MOV_REG_REG (code, dreg, sreg);
} else if ((imm8 = mono_arm_is_rotated_imm8 (imm, &rot_amount)) >= 0) {
ARM_ADD_REG_IMM (code, dreg, sreg, imm8, rot_amount);
return code;
}
if (dreg == sreg) {
code = mono_arm_emit_load_imm (code, temp, imm);
ARM_ADD_REG_REG (code, dreg, sreg, temp);
} else {
code = mono_arm_emit_load_imm (code, dreg, imm);
ARM_ADD_REG_REG (code, dreg, dreg, sreg);
}
return code;
}
static guint8*
emit_big_add (guint8 *code, int dreg, int sreg, int imm)
{
return emit_big_add_temp (code, dreg, sreg, imm, ARMREG_IP);
}
static guint8*
emit_ldr_imm (guint8 *code, int dreg, int sreg, int imm)
{
if (!arm_is_imm12 (imm)) {
g_assert (dreg != sreg);
code = emit_big_add (code, dreg, sreg, imm);
ARM_LDR_IMM (code, dreg, dreg, 0);
} else {
ARM_LDR_IMM (code, dreg, sreg, imm);
}
return code;
}
/* If dreg == sreg, this clobbers IP */
static guint8*
emit_sub_imm (guint8 *code, int dreg, int sreg, int imm)
{
int imm8, rot_amount;
if ((imm8 = mono_arm_is_rotated_imm8 (imm, &rot_amount)) >= 0) {
ARM_SUB_REG_IMM (code, dreg, sreg, imm8, rot_amount);
return code;
}
if (dreg == sreg) {
code = mono_arm_emit_load_imm (code, ARMREG_IP, imm);
ARM_SUB_REG_REG (code, dreg, sreg, ARMREG_IP);
} else {
code = mono_arm_emit_load_imm (code, dreg, imm);
ARM_SUB_REG_REG (code, dreg, dreg, sreg);
}
return code;
}
static guint8*
emit_memcpy (guint8 *code, int size, int dreg, int doffset, int sreg, int soffset)
{
/* we can use r0-r3, since this is called only for incoming args on the stack */
if (size > sizeof (target_mgreg_t) * 4) {
guint8 *start_loop;
code = emit_big_add (code, ARMREG_R0, sreg, soffset);
code = emit_big_add (code, ARMREG_R1, dreg, doffset);
start_loop = code = mono_arm_emit_load_imm (code, ARMREG_R2, size);
ARM_LDR_IMM (code, ARMREG_R3, ARMREG_R0, 0);
ARM_STR_IMM (code, ARMREG_R3, ARMREG_R1, 0);
ARM_ADD_REG_IMM8 (code, ARMREG_R0, ARMREG_R0, 4);
ARM_ADD_REG_IMM8 (code, ARMREG_R1, ARMREG_R1, 4);
ARM_SUBS_REG_IMM8 (code, ARMREG_R2, ARMREG_R2, 4);
ARM_B_COND (code, ARMCOND_NE, 0);
arm_patch (code - 4, start_loop);
return code;
}
if (arm_is_imm12 (doffset) && arm_is_imm12 (doffset + size) &&
arm_is_imm12 (soffset) && arm_is_imm12 (soffset + size)) {
while (size >= 4) {
ARM_LDR_IMM (code, ARMREG_LR, sreg, soffset);
ARM_STR_IMM (code, ARMREG_LR, dreg, doffset);
doffset += 4;
soffset += 4;
size -= 4;
}
} else if (size) {
code = emit_big_add (code, ARMREG_R0, sreg, soffset);
code = emit_big_add (code, ARMREG_R1, dreg, doffset);
doffset = soffset = 0;
while (size >= 4) {
ARM_LDR_IMM (code, ARMREG_LR, ARMREG_R0, soffset);
ARM_STR_IMM (code, ARMREG_LR, ARMREG_R1, doffset);
doffset += 4;
soffset += 4;
size -= 4;
}
}
g_assert (size == 0);
return code;
}
static guint8*
emit_jmp_reg (guint8 *code, int reg)
{
if (thumb_supported)
ARM_BX (code, reg);
else
ARM_MOV_REG_REG (code, ARMREG_PC, reg);
return code;
}
static guint8*
emit_call_reg (guint8 *code, int reg)
{
if (v5_supported) {
ARM_BLX_REG (code, reg);
} else {
ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
return emit_jmp_reg (code, reg);
}
return code;
}
static guint8*
emit_call_seq (MonoCompile *cfg, guint8 *code)
{
if (cfg->method->dynamic) {
ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
ARM_B (code, 0);
*(gpointer*)code = NULL;
code += 4;
code = emit_call_reg (code, ARMREG_IP);
} else {
ARM_BL (code, 0);
}
cfg->thunk_area += THUNK_SIZE;
return code;
}
guint8*
mono_arm_patchable_b (guint8 *code, int cond)
{
ARM_B_COND (code, cond, 0);
return code;
}
guint8*
mono_arm_patchable_bl (guint8 *code, int cond)
{
ARM_BL_COND (code, cond, 0);
return code;
}
#if defined(__ARM_EABI__) && defined(__linux__) && !defined(HOST_ANDROID) && !defined(MONO_CROSS_COMPILE)
#define HAVE_AEABI_READ_TP 1
#endif
#ifdef HAVE_AEABI_READ_TP
G_BEGIN_DECLS
gpointer __aeabi_read_tp (void);
G_END_DECLS
#endif
gboolean
mono_arch_have_fast_tls (void)
{
#ifdef HAVE_AEABI_READ_TP
static gboolean have_fast_tls = FALSE;
static gboolean inited = FALSE;
if (mini_debug_options.use_fallback_tls)
return FALSE;
if (inited)
return have_fast_tls;
if (v7_supported) {
gpointer tp1, tp2;
tp1 = __aeabi_read_tp ();
asm volatile("mrc p15, 0, %0, c13, c0, 3" : "=r" (tp2));
have_fast_tls = tp1 && tp1 == tp2;
}
inited = TRUE;
return have_fast_tls;
#else
return FALSE;
#endif
}
static guint8*
emit_tls_get (guint8 *code, int dreg, int tls_offset)
{
g_assert (v7_supported);
ARM_MRC (code, 15, 0, dreg, 13, 0, 3);
ARM_LDR_IMM (code, dreg, dreg, tls_offset);
return code;
}
static guint8*
emit_tls_set (guint8 *code, int sreg, int tls_offset)
{
int tp_reg = (sreg != ARMREG_R0) ? ARMREG_R0 : ARMREG_R1;
g_assert (v7_supported);
ARM_MRC (code, 15, 0, tp_reg, 13, 0, 3);
ARM_STR_IMM (code, sreg, tp_reg, tls_offset);
return code;
}
/*
* emit_save_lmf:
*
* Emit code to push an LMF structure on the LMF stack.
* On arm, this is intermixed with the initialization of other fields of the structure.
*/
static guint8*
emit_save_lmf (MonoCompile *cfg, guint8 *code, gint32 lmf_offset)
{
int i;
if (mono_arch_have_fast_tls () && mono_tls_get_tls_offset (TLS_KEY_LMF_ADDR) != -1) {
code = emit_tls_get (code, ARMREG_R0, mono_tls_get_tls_offset (TLS_KEY_LMF_ADDR));
} else {
mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_JIT_ICALL_ID,
GUINT_TO_POINTER (MONO_JIT_ICALL_mono_tls_get_lmf_addr_extern));
code = emit_call_seq (cfg, code);
}
/* we build the MonoLMF structure on the stack - see mini-arm.h */
/* lmf_offset is the offset from the previous stack pointer,
* alloc_size is the total stack space allocated, so the offset
* of MonoLMF from the current stack ptr is alloc_size - lmf_offset.
* The pointer to the struct is put in r1 (new_lmf).
* ip is used as scratch
* The callee-saved registers are already in the MonoLMF structure
*/
code = emit_big_add (code, ARMREG_R1, ARMREG_SP, lmf_offset);
/* r0 is the result from mono_get_lmf_addr () */
ARM_STR_IMM (code, ARMREG_R0, ARMREG_R1, MONO_STRUCT_OFFSET (MonoLMF, lmf_addr));
/* new_lmf->previous_lmf = *lmf_addr */
ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R0, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
ARM_STR_IMM (code, ARMREG_IP, ARMREG_R1, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
/* *(lmf_addr) = r1 */
ARM_STR_IMM (code, ARMREG_R1, ARMREG_R0, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
/* Skip method (only needed for trampoline LMF frames) */
ARM_STR_IMM (code, ARMREG_SP, ARMREG_R1, MONO_STRUCT_OFFSET (MonoLMF, sp));
ARM_STR_IMM (code, ARMREG_FP, ARMREG_R1, MONO_STRUCT_OFFSET (MonoLMF, fp));
/* save the current IP */
ARM_MOV_REG_REG (code, ARMREG_IP, ARMREG_PC);
ARM_STR_IMM (code, ARMREG_IP, ARMREG_R1, MONO_STRUCT_OFFSET (MonoLMF, ip));
for (i = 0; i < MONO_ABI_SIZEOF (MonoLMF); i += sizeof (target_mgreg_t))
mini_gc_set_slot_type_from_fp (cfg, lmf_offset + i, SLOT_NOREF);
return code;
}
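/*
* Conceptually (a sketch, not code from this file), the sequence above is
* an LMF-stack push, with emit_restore_lmf () emitting the matching pop:
*
*   new_lmf->previous_lmf = *lmf_addr;   // link to the old top
*   *lmf_addr = new_lmf;                 // push
*   ...
*   *lmf_addr = new_lmf->previous_lmf;   // pop, on method exit
*/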
typedef struct {
gint32 vreg;
gint32 hreg;
} FloatArgData;
static guint8 *
emit_float_args (MonoCompile *cfg, MonoCallInst *inst, guint8 *code, int *max_len, guint *offset)
{
GSList *list;
set_code_cursor (cfg, code);
for (list = inst->float_args; list; list = list->next) {
FloatArgData *fad = (FloatArgData*)list->data;
MonoInst *var = get_vreg_to_inst (cfg, fad->vreg);
gboolean imm = arm_is_fpimm8 (var->inst_offset);
/* 4+1 insns for emit_big_add () and 1 for FLDS. */
if (!imm)
*max_len += 20 + 4;
*max_len += 4;
code = realloc_code (cfg, *max_len);
if (!imm) {
code = emit_big_add (code, ARMREG_LR, var->inst_basereg, var->inst_offset);
ARM_FLDS (code, fad->hreg, ARMREG_LR, 0);
} else
ARM_FLDS (code, fad->hreg, var->inst_basereg, var->inst_offset);
set_code_cursor (cfg, code);
*offset = code - cfg->native_code;
}
return code;
}
static guint8 *
mono_arm_emit_vfp_scratch_save (MonoCompile *cfg, guint8 *code, int reg)
{
MonoInst *inst;
g_assert (reg == vfp_scratch1 || reg == vfp_scratch2);
inst = cfg->arch.vfp_scratch_slots [reg == vfp_scratch1 ? 0 : 1];
if (IS_HARD_FLOAT) {
if (!arm_is_fpimm8 (inst->inst_offset)) {
code = emit_big_add (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
ARM_FSTD (code, reg, ARMREG_LR, 0);
} else
ARM_FSTD (code, reg, inst->inst_basereg, inst->inst_offset);
}
return code;
}
static guint8 *
mono_arm_emit_vfp_scratch_restore (MonoCompile *cfg, guint8 *code, int reg)
{
MonoInst *inst;
g_assert (reg == vfp_scratch1 || reg == vfp_scratch2);
inst = cfg->arch.vfp_scratch_slots [reg == vfp_scratch1 ? 0 : 1];
if (IS_HARD_FLOAT) {
if (!arm_is_fpimm8 (inst->inst_offset)) {
code = emit_big_add (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
ARM_FLDD (code, reg, ARMREG_LR, 0);
} else
ARM_FLDD (code, reg, inst->inst_basereg, inst->inst_offset);
}
return code;
}
/*
* emit_restore_lmf:
*
* Emit code to pop an LMF structure from the LMF stack.
*/
static guint8*
emit_restore_lmf (MonoCompile *cfg, guint8 *code, gint32 lmf_offset)
{
int basereg, offset;
if (lmf_offset < 32) {
basereg = cfg->frame_reg;
offset = lmf_offset;
} else {
basereg = ARMREG_R2;
offset = 0;
code = emit_big_add (code, ARMREG_R2, cfg->frame_reg, lmf_offset);
}
/* ip = previous_lmf */
ARM_LDR_IMM (code, ARMREG_IP, basereg, offset + MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
/* lr = lmf_addr */
ARM_LDR_IMM (code, ARMREG_LR, basereg, offset + MONO_STRUCT_OFFSET (MonoLMF, lmf_addr));
/* *(lmf_addr) = previous_lmf */
ARM_STR_IMM (code, ARMREG_IP, ARMREG_LR, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
return code;
}
#endif /* #ifndef DISABLE_JIT */
/*
* mono_arch_get_argument_info:
* @csig: a method signature
* @param_count: the number of parameters to consider
* @arg_info: an array to store the result infos
*
* Gathers information on parameters such as size, alignment and
* padding. arg_info should be large enough to hold param_count + 1 entries.
*
* Returns the size of the activation frame.
*/
int
mono_arch_get_argument_info (MonoMethodSignature *csig, int param_count, MonoJitArgumentInfo *arg_info)
{
int k, frame_size = 0;
guint32 size, align, pad;
int offset = 8;
MonoType *t;
t = mini_get_underlying_type (csig->ret);
if (MONO_TYPE_ISSTRUCT (t)) {
frame_size += sizeof (target_mgreg_t);
offset += 4;
}
arg_info [0].offset = offset;
if (csig->hasthis) {
frame_size += sizeof (target_mgreg_t);
offset += 4;
}
arg_info [0].size = frame_size;
for (k = 0; k < param_count; k++) {
size = mini_type_stack_size_full (csig->params [k], &align, csig->pinvoke && !csig->marshalling_disabled);
/* ignore alignment for now */
align = 1;
frame_size += pad = (align - (frame_size & (align - 1))) & (align - 1);
arg_info [k].pad = pad;
frame_size += size;
arg_info [k + 1].pad = 0;
arg_info [k + 1].size = size;
offset += pad;
arg_info [k + 1].offset = offset;
offset += size;
}
align = MONO_ARCH_FRAME_ALIGNMENT;
frame_size += pad = (align - (frame_size & (align - 1))) & (align - 1);
arg_info [k].pad = pad;
return frame_size;
}
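/*
* Typical usage sketch (hypothetical caller, not from this file); g_newa ()
* is the glib stack allocator:
*
*   MonoJitArgumentInfo *ai = g_newa (MonoJitArgumentInfo, sig->param_count + 1);
*   int frame_size = mono_arch_get_argument_info (sig, sig->param_count, ai);
*/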
#define MAX_ARCH_DELEGATE_PARAMS 3
static guint8*
get_delegate_invoke_impl (MonoTrampInfo **info, gboolean has_target, int param_count)
{
guint8 *code, *start;
GSList *unwind_ops = mono_arch_get_cie_program ();
if (has_target) {
start = code = mono_global_codeman_reserve (12);
/* Replace the this argument with the target */
ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R0, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr));
ARM_LDR_IMM (code, ARMREG_R0, ARMREG_R0, MONO_STRUCT_OFFSET (MonoDelegate, target));
ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
g_assert ((code - start) <= 12);
mono_arch_flush_icache (start, 12);
MONO_PROFILER_RAISE (jit_code_buffer, (start, code - start, MONO_PROFILER_CODE_BUFFER_DELEGATE_INVOKE, NULL));
} else {
int size, i;
size = 8 + param_count * 4;
start = code = mono_global_codeman_reserve (size);
ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R0, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr));
/* slide down the arguments */
for (i = 0; i < param_count; ++i) {
ARM_MOV_REG_REG (code, (ARMREG_R0 + i), (ARMREG_R0 + i + 1));
}
ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
g_assert ((code - start) <= size);
mono_arch_flush_icache (start, size);
MONO_PROFILER_RAISE (jit_code_buffer, (start, code - start, MONO_PROFILER_CODE_BUFFER_DELEGATE_INVOKE, NULL));
}
if (has_target) {
*info = mono_tramp_info_create ("delegate_invoke_impl_has_target", start, code - start, NULL, unwind_ops);
} else {
char *name = g_strdup_printf ("delegate_invoke_impl_target_%d", param_count);
*info = mono_tramp_info_create (name, start, code - start, NULL, unwind_ops);
g_free (name);
}
return start;
}
/*
* mono_arch_get_delegate_invoke_impls:
*
* Return a list of MonoAotTrampInfo structures for the delegate invoke impl
* trampolines.
*/
GSList*
mono_arch_get_delegate_invoke_impls (void)
{
GSList *res = NULL;
MonoTrampInfo *info;
int i;
get_delegate_invoke_impl (&info, TRUE, 0);
res = g_slist_prepend (res, info);
for (i = 0; i <= MAX_ARCH_DELEGATE_PARAMS; ++i) {
get_delegate_invoke_impl (&info, FALSE, i);
res = g_slist_prepend (res, info);
}
return res;
}
gpointer
mono_arch_get_delegate_invoke_impl (MonoMethodSignature *sig, gboolean has_target)
{
guint8 *code, *start;
MonoType *sig_ret;
/* FIXME: Support more cases */
sig_ret = mini_get_underlying_type (sig->ret);
if (MONO_TYPE_ISSTRUCT (sig_ret))
return NULL;
if (has_target) {
static guint8* cached = NULL;
mono_mini_arch_lock ();
if (cached) {
mono_mini_arch_unlock ();
return cached;
}
if (mono_ee_features.use_aot_trampolines) {
start = (guint8*)mono_aot_get_trampoline ("delegate_invoke_impl_has_target");
} else {
MonoTrampInfo *info;
start = get_delegate_invoke_impl (&info, TRUE, 0);
mono_tramp_info_register (info, NULL);
}
cached = start;
mono_mini_arch_unlock ();
return cached;
} else {
static guint8* cache [MAX_ARCH_DELEGATE_PARAMS + 1] = {NULL};
int i;
if (sig->param_count > MAX_ARCH_DELEGATE_PARAMS)
return NULL;
for (i = 0; i < sig->param_count; ++i)
if (!mono_is_regsize_var (sig->params [i]))
return NULL;
mono_mini_arch_lock ();
code = cache [sig->param_count];
if (code) {
mono_mini_arch_unlock ();
return code;
}
if (mono_ee_features.use_aot_trampolines) {
char *name = g_strdup_printf ("delegate_invoke_impl_target_%d", sig->param_count);
start = (guint8*)mono_aot_get_trampoline (name);
g_free (name);
} else {
MonoTrampInfo *info;
start = get_delegate_invoke_impl (&info, FALSE, sig->param_count);
mono_tramp_info_register (info, NULL);
}
cache [sig->param_count] = start;
mono_mini_arch_unlock ();
return start;
}
return NULL;
}
gpointer
mono_arch_get_delegate_virtual_invoke_impl (MonoMethodSignature *sig, MonoMethod *method, int offset, gboolean load_imt_reg)
{
return NULL;
}
gpointer
mono_arch_get_this_arg_from_call (host_mgreg_t *regs, guint8 *code)
{
return (gpointer)regs [ARMREG_R0];
}
/*
* Initialize the cpu to execute managed code.
*/
void
mono_arch_cpu_init (void)
{
i8_align = MONO_ABI_ALIGNOF (gint64);
#ifdef MONO_CROSS_COMPILE
/* Need to set the alignment of i8 since it can differ on the target */
#ifdef TARGET_ANDROID
/* linux gnueabi */
mono_type_set_alignment (MONO_TYPE_I8, i8_align);
#endif
#endif
}
/*
* Initialize architecture specific code.
*/
void
mono_arch_init (void)
{
char *cpu_arch;
#ifdef TARGET_WATCHOS
mini_debug_options.soft_breakpoints = TRUE;
#endif
mono_os_mutex_init_recursive (&mini_arch_mutex);
if (mini_debug_options.soft_breakpoints) {
if (!mono_aot_only)
breakpoint_tramp = mini_get_breakpoint_trampoline ();
} else {
ss_trigger_page = mono_valloc (NULL, mono_pagesize (), MONO_MMAP_READ, MONO_MEM_ACCOUNT_OTHER);
bp_trigger_page = mono_valloc (NULL, mono_pagesize (), MONO_MMAP_READ, MONO_MEM_ACCOUNT_OTHER);
mono_mprotect (bp_trigger_page, mono_pagesize (), 0);
}
#if defined(__ARM_EABI__)
eabi_supported = TRUE;
#endif
#if defined(ARM_FPU_VFP_HARD)
arm_fpu = MONO_ARM_FPU_VFP_HARD;
#else
arm_fpu = MONO_ARM_FPU_VFP;
#if defined(ARM_FPU_NONE) && !defined(TARGET_IOS)
/*
* If we're compiling with a soft float fallback and it
* turns out that no VFP unit is available, we need to
* switch to soft float. We don't do this for iOS, since
* iOS devices always have a VFP unit.
*/
if (!mono_hwcap_arm_has_vfp)
arm_fpu = MONO_ARM_FPU_NONE;
/*
* This environment variable can be useful in testing
* environments to make sure the soft float fallback
* works. Most ARM devices have VFP units these days, so
* normally soft float code would not be exercised much.
*/
char *soft = g_getenv ("MONO_ARM_FORCE_SOFT_FLOAT");
if (soft && !strncmp (soft, "1", 1))
arm_fpu = MONO_ARM_FPU_NONE;
g_free (soft);
#endif
#endif
v5_supported = mono_hwcap_arm_is_v5;
v6_supported = mono_hwcap_arm_is_v6;
v7_supported = mono_hwcap_arm_is_v7;
/*
* On weird devices, the hwcap code may fail to detect
* the ARM version. In that case, we can at least safely
* assume the version the runtime was compiled for.
*/
#ifdef HAVE_ARMV5
v5_supported = TRUE;
#endif
#ifdef HAVE_ARMV6
v6_supported = TRUE;
#endif
#ifdef HAVE_ARMV7
v7_supported = TRUE;
#endif
#if defined(TARGET_IOS)
/* iOS is special-cased here because we don't yet
have a way to properly detect CPU features on it. */
thumb_supported = TRUE;
iphone_abi = TRUE;
#elif defined(TARGET_ANDROID)
thumb_supported = TRUE;
#else
thumb_supported = mono_hwcap_arm_has_thumb;
thumb2_supported = mono_hwcap_arm_has_thumb2;
#endif
/* Format: armv(5|6|7[s])[-thumb[2]] */
cpu_arch = g_getenv ("MONO_CPU_ARCH");
/* Do this here so it overrides any detection. */
if (cpu_arch) {
if (strncmp (cpu_arch, "armv", 4) == 0) {
v5_supported = cpu_arch [4] >= '5';
v6_supported = cpu_arch [4] >= '6';
v7_supported = cpu_arch [4] >= '7';
v7s_supported = strncmp (cpu_arch, "armv7s", 6) == 0;
v7k_supported = strncmp (cpu_arch, "armv7k", 6) == 0;
}
thumb_supported = strstr (cpu_arch, "thumb") != NULL;
thumb2_supported = strstr (cpu_arch, "thumb2") != NULL;
g_free (cpu_arch);
}
}
/*
* Cleanup architecture specific code.
*/
void
mono_arch_cleanup (void)
{
}
/*
* This function returns the optimizations supported on this cpu.
*/
guint32
mono_arch_cpu_optimizations (guint32 *exclude_mask)
{
/* no arm-specific optimizations yet */
*exclude_mask = 0;
return 0;
}
gboolean
mono_arm_is_hard_float (void)
{
return arm_fpu == MONO_ARM_FPU_VFP_HARD;
}
#ifndef DISABLE_JIT
gboolean
mono_arch_opcode_needs_emulation (MonoCompile *cfg, int opcode)
{
if (v7s_supported || v7k_supported) {
switch (opcode) {
case OP_IDIV:
case OP_IREM:
case OP_IDIV_UN:
case OP_IREM_UN:
return FALSE;
default:
break;
}
}
return TRUE;
}
#ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
gboolean
mono_arch_is_soft_float (void)
{
return arm_fpu == MONO_ARM_FPU_NONE;
}
#endif
static gboolean
is_regsize_var (MonoType *t)
{
if (m_type_is_byref (t))
return TRUE;
t = mini_get_underlying_type (t);
switch (t->type) {
case MONO_TYPE_I4:
case MONO_TYPE_U4:
case MONO_TYPE_I:
case MONO_TYPE_U:
case MONO_TYPE_PTR:
case MONO_TYPE_FNPTR:
return TRUE;
case MONO_TYPE_OBJECT:
return TRUE;
case MONO_TYPE_GENERICINST:
if (!mono_type_generic_inst_is_valuetype (t))
return TRUE;
return FALSE;
case MONO_TYPE_VALUETYPE:
return FALSE;
}
return FALSE;
}
GList *
mono_arch_get_allocatable_int_vars (MonoCompile *cfg)
{
GList *vars = NULL;
int i;
for (i = 0; i < cfg->num_varinfo; i++) {
MonoInst *ins = cfg->varinfo [i];
MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
/* unused vars */
if (vmv->range.first_use.abs_pos >= vmv->range.last_use.abs_pos)
continue;
if (ins->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT) || (ins->opcode != OP_LOCAL && ins->opcode != OP_ARG))
continue;
/* we can only allocate 32 bit values */
if (is_regsize_var (ins->inst_vtype)) {
g_assert (MONO_VARINFO (cfg, i)->reg == -1);
g_assert (i == vmv->idx);
vars = mono_varlist_insert_sorted (cfg, vars, vmv, FALSE);
}
}
return vars;
}
GList *
mono_arch_get_global_int_regs (MonoCompile *cfg)
{
GList *regs = NULL;
mono_arch_compute_omit_fp (cfg);
/*
* FIXME: Interface calls might go through a static rgctx trampoline which
* sets V5, but it doesn't save it, so we need to save it ourselves, and
* avoid using it.
*/
if (cfg->flags & MONO_CFG_HAS_CALLS)
cfg->uses_rgctx_reg = TRUE;
if (cfg->arch.omit_fp)
regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_FP));
regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V1));
regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V2));
regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V3));
if (iphone_abi)
/* V4=R7 is used as a frame pointer, but V7=R10 is preserved */
regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V7));
else
regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V4));
if (!(cfg->compile_aot || cfg->uses_rgctx_reg || COMPILE_LLVM (cfg)))
/* V5 is reserved for passing the vtable/rgctx/IMT method */
regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V5));
/*regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V6));*/
/*regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V7));*/
return regs;
}
/*
* mono_arch_regalloc_cost:
*
* Return the cost, in number of memory references, of the action of
* allocating the variable VMV into a register during global register
* allocation.
*/
guint32
mono_arch_regalloc_cost (MonoCompile *cfg, MonoMethodVar *vmv)
{
/* FIXME: */
return 2;
}
#endif /* #ifndef DISABLE_JIT */
void
mono_arch_flush_icache (guint8 *code, gint size)
{
#if defined(MONO_CROSS_COMPILE)
#elif __APPLE__
sys_icache_invalidate (code, size);
#else
__builtin___clear_cache ((char*)code, (char*)code + size);
#endif
}
#define DEBUG(a)
static inline void
add_general (guint *gr, guint *stack_size, ArgInfo *ainfo, gboolean simple)
{
if (simple) {
if (*gr > ARMREG_R3) {
ainfo->size = 4;
ainfo->offset = *stack_size;
ainfo->reg = ARMREG_SP; /* in the caller */
ainfo->storage = RegTypeBase;
*stack_size += 4;
} else {
ainfo->storage = RegTypeGeneral;
ainfo->reg = *gr;
}
} else {
gboolean split;
if (eabi_supported)
split = i8_align == 4;
else
split = TRUE;
ainfo->size = 8;
if (*gr == ARMREG_R3 && split) {
/* first word in r3 and the second on the stack */
ainfo->offset = *stack_size;
ainfo->reg = ARMREG_SP; /* in the caller */
ainfo->storage = RegTypeBaseGen;
*stack_size += 4;
} else if (*gr >= ARMREG_R3) {
if (eabi_supported) {
/* darwin aligns longs to 4 byte only */
if (i8_align == 8) {
*stack_size += 7;
*stack_size &= ~7;
}
}
ainfo->offset = *stack_size;
ainfo->reg = ARMREG_SP; /* in the caller */
ainfo->storage = RegTypeBase;
*stack_size += 8;
} else {
if (eabi_supported) {
if (i8_align == 8 && ((*gr) & 1))
(*gr) ++;
}
ainfo->storage = RegTypeIRegPair;
ainfo->reg = *gr;
}
(*gr) ++;
}
(*gr) ++;
}
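/*
* Illustrative trace (not from the original source), assuming eabi_supported
* and i8_align == 8, for "void baz (int a, gint64 b)":
*
*   a: simple, gr == ARMREG_R0           -> RegTypeGeneral in r0, gr -> r1
*   b: gr is odd, so it is bumped to r2  -> RegTypeIRegPair in r2/r3
*
* With gr already past r3, b would instead go on the stack (RegTypeBase), or
* be split between r3 and the stack (RegTypeBaseGen) when i8_align == 4.
*/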
static inline void
add_float (guint *fpr, guint *stack_size, ArgInfo *ainfo, gboolean is_double, gint *float_spare)
{
/*
* If we're calling a function like this:
*
* void foo(float a, double b, float c)
*
* We pass a in s0 and b in d1. That leaves us
* with s1 being unused. The armhf ABI recognizes
* this and requires register assignment to then
* use that for the next single-precision arg,
* i.e. c in this example. So float_spare either
* tells us which reg to use for the next single-
* precision arg, or it's -1, meaning use *fpr.
*
* Note that even though most of the JIT speaks
* double-precision, fpr represents single-
* precision registers.
*
* See parts 5.5 and 6.1.2 of the AAPCS for how
* this all works.
*/
if (*fpr < ARM_VFP_F16 || (!is_double && *float_spare >= 0)) {
ainfo->storage = RegTypeFP;
if (is_double) {
/*
* If we're passing a double-precision value
* and *fpr is odd (e.g. it's s1, s3, ...)
* we need to use the next even register. So
* we mark the current *fpr as a spare that
* can be used for the next single-precision
* value.
*/
if (*fpr % 2) {
*float_spare = *fpr;
(*fpr)++;
}
/*
* At this point, we have an even register
* so we assign that and move along.
*/
ainfo->reg = *fpr;
*fpr += 2;
} else if (*float_spare >= 0) {
/*
* We're passing a single-precision value
* and it looks like a spare single-
* precision register is available. Let's
* use it.
*/
ainfo->reg = *float_spare;
*float_spare = -1;
} else {
/*
* If we hit this branch, we're passing a
* single-precision value and we can simply
* use the next available register.
*/
ainfo->reg = *fpr;
(*fpr)++;
}
} else {
/*
* We've exhausted available floating point
* regs, so pass the rest on the stack.
*/
if (is_double) {
*stack_size += 7;
*stack_size &= ~7;
}
ainfo->offset = *stack_size;
ainfo->reg = ARMREG_SP;
ainfo->storage = RegTypeBase;
*stack_size += is_double ? 8 : 4;
}
}
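/*
* Illustrative trace (not from the original source) of the rules above for
* "void foo (float a, double b, float c)", starting from fpr == s0:
*
*   a: fpr == s0, no spare  -> a in s0, fpr advances to s1
*   b: fpr is odd (s1)      -> s1 becomes float_spare, b in d1 (s2/s3)
*   c: float_spare == s1    -> c in s1, spare cleared
*/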
static gboolean
is_hfa (MonoType *t, int *out_nfields, int *out_esize)
{
MonoClass *klass;
gpointer iter;
MonoClassField *field;
MonoType *ftype, *prev_ftype = NULL;
int nfields = 0;
klass = mono_class_from_mono_type_internal (t);
iter = NULL;
while ((field = mono_class_get_fields_internal (klass, &iter))) {
if (field->type->attrs & FIELD_ATTRIBUTE_STATIC)
continue;
ftype = mono_field_get_type_internal (field);
ftype = mini_get_underlying_type (ftype);
if (MONO_TYPE_ISSTRUCT (ftype)) {
int nested_nfields, nested_esize;
if (!is_hfa (ftype, &nested_nfields, &nested_esize))
return FALSE;
if (nested_esize == 4)
ftype = m_class_get_byval_arg (mono_defaults.single_class);
else
ftype = m_class_get_byval_arg (mono_defaults.double_class);
if (prev_ftype && prev_ftype->type != ftype->type)
return FALSE;
prev_ftype = ftype;
nfields += nested_nfields;
} else {
if (!(!m_type_is_byref (ftype) && (ftype->type == MONO_TYPE_R4 || ftype->type == MONO_TYPE_R8)))
return FALSE;
if (prev_ftype && prev_ftype->type != ftype->type)
return FALSE;
prev_ftype = ftype;
nfields ++;
}
}
if (nfields == 0 || nfields > 4)
return FALSE;
*out_nfields = nfields;
*out_esize = prev_ftype->type == MONO_TYPE_R4 ? 4 : 8;
return TRUE;
}
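/*
* Illustrative example (not from the original source): under the rules
* above, the struct
*
*   typedef struct { float x, y; struct { float z, w; } zw; } Vec4;
*
* is an HFA with *out_nfields == 4 and *out_esize == 4, so for a hard-float
* pinvoke signature get_call_info () below returns it as RegTypeHFA in
* s0-s3.
*/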
static CallInfo*
get_call_info (MonoMemPool *mp, MonoMethodSignature *sig)
{
guint i, gr, fpr, pstart;
gint float_spare;
int n = sig->hasthis + sig->param_count;
int nfields, esize;
guint32 align;
MonoType *t;
guint32 stack_size = 0;
CallInfo *cinfo;
gboolean is_pinvoke = sig->pinvoke;
gboolean vtype_retaddr = FALSE;
if (mp)
cinfo = mono_mempool_alloc0 (mp, sizeof (CallInfo) + (sizeof (ArgInfo) * n));
else
cinfo = g_malloc0 (sizeof (CallInfo) + (sizeof (ArgInfo) * n));
cinfo->nargs = n;
gr = ARMREG_R0;
fpr = ARM_VFP_F0;
float_spare = -1;
t = mini_get_underlying_type (sig->ret);
switch (t->type) {
case MONO_TYPE_I1:
case MONO_TYPE_U1:
case MONO_TYPE_I2:
case MONO_TYPE_U2:
case MONO_TYPE_I4:
case MONO_TYPE_U4:
case MONO_TYPE_I:
case MONO_TYPE_U:
case MONO_TYPE_PTR:
case MONO_TYPE_FNPTR:
case MONO_TYPE_OBJECT:
cinfo->ret.storage = RegTypeGeneral;
cinfo->ret.reg = ARMREG_R0;
break;
case MONO_TYPE_U8:
case MONO_TYPE_I8:
cinfo->ret.storage = RegTypeIRegPair;
cinfo->ret.reg = ARMREG_R0;
break;
case MONO_TYPE_R4:
case MONO_TYPE_R8:
cinfo->ret.storage = RegTypeFP;
if (t->type == MONO_TYPE_R4)
cinfo->ret.size = 4;
else
cinfo->ret.size = 8;
if (IS_HARD_FLOAT) {
cinfo->ret.reg = ARM_VFP_F0;
} else {
cinfo->ret.reg = ARMREG_R0;
}
break;
case MONO_TYPE_GENERICINST:
if (!mono_type_generic_inst_is_valuetype (t)) {
cinfo->ret.storage = RegTypeGeneral;
cinfo->ret.reg = ARMREG_R0;
break;
}
if (mini_is_gsharedvt_variable_type (t)) {
cinfo->ret.storage = RegTypeStructByAddr;
break;
}
/* Fall through */
case MONO_TYPE_VALUETYPE:
case MONO_TYPE_TYPEDBYREF:
if (IS_HARD_FLOAT && sig->pinvoke && is_hfa (t, &nfields, &esize)) {
cinfo->ret.storage = RegTypeHFA;
cinfo->ret.reg = 0;
cinfo->ret.nregs = nfields;
cinfo->ret.esize = esize;
} else {
if (sig->pinvoke && !sig->marshalling_disabled) {
int native_size = mono_class_native_size (mono_class_from_mono_type_internal (t), &align);
int max_size;
#ifdef TARGET_WATCHOS
max_size = 16;
#else
max_size = 4;
#endif
if (native_size <= max_size) {
cinfo->ret.storage = RegTypeStructByVal;
cinfo->ret.struct_size = native_size;
cinfo->ret.nregs = ALIGN_TO (native_size, 4) / 4;
} else {
cinfo->ret.storage = RegTypeStructByAddr;
}
} else {
cinfo->ret.storage = RegTypeStructByAddr;
}
}
break;
case MONO_TYPE_VAR:
case MONO_TYPE_MVAR:
g_assert (mini_is_gsharedvt_type (t));
cinfo->ret.storage = RegTypeStructByAddr;
break;
case MONO_TYPE_VOID:
break;
default:
g_error ("Can't handle as return value 0x%x", sig->ret->type);
}
vtype_retaddr = cinfo->ret.storage == RegTypeStructByAddr;
pstart = 0;
n = 0;
/*
* To simplify get_this_arg_reg () and LLVM integration, emit the vret arg after
	 * the first argument, allowing 'this' to always be passed in the first arg reg.
* Also do this if the first argument is a reference type, since virtual calls
* are sometimes made using calli without sig->hasthis set, like in the delegate
* invoke wrappers.
*/
if (vtype_retaddr && !is_pinvoke && (sig->hasthis || (sig->param_count > 0 && MONO_TYPE_IS_REFERENCE (mini_get_underlying_type (sig->params [0]))))) {
if (sig->hasthis) {
add_general (&gr, &stack_size, cinfo->args + 0, TRUE);
} else {
add_general (&gr, &stack_size, &cinfo->args [sig->hasthis + 0], TRUE);
pstart = 1;
}
n ++;
cinfo->ret.reg = gr;
gr ++;
cinfo->vret_arg_index = 1;
} else {
/* this */
if (sig->hasthis) {
add_general (&gr, &stack_size, cinfo->args + 0, TRUE);
n ++;
}
if (vtype_retaddr) {
cinfo->ret.reg = gr;
gr ++;
}
}
DEBUG(g_print("params: %d\n", sig->param_count));
for (i = pstart; i < sig->param_count; ++i) {
ArgInfo *ainfo = &cinfo->args [n];
if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
/* Prevent implicit arguments and sig_cookie from
being passed in registers */
gr = ARMREG_R3 + 1;
fpr = ARM_VFP_F16;
/* Emit the signature cookie just before the implicit arguments */
add_general (&gr, &stack_size, &cinfo->sig_cookie, TRUE);
}
DEBUG(g_print("param %d: ", i));
if (m_type_is_byref (sig->params [i])) {
DEBUG(g_print("byref\n"));
add_general (&gr, &stack_size, ainfo, TRUE);
n++;
continue;
}
t = mini_get_underlying_type (sig->params [i]);
switch (t->type) {
case MONO_TYPE_I1:
cinfo->args [n].is_signed = 1;
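			/* Fall through */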
case MONO_TYPE_U1:
cinfo->args [n].size = 1;
add_general (&gr, &stack_size, ainfo, TRUE);
break;
case MONO_TYPE_I2:
cinfo->args [n].is_signed = 1;
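			/* Fall through */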
case MONO_TYPE_U2:
cinfo->args [n].size = 2;
add_general (&gr, &stack_size, ainfo, TRUE);
break;
case MONO_TYPE_I4:
case MONO_TYPE_U4:
cinfo->args [n].size = 4;
add_general (&gr, &stack_size, ainfo, TRUE);
break;
case MONO_TYPE_I:
case MONO_TYPE_U:
case MONO_TYPE_PTR:
case MONO_TYPE_FNPTR:
case MONO_TYPE_OBJECT:
cinfo->args [n].size = sizeof (target_mgreg_t);
add_general (&gr, &stack_size, ainfo, TRUE);
break;
case MONO_TYPE_GENERICINST:
if (!mono_type_generic_inst_is_valuetype (t)) {
cinfo->args [n].size = sizeof (target_mgreg_t);
add_general (&gr, &stack_size, ainfo, TRUE);
break;
}
if (mini_is_gsharedvt_variable_type (t)) {
/* gsharedvt arguments are passed by ref */
g_assert (mini_is_gsharedvt_type (t));
add_general (&gr, &stack_size, ainfo, TRUE);
switch (ainfo->storage) {
case RegTypeGeneral:
ainfo->storage = RegTypeGSharedVtInReg;
break;
case RegTypeBase:
ainfo->storage = RegTypeGSharedVtOnStack;
break;
default:
g_assert_not_reached ();
}
break;
}
/* Fall through */
case MONO_TYPE_TYPEDBYREF:
case MONO_TYPE_VALUETYPE: {
gint size;
int align_size;
int nwords, nfields, esize;
guint32 align;
if (IS_HARD_FLOAT && sig->pinvoke && is_hfa (t, &nfields, &esize)) {
if (fpr + nfields < ARM_VFP_F16) {
ainfo->storage = RegTypeHFA;
ainfo->reg = fpr;
ainfo->nregs = nfields;
ainfo->esize = esize;
if (esize == 4)
fpr += nfields;
else
fpr += nfields * 2;
break;
} else {
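				/* Per the AAPCS VFP variant, once an HFA doesn't fit
				 * in the remaining VFP registers they are all marked
				 * unavailable, so later FP arguments go on the stack. */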
fpr = ARM_VFP_F16;
}
}
if (t->type == MONO_TYPE_TYPEDBYREF) {
size = MONO_ABI_SIZEOF (MonoTypedRef);
align = sizeof (target_mgreg_t);
} else {
MonoClass *klass = mono_class_from_mono_type_internal (sig->params [i]);
if (sig->pinvoke && !sig->marshalling_disabled)
size = mono_class_native_size (klass, &align);
else
size = mini_type_stack_size_full (t, &align, FALSE);
}
DEBUG(g_print ("load %d bytes struct\n", size));
#ifdef TARGET_WATCHOS
	/* WatchOS passes large structures by ref */
/* We only do this for pinvoke to make gsharedvt/dyncall simpler */
if (sig->pinvoke && size > 16) {
add_general (&gr, &stack_size, ainfo, TRUE);
switch (ainfo->storage) {
case RegTypeGeneral:
ainfo->storage = RegTypeStructByAddr;
break;
case RegTypeBase:
ainfo->storage = RegTypeStructByAddrOnStack;
break;
default:
g_assert_not_reached ();
break;
}
break;
}
#endif
align_size = size;
nwords = 0;
align_size += (sizeof (target_mgreg_t) - 1);
align_size &= ~(sizeof (target_mgreg_t) - 1);
nwords = (align_size + sizeof (target_mgreg_t) -1 ) / sizeof (target_mgreg_t);
ainfo->storage = RegTypeStructByVal;
ainfo->struct_size = size;
ainfo->align = align;
if (eabi_supported) {
if (align >= 8 && (gr & 1))
gr ++;
}
if (gr > ARMREG_R3) {
ainfo->size = 0;
ainfo->vtsize = nwords;
} else {
int rest = ARMREG_R3 - gr + 1;
int n_in_regs = rest >= nwords? nwords: rest;
ainfo->size = n_in_regs;
ainfo->vtsize = nwords - n_in_regs;
ainfo->reg = gr;
gr += n_in_regs;
nwords -= n_in_regs;
}
stack_size = ALIGN_TO (stack_size, align);
ainfo->offset = stack_size;
/*g_print ("offset for arg %d at %d\n", n, stack_size);*/
stack_size += nwords * sizeof (target_mgreg_t);
break;
}
case MONO_TYPE_U8:
case MONO_TYPE_I8:
ainfo->size = 8;
add_general (&gr, &stack_size, ainfo, FALSE);
break;
case MONO_TYPE_R4:
ainfo->size = 4;
if (IS_HARD_FLOAT)
add_float (&fpr, &stack_size, ainfo, FALSE, &float_spare);
else
add_general (&gr, &stack_size, ainfo, TRUE);
break;
case MONO_TYPE_R8:
ainfo->size = 8;
if (IS_HARD_FLOAT)
add_float (&fpr, &stack_size, ainfo, TRUE, &float_spare);
else
add_general (&gr, &stack_size, ainfo, FALSE);
break;
case MONO_TYPE_VAR:
case MONO_TYPE_MVAR:
/* gsharedvt arguments are passed by ref */
g_assert (mini_is_gsharedvt_type (t));
add_general (&gr, &stack_size, ainfo, TRUE);
switch (ainfo->storage) {
case RegTypeGeneral:
ainfo->storage = RegTypeGSharedVtInReg;
break;
case RegTypeBase:
ainfo->storage = RegTypeGSharedVtOnStack;
break;
default:
g_assert_not_reached ();
}
break;
default:
g_error ("Can't handle 0x%x", sig->params [i]->type);
}
n ++;
}
/* Handle the case where there are no implicit arguments */
if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
/* Prevent implicit arguments and sig_cookie from
being passed in registers */
gr = ARMREG_R3 + 1;
fpr = ARM_VFP_F16;
/* Emit the signature cookie just before the implicit arguments */
add_general (&gr, &stack_size, &cinfo->sig_cookie, TRUE);
}
DEBUG (g_print (" stack size: %d (%d)\n", (stack_size + 15) & ~15, stack_size));
stack_size = ALIGN_TO (stack_size, MONO_ARCH_FRAME_ALIGNMENT);
cinfo->stack_usage = stack_size;
return cinfo;
}
/*
 * We need to create a temporary value if the argument is not stored in
 * a linear memory range in the ccontext (this normally happens for
 * value types that are passed partly in registers and partly on the stack).
 */
static int
arg_need_temp (ArgInfo *ainfo)
{
if (ainfo->storage == RegTypeStructByVal && ainfo->vtsize)
return ainfo->struct_size;
return 0;
}
static gpointer
arg_get_storage (CallContext *ccontext, ArgInfo *ainfo)
{
switch (ainfo->storage) {
case RegTypeIRegPair:
case RegTypeGeneral:
case RegTypeStructByVal:
return &ccontext->gregs [ainfo->reg];
case RegTypeHFA:
case RegTypeFP:
if (IS_HARD_FLOAT)
return &ccontext->fregs [ainfo->reg];
else
return &ccontext->gregs [ainfo->reg];
case RegTypeBase:
return ccontext->stack + ainfo->offset;
default:
g_error ("Arg storage type not yet supported");
}
}
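/*
 * arg_get_val/arg_set_val:
 *
 * Copy an argument which is split between registers and the stack
 * from/to the linear temporary buffer provided by the caller.
 */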
static void
arg_get_val (CallContext *ccontext, ArgInfo *ainfo, gpointer dest)
{
int reg_size = ainfo->size * sizeof (host_mgreg_t);
g_assert (arg_need_temp (ainfo));
memcpy (dest, &ccontext->gregs [ainfo->reg], reg_size);
memcpy ((host_mgreg_t*)dest + ainfo->size, ccontext->stack + ainfo->offset, ainfo->struct_size - reg_size);
}
static void
arg_set_val (CallContext *ccontext, ArgInfo *ainfo, gpointer src)
{
int reg_size = ainfo->size * sizeof (host_mgreg_t);
g_assert (arg_need_temp (ainfo));
memcpy (&ccontext->gregs [ainfo->reg], src, reg_size);
memcpy (ccontext->stack + ainfo->offset, (host_mgreg_t*)src + ainfo->size, ainfo->struct_size - reg_size);
}
/* Set arguments in the ccontext (for i2n entry) */
void
mono_arch_set_native_call_context_args (CallContext *ccontext, gpointer frame, MonoMethodSignature *sig)
{
const MonoEECallbacks *interp_cb = mini_get_interp_callbacks ();
CallInfo *cinfo = get_call_info (NULL, sig);
gpointer storage;
ArgInfo *ainfo;
memset (ccontext, 0, sizeof (CallContext));
ccontext->stack_size = ALIGN_TO (cinfo->stack_usage, MONO_ARCH_FRAME_ALIGNMENT);
if (ccontext->stack_size)
ccontext->stack = (guint8*)g_calloc (1, ccontext->stack_size);
if (sig->ret->type != MONO_TYPE_VOID) {
ainfo = &cinfo->ret;
if (ainfo->storage == RegTypeStructByAddr) {
storage = interp_cb->frame_arg_to_storage ((MonoInterpFrameHandle)frame, sig, -1);
ccontext->gregs [cinfo->ret.reg] = (host_mgreg_t)(gsize)storage;
}
}
g_assert (!sig->hasthis);
for (int i = 0; i < sig->param_count; i++) {
ainfo = &cinfo->args [i];
int temp_size = arg_need_temp (ainfo);
if (temp_size)
storage = alloca (temp_size); // FIXME? alloca in a loop
else
storage = arg_get_storage (ccontext, ainfo);
interp_cb->frame_arg_to_data ((MonoInterpFrameHandle)frame, sig, i, storage);
if (temp_size)
arg_set_val (ccontext, ainfo, storage);
}
g_free (cinfo);
}
/* Set return value in the ccontext (for n2i return) */
void
mono_arch_set_native_call_context_ret (CallContext *ccontext, gpointer frame, MonoMethodSignature *sig, gpointer retp)
{
const MonoEECallbacks *interp_cb;
CallInfo *cinfo;
gpointer storage;
ArgInfo *ainfo;
if (sig->ret->type == MONO_TYPE_VOID)
return;
interp_cb = mini_get_interp_callbacks ();
cinfo = get_call_info (NULL, sig);
ainfo = &cinfo->ret;
if (retp) {
g_assert (ainfo->storage == RegTypeStructByAddr);
interp_cb->frame_arg_to_data ((MonoInterpFrameHandle)frame, sig, -1, retp);
} else {
g_assert (ainfo->storage != RegTypeStructByAddr);
g_assert (!arg_need_temp (ainfo));
storage = arg_get_storage (ccontext, ainfo);
memset (ccontext, 0, sizeof (CallContext)); // FIXME
interp_cb->frame_arg_to_data ((MonoInterpFrameHandle)frame, sig, -1, storage);
}
g_free (cinfo);
}
/* Gets the arguments from ccontext (for n2i entry) */
gpointer
mono_arch_get_native_call_context_args (CallContext *ccontext, gpointer frame, MonoMethodSignature *sig)
{
const MonoEECallbacks *interp_cb = mini_get_interp_callbacks ();
CallInfo *cinfo = get_call_info (NULL, sig);
gpointer storage;
ArgInfo *ainfo;
for (int i = 0; i < sig->param_count + sig->hasthis; i++) {
ainfo = &cinfo->args [i];
int temp_size = arg_need_temp (ainfo);
if (temp_size) {
storage = alloca (temp_size); // FIXME? alloca in a loop
arg_get_val (ccontext, ainfo, storage);
} else {
storage = arg_get_storage (ccontext, ainfo);
}
interp_cb->data_to_frame_arg ((MonoInterpFrameHandle)frame, sig, i, storage);
}
storage = NULL;
if (sig->ret->type != MONO_TYPE_VOID) {
ainfo = &cinfo->ret;
if (ainfo->storage == RegTypeStructByAddr)
storage = (gpointer)(gsize)ccontext->gregs [cinfo->ret.reg];
}
g_free (cinfo);
return storage;
}
/* Gets the return value from ccontext (for i2n exit) */
void
mono_arch_get_native_call_context_ret (CallContext *ccontext, gpointer frame, MonoMethodSignature *sig)
{
const MonoEECallbacks *interp_cb;
CallInfo *cinfo;
ArgInfo *ainfo;
gpointer storage;
if (sig->ret->type == MONO_TYPE_VOID)
return;
interp_cb = mini_get_interp_callbacks ();
cinfo = get_call_info (NULL, sig);
ainfo = &cinfo->ret;
if (ainfo->storage != RegTypeStructByAddr) {
g_assert (!arg_need_temp (ainfo));
storage = arg_get_storage (ccontext, ainfo);
interp_cb->data_to_frame_arg ((MonoInterpFrameHandle)frame, sig, -1, storage);
}
g_free (cinfo);
}
#ifndef DISABLE_JIT
gboolean
mono_arch_tailcall_supported (MonoCompile *cfg, MonoMethodSignature *caller_sig, MonoMethodSignature *callee_sig, gboolean virtual_)
{
g_assert (caller_sig);
g_assert (callee_sig);
CallInfo *caller_info = get_call_info (NULL, caller_sig);
CallInfo *callee_info = get_call_info (NULL, callee_sig);
/*
* Tailcalls with more callee stack usage than the caller cannot be supported, since
* the extra stack space would be left on the stack after the tailcall.
*/
gboolean res = IS_SUPPORTED_TAILCALL (callee_info->stack_usage <= caller_info->stack_usage)
&& IS_SUPPORTED_TAILCALL (caller_info->ret.storage == callee_info->ret.storage);
// FIXME The limit here is that moving the parameters requires addressing the parameters
// with 12bit (4K) immediate offsets. - 4 for TAILCALL_REG/MEMBASE
res &= IS_SUPPORTED_TAILCALL (callee_info->stack_usage < (4096 - 4));
res &= IS_SUPPORTED_TAILCALL (caller_info->stack_usage < (4096 - 4));
g_free (caller_info);
g_free (callee_info);
return res;
}
static gboolean
debug_omit_fp (void)
{
#if 0
return mono_debug_count ();
#else
return TRUE;
#endif
}
/**
* mono_arch_compute_omit_fp:
* Determine whether the frame pointer can be eliminated.
*/
static void
mono_arch_compute_omit_fp (MonoCompile *cfg)
{
MonoMethodSignature *sig;
MonoMethodHeader *header;
int i, locals_size;
CallInfo *cinfo;
if (cfg->arch.omit_fp_computed)
return;
header = cfg->header;
sig = mono_method_signature_internal (cfg->method);
if (!cfg->arch.cinfo)
cfg->arch.cinfo = get_call_info (cfg->mempool, sig);
cinfo = cfg->arch.cinfo;
/*
* FIXME: Remove some of the restrictions.
*/
cfg->arch.omit_fp = TRUE;
cfg->arch.omit_fp_computed = TRUE;
if (cfg->disable_omit_fp)
cfg->arch.omit_fp = FALSE;
if (!debug_omit_fp ())
cfg->arch.omit_fp = FALSE;
/*
if (cfg->method->save_lmf)
cfg->arch.omit_fp = FALSE;
*/
if (cfg->flags & MONO_CFG_HAS_ALLOCA)
cfg->arch.omit_fp = FALSE;
if (header->num_clauses)
cfg->arch.omit_fp = FALSE;
if (cfg->param_area)
cfg->arch.omit_fp = FALSE;
if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG))
cfg->arch.omit_fp = FALSE;
if ((mono_jit_trace_calls != NULL && mono_trace_eval (cfg->method)))
cfg->arch.omit_fp = FALSE;
for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
ArgInfo *ainfo = &cinfo->args [i];
if (ainfo->storage == RegTypeBase || ainfo->storage == RegTypeBaseGen || ainfo->storage == RegTypeStructByVal) {
/*
* The stack offset can only be determined when the frame
* size is known.
*/
cfg->arch.omit_fp = FALSE;
}
}
locals_size = 0;
for (i = cfg->locals_start; i < cfg->num_varinfo; i++) {
MonoInst *ins = cfg->varinfo [i];
int ialign;
locals_size += mono_type_size (ins->inst_vtype, &ialign);
}
}
/*
* Set var information according to the calling convention. arm version.
* The locals var stuff should most likely be split in another method.
*/
void
mono_arch_allocate_vars (MonoCompile *cfg)
{
MonoMethodSignature *sig;
MonoMethodHeader *header;
MonoInst *ins;
MonoType *sig_ret;
int i, offset, size, align, curinst;
CallInfo *cinfo;
ArgInfo *ainfo;
guint32 ualign;
sig = mono_method_signature_internal (cfg->method);
if (!cfg->arch.cinfo)
cfg->arch.cinfo = get_call_info (cfg->mempool, sig);
cinfo = cfg->arch.cinfo;
sig_ret = mini_get_underlying_type (sig->ret);
mono_arch_compute_omit_fp (cfg);
if (cfg->arch.omit_fp)
cfg->frame_reg = ARMREG_SP;
else
cfg->frame_reg = ARMREG_FP;
cfg->flags |= MONO_CFG_HAS_SPILLUP;
/* allow room for the vararg method args: void* and long/double */
if (mono_jit_trace_calls != NULL && mono_trace_eval (cfg->method))
cfg->param_area = MAX (cfg->param_area, sizeof (target_mgreg_t)*8);
header = cfg->header;
/* See mono_arch_get_global_int_regs () */
if (cfg->flags & MONO_CFG_HAS_CALLS)
cfg->uses_rgctx_reg = TRUE;
if (cfg->frame_reg != ARMREG_SP)
cfg->used_int_regs |= 1 << cfg->frame_reg;
if (cfg->compile_aot || cfg->uses_rgctx_reg || COMPILE_LLVM (cfg))
/* V5 is reserved for passing the vtable/rgctx/IMT method */
cfg->used_int_regs |= (1 << MONO_ARCH_IMT_REG);
offset = 0;
curinst = 0;
if (!MONO_TYPE_ISSTRUCT (sig_ret) && cinfo->ret.storage != RegTypeStructByAddr) {
if (sig_ret->type != MONO_TYPE_VOID) {
cfg->ret->opcode = OP_REGVAR;
cfg->ret->inst_c0 = ARMREG_R0;
}
}
/* local vars are at a positive offset from the stack pointer */
/*
* also note that if the function uses alloca, we use FP
* to point at the local variables.
*/
offset = 0; /* linkage area */
/* align the offset to 16 bytes: not sure this is needed here */
//offset += 8 - 1;
//offset &= ~(8 - 1);
/* add parameter area size for called functions */
offset += cfg->param_area;
offset += 8 - 1;
offset &= ~(8 - 1);
if (cfg->flags & MONO_CFG_HAS_FPOUT)
offset += 8;
/* allow room to save the return value */
if (mono_jit_trace_calls != NULL && mono_trace_eval (cfg->method))
offset += 8;
switch (cinfo->ret.storage) {
case RegTypeStructByVal:
case RegTypeHFA:
/* Allocate a local to hold the result, the epilog will copy it to the correct place */
offset = ALIGN_TO (offset, 8);
cfg->ret->opcode = OP_REGOFFSET;
cfg->ret->inst_basereg = cfg->frame_reg;
cfg->ret->inst_offset = offset;
if (cinfo->ret.storage == RegTypeStructByVal)
offset += cinfo->ret.nregs * sizeof (target_mgreg_t);
else
offset += 32;
break;
case RegTypeStructByAddr:
ins = cfg->vret_addr;
offset += sizeof (target_mgreg_t) - 1;
offset &= ~(sizeof (target_mgreg_t) - 1);
ins->inst_offset = offset;
ins->opcode = OP_REGOFFSET;
ins->inst_basereg = cfg->frame_reg;
if (G_UNLIKELY (cfg->verbose_level > 1)) {
g_print ("vret_addr =");
mono_print_ins (cfg->vret_addr);
}
offset += sizeof (target_mgreg_t);
break;
default:
break;
}
/* Allocate these first so they have a small offset, OP_SEQ_POINT depends on this */
if (cfg->arch.seq_point_info_var) {
MonoInst *ins;
ins = cfg->arch.seq_point_info_var;
size = 4;
align = 4;
offset += align - 1;
offset &= ~(align - 1);
ins->opcode = OP_REGOFFSET;
ins->inst_basereg = cfg->frame_reg;
ins->inst_offset = offset;
offset += size;
}
if (cfg->arch.ss_trigger_page_var) {
MonoInst *ins;
ins = cfg->arch.ss_trigger_page_var;
size = 4;
align = 4;
offset += align - 1;
offset &= ~(align - 1);
ins->opcode = OP_REGOFFSET;
ins->inst_basereg = cfg->frame_reg;
ins->inst_offset = offset;
offset += size;
}
if (cfg->arch.seq_point_ss_method_var) {
MonoInst *ins;
ins = cfg->arch.seq_point_ss_method_var;
size = 4;
align = 4;
offset += align - 1;
offset &= ~(align - 1);
ins->opcode = OP_REGOFFSET;
ins->inst_basereg = cfg->frame_reg;
ins->inst_offset = offset;
offset += size;
}
if (cfg->arch.seq_point_bp_method_var) {
MonoInst *ins;
ins = cfg->arch.seq_point_bp_method_var;
size = 4;
align = 4;
offset += align - 1;
offset &= ~(align - 1);
ins->opcode = OP_REGOFFSET;
ins->inst_basereg = cfg->frame_reg;
ins->inst_offset = offset;
offset += size;
}
if (cfg->has_atomic_exchange_i4 || cfg->has_atomic_cas_i4 || cfg->has_atomic_add_i4) {
/* Allocate a temporary used by the atomic ops */
size = 4;
align = 4;
/* Allocate a local slot to hold the sig cookie address */
offset += align - 1;
offset &= ~(align - 1);
cfg->arch.atomic_tmp_offset = offset;
offset += size;
} else {
cfg->arch.atomic_tmp_offset = -1;
}
cfg->locals_min_stack_offset = offset;
curinst = cfg->locals_start;
for (i = curinst; i < cfg->num_varinfo; ++i) {
MonoType *t;
ins = cfg->varinfo [i];
if ((ins->flags & MONO_INST_IS_DEAD) || ins->opcode == OP_REGVAR || ins->opcode == OP_REGOFFSET)
continue;
t = ins->inst_vtype;
if (cfg->gsharedvt && mini_is_gsharedvt_variable_type (t))
continue;
		/* ins->backend.is_pinvoke indicates native sized value types; this is used by the
		 * pinvoke wrappers when they call functions returning structures */
if (ins->backend.is_pinvoke && MONO_TYPE_ISSTRUCT (t) && t->type != MONO_TYPE_TYPEDBYREF) {
size = mono_class_native_size (mono_class_from_mono_type_internal (t), &ualign);
align = ualign;
}
else
size = mono_type_size (t, &align);
/* FIXME: if a structure is misaligned, our memcpy doesn't work,
* since it loads/stores misaligned words, which don't do the right thing.
*/
if (align < 4 && size >= 4)
align = 4;
if (ALIGN_TO (offset, align) > ALIGN_TO (offset, 4))
mini_gc_set_slot_type_from_fp (cfg, ALIGN_TO (offset, 4), SLOT_NOREF);
offset += align - 1;
offset &= ~(align - 1);
ins->opcode = OP_REGOFFSET;
ins->inst_offset = offset;
ins->inst_basereg = cfg->frame_reg;
offset += size;
//g_print ("allocating local %d to %d\n", i, inst->inst_offset);
}
cfg->locals_max_stack_offset = offset;
curinst = 0;
if (sig->hasthis) {
ins = cfg->args [curinst];
if (ins->opcode != OP_REGVAR) {
ins->opcode = OP_REGOFFSET;
ins->inst_basereg = cfg->frame_reg;
offset += sizeof (target_mgreg_t) - 1;
offset &= ~(sizeof (target_mgreg_t) - 1);
ins->inst_offset = offset;
offset += sizeof (target_mgreg_t);
}
curinst++;
}
if (sig->call_convention == MONO_CALL_VARARG) {
size = 4;
align = 4;
/* Allocate a local slot to hold the sig cookie address */
offset += align - 1;
offset &= ~(align - 1);
cfg->sig_cookie = offset;
offset += size;
}
for (i = 0; i < sig->param_count; ++i) {
ainfo = cinfo->args + i;
ins = cfg->args [curinst];
switch (ainfo->storage) {
case RegTypeHFA:
offset = ALIGN_TO (offset, 8);
ins->opcode = OP_REGOFFSET;
ins->inst_basereg = cfg->frame_reg;
/* These arguments are saved to the stack in the prolog */
ins->inst_offset = offset;
if (cfg->verbose_level >= 2)
g_print ("arg %d allocated to %s+0x%0x.\n", i, mono_arch_regname (ins->inst_basereg), (int)ins->inst_offset);
// FIXME:
offset += 32;
break;
default:
break;
}
if (ins->opcode != OP_REGVAR) {
ins->opcode = OP_REGOFFSET;
ins->inst_basereg = cfg->frame_reg;
size = mini_type_stack_size_full (sig->params [i], &ualign, sig->pinvoke && !sig->marshalling_disabled);
align = ualign;
/* FIXME: if a structure is misaligned, our memcpy doesn't work,
* since it loads/stores misaligned words, which don't do the right thing.
*/
if (align < 4 && size >= 4)
align = 4;
		/* The prolog code stores whole words when saving vtypes received in registers */
if (MONO_TYPE_ISSTRUCT (sig->params [i]))
align = 4;
if (ALIGN_TO (offset, align) > ALIGN_TO (offset, 4))
mini_gc_set_slot_type_from_fp (cfg, ALIGN_TO (offset, 4), SLOT_NOREF);
offset += align - 1;
offset &= ~(align - 1);
ins->inst_offset = offset;
offset += size;
}
curinst++;
}
/* align the offset to 8 bytes */
if (ALIGN_TO (offset, 8) > ALIGN_TO (offset, 4))
mini_gc_set_slot_type_from_fp (cfg, ALIGN_TO (offset, 4), SLOT_NOREF);
offset += 8 - 1;
offset &= ~(8 - 1);
/* change sign? */
cfg->stack_offset = offset;
}
void
mono_arch_create_vars (MonoCompile *cfg)
{
MonoMethodSignature *sig;
CallInfo *cinfo;
int i;
sig = mono_method_signature_internal (cfg->method);
if (!cfg->arch.cinfo)
cfg->arch.cinfo = get_call_info (cfg->mempool, sig);
cinfo = cfg->arch.cinfo;
if (IS_HARD_FLOAT) {
for (i = 0; i < 2; i++) {
MonoInst *inst = mono_compile_create_var (cfg, m_class_get_byval_arg (mono_defaults.double_class), OP_LOCAL);
inst->flags |= MONO_INST_VOLATILE;
cfg->arch.vfp_scratch_slots [i] = inst;
}
}
if (cinfo->ret.storage == RegTypeStructByVal)
cfg->ret_var_is_local = TRUE;
if (cinfo->ret.storage == RegTypeStructByAddr) {
cfg->vret_addr = mono_compile_create_var (cfg, mono_get_int_type (), OP_ARG);
if (G_UNLIKELY (cfg->verbose_level > 1)) {
g_print ("vret_addr = ");
mono_print_ins (cfg->vret_addr);
}
}
if (cfg->gen_sdb_seq_points) {
if (cfg->compile_aot) {
MonoInst *ins = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL);
ins->flags |= MONO_INST_VOLATILE;
cfg->arch.seq_point_info_var = ins;
if (!cfg->soft_breakpoints) {
/* Allocate a separate variable for this to save 1 load per seq point */
ins = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL);
ins->flags |= MONO_INST_VOLATILE;
cfg->arch.ss_trigger_page_var = ins;
}
}
if (cfg->soft_breakpoints) {
MonoInst *ins;
ins = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL);
ins->flags |= MONO_INST_VOLATILE;
cfg->arch.seq_point_ss_method_var = ins;
ins = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL);
ins->flags |= MONO_INST_VOLATILE;
cfg->arch.seq_point_bp_method_var = ins;
}
}
}
static void
emit_sig_cookie (MonoCompile *cfg, MonoCallInst *call, CallInfo *cinfo)
{
MonoMethodSignature *tmp_sig;
int sig_reg;
if (MONO_IS_TAILCALL_OPCODE (call))
NOT_IMPLEMENTED;
g_assert (cinfo->sig_cookie.storage == RegTypeBase);
/*
* mono_ArgIterator_Setup assumes the signature cookie is
* passed first and all the arguments which were before it are
* passed on the stack after the signature. So compensate by
* passing a different signature.
*/
tmp_sig = mono_metadata_signature_dup (call->signature);
tmp_sig->param_count -= call->signature->sentinelpos;
tmp_sig->sentinelpos = 0;
memcpy (tmp_sig->params, call->signature->params + call->signature->sentinelpos, tmp_sig->param_count * sizeof (MonoType*));
sig_reg = mono_alloc_ireg (cfg);
MONO_EMIT_NEW_SIGNATURECONST (cfg, sig_reg, tmp_sig);
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, cinfo->sig_cookie.offset, sig_reg);
}
#ifdef ENABLE_LLVM
LLVMCallInfo*
mono_arch_get_llvm_call_info (MonoCompile *cfg, MonoMethodSignature *sig)
{
int i, n;
CallInfo *cinfo;
ArgInfo *ainfo;
LLVMCallInfo *linfo;
n = sig->param_count + sig->hasthis;
cinfo = get_call_info (cfg->mempool, sig);
linfo = mono_mempool_alloc0 (cfg->mempool, sizeof (LLVMCallInfo) + (sizeof (LLVMArgInfo) * n));
/*
* LLVM always uses the native ABI while we use our own ABI, the
* only difference is the handling of vtypes:
* - we only pass/receive them in registers in some cases, and only
* in 1 or 2 integer registers.
*/
switch (cinfo->ret.storage) {
case RegTypeNone:
linfo->ret.storage = LLVMArgNone;
break;
case RegTypeGeneral:
case RegTypeFP:
case RegTypeIRegPair:
linfo->ret.storage = LLVMArgNormal;
break;
case RegTypeStructByAddr:
if (sig->pinvoke) {
linfo->ret.storage = LLVMArgVtypeByRef;
} else {
/* Vtype returned using a hidden argument */
linfo->ret.storage = LLVMArgVtypeRetAddr;
linfo->vret_arg_index = cinfo->vret_arg_index;
}
break;
#if TARGET_WATCHOS
case RegTypeStructByVal:
/* LLVM models this by returning an int array */
linfo->ret.storage = LLVMArgAsIArgs;
linfo->ret.nslots = cinfo->ret.nregs;
break;
#endif
case RegTypeHFA:
linfo->ret.storage = LLVMArgFpStruct;
linfo->ret.nslots = cinfo->ret.nregs;
linfo->ret.esize = cinfo->ret.esize;
break;
default:
cfg->exception_message = g_strdup_printf ("unknown ret conv (%d)", cinfo->ret.storage);
cfg->disable_llvm = TRUE;
return linfo;
}
for (i = 0; i < n; ++i) {
LLVMArgInfo *lainfo = &linfo->args [i];
ainfo = cinfo->args + i;
lainfo->storage = LLVMArgNone;
switch (ainfo->storage) {
case RegTypeGeneral:
case RegTypeIRegPair:
case RegTypeBase:
case RegTypeBaseGen:
case RegTypeFP:
lainfo->storage = LLVMArgNormal;
break;
case RegTypeStructByVal: {
lainfo->storage = LLVMArgAsIArgs;
int slotsize = eabi_supported && ainfo->align == 8 ? 8 : 4;
lainfo->nslots = ALIGN_TO (ainfo->struct_size, slotsize) / slotsize;
lainfo->esize = slotsize;
break;
}
case RegTypeStructByAddr:
case RegTypeStructByAddrOnStack:
lainfo->storage = LLVMArgVtypeByRef;
break;
case RegTypeHFA: {
int j;
lainfo->storage = LLVMArgAsFpArgs;
lainfo->nslots = ainfo->nregs;
lainfo->esize = ainfo->esize;
for (j = 0; j < ainfo->nregs; ++j)
lainfo->pair_storage [j] = LLVMArgInFPReg;
break;
}
default:
cfg->exception_message = g_strdup_printf ("ainfo->storage (%d)", ainfo->storage);
cfg->disable_llvm = TRUE;
break;
}
}
return linfo;
}
#endif
void
mono_arch_emit_call (MonoCompile *cfg, MonoCallInst *call)
{
MonoInst *in, *ins;
MonoMethodSignature *sig;
int i, n;
CallInfo *cinfo;
sig = call->signature;
n = sig->param_count + sig->hasthis;
cinfo = get_call_info (cfg->mempool, sig);
switch (cinfo->ret.storage) {
case RegTypeStructByVal:
case RegTypeHFA:
if (cinfo->ret.storage == RegTypeStructByVal && cinfo->ret.nregs == 1) {
/* The JIT will transform this into a normal call */
call->vret_in_reg = TRUE;
break;
}
if (MONO_IS_TAILCALL_OPCODE (call))
break;
		/*
		 * The vtype is returned in registers, so save the return area address in a local
		 * and store the vtype into the location it points to after the call, in
		 * emit_move_return_value ().
		 */
if (!cfg->arch.vret_addr_loc) {
cfg->arch.vret_addr_loc = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL);
/* Prevent it from being register allocated or optimized away */
cfg->arch.vret_addr_loc->flags |= MONO_INST_VOLATILE;
}
MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->arch.vret_addr_loc->dreg, call->vret_var->dreg);
break;
case RegTypeStructByAddr: {
MonoInst *vtarg;
MONO_INST_NEW (cfg, vtarg, OP_MOVE);
vtarg->sreg1 = call->vret_var->dreg;
vtarg->dreg = mono_alloc_preg (cfg);
MONO_ADD_INS (cfg->cbb, vtarg);
mono_call_inst_add_outarg_reg (cfg, call, vtarg->dreg, cinfo->ret.reg, FALSE);
break;
}
default:
break;
}
for (i = 0; i < n; ++i) {
ArgInfo *ainfo = cinfo->args + i;
MonoType *t;
if (i >= sig->hasthis)
t = sig->params [i - sig->hasthis];
else
t = mono_get_int_type ();
t = mini_get_underlying_type (t);
if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
/* Emit the signature cookie just before the implicit arguments */
emit_sig_cookie (cfg, call, cinfo);
}
in = call->args [i];
switch (ainfo->storage) {
case RegTypeGeneral:
case RegTypeIRegPair:
if (!m_type_is_byref (t) && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
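				/* A 64 bit value is passed in a pair of consecutive integer registers */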
MONO_INST_NEW (cfg, ins, OP_MOVE);
ins->dreg = mono_alloc_ireg (cfg);
ins->sreg1 = MONO_LVREG_LS (in->dreg);
MONO_ADD_INS (cfg->cbb, ins);
mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
MONO_INST_NEW (cfg, ins, OP_MOVE);
ins->dreg = mono_alloc_ireg (cfg);
ins->sreg1 = MONO_LVREG_MS (in->dreg);
MONO_ADD_INS (cfg->cbb, ins);
mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg + 1, FALSE);
} else if (!m_type_is_byref (t) && ((t->type == MONO_TYPE_R8) || (t->type == MONO_TYPE_R4))) {
if (ainfo->size == 4) {
if (IS_SOFT_FLOAT) {
/* mono_emit_call_args () have already done the r8->r4 conversion */
/* The converted value is in an int vreg */
MONO_INST_NEW (cfg, ins, OP_MOVE);
ins->dreg = mono_alloc_ireg (cfg);
ins->sreg1 = in->dreg;
MONO_ADD_INS (cfg->cbb, ins);
mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
} else {
int creg;
cfg->param_area = MAX (cfg->param_area, 8);
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG, ARMREG_SP, (cfg->param_area - 8), in->dreg);
creg = mono_alloc_ireg (cfg);
MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8));
mono_call_inst_add_outarg_reg (cfg, call, creg, ainfo->reg, FALSE);
}
} else {
if (IS_SOFT_FLOAT) {
MONO_INST_NEW (cfg, ins, OP_FGETLOW32);
ins->dreg = mono_alloc_ireg (cfg);
ins->sreg1 = in->dreg;
MONO_ADD_INS (cfg->cbb, ins);
mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
MONO_INST_NEW (cfg, ins, OP_FGETHIGH32);
ins->dreg = mono_alloc_ireg (cfg);
ins->sreg1 = in->dreg;
MONO_ADD_INS (cfg->cbb, ins);
mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg + 1, FALSE);
} else {
int creg;
cfg->param_area = MAX (cfg->param_area, 8);
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ARMREG_SP, (cfg->param_area - 8), in->dreg);
creg = mono_alloc_ireg (cfg);
MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8));
mono_call_inst_add_outarg_reg (cfg, call, creg, ainfo->reg, FALSE);
creg = mono_alloc_ireg (cfg);
MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8 + 4));
mono_call_inst_add_outarg_reg (cfg, call, creg, ainfo->reg + 1, FALSE);
}
}
cfg->flags |= MONO_CFG_HAS_FPOUT;
} else {
MONO_INST_NEW (cfg, ins, OP_MOVE);
ins->dreg = mono_alloc_ireg (cfg);
ins->sreg1 = in->dreg;
MONO_ADD_INS (cfg->cbb, ins);
mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
}
break;
case RegTypeStructByVal:
case RegTypeGSharedVtInReg:
case RegTypeGSharedVtOnStack:
case RegTypeHFA:
case RegTypeStructByAddr:
case RegTypeStructByAddrOnStack:
			MONO_INST_NEW (cfg, ins, OP_OUTARG_VT);
ins->sreg1 = in->dreg;
ins->klass = in->klass;
ins->inst_p0 = call;
ins->inst_p1 = mono_mempool_alloc (cfg->mempool, sizeof (ArgInfo));
memcpy (ins->inst_p1, ainfo, sizeof (ArgInfo));
mono_call_inst_add_outarg_vt (cfg, call, ins);
MONO_ADD_INS (cfg->cbb, ins);
break;
case RegTypeBase:
if (!m_type_is_byref (t) && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
} else if (!m_type_is_byref (t) && ((t->type == MONO_TYPE_R4) || (t->type == MONO_TYPE_R8))) {
if (t->type == MONO_TYPE_R8) {
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
} else {
if (IS_SOFT_FLOAT)
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
else
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
}
} else {
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
}
break;
case RegTypeBaseGen:
if (!m_type_is_byref (t) && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, (G_BYTE_ORDER == G_BIG_ENDIAN) ? MONO_LVREG_LS (in->dreg) : MONO_LVREG_MS (in->dreg));
MONO_INST_NEW (cfg, ins, OP_MOVE);
ins->dreg = mono_alloc_ireg (cfg);
ins->sreg1 = G_BYTE_ORDER == G_BIG_ENDIAN ? MONO_LVREG_MS (in->dreg) : MONO_LVREG_LS (in->dreg);
MONO_ADD_INS (cfg->cbb, ins);
mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ARMREG_R3, FALSE);
} else if (!m_type_is_byref (t) && (t->type == MONO_TYPE_R8)) {
int creg;
/* This should work for soft-float as well */
cfg->param_area = MAX (cfg->param_area, 8);
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ARMREG_SP, (cfg->param_area - 8), in->dreg);
creg = mono_alloc_ireg (cfg);
mono_call_inst_add_outarg_reg (cfg, call, creg, ARMREG_R3, FALSE);
MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8));
creg = mono_alloc_ireg (cfg);
MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 4));
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, creg);
cfg->flags |= MONO_CFG_HAS_FPOUT;
} else {
g_assert_not_reached ();
}
break;
case RegTypeFP: {
int fdreg = mono_alloc_freg (cfg);
if (ainfo->size == 8) {
MONO_INST_NEW (cfg, ins, OP_FMOVE);
ins->sreg1 = in->dreg;
ins->dreg = fdreg;
MONO_ADD_INS (cfg->cbb, ins);
mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, TRUE);
} else {
FloatArgData *fad;
/*
			 * Mono's register allocator doesn't handle single-precision registers that
* overlap double-precision registers (i.e. armhf). So we have to work around
* the register allocator and load the value from memory manually.
*
* So we create a variable for the float argument and an instruction to store
* the argument into the variable. We then store the list of these arguments
* in call->float_args. This list is then used by emit_float_args later to
* pass the arguments in the various call opcodes.
*
* This is not very nice, and we should really try to fix the allocator.
*/
MonoInst *float_arg = mono_compile_create_var (cfg, m_class_get_byval_arg (mono_defaults.single_class), OP_LOCAL);
/* Make sure the instruction isn't seen as pointless and removed.
*/
float_arg->flags |= MONO_INST_VOLATILE;
MONO_EMIT_NEW_UNALU (cfg, OP_FMOVE, float_arg->dreg, in->dreg);
/* We use the dreg to look up the instruction later. The hreg is used to
* emit the instruction that loads the value into the FP reg.
*/
fad = mono_mempool_alloc0 (cfg->mempool, sizeof (FloatArgData));
fad->vreg = float_arg->dreg;
fad->hreg = ainfo->reg;
call->float_args = g_slist_append_mempool (cfg->mempool, call->float_args, fad);
}
call->used_iregs |= 1 << ainfo->reg;
cfg->flags |= MONO_CFG_HAS_FPOUT;
break;
}
default:
g_assert_not_reached ();
}
}
/* Handle the case where there are no implicit arguments */
if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (n == sig->sentinelpos))
emit_sig_cookie (cfg, call, cinfo);
call->call_info = cinfo;
call->stack_usage = cinfo->stack_usage;
}
static void
add_outarg_reg (MonoCompile *cfg, MonoCallInst *call, ArgStorage storage, int reg, MonoInst *arg)
{
MonoInst *ins;
switch (storage) {
case RegTypeFP:
MONO_INST_NEW (cfg, ins, OP_FMOVE);
ins->dreg = mono_alloc_freg (cfg);
ins->sreg1 = arg->dreg;
MONO_ADD_INS (cfg->cbb, ins);
mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, reg, TRUE);
break;
default:
g_assert_not_reached ();
break;
}
}
void
mono_arch_emit_outarg_vt (MonoCompile *cfg, MonoInst *ins, MonoInst *src)
{
MonoCallInst *call = (MonoCallInst*)ins->inst_p0;
MonoInst *load;
ArgInfo *ainfo = (ArgInfo*)ins->inst_p1;
int ovf_size = ainfo->vtsize;
int doffset = ainfo->offset;
int struct_size = ainfo->struct_size;
int i, soffset, dreg, tmpreg;
switch (ainfo->storage) {
case RegTypeGSharedVtInReg:
case RegTypeStructByAddr:
/* Pass by addr */
mono_call_inst_add_outarg_reg (cfg, call, src->dreg, ainfo->reg, FALSE);
break;
case RegTypeGSharedVtOnStack:
case RegTypeStructByAddrOnStack:
/* Pass by addr on stack */
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, src->dreg);
break;
case RegTypeHFA:
for (i = 0; i < ainfo->nregs; ++i) {
if (ainfo->esize == 4)
MONO_INST_NEW (cfg, load, OP_LOADR4_MEMBASE);
else
MONO_INST_NEW (cfg, load, OP_LOADR8_MEMBASE);
load->dreg = mono_alloc_freg (cfg);
load->inst_basereg = src->dreg;
load->inst_offset = i * ainfo->esize;
MONO_ADD_INS (cfg->cbb, load);
if (ainfo->esize == 4) {
FloatArgData *fad;
/* See RegTypeFP in mono_arch_emit_call () */
MonoInst *float_arg = mono_compile_create_var (cfg, m_class_get_byval_arg (mono_defaults.single_class), OP_LOCAL);
float_arg->flags |= MONO_INST_VOLATILE;
MONO_EMIT_NEW_UNALU (cfg, OP_FMOVE, float_arg->dreg, load->dreg);
fad = mono_mempool_alloc0 (cfg->mempool, sizeof (FloatArgData));
fad->vreg = float_arg->dreg;
fad->hreg = ainfo->reg + i;
call->float_args = g_slist_append_mempool (cfg->mempool, call->float_args, fad);
} else {
add_outarg_reg (cfg, call, RegTypeFP, ainfo->reg + (i * 2), load);
}
}
break;
default:
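		/* Load the part of the vtype passed in registers word by word; any
		 * overflow is memcpy'ed to the stack area below. */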
soffset = 0;
for (i = 0; i < ainfo->size; ++i) {
dreg = mono_alloc_ireg (cfg);
switch (struct_size) {
case 1:
MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, dreg, src->dreg, soffset);
break;
case 2:
MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, dreg, src->dreg, soffset);
break;
case 3:
tmpreg = mono_alloc_ireg (cfg);
MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, dreg, src->dreg, soffset);
MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, tmpreg, src->dreg, soffset + 1);
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, tmpreg, tmpreg, 8);
MONO_EMIT_NEW_BIALU (cfg, OP_IOR, dreg, dreg, tmpreg);
MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, tmpreg, src->dreg, soffset + 2);
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, tmpreg, tmpreg, 16);
MONO_EMIT_NEW_BIALU (cfg, OP_IOR, dreg, dreg, tmpreg);
break;
default:
MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, src->dreg, soffset);
break;
}
mono_call_inst_add_outarg_reg (cfg, call, dreg, ainfo->reg + i, FALSE);
soffset += sizeof (target_mgreg_t);
struct_size -= sizeof (target_mgreg_t);
}
//g_print ("vt size: %d at R%d + %d\n", doffset, vt->inst_basereg, vt->inst_offset);
if (ovf_size != 0)
mini_emit_memcpy (cfg, ARMREG_SP, doffset, src->dreg, soffset, MIN (ovf_size * sizeof (target_mgreg_t), struct_size), struct_size < 4 ? 1 : 4);
break;
}
}
void
mono_arch_emit_setret (MonoCompile *cfg, MonoMethod *method, MonoInst *val)
{
MonoType *ret = mini_get_underlying_type (mono_method_signature_internal (method)->ret);
if (!m_type_is_byref (ret)) {
if (ret->type == MONO_TYPE_I8 || ret->type == MONO_TYPE_U8) {
MonoInst *ins;
if (COMPILE_LLVM (cfg)) {
MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
} else {
MONO_INST_NEW (cfg, ins, OP_SETLRET);
ins->sreg1 = MONO_LVREG_LS (val->dreg);
ins->sreg2 = MONO_LVREG_MS (val->dreg);
MONO_ADD_INS (cfg->cbb, ins);
}
return;
}
switch (arm_fpu) {
case MONO_ARM_FPU_NONE:
if (ret->type == MONO_TYPE_R8) {
MonoInst *ins;
MONO_INST_NEW (cfg, ins, OP_SETFRET);
ins->dreg = cfg->ret->dreg;
ins->sreg1 = val->dreg;
MONO_ADD_INS (cfg->cbb, ins);
return;
}
if (ret->type == MONO_TYPE_R4) {
/* Already converted to an int in method_to_ir () */
MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
return;
}
break;
case MONO_ARM_FPU_VFP:
case MONO_ARM_FPU_VFP_HARD:
if (ret->type == MONO_TYPE_R8 || ret->type == MONO_TYPE_R4) {
MonoInst *ins;
MONO_INST_NEW (cfg, ins, OP_SETFRET);
ins->dreg = cfg->ret->dreg;
ins->sreg1 = val->dreg;
MONO_ADD_INS (cfg->cbb, ins);
return;
}
break;
default:
g_assert_not_reached ();
}
}
MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
}
#endif /* #ifndef DISABLE_JIT */
gboolean
mono_arch_is_inst_imm (int opcode, int imm_opcode, gint64 imm)
{
return TRUE;
}
typedef struct {
MonoMethodSignature *sig;
CallInfo *cinfo;
MonoType *rtype;
MonoType **param_types;
} ArchDynCallInfo;
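/*
 * dyn_call_supported:
 *
 * Return whether SIG can be invoked through the dynamic call mechanism,
 * i.e. whether every argument and return storage kind used by CINFO is
 * handled by mono_arch_start_dyn_call ()/mono_arch_finish_dyn_call ().
 */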
static gboolean
dyn_call_supported (CallInfo *cinfo, MonoMethodSignature *sig)
{
int i;
switch (cinfo->ret.storage) {
case RegTypeNone:
case RegTypeGeneral:
case RegTypeIRegPair:
case RegTypeStructByAddr:
break;
case RegTypeFP:
if (IS_VFP)
break;
else
return FALSE;
default:
return FALSE;
}
for (i = 0; i < cinfo->nargs; ++i) {
ArgInfo *ainfo = &cinfo->args [i];
int last_slot;
switch (ainfo->storage) {
case RegTypeGeneral:
case RegTypeIRegPair:
case RegTypeBaseGen:
case RegTypeFP:
break;
case RegTypeBase:
break;
case RegTypeStructByVal:
if (ainfo->size == 0)
last_slot = PARAM_REGS + (ainfo->offset / 4) + ainfo->vtsize;
else
last_slot = ainfo->reg + ainfo->size + ainfo->vtsize;
break;
default:
return FALSE;
}
}
	/* FIXME: Can't use cinfo only as it doesn't contain info about I8/float */
for (i = 0; i < sig->param_count; ++i) {
MonoType *t = sig->params [i];
if (m_type_is_byref (t))
continue;
t = mini_get_underlying_type (t);
switch (t->type) {
case MONO_TYPE_R4:
case MONO_TYPE_R8:
if (IS_SOFT_FLOAT)
return FALSE;
else
break;
/*
case MONO_TYPE_I8:
case MONO_TYPE_U8:
return FALSE;
*/
default:
break;
}
}
return TRUE;
}
MonoDynCallInfo*
mono_arch_dyn_call_prepare (MonoMethodSignature *sig)
{
ArchDynCallInfo *info;
CallInfo *cinfo;
int i;
cinfo = get_call_info (NULL, sig);
if (!dyn_call_supported (cinfo, sig)) {
g_free (cinfo);
return NULL;
}
info = g_new0 (ArchDynCallInfo, 1);
// FIXME: Preprocess the info to speed up start_dyn_call ()
info->sig = sig;
info->cinfo = cinfo;
info->rtype = mini_get_underlying_type (sig->ret);
info->param_types = g_new0 (MonoType*, sig->param_count);
for (i = 0; i < sig->param_count; ++i)
info->param_types [i] = mini_get_underlying_type (sig->params [i]);
return (MonoDynCallInfo*)info;
}
void
mono_arch_dyn_call_free (MonoDynCallInfo *info)
{
ArchDynCallInfo *ainfo = (ArchDynCallInfo*)info;
g_free (ainfo->cinfo);
g_free (ainfo);
}
int
mono_arch_dyn_call_get_buf_size (MonoDynCallInfo *info)
{
ArchDynCallInfo *ainfo = (ArchDynCallInfo*)info;
g_assert (ainfo->cinfo->stack_usage % MONO_ARCH_FRAME_ALIGNMENT == 0);
return sizeof (DynCallArgs) + ainfo->cinfo->stack_usage;
}
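/*
 * mono_arch_start_dyn_call:
 *
 * Marshal the arguments in ARGS into the DynCallArgs structure in BUF,
 * placing each one into the register/stack slot computed by
 * mono_arch_dyn_call_prepare ().
 */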
void
mono_arch_start_dyn_call (MonoDynCallInfo *info, gpointer **args, guint8 *ret, guint8 *buf)
{
ArchDynCallInfo *dinfo = (ArchDynCallInfo*)info;
CallInfo *cinfo = dinfo->cinfo;
DynCallArgs *p = (DynCallArgs*)buf;
int arg_index, greg, i, j, pindex;
MonoMethodSignature *sig = dinfo->sig;
p->res = 0;
p->ret = ret;
p->has_fpregs = 0;
p->n_stackargs = cinfo->stack_usage / sizeof (host_mgreg_t);
arg_index = 0;
greg = 0;
pindex = 0;
if (sig->hasthis || dinfo->cinfo->vret_arg_index == 1) {
p->regs [greg ++] = (host_mgreg_t)(gsize)*(args [arg_index ++]);
if (!sig->hasthis)
pindex = 1;
}
if (dinfo->cinfo->ret.storage == RegTypeStructByAddr)
p->regs [greg ++] = (host_mgreg_t)(gsize)ret;
for (i = pindex; i < sig->param_count; i++) {
MonoType *t = dinfo->param_types [i];
gpointer *arg = args [arg_index ++];
ArgInfo *ainfo = &dinfo->cinfo->args [i + sig->hasthis];
int slot = -1;
if (ainfo->storage == RegTypeGeneral || ainfo->storage == RegTypeIRegPair || ainfo->storage == RegTypeStructByVal) {
slot = ainfo->reg;
} else if (ainfo->storage == RegTypeFP) {
} else if (ainfo->storage == RegTypeBase) {
slot = PARAM_REGS + (ainfo->offset / 4);
} else if (ainfo->storage == RegTypeBaseGen) {
/* slot + 1 is the first stack slot, so the code below will work */
slot = 3;
} else {
g_assert_not_reached ();
}
if (m_type_is_byref (t)) {
p->regs [slot] = (host_mgreg_t)(gsize)*arg;
continue;
}
switch (t->type) {
case MONO_TYPE_OBJECT:
case MONO_TYPE_PTR:
case MONO_TYPE_I:
case MONO_TYPE_U:
p->regs [slot] = (host_mgreg_t)(gsize)*arg;
break;
case MONO_TYPE_U1:
p->regs [slot] = *(guint8*)arg;
break;
case MONO_TYPE_I1:
p->regs [slot] = *(gint8*)arg;
break;
case MONO_TYPE_I2:
p->regs [slot] = *(gint16*)arg;
break;
case MONO_TYPE_U2:
p->regs [slot] = *(guint16*)arg;
break;
case MONO_TYPE_I4:
p->regs [slot] = *(gint32*)arg;
break;
case MONO_TYPE_U4:
p->regs [slot] = *(guint32*)arg;
break;
case MONO_TYPE_I8:
case MONO_TYPE_U8:
p->regs [slot ++] = (host_mgreg_t)(gsize)arg [0];
p->regs [slot] = (host_mgreg_t)(gsize)arg [1];
break;
case MONO_TYPE_R4:
if (ainfo->storage == RegTypeFP) {
float f = *(float*)arg;
p->fpregs [ainfo->reg / 2] = *(double*)&f;
p->has_fpregs = 1;
} else {
p->regs [slot] = *(host_mgreg_t*)arg;
}
break;
case MONO_TYPE_R8:
if (ainfo->storage == RegTypeFP) {
p->fpregs [ainfo->reg / 2] = *(double*)arg;
p->has_fpregs = 1;
} else {
p->regs [slot ++] = (host_mgreg_t)(gsize)arg [0];
p->regs [slot] = (host_mgreg_t)(gsize)arg [1];
}
break;
case MONO_TYPE_GENERICINST:
if (MONO_TYPE_IS_REFERENCE (t)) {
p->regs [slot] = (host_mgreg_t)(gsize)*arg;
break;
} else {
if (t->type == MONO_TYPE_GENERICINST && mono_class_is_nullable (mono_class_from_mono_type_internal (t))) {
MonoClass *klass = mono_class_from_mono_type_internal (t);
guint8 *nullable_buf;
int size;
size = mono_class_value_size (klass, NULL);
nullable_buf = g_alloca (size);
g_assert (nullable_buf);
/* The argument pointed to by arg is either a boxed vtype or null */
mono_nullable_init (nullable_buf, (MonoObject*)arg, klass);
arg = (gpointer*)nullable_buf;
					/* Fall through */
} else {
					/* Fall through */
}
}
case MONO_TYPE_VALUETYPE:
g_assert (ainfo->storage == RegTypeStructByVal);
if (ainfo->size == 0)
slot = PARAM_REGS + (ainfo->offset / 4);
else
slot = ainfo->reg;
for (j = 0; j < ainfo->size + ainfo->vtsize; ++j)
p->regs [slot ++] = ((host_mgreg_t*)arg) [j];
break;
default:
g_assert_not_reached ();
}
}
}
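/*
 * mono_arch_finish_dyn_call:
 *
 * Convert the result left in BUF by the dyn call back into the return
 * value buffer passed to mono_arch_start_dyn_call ().
 */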
void
mono_arch_finish_dyn_call (MonoDynCallInfo *info, guint8 *buf)
{
ArchDynCallInfo *ainfo = (ArchDynCallInfo*)info;
DynCallArgs *p = (DynCallArgs*)buf;
MonoType *ptype = ainfo->rtype;
guint8 *ret = p->ret;
host_mgreg_t res = p->res;
host_mgreg_t res2 = p->res2;
switch (ptype->type) {
case MONO_TYPE_VOID:
*(gpointer*)ret = NULL;
break;
case MONO_TYPE_OBJECT:
case MONO_TYPE_I:
case MONO_TYPE_U:
case MONO_TYPE_PTR:
*(gpointer*)ret = (gpointer)(gsize)res;
break;
case MONO_TYPE_I1:
*(gint8*)ret = res;
break;
case MONO_TYPE_U1:
*(guint8*)ret = res;
break;
case MONO_TYPE_I2:
*(gint16*)ret = res;
break;
case MONO_TYPE_U2:
*(guint16*)ret = res;
break;
case MONO_TYPE_I4:
*(gint32*)ret = res;
break;
case MONO_TYPE_U4:
*(guint32*)ret = res;
break;
case MONO_TYPE_I8:
case MONO_TYPE_U8:
/* This handles endianness as well */
((gint32*)ret) [0] = res;
((gint32*)ret) [1] = res2;
break;
case MONO_TYPE_GENERICINST:
if (MONO_TYPE_IS_REFERENCE (ptype)) {
			*(gpointer*)ret = (gpointer)(gsize)res;
break;
} else {
			/* Fall through */
}
case MONO_TYPE_VALUETYPE:
g_assert (ainfo->cinfo->ret.storage == RegTypeStructByAddr);
/* Nothing to do */
break;
case MONO_TYPE_R4:
g_assert (IS_VFP);
if (IS_HARD_FLOAT)
*(float*)ret = *(float*)&p->fpregs [0];
else
*(float*)ret = *(float*)&res;
break;
case MONO_TYPE_R8: {
host_mgreg_t regs [2];
g_assert (IS_VFP);
if (IS_HARD_FLOAT) {
*(double*)ret = p->fpregs [0];
} else {
regs [0] = res;
regs [1] = res2;
			*(double*)ret = *(double*)&regs;
}
break;
}
default:
g_assert_not_reached ();
}
}
#ifndef DISABLE_JIT
/*
* The immediate field for cond branches is big enough for all reasonable methods
*/
#define EMIT_COND_BRANCH_FLAGS(ins,condcode) \
if (0 && ins->inst_true_bb->native_offset) { \
ARM_B_COND (code, (condcode), (code - cfg->native_code + ins->inst_true_bb->native_offset) & 0xffffff); \
} else { \
mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_true_bb); \
ARM_B_COND (code, (condcode), 0); \
}
#define EMIT_COND_BRANCH(ins,cond) EMIT_COND_BRANCH_FLAGS(ins, branch_cc_table [(cond)])
/* Emit an exception if the condition fails.
 *
 * We assign the extra code used to throw the implicit exceptions
 * to cfg->bb_exit as far as the big branch handling is concerned.
 */
#define EMIT_COND_SYSTEM_EXCEPTION_FLAGS(condcode,exc_name) \
do { \
mono_add_patch_info (cfg, code - cfg->native_code, \
MONO_PATCH_INFO_EXC, exc_name); \
ARM_BL_COND (code, (condcode), 0); \
} while (0)
#define EMIT_COND_SYSTEM_EXCEPTION(cond,exc_name) EMIT_COND_SYSTEM_EXCEPTION_FLAGS(branch_cc_table [(cond)], (exc_name))
void
mono_arch_peephole_pass_1 (MonoCompile *cfg, MonoBasicBlock *bb)
{
}
void
mono_arch_peephole_pass_2 (MonoCompile *cfg, MonoBasicBlock *bb)
{
MonoInst *ins, *n;
MONO_BB_FOR_EACH_INS_SAFE (bb, n, ins) {
MonoInst *last_ins = mono_inst_prev (ins, FILTER_IL_SEQ_POINT);
switch (ins->opcode) {
case OP_MUL_IMM:
case OP_IMUL_IMM:
/* Already done by an arch-independent pass */
break;
case OP_LOAD_MEMBASE:
case OP_LOADI4_MEMBASE:
/*
* OP_STORE_MEMBASE_REG reg, offset(basereg)
* OP_LOAD_MEMBASE offset(basereg), reg
*/
if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_REG
|| last_ins->opcode == OP_STORE_MEMBASE_REG) &&
ins->inst_basereg == last_ins->inst_destbasereg &&
ins->inst_offset == last_ins->inst_offset) {
if (ins->dreg == last_ins->sreg1) {
MONO_DELETE_INS (bb, ins);
continue;
} else {
//static int c = 0; g_print ("MATCHX %s %d\n", cfg->method->name,c++);
ins->opcode = OP_MOVE;
ins->sreg1 = last_ins->sreg1;
}
/*
* Note: reg1 must be different from the basereg in the second load
* OP_LOAD_MEMBASE offset(basereg), reg1
* OP_LOAD_MEMBASE offset(basereg), reg2
* -->
* OP_LOAD_MEMBASE offset(basereg), reg1
* OP_MOVE reg1, reg2
*/
			} else if (last_ins && (last_ins->opcode == OP_LOADI4_MEMBASE
|| last_ins->opcode == OP_LOAD_MEMBASE) &&
ins->inst_basereg != last_ins->dreg &&
ins->inst_basereg == last_ins->inst_basereg &&
ins->inst_offset == last_ins->inst_offset) {
if (ins->dreg == last_ins->dreg) {
MONO_DELETE_INS (bb, ins);
continue;
} else {
ins->opcode = OP_MOVE;
ins->sreg1 = last_ins->dreg;
}
//g_assert_not_reached ();
#if 0
/*
* OP_STORE_MEMBASE_IMM imm, offset(basereg)
* OP_LOAD_MEMBASE offset(basereg), reg
* -->
* OP_STORE_MEMBASE_IMM imm, offset(basereg)
* OP_ICONST reg, imm
*/
} else if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_IMM
|| last_ins->opcode == OP_STORE_MEMBASE_IMM) &&
ins->inst_basereg == last_ins->inst_destbasereg &&
ins->inst_offset == last_ins->inst_offset) {
//static int c = 0; g_print ("MATCHX %s %d\n", cfg->method->name,c++);
ins->opcode = OP_ICONST;
ins->inst_c0 = last_ins->inst_imm;
g_assert_not_reached (); // check this rule
#endif
}
break;
case OP_LOADU1_MEMBASE:
case OP_LOADI1_MEMBASE:
if (last_ins && (last_ins->opcode == OP_STOREI1_MEMBASE_REG) &&
ins->inst_basereg == last_ins->inst_destbasereg &&
ins->inst_offset == last_ins->inst_offset) {
ins->opcode = (ins->opcode == OP_LOADI1_MEMBASE) ? OP_ICONV_TO_I1 : OP_ICONV_TO_U1;
ins->sreg1 = last_ins->sreg1;
}
break;
case OP_LOADU2_MEMBASE:
case OP_LOADI2_MEMBASE:
if (last_ins && (last_ins->opcode == OP_STOREI2_MEMBASE_REG) &&
ins->inst_basereg == last_ins->inst_destbasereg &&
ins->inst_offset == last_ins->inst_offset) {
ins->opcode = (ins->opcode == OP_LOADI2_MEMBASE) ? OP_ICONV_TO_I2 : OP_ICONV_TO_U2;
ins->sreg1 = last_ins->sreg1;
}
break;
		case OP_MOVE:
/*
* OP_MOVE reg, reg
*/
if (ins->dreg == ins->sreg1) {
MONO_DELETE_INS (bb, ins);
continue;
}
/*
* OP_MOVE sreg, dreg
* OP_MOVE dreg, sreg
*/
if (last_ins && last_ins->opcode == OP_MOVE &&
ins->sreg1 == last_ins->dreg &&
ins->dreg == last_ins->sreg1) {
MONO_DELETE_INS (bb, ins);
continue;
}
break;
}
}
}
/*
* the branch_cc_table should maintain the order of these
* opcodes.
case CEE_BEQ:
case CEE_BGE:
case CEE_BGT:
case CEE_BLE:
case CEE_BLT:
case CEE_BNE_UN:
case CEE_BGE_UN:
case CEE_BGT_UN:
case CEE_BLE_UN:
case CEE_BLT_UN:
*/
static const guchar
branch_cc_table [] = {
ARMCOND_EQ,
ARMCOND_GE,
ARMCOND_GT,
ARMCOND_LE,
ARMCOND_LT,
ARMCOND_NE,
ARMCOND_HS,
ARMCOND_HI,
ARMCOND_LS,
ARMCOND_LO
};
#define ADD_NEW_INS(cfg,dest,op) do { \
MONO_INST_NEW ((cfg), (dest), (op)); \
mono_bblock_insert_before_ins (bb, ins, (dest)); \
} while (0)
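/*
 * map_to_reg_reg_op:
 *
 * Map an opcode taking an immediate or a membase operand to its
 * reg-reg/memindex equivalent, used when the immediate cannot be
 * encoded into the instruction.
 */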
static int
map_to_reg_reg_op (int op)
{
switch (op) {
case OP_ADD_IMM:
return OP_IADD;
case OP_SUB_IMM:
return OP_ISUB;
case OP_AND_IMM:
return OP_IAND;
case OP_COMPARE_IMM:
return OP_COMPARE;
case OP_ICOMPARE_IMM:
return OP_ICOMPARE;
case OP_ADDCC_IMM:
return OP_ADDCC;
case OP_ADC_IMM:
return OP_ADC;
case OP_SUBCC_IMM:
return OP_SUBCC;
case OP_SBB_IMM:
return OP_SBB;
case OP_OR_IMM:
return OP_IOR;
case OP_XOR_IMM:
return OP_IXOR;
case OP_LOAD_MEMBASE:
return OP_LOAD_MEMINDEX;
case OP_LOADI4_MEMBASE:
return OP_LOADI4_MEMINDEX;
case OP_LOADU4_MEMBASE:
return OP_LOADU4_MEMINDEX;
case OP_LOADU1_MEMBASE:
return OP_LOADU1_MEMINDEX;
case OP_LOADI2_MEMBASE:
return OP_LOADI2_MEMINDEX;
case OP_LOADU2_MEMBASE:
return OP_LOADU2_MEMINDEX;
case OP_LOADI1_MEMBASE:
return OP_LOADI1_MEMINDEX;
case OP_STOREI1_MEMBASE_REG:
return OP_STOREI1_MEMINDEX;
case OP_STOREI2_MEMBASE_REG:
return OP_STOREI2_MEMINDEX;
case OP_STOREI4_MEMBASE_REG:
return OP_STOREI4_MEMINDEX;
case OP_STORE_MEMBASE_REG:
return OP_STORE_MEMINDEX;
case OP_STORER4_MEMBASE_REG:
return OP_STORER4_MEMINDEX;
case OP_STORER8_MEMBASE_REG:
return OP_STORER8_MEMINDEX;
case OP_STORE_MEMBASE_IMM:
return OP_STORE_MEMBASE_REG;
case OP_STOREI1_MEMBASE_IMM:
return OP_STOREI1_MEMBASE_REG;
case OP_STOREI2_MEMBASE_IMM:
return OP_STOREI2_MEMBASE_REG;
case OP_STOREI4_MEMBASE_IMM:
return OP_STOREI4_MEMBASE_REG;
}
g_assert_not_reached ();
}
/*
* Remove from the instruction list the instructions that can't be
* represented with very simple instructions with no register
* requirements.
*/
void
mono_arch_lowering_pass (MonoCompile *cfg, MonoBasicBlock *bb)
{
MonoInst *ins, *temp, *last_ins = NULL;
int rot_amount, imm8, low_imm;
MONO_BB_FOR_EACH_INS (bb, ins) {
loop_start:
switch (ins->opcode) {
case OP_ADD_IMM:
case OP_SUB_IMM:
case OP_AND_IMM:
case OP_COMPARE_IMM:
case OP_ICOMPARE_IMM:
case OP_ADDCC_IMM:
case OP_ADC_IMM:
case OP_SUBCC_IMM:
case OP_SBB_IMM:
case OP_OR_IMM:
case OP_XOR_IMM:
case OP_IADD_IMM:
case OP_ISUB_IMM:
case OP_IAND_IMM:
case OP_IADC_IMM:
case OP_ISBB_IMM:
case OP_IOR_IMM:
case OP_IXOR_IMM:
if ((imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount)) < 0) {
int opcode2 = mono_op_imm_to_op (ins->opcode);
ADD_NEW_INS (cfg, temp, OP_ICONST);
temp->inst_c0 = ins->inst_imm;
temp->dreg = mono_alloc_ireg (cfg);
ins->sreg2 = temp->dreg;
if (opcode2 == -1)
g_error ("mono_op_imm_to_op failed for %s\n", mono_inst_name (ins->opcode));
ins->opcode = opcode2;
}
if (ins->opcode == OP_SBB || ins->opcode == OP_ISBB || ins->opcode == OP_SUBCC)
goto loop_start;
else
break;
case OP_MUL_IMM:
case OP_IMUL_IMM:
if (ins->inst_imm == 1) {
ins->opcode = OP_MOVE;
break;
}
if (ins->inst_imm == 0) {
ins->opcode = OP_ICONST;
ins->inst_c0 = 0;
break;
}
imm8 = (ins->inst_imm > 0) ? mono_is_power_of_two (ins->inst_imm) : -1;
if (imm8 > 0) {
ins->opcode = OP_SHL_IMM;
ins->inst_imm = imm8;
break;
}
ADD_NEW_INS (cfg, temp, OP_ICONST);
temp->inst_c0 = ins->inst_imm;
temp->dreg = mono_alloc_ireg (cfg);
ins->sreg2 = temp->dreg;
ins->opcode = OP_IMUL;
break;
case OP_SBB:
case OP_ISBB:
case OP_SUBCC:
case OP_ISUBCC: {
int try_count = 2;
MonoInst *current = ins;
/* may require a look-ahead of a couple instructions due to spilling */
while (try_count-- && current->next) {
if (current->next->opcode == OP_COND_EXC_C || current->next->opcode == OP_COND_EXC_IC) {
/* ARM sets the C flag to 1 if there was _no_ overflow */
current->next->opcode = OP_COND_EXC_NC;
break;
}
current = current->next;
}
break;
}
case OP_IDIV_IMM:
case OP_IDIV_UN_IMM:
case OP_IREM_IMM:
case OP_IREM_UN_IMM: {
int opcode2 = mono_op_imm_to_op (ins->opcode);
ADD_NEW_INS (cfg, temp, OP_ICONST);
temp->inst_c0 = ins->inst_imm;
temp->dreg = mono_alloc_ireg (cfg);
ins->sreg2 = temp->dreg;
if (opcode2 == -1)
g_error ("mono_op_imm_to_op failed for %s\n", mono_inst_name (ins->opcode));
ins->opcode = opcode2;
break;
}
case OP_LOCALLOC_IMM:
ADD_NEW_INS (cfg, temp, OP_ICONST);
temp->inst_c0 = ins->inst_imm;
temp->dreg = mono_alloc_ireg (cfg);
ins->sreg1 = temp->dreg;
ins->opcode = OP_LOCALLOC;
break;
case OP_LOAD_MEMBASE:
case OP_LOADI4_MEMBASE:
case OP_LOADU4_MEMBASE:
case OP_LOADU1_MEMBASE:
/* we can do two things: load the immediate in a register
* and use an indexed load, or see if the immediate can be
* represented as an add_imm + a load with a smaller offset
* that fits. We just do the first for now; optimize later.
*/
if (arm_is_imm12 (ins->inst_offset))
break;
ADD_NEW_INS (cfg, temp, OP_ICONST);
temp->inst_c0 = ins->inst_offset;
temp->dreg = mono_alloc_ireg (cfg);
ins->sreg2 = temp->dreg;
ins->opcode = map_to_reg_reg_op (ins->opcode);
break;
case OP_LOADI2_MEMBASE:
case OP_LOADU2_MEMBASE:
case OP_LOADI1_MEMBASE:
if (arm_is_imm8 (ins->inst_offset))
break;
ADD_NEW_INS (cfg, temp, OP_ICONST);
temp->inst_c0 = ins->inst_offset;
temp->dreg = mono_alloc_ireg (cfg);
ins->sreg2 = temp->dreg;
ins->opcode = map_to_reg_reg_op (ins->opcode);
break;
case OP_LOADR4_MEMBASE:
case OP_LOADR8_MEMBASE:
if (arm_is_fpimm8 (ins->inst_offset))
break;
low_imm = ins->inst_offset & 0x1ff;
if ((imm8 = mono_arm_is_rotated_imm8 (ins->inst_offset & ~0x1ff, &rot_amount)) >= 0) {
ADD_NEW_INS (cfg, temp, OP_ADD_IMM);
temp->inst_imm = ins->inst_offset & ~0x1ff;
temp->sreg1 = ins->inst_basereg;
temp->dreg = mono_alloc_ireg (cfg);
ins->inst_basereg = temp->dreg;
ins->inst_offset = low_imm;
} else {
MonoInst *add_ins;
ADD_NEW_INS (cfg, temp, OP_ICONST);
temp->inst_c0 = ins->inst_offset;
temp->dreg = mono_alloc_ireg (cfg);
ADD_NEW_INS (cfg, add_ins, OP_IADD);
add_ins->sreg1 = ins->inst_basereg;
add_ins->sreg2 = temp->dreg;
add_ins->dreg = mono_alloc_ireg (cfg);
ins->inst_basereg = add_ins->dreg;
ins->inst_offset = 0;
}
break;
case OP_STORE_MEMBASE_REG:
case OP_STOREI4_MEMBASE_REG:
case OP_STOREI1_MEMBASE_REG:
if (arm_is_imm12 (ins->inst_offset))
break;
ADD_NEW_INS (cfg, temp, OP_ICONST);
temp->inst_c0 = ins->inst_offset;
temp->dreg = mono_alloc_ireg (cfg);
ins->sreg2 = temp->dreg;
ins->opcode = map_to_reg_reg_op (ins->opcode);
break;
case OP_STOREI2_MEMBASE_REG:
if (arm_is_imm8 (ins->inst_offset))
break;
ADD_NEW_INS (cfg, temp, OP_ICONST);
temp->inst_c0 = ins->inst_offset;
temp->dreg = mono_alloc_ireg (cfg);
ins->sreg2 = temp->dreg;
ins->opcode = map_to_reg_reg_op (ins->opcode);
break;
case OP_STORER4_MEMBASE_REG:
case OP_STORER8_MEMBASE_REG:
if (arm_is_fpimm8 (ins->inst_offset))
break;
low_imm = ins->inst_offset & 0x1ff;
if ((imm8 = mono_arm_is_rotated_imm8 (ins->inst_offset & ~ 0x1ff, &rot_amount)) >= 0 && arm_is_fpimm8 (low_imm)) {
ADD_NEW_INS (cfg, temp, OP_ADD_IMM);
temp->inst_imm = ins->inst_offset & ~0x1ff;
temp->sreg1 = ins->inst_destbasereg;
temp->dreg = mono_alloc_ireg (cfg);
ins->inst_destbasereg = temp->dreg;
ins->inst_offset = low_imm;
} else {
MonoInst *add_ins;
ADD_NEW_INS (cfg, temp, OP_ICONST);
temp->inst_c0 = ins->inst_offset;
temp->dreg = mono_alloc_ireg (cfg);
ADD_NEW_INS (cfg, add_ins, OP_IADD);
add_ins->sreg1 = ins->inst_destbasereg;
add_ins->sreg2 = temp->dreg;
add_ins->dreg = mono_alloc_ireg (cfg);
ins->inst_destbasereg = add_ins->dreg;
ins->inst_offset = 0;
}
break;
case OP_STORE_MEMBASE_IMM:
case OP_STOREI1_MEMBASE_IMM:
case OP_STOREI2_MEMBASE_IMM:
case OP_STOREI4_MEMBASE_IMM:
ADD_NEW_INS (cfg, temp, OP_ICONST);
temp->inst_c0 = ins->inst_imm;
temp->dreg = mono_alloc_ireg (cfg);
ins->sreg1 = temp->dreg;
ins->opcode = map_to_reg_reg_op (ins->opcode);
last_ins = temp;
goto loop_start; /* make it handle the possibly big ins->inst_offset */
case OP_FCOMPARE:
case OP_RCOMPARE: {
gboolean swap = FALSE;
int reg;
if (!ins->next) {
/* Optimized away */
NULLIFY_INS (ins);
break;
}
/* Some fp compares require swapped operands */
switch (ins->next->opcode) {
case OP_FBGT:
ins->next->opcode = OP_FBLT;
swap = TRUE;
break;
case OP_FBGT_UN:
ins->next->opcode = OP_FBLT_UN;
swap = TRUE;
break;
case OP_FBLE:
ins->next->opcode = OP_FBGE;
swap = TRUE;
break;
case OP_FBLE_UN:
ins->next->opcode = OP_FBGE_UN;
swap = TRUE;
break;
default:
break;
}
if (swap) {
reg = ins->sreg1;
ins->sreg1 = ins->sreg2;
ins->sreg2 = reg;
}
break;
}
}
last_ins = ins;
}
bb->last_ins = last_ins;
bb->max_vreg = cfg->next_vreg;
}
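/*
* Illustrative example (not from any particular method): an
* "add_imm dreg <- sreg + 0x12345" can't encode 0x12345 as a rotated
* imm8, so the pass above rewrites it as
*   iconst tmp <- 0x12345
*   iadd dreg <- sreg + tmp
* leaving only operand forms the emitter below can encode directly.
*/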
void
mono_arch_decompose_long_opts (MonoCompile *cfg, MonoInst *long_ins)
{
MonoInst *ins;
if (long_ins->opcode == OP_LNEG) {
ins = long_ins;
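/* 64-bit negation is 0 - sreg1: rsbs on the low word, then rsc on the
* high word so the borrow propagates. */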
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ARM_RSBS_IMM, MONO_LVREG_LS (ins->dreg), MONO_LVREG_LS (ins->sreg1), 0);
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ARM_RSC_IMM, MONO_LVREG_MS (ins->dreg), MONO_LVREG_MS (ins->sreg1), 0);
NULLIFY_INS (ins);
}
}
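/*
* Convert the float in sreg to an integer in dreg through a scratch VFP
* register, then sign- or zero-extend the low 'size' bytes manually,
* since the VFP conversion always produces a full 32-bit result.
*/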
static guchar*
emit_float_to_int (MonoCompile *cfg, guchar *code, int dreg, int sreg, int size, gboolean is_signed)
{
/* sreg is a float, dreg is an integer reg */
if (IS_VFP) {
code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
if (is_signed)
ARM_TOSIZD (code, vfp_scratch1, sreg);
else
ARM_TOUIZD (code, vfp_scratch1, sreg);
ARM_FMRS (code, dreg, vfp_scratch1);
code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
}
if (!is_signed) {
if (size == 1)
ARM_AND_REG_IMM8 (code, dreg, dreg, 0xff);
else if (size == 2) {
ARM_SHL_IMM (code, dreg, dreg, 16);
ARM_SHR_IMM (code, dreg, dreg, 16);
}
} else {
if (size == 1) {
ARM_SHL_IMM (code, dreg, dreg, 24);
ARM_SAR_IMM (code, dreg, dreg, 24);
} else if (size == 2) {
ARM_SHL_IMM (code, dreg, dreg, 16);
ARM_SAR_IMM (code, dreg, dreg, 16);
}
}
return code;
}
static guchar*
emit_r4_to_int (MonoCompile *cfg, guchar *code, int dreg, int sreg, int size, gboolean is_signed)
{
/* sreg is a float, dreg is an integer reg */
g_assert (IS_VFP);
code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
if (is_signed)
ARM_TOSIZS (code, vfp_scratch1, sreg);
else
ARM_TOUIZS (code, vfp_scratch1, sreg);
ARM_FMRS (code, dreg, vfp_scratch1);
code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
if (!is_signed) {
if (size == 1)
ARM_AND_REG_IMM8 (code, dreg, dreg, 0xff);
else if (size == 2) {
ARM_SHL_IMM (code, dreg, dreg, 16);
ARM_SHR_IMM (code, dreg, dreg, 16);
}
} else {
if (size == 1) {
ARM_SHL_IMM (code, dreg, dreg, 24);
ARM_SAR_IMM (code, dreg, dreg, 24);
} else if (size == 2) {
ARM_SHL_IMM (code, dreg, dreg, 16);
ARM_SAR_IMM (code, dreg, dreg, 16);
}
}
return code;
}
#endif /* #ifndef DISABLE_JIT */
#define is_call_imm(diff) ((gint)(diff) >= -33554432 && (gint)(diff) <= 33554431)
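/* is_call_imm: the displacement of a b/bl is a signed 24-bit word offset,
* i.e. roughly +/-32MB; anything further away needs a thunk. */
/*
* emit_thunk writes a three-word thunk: a pc-relative load of the target
* into IP, an indirect jump through IP, and the target address itself.
* The ldr uses [pc, #0] because the ARM pc reads as the address of the
* current instruction + 8, which is exactly where the constant is placed.
*/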
static void
emit_thunk (guint8 *code, gconstpointer target)
{
guint8 *p = code;
ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
if (thumb_supported)
ARM_BX (code, ARMREG_IP);
else
ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
*(guint32*)code = (guint32)(gsize)target;
code += 4;
mono_arch_flush_icache (p, code - p);
}
static void
handle_thunk (MonoCompile *cfg, guchar *code, const guchar *target)
{
MonoJitInfo *ji = NULL;
MonoThunkJitInfo *info;
guint8 *thunks, *p;
int thunks_size;
guint8 *orig_target;
guint8 *target_thunk;
if (cfg) {
/*
* This can be called multiple times during JITting;
* save the current position in cfg->arch to avoid
* doing an O(n^2) search.
*/
if (!cfg->arch.thunks) {
cfg->arch.thunks = cfg->thunks;
cfg->arch.thunks_size = cfg->thunk_area;
}
thunks = cfg->arch.thunks;
thunks_size = cfg->arch.thunks_size;
if (!thunks_size) {
g_print ("thunk failed %p->%p, thunk space=%d method %s", code, target, thunks_size, mono_method_full_name (cfg->method, TRUE));
g_assert_not_reached ();
}
g_assert (*(guint32*)thunks == 0);
emit_thunk (thunks, target);
arm_patch (code, thunks);
cfg->arch.thunks += THUNK_SIZE;
cfg->arch.thunks_size -= THUNK_SIZE;
} else {
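/*
* Runtime patching: locate the thunk area reserved after the method's
* code, then either reuse an entry that already points at 'target' or
* claim the first free (zeroed) slot.
*/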
ji = mini_jit_info_table_find (code);
g_assert (ji);
info = mono_jit_info_get_thunk_info (ji);
g_assert (info);
thunks = (guint8*)ji->code_start + info->thunks_offset;
thunks_size = info->thunks_size;
orig_target = mono_arch_get_call_target (code + 4);
mono_mini_arch_lock ();
target_thunk = NULL;
if (orig_target >= thunks && orig_target < thunks + thunks_size) {
/* The call already points to a thunk, because of trampolines etc. */
target_thunk = orig_target;
} else {
for (p = thunks; p < thunks + thunks_size; p += THUNK_SIZE) {
if (((guint32*)p) [0] == 0) {
/* Free entry */
target_thunk = p;
break;
} else if (((guint32*)p) [2] == (guint32)(gsize)target) {
/* Thunk already points to target */
target_thunk = p;
break;
}
}
}
//g_print ("THUNK: %p %p %p\n", code, target, target_thunk);
if (!target_thunk) {
mono_mini_arch_unlock ();
g_print ("thunk failed %p->%p, thunk space=%d method %s", code, target, thunks_size, cfg ? mono_method_full_name (cfg->method, TRUE) : mono_method_full_name (jinfo_get_method (ji), TRUE));
g_assert_not_reached ();
}
emit_thunk (target_thunk, target);
arm_patch (code, target_thunk);
mono_arch_flush_icache (code, 4);
mono_mini_arch_unlock ();
}
}
static void
arm_patch_general (MonoCompile *cfg, guchar *code, const guchar *target)
{
guint32 *code32 = (guint32*)code;
guint32 ins = *code32;
guint32 prim = (ins >> 25) & 7;
guint32 tval = GPOINTER_TO_UINT (target);
//g_print ("patching 0x%08x (0x%08x) to point to 0x%08x\n", code, ins, target);
if (prim == 5) { /* 101b */
/* the diff starts 8 bytes from the branch opcode */
gint diff = target - code - 8;
gint tbits;
gint tmask = 0xffffffff;
if (tval & 1) { /* entering thumb mode */
diff = target - 1 - code - 8;
g_assert (thumb_supported);
tbits = 0xf << 28; /* bl->blx bit pattern */
g_assert ((ins & (1 << 24))); /* it must be a bl, not a b instruction */
/* this low bit of the displacement is moved to bit 24 in the instruction encoding */
if (diff & 2) {
tbits |= 1 << 24;
}
tmask = ~(1 << 24); /* clear the link bit */
/*g_print ("blx to thumb: target: %p, code: %p, diff: %d, mask: %x\n", target, code, diff, tmask);*/
} else {
tbits = 0;
}
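/* Splice the new displacement into the low 24 bits of the branch: the
* field counts words, hence diff >> 2, while the cond/opcode bits
* (0xff000000) are preserved. */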
if (diff >= 0) {
if (diff <= 33554431) {
diff >>= 2;
ins = (ins & 0xff000000) | diff;
ins &= tmask;
*code32 = ins | tbits;
return;
}
} else {
/* diff is between -33554432 and 0 */
if (diff >= -33554432) {
diff >>= 2;
ins = (ins & 0xff000000) | (diff & ~0xff000000);
ins &= tmask;
*code32 = ins | tbits;
return;
}
}
handle_thunk (cfg, code, target);
return;
}
/*
* The alternative call sequence looks like this:
*
* ldr ip, [pc] // loads the address constant
* b 1f // jumps around the constant
* address constant embedded in the code
* 1f:
* mov lr, pc
* mov pc, ip
*
* There are two cases for patching:
* a) at the end of method emission: in this case code points to the start
* of the call sequence
* b) during runtime patching of the call site: in this case code points
* to the mov pc, ip instruction
*
* We have to handle also the thunk jump code sequence:
*
* ldr ip, [pc]
* mov pc, ip
* address constant // execution never reaches here
*/
if ((ins & 0x0ffffff0) == 0x12fff10) {
/* Branch and exchange: the address is constructed in a reg.
* We can patch BX when the code sequence is the following:
* ldr ip, [pc, #0] ; 0x8
* b 0xc
* .word code_ptr
* mov lr, pc
* bx ip
*/
guint32 ccode [4];
guint8 *emit = (guint8*)ccode;
ARM_LDR_IMM (emit, ARMREG_IP, ARMREG_PC, 0);
ARM_B (emit, 0);
ARM_MOV_REG_REG (emit, ARMREG_LR, ARMREG_PC);
ARM_BX (emit, ARMREG_IP);
/*patching from magic trampoline*/
if (ins == ccode [3]) {
g_assert (code32 [-4] == ccode [0]);
g_assert (code32 [-3] == ccode [1]);
g_assert (code32 [-1] == ccode [2]);
code32 [-2] = (guint32)(gsize)target;
return;
}
/*patching from JIT*/
if (ins == ccode [0]) {
g_assert (code32 [1] == ccode [1]);
g_assert (code32 [3] == ccode [2]);
g_assert (code32 [4] == ccode [3]);
code32 [2] = (guint32)(gsize)target;
return;
}
g_assert_not_reached ();
} else if ((ins & 0x0ffffff0) == 0x12fff30) {
/*
* ldr ip, [pc, #0]
* b 0xc
* .word code_ptr
* blx ip
*/
guint32 ccode [4];
guint8 *emit = (guint8*)ccode;
ARM_LDR_IMM (emit, ARMREG_IP, ARMREG_PC, 0);
ARM_B (emit, 0);
ARM_BLX_REG (emit, ARMREG_IP);
g_assert (code32 [-3] == ccode [0]);
g_assert (code32 [-2] == ccode [1]);
g_assert (code32 [0] == ccode [2]);
code32 [-1] = (guint32)(gsize)target;
} else {
guint32 ccode [4];
guint32 *tmp = ccode;
guint8 *emit = (guint8*)tmp;
ARM_LDR_IMM (emit, ARMREG_IP, ARMREG_PC, 0);
ARM_MOV_REG_REG (emit, ARMREG_LR, ARMREG_PC);
ARM_MOV_REG_REG (emit, ARMREG_PC, ARMREG_IP);
ARM_BX (emit, ARMREG_IP);
if (ins == ccode [2]) {
g_assert_not_reached (); // should be -2 ...
code32 [-1] = (guint32)(gsize)target;
return;
}
if (ins == ccode [0]) {
/* handles both thunk jump code and the far call sequence */
code32 [2] = (guint32)(gsize)target;
return;
}
g_assert_not_reached ();
}
// g_print ("patched with 0x%08x\n", ins);
}
void
arm_patch (guchar *code, const guchar *target)
{
arm_patch_general (NULL, code, target);
}
/*
* Return the >= 0 uimm8 value if val can be represented with a byte + rotation
* (with the rotation amount returned in *rot_amount; rot_amount is already
* adjusted to be used with the emit macros).
* Return -1 otherwise.
*/
int
mono_arm_is_rotated_imm8 (guint32 val, gint *rot_amount)
{
guint32 res, i;
for (i = 0; i < 31; i += 2) {
if (i == 0)
res = val;
else
res = (val << (32 - i)) | (val >> i);
if (res & ~0xff)
continue;
*rot_amount = i ? 32 - i : 0;
return res;
}
return -1;
}
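/*
* Worked example (illustrative): 0x3fc00 is 0xff rotated left by 10,
* i.e. 0xff ROR 22, so this returns 0xff with *rot_amount == 22, while
* 0x101 spans 9 bits, has no byte + rotation encoding, and yields -1.
*/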
/*
* Emits at 'code' a sequence of instructions that loads the value 'val'
* into the 'dreg' register, using at most 4 instructions.
*/
guint8*
mono_arm_emit_load_imm (guint8 *code, int dreg, guint32 val)
{
int imm8, rot_amount;
#if 0
ARM_LDR_IMM (code, dreg, ARMREG_PC, 0);
/* skip the constant pool */
ARM_B (code, 0);
*(int*)code = val;
code += 4;
return code;
#endif
if (mini_debug_options.single_imm_size && v7_supported) {
ARM_MOVW_REG_IMM (code, dreg, val & 0xffff);
ARM_MOVT_REG_IMM (code, dreg, (val >> 16) & 0xffff);
return code;
}
if ((imm8 = mono_arm_is_rotated_imm8 (val, &rot_amount)) >= 0) {
ARM_MOV_REG_IMM (code, dreg, imm8, rot_amount);
} else if ((imm8 = mono_arm_is_rotated_imm8 (~val, &rot_amount)) >= 0) {
ARM_MVN_REG_IMM (code, dreg, imm8, rot_amount);
} else {
if (v7_supported) {
ARM_MOVW_REG_IMM (code, dreg, val & 0xffff);
if (val >> 16)
ARM_MOVT_REG_IMM (code, dreg, (val >> 16) & 0xffff);
return code;
}
if (val & 0xFF) {
ARM_MOV_REG_IMM8 (code, dreg, (val & 0xFF));
if (val & 0xFF00) {
ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF00) >> 8, 24);
}
if (val & 0xFF0000) {
ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF0000) >> 16, 16);
}
if (val & 0xFF000000) {
ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF000000) >> 24, 8);
}
} else if (val & 0xFF00) {
ARM_MOV_REG_IMM (code, dreg, (val & 0xFF00) >> 8, 24);
if (val & 0xFF0000) {
ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF0000) >> 16, 16);
}
if (val & 0xFF000000) {
ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF000000) >> 24, 8);
}
} else if (val & 0xFF0000) {
ARM_MOV_REG_IMM (code, dreg, (val & 0xFF0000) >> 16, 16);
if (val & 0xFF000000) {
ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF000000) >> 24, 8);
}
}
//g_assert_not_reached ();
}
return code;
}
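/*
* Illustration (assuming a pre-v7 core, where movw/movt are unavailable):
* 0x12345678 has no rotated-imm8 form, so the fallback path emits
*   mov dreg, #0x78
*   add dreg, dreg, #0x56, ror #24  ; adds 0x5600
*   add dreg, dreg, #0x34, ror #16  ; adds 0x340000
*   add dreg, dreg, #0x12, ror #8   ; adds 0x12000000
* i.e. the "at most 4 instructions" promised above.
*/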
gboolean
mono_arm_thumb_supported (void)
{
return thumb_supported;
}
gboolean
mono_arm_eabi_supported (void)
{
return eabi_supported;
}
int
mono_arm_i8_align (void)
{
return i8_align;
}
#ifndef DISABLE_JIT
static guint8*
emit_move_return_value (MonoCompile *cfg, MonoInst *ins, guint8 *code)
{
CallInfo *cinfo;
MonoCallInst *call;
call = (MonoCallInst*)ins;
cinfo = call->call_info;
switch (cinfo->ret.storage) {
case RegTypeStructByVal:
case RegTypeHFA: {
MonoInst *loc = cfg->arch.vret_addr_loc;
int i;
if (cinfo->ret.storage == RegTypeStructByVal && cinfo->ret.nregs == 1) {
/* The JIT treats this as a normal call */
break;
}
/* Load the destination address */
g_assert (loc && loc->opcode == OP_REGOFFSET);
if (arm_is_imm12 (loc->inst_offset)) {
ARM_LDR_IMM (code, ARMREG_LR, loc->inst_basereg, loc->inst_offset);
} else {
code = mono_arm_emit_load_imm (code, ARMREG_LR, loc->inst_offset);
ARM_LDR_REG_REG (code, ARMREG_LR, loc->inst_basereg, ARMREG_LR);
}
if (cinfo->ret.storage == RegTypeStructByVal) {
int rsize = cinfo->ret.struct_size;
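/* The struct comes back in r0..r(nregs-1); store each register out,
* narrowing the final store when only 1 or 2 bytes of the struct
* remain. */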
for (i = 0; i < cinfo->ret.nregs; ++i) {
g_assert (rsize >= 0);
switch (rsize) {
case 0:
break;
case 1:
ARM_STRB_IMM (code, i, ARMREG_LR, i * 4);
break;
case 2:
ARM_STRH_IMM (code, i, ARMREG_LR, i * 4);
break;
default:
ARM_STR_IMM (code, i, ARMREG_LR, i * 4);
break;
}
rsize -= 4;
}
} else {
for (i = 0; i < cinfo->ret.nregs; ++i) {
if (cinfo->ret.esize == 4)
ARM_FSTS (code, cinfo->ret.reg + i, ARMREG_LR, i * 4);
else
ARM_FSTD (code, cinfo->ret.reg + (i * 2), ARMREG_LR, i * 8);
}
}
return code;
}
default:
break;
}
switch (ins->opcode) {
case OP_FCALL:
case OP_FCALL_REG:
case OP_FCALL_MEMBASE:
if (IS_VFP) {
MonoType *sig_ret = mini_get_underlying_type (((MonoCallInst*)ins)->signature->ret);
if (sig_ret->type == MONO_TYPE_R4) {
if (IS_HARD_FLOAT) {
ARM_CVTS (code, ins->dreg, ARM_VFP_F0);
} else {
ARM_FMSR (code, ins->dreg, ARMREG_R0);
ARM_CVTS (code, ins->dreg, ins->dreg);
}
} else {
if (IS_HARD_FLOAT) {
ARM_CPYD (code, ins->dreg, ARM_VFP_D0);
} else {
ARM_FMDRR (code, ARMREG_R0, ARMREG_R1, ins->dreg);
}
}
}
break;
case OP_RCALL:
case OP_RCALL_REG:
case OP_RCALL_MEMBASE: {
MonoType *sig_ret;
g_assert (IS_VFP);
sig_ret = mini_get_underlying_type (((MonoCallInst*)ins)->signature->ret);
g_assert (sig_ret->type == MONO_TYPE_R4);
if (IS_HARD_FLOAT) {
ARM_CPYS (code, ins->dreg, ARM_VFP_F0);
} else {
ARM_FMSR (code, ins->dreg, ARMREG_R0);
ARM_CPYS (code, ins->dreg, ins->dreg);
}
break;
}
default:
break;
}
return code;
}
void
mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
{
MonoInst *ins;
MonoCallInst *call;
guint8 *code = cfg->native_code + cfg->code_len;
MonoInst *last_ins = NULL;
int max_len, cpos;
int imm8, rot_amount;
/* we don't align basic blocks of loops on arm */
if (cfg->verbose_level > 2)
g_print ("Basic block %d starting at offset 0x%x\n", bb->block_num, bb->native_offset);
cpos = bb->max_offset;
if (mono_break_at_bb_method && mono_method_desc_full_match (mono_break_at_bb_method, cfg->method) && bb->block_num == mono_break_at_bb_bb_num) {
mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_JIT_ICALL_ID,
GUINT_TO_POINTER (MONO_JIT_ICALL_mono_break));
code = emit_call_seq (cfg, code);
}
MONO_BB_FOR_EACH_INS (bb, ins) {
guint offset = code - cfg->native_code;
set_code_cursor (cfg, code);
max_len = ins_get_size (ins->opcode);
code = realloc_code (cfg, max_len);
// if (ins->cil_code)
// g_print ("cil code\n");
mono_debug_record_line_number (cfg, ins, offset);
switch (ins->opcode) {
case OP_MEMORY_BARRIER:
if (v7_supported) {
ARM_DMB (code, ARM_DMB_ISH);
} else if (v6_supported) {
ARM_MOV_REG_IMM8 (code, ARMREG_R0, 0);
ARM_MCR (code, 15, 0, ARMREG_R0, 7, 10, 5);
}
break;
case OP_TLS_GET:
code = emit_tls_get (code, ins->dreg, ins->inst_offset);
break;
case OP_TLS_SET:
code = emit_tls_set (code, ins->sreg1, ins->inst_offset);
break;
case OP_ATOMIC_EXCHANGE_I4:
case OP_ATOMIC_CAS_I4:
case OP_ATOMIC_ADD_I4: {
int tmpreg;
guint8 *buf [16];
g_assert (v7_supported);
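/* These are ldrex/strex retry loops: strex writes 0 to tmpreg on
* success and nonzero if the exclusive reservation was lost, in which
* case we branch back and retry; the dmb barriers make the operation
* fully ordered. */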
/* Free up a reg */
if (ins->sreg1 != ARMREG_IP && ins->sreg2 != ARMREG_IP && ins->sreg3 != ARMREG_IP)
tmpreg = ARMREG_IP;
else if (ins->sreg1 != ARMREG_R0 && ins->sreg2 != ARMREG_R0 && ins->sreg3 != ARMREG_R0)
tmpreg = ARMREG_R0;
else if (ins->sreg1 != ARMREG_R1 && ins->sreg2 != ARMREG_R1 && ins->sreg3 != ARMREG_R1)
tmpreg = ARMREG_R1;
else
tmpreg = ARMREG_R2;
g_assert (cfg->arch.atomic_tmp_offset != -1);
ARM_STR_IMM (code, tmpreg, cfg->frame_reg, cfg->arch.atomic_tmp_offset);
switch (ins->opcode) {
case OP_ATOMIC_EXCHANGE_I4:
buf [0] = code;
ARM_DMB (code, ARM_DMB_ISH);
ARM_LDREX_REG (code, ARMREG_LR, ins->sreg1);
ARM_STREX_REG (code, tmpreg, ins->sreg2, ins->sreg1);
ARM_CMP_REG_IMM (code, tmpreg, 0, 0);
buf [1] = code;
ARM_B_COND (code, ARMCOND_NE, 0);
arm_patch (buf [1], buf [0]);
break;
case OP_ATOMIC_CAS_I4:
ARM_DMB (code, ARM_DMB_ISH);
buf [0] = code;
ARM_LDREX_REG (code, ARMREG_LR, ins->sreg1);
ARM_CMP_REG_REG (code, ARMREG_LR, ins->sreg3);
buf [1] = code;
ARM_B_COND (code, ARMCOND_NE, 0);
ARM_STREX_REG (code, tmpreg, ins->sreg2, ins->sreg1);
ARM_CMP_REG_IMM (code, tmpreg, 0, 0);
buf [2] = code;
ARM_B_COND (code, ARMCOND_NE, 0);
arm_patch (buf [2], buf [0]);
arm_patch (buf [1], code);
break;
case OP_ATOMIC_ADD_I4:
buf [0] = code;
ARM_DMB (code, ARM_DMB_ISH);
ARM_LDREX_REG (code, ARMREG_LR, ins->sreg1);
ARM_ADD_REG_REG (code, ARMREG_LR, ARMREG_LR, ins->sreg2);
ARM_STREX_REG (code, tmpreg, ARMREG_LR, ins->sreg1);
ARM_CMP_REG_IMM (code, tmpreg, 0, 0);
buf [1] = code;
ARM_B_COND (code, ARMCOND_NE, 0);
arm_patch (buf [1], buf [0]);
break;
default:
g_assert_not_reached ();
}
ARM_DMB (code, ARM_DMB_ISH);
if (tmpreg != ins->dreg)
ARM_LDR_IMM (code, tmpreg, cfg->frame_reg, cfg->arch.atomic_tmp_offset);
ARM_MOV_REG_REG (code, ins->dreg, ARMREG_LR);
break;
}
case OP_ATOMIC_LOAD_I1:
case OP_ATOMIC_LOAD_U1:
case OP_ATOMIC_LOAD_I2:
case OP_ATOMIC_LOAD_U2:
case OP_ATOMIC_LOAD_I4:
case OP_ATOMIC_LOAD_U4:
case OP_ATOMIC_LOAD_R4:
case OP_ATOMIC_LOAD_R8: {
if (ins->backend.memory_barrier_kind == MONO_MEMORY_BARRIER_SEQ)
ARM_DMB (code, ARM_DMB_ISH);
code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
switch (ins->opcode) {
case OP_ATOMIC_LOAD_I1:
ARM_LDRSB_REG_REG (code, ins->dreg, ins->inst_basereg, ARMREG_LR);
break;
case OP_ATOMIC_LOAD_U1:
ARM_LDRB_REG_REG (code, ins->dreg, ins->inst_basereg, ARMREG_LR);
break;
case OP_ATOMIC_LOAD_I2:
ARM_LDRSH_REG_REG (code, ins->dreg, ins->inst_basereg, ARMREG_LR);
break;
case OP_ATOMIC_LOAD_U2:
ARM_LDRH_REG_REG (code, ins->dreg, ins->inst_basereg, ARMREG_LR);
break;
case OP_ATOMIC_LOAD_I4:
case OP_ATOMIC_LOAD_U4:
ARM_LDR_REG_REG (code, ins->dreg, ins->inst_basereg, ARMREG_LR);
break;
case OP_ATOMIC_LOAD_R4:
ARM_ADD_REG_REG (code, ARMREG_LR, ins->inst_basereg, ARMREG_LR);
ARM_FLDS (code, ins->dreg, ARMREG_LR, 0);
break;
case OP_ATOMIC_LOAD_R8:
ARM_ADD_REG_REG (code, ARMREG_LR, ins->inst_basereg, ARMREG_LR);
ARM_FLDD (code, ins->dreg, ARMREG_LR, 0);
break;
}
if (ins->backend.memory_barrier_kind != MONO_MEMORY_BARRIER_NONE)
ARM_DMB (code, ARM_DMB_ISH);
break;
}
case OP_ATOMIC_STORE_I1:
case OP_ATOMIC_STORE_U1:
case OP_ATOMIC_STORE_I2:
case OP_ATOMIC_STORE_U2:
case OP_ATOMIC_STORE_I4:
case OP_ATOMIC_STORE_U4:
case OP_ATOMIC_STORE_R4:
case OP_ATOMIC_STORE_R8: {
if (ins->backend.memory_barrier_kind != MONO_MEMORY_BARRIER_NONE)
ARM_DMB (code, ARM_DMB_ISH);
code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
switch (ins->opcode) {
case OP_ATOMIC_STORE_I1:
case OP_ATOMIC_STORE_U1:
ARM_STRB_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ARMREG_LR);
break;
case OP_ATOMIC_STORE_I2:
case OP_ATOMIC_STORE_U2:
ARM_STRH_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ARMREG_LR);
break;
case OP_ATOMIC_STORE_I4:
case OP_ATOMIC_STORE_U4:
ARM_STR_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ARMREG_LR);
break;
case OP_ATOMIC_STORE_R4:
ARM_ADD_REG_REG (code, ARMREG_LR, ins->inst_destbasereg, ARMREG_LR);
ARM_FSTS (code, ins->sreg1, ARMREG_LR, 0);
break;
case OP_ATOMIC_STORE_R8:
ARM_ADD_REG_REG (code, ARMREG_LR, ins->inst_destbasereg, ARMREG_LR);
ARM_FSTD (code, ins->sreg1, ARMREG_LR, 0);
break;
}
if (ins->backend.memory_barrier_kind == MONO_MEMORY_BARRIER_SEQ)
ARM_DMB (code, ARM_DMB_ISH);
break;
}
case OP_BIGMUL:
ARM_SMULL_REG_REG (code, ins->backend.reg3, ins->dreg, ins->sreg1, ins->sreg2);
break;
case OP_BIGMUL_UN:
ARM_UMULL_REG_REG (code, ins->backend.reg3, ins->dreg, ins->sreg1, ins->sreg2);
break;
case OP_STOREI1_MEMBASE_IMM:
code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_imm & 0xFF);
g_assert (arm_is_imm12 (ins->inst_offset));
ARM_STRB_IMM (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
break;
case OP_STOREI2_MEMBASE_IMM:
code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_imm & 0xFFFF);
g_assert (arm_is_imm8 (ins->inst_offset));
ARM_STRH_IMM (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
break;
case OP_STORE_MEMBASE_IMM:
case OP_STOREI4_MEMBASE_IMM:
code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_imm);
g_assert (arm_is_imm12 (ins->inst_offset));
ARM_STR_IMM (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
break;
case OP_STOREI1_MEMBASE_REG:
g_assert (arm_is_imm12 (ins->inst_offset));
ARM_STRB_IMM (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
break;
case OP_STOREI2_MEMBASE_REG:
g_assert (arm_is_imm8 (ins->inst_offset));
ARM_STRH_IMM (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
break;
case OP_STORE_MEMBASE_REG:
case OP_STOREI4_MEMBASE_REG:
/* this case is special, since it happens for spill code after lowering has been called */
if (arm_is_imm12 (ins->inst_offset)) {
ARM_STR_IMM (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
} else {
code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
ARM_STR_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ARMREG_LR);
}
break;
case OP_STOREI1_MEMINDEX:
ARM_STRB_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2);
break;
case OP_STOREI2_MEMINDEX:
ARM_STRH_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2);
break;
case OP_STORE_MEMINDEX:
case OP_STOREI4_MEMINDEX:
ARM_STR_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2);
break;
case OP_LOADU4_MEM:
g_assert_not_reached ();
break;
case OP_LOAD_MEMINDEX:
case OP_LOADI4_MEMINDEX:
case OP_LOADU4_MEMINDEX:
ARM_LDR_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
break;
case OP_LOADI1_MEMINDEX:
ARM_LDRSB_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
break;
case OP_LOADU1_MEMINDEX:
ARM_LDRB_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
break;
case OP_LOADI2_MEMINDEX:
ARM_LDRSH_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
break;
case OP_LOADU2_MEMINDEX:
ARM_LDRH_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
break;
case OP_LOAD_MEMBASE:
case OP_LOADI4_MEMBASE:
case OP_LOADU4_MEMBASE:
/* this case is special, since it happens for spill code after lowering has been called */
if (arm_is_imm12 (ins->inst_offset)) {
ARM_LDR_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
} else {
code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
ARM_LDR_REG_REG (code, ins->dreg, ins->inst_basereg, ARMREG_LR);
}
break;
case OP_LOADI1_MEMBASE:
g_assert (arm_is_imm8 (ins->inst_offset));
ARM_LDRSB_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
break;
case OP_LOADU1_MEMBASE:
g_assert (arm_is_imm12 (ins->inst_offset));
ARM_LDRB_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
break;
case OP_LOADU2_MEMBASE:
g_assert (arm_is_imm8 (ins->inst_offset));
ARM_LDRH_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
break;
case OP_LOADI2_MEMBASE:
g_assert (arm_is_imm8 (ins->inst_offset));
ARM_LDRSH_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
break;
case OP_ICONV_TO_I1:
ARM_SHL_IMM (code, ins->dreg, ins->sreg1, 24);
ARM_SAR_IMM (code, ins->dreg, ins->dreg, 24);
break;
case OP_ICONV_TO_I2:
ARM_SHL_IMM (code, ins->dreg, ins->sreg1, 16);
ARM_SAR_IMM (code, ins->dreg, ins->dreg, 16);
break;
case OP_ICONV_TO_U1:
ARM_AND_REG_IMM8 (code, ins->dreg, ins->sreg1, 0xff);
break;
case OP_ICONV_TO_U2:
ARM_SHL_IMM (code, ins->dreg, ins->sreg1, 16);
ARM_SHR_IMM (code, ins->dreg, ins->dreg, 16);
break;
case OP_COMPARE:
case OP_ICOMPARE:
ARM_CMP_REG_REG (code, ins->sreg1, ins->sreg2);
break;
case OP_COMPARE_IMM:
case OP_ICOMPARE_IMM:
imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
g_assert (imm8 >= 0);
ARM_CMP_REG_IMM (code, ins->sreg1, imm8, rot_amount);
break;
case OP_BREAK:
/*
* gdb does not like encountering the hw breakpoint ins in the debugged code.
* So instead of emitting a trap, we emit a call to a C function and place a
* breakpoint there.
*/
//*(int*)code = 0xef9f0001;
//code += 4;
//ARM_DBRK (code);
mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_JIT_ICALL_ID,
GUINT_TO_POINTER (MONO_JIT_ICALL_mono_break));
code = emit_call_seq (cfg, code);
break;
case OP_RELAXED_NOP:
ARM_NOP (code);
break;
case OP_NOP:
case OP_DUMMY_USE:
case OP_DUMMY_ICONST:
case OP_DUMMY_R8CONST:
case OP_DUMMY_R4CONST:
case OP_NOT_REACHED:
case OP_NOT_NULL:
break;
case OP_IL_SEQ_POINT:
mono_add_seq_point (cfg, bb, ins, code - cfg->native_code);
break;
case OP_SEQ_POINT: {
int i;
MonoInst *info_var = cfg->arch.seq_point_info_var;
MonoInst *ss_trigger_page_var = cfg->arch.ss_trigger_page_var;
MonoInst *ss_method_var = cfg->arch.seq_point_ss_method_var;
MonoInst *bp_method_var = cfg->arch.seq_point_bp_method_var;
MonoInst *var;
int dreg = ARMREG_LR;
#if 0
if (cfg->soft_breakpoints) {
g_assert (!cfg->compile_aot);
}
#endif
/*
* For AOT, we use one got slot per method, which will point to a
* SeqPointInfo structure, containing all the information required
* by the code below.
*/
if (cfg->compile_aot) {
g_assert (info_var);
g_assert (info_var->opcode == OP_REGOFFSET);
}
if (!cfg->soft_breakpoints && !cfg->compile_aot) {
/*
* Read from the single stepping trigger page. This will cause a
* SIGSEGV when single stepping is enabled.
* We do this _before_ the breakpoint, so single stepping after
* a breakpoint is hit will step to the next IL offset.
*/
g_assert (((guint64)(gsize)ss_trigger_page >> 32) == 0);
}
/* Single step check */
if (ins->flags & MONO_INST_SINGLE_STEP_LOC) {
if (cfg->soft_breakpoints) {
/* Load the address of the sequence point method variable. */
var = ss_method_var;
g_assert (var);
g_assert (var->opcode == OP_REGOFFSET);
code = emit_ldr_imm (code, dreg, var->inst_basereg, var->inst_offset);
/* Read the value and check whether it is non-zero. */
ARM_LDR_IMM (code, dreg, dreg, 0);
ARM_CMP_REG_IMM (code, dreg, 0, 0);
/* Call it conditionally. */
ARM_BLX_REG_COND (code, ARMCOND_NE, dreg);
} else {
if (cfg->compile_aot) {
/* Load the trigger page addr from the variable initialized in the prolog */
var = ss_trigger_page_var;
g_assert (var);
g_assert (var->opcode == OP_REGOFFSET);
code = emit_ldr_imm (code, dreg, var->inst_basereg, var->inst_offset);
} else {
ARM_LDR_IMM (code, dreg, ARMREG_PC, 0);
ARM_B (code, 0);
*(int*)code = (int)(gsize)ss_trigger_page;
code += 4;
}
ARM_LDR_IMM (code, dreg, dreg, 0);
}
}
mono_add_seq_point (cfg, bb, ins, code - cfg->native_code);
/* Breakpoint check */
if (cfg->compile_aot) {
const guint32 offset = code - cfg->native_code;
guint32 val;
var = info_var;
code = emit_ldr_imm (code, dreg, var->inst_basereg, var->inst_offset);
/* Add the offset */
val = ((offset / 4) * sizeof (target_mgreg_t)) + MONO_STRUCT_OFFSET (SeqPointInfo, bp_addrs);
/* Load the info->bp_addrs [offset], which is either 0 or the address of a trigger page */
if (arm_is_imm12 ((int)val)) {
ARM_LDR_IMM (code, dreg, dreg, val);
} else {
ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF), 0);
if (val & 0xFF00)
ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF00) >> 8, 24);
if (val & 0xFF0000)
ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF0000) >> 16, 16);
g_assert (!(val & 0xFF000000));
ARM_LDR_IMM (code, dreg, dreg, 0);
}
/* What is faster, a branch or a load? */
ARM_CMP_REG_IMM (code, dreg, 0, 0);
/* The breakpoint instruction */
if (cfg->soft_breakpoints)
ARM_BLX_REG_COND (code, ARMCOND_NE, dreg);
else
ARM_LDR_IMM_COND (code, dreg, dreg, 0, ARMCOND_NE);
} else if (cfg->soft_breakpoints) {
/* Load the address of the breakpoint method into ip. */
var = bp_method_var;
g_assert (var);
g_assert (var->opcode == OP_REGOFFSET);
g_assert (arm_is_imm12 (var->inst_offset));
ARM_LDR_IMM (code, dreg, var->inst_basereg, var->inst_offset);
/*
* A placeholder for a possible breakpoint inserted by
* mono_arch_set_breakpoint ().
*/
ARM_NOP (code);
} else {
/*
* A placeholder for a possible breakpoint inserted by
* mono_arch_set_breakpoint ().
*/
for (i = 0; i < 4; ++i)
ARM_NOP (code);
}
/*
* Add an additional nop so skipping the bp doesn't cause the ip to point
* to another IL offset.
*/
ARM_NOP (code);
break;
}
case OP_ADDCC:
case OP_IADDCC:
ARM_ADDS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
break;
case OP_IADD:
ARM_ADD_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
break;
case OP_ADC:
case OP_IADC:
ARM_ADCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
break;
case OP_ADDCC_IMM:
imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
g_assert (imm8 >= 0);
ARM_ADDS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
break;
case OP_ADD_IMM:
case OP_IADD_IMM:
imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
g_assert (imm8 >= 0);
ARM_ADD_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
break;
case OP_ADC_IMM:
case OP_IADC_IMM:
imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
g_assert (imm8 >= 0);
ARM_ADCS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
break;
case OP_IADD_OVF:
ARM_ADD_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
//EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
break;
case OP_IADD_OVF_UN:
ARM_ADD_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
//EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
break;
case OP_ISUB_OVF:
ARM_SUB_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
//EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
break;
case OP_ISUB_OVF_UN:
ARM_SUB_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
//EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_TRUE, PPC_BR_EQ, "OverflowException");
break;
case OP_ADD_OVF_CARRY:
ARM_ADCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
//EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
break;
case OP_ADD_OVF_UN_CARRY:
ARM_ADCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
//EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
break;
case OP_SUB_OVF_CARRY:
ARM_SBCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
//EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
break;
case OP_SUB_OVF_UN_CARRY:
ARM_SBCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
//EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_TRUE, PPC_BR_EQ, "OverflowException");
break;
case OP_SUBCC:
case OP_ISUBCC:
ARM_SUBS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
break;
case OP_SUBCC_IMM:
imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
g_assert (imm8 >= 0);
ARM_SUBS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
break;
case OP_ISUB:
ARM_SUB_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
break;
case OP_SBB:
case OP_ISBB:
ARM_SBCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
break;
case OP_SUB_IMM:
case OP_ISUB_IMM:
imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
g_assert (imm8 >= 0);
ARM_SUB_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
break;
case OP_SBB_IMM:
case OP_ISBB_IMM:
imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
g_assert (imm8 >= 0);
ARM_SBCS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
break;
case OP_ARM_RSBS_IMM:
imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
g_assert (imm8 >= 0);
ARM_RSBS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
break;
case OP_ARM_RSC_IMM:
imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
g_assert (imm8 >= 0);
ARM_RSC_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
break;
case OP_IAND:
ARM_AND_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
break;
case OP_AND_IMM:
case OP_IAND_IMM:
imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
g_assert (imm8 >= 0);
ARM_AND_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
break;
case OP_IDIV:
g_assert (v7s_supported || v7k_supported);
ARM_SDIV (code, ins->dreg, ins->sreg1, ins->sreg2);
break;
case OP_IDIV_UN:
g_assert (v7s_supported || v7k_supported);
ARM_UDIV (code, ins->dreg, ins->sreg1, ins->sreg2);
break;
case OP_IREM:
g_assert (v7s_supported || v7k_supported);
ARM_SDIV (code, ARMREG_LR, ins->sreg1, ins->sreg2);
ARM_MLS (code, ins->dreg, ARMREG_LR, ins->sreg2, ins->sreg1);
break;
case OP_IREM_UN:
g_assert (v7s_supported || v7k_supported);
ARM_UDIV (code, ARMREG_LR, ins->sreg1, ins->sreg2);
ARM_MLS (code, ins->dreg, ARMREG_LR, ins->sreg2, ins->sreg1);
break;
case OP_DIV_IMM:
case OP_REM_IMM:
g_assert_not_reached ();
case OP_IOR:
ARM_ORR_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
break;
case OP_OR_IMM:
case OP_IOR_IMM:
imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
g_assert (imm8 >= 0);
ARM_ORR_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
break;
case OP_IXOR:
ARM_EOR_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
break;
case OP_XOR_IMM:
case OP_IXOR_IMM:
imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
g_assert (imm8 >= 0);
ARM_EOR_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
break;
case OP_ISHL:
ARM_SHL_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
break;
case OP_SHL_IMM:
case OP_ISHL_IMM:
if (ins->inst_imm)
ARM_SHL_IMM (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f));
else if (ins->dreg != ins->sreg1)
ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
break;
case OP_ISHR:
ARM_SAR_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
break;
case OP_SHR_IMM:
case OP_ISHR_IMM:
if (ins->inst_imm)
ARM_SAR_IMM (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f));
else if (ins->dreg != ins->sreg1)
ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
break;
case OP_SHR_UN_IMM:
case OP_ISHR_UN_IMM:
if (ins->inst_imm)
ARM_SHR_IMM (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f));
else if (ins->dreg != ins->sreg1)
ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
break;
case OP_ISHR_UN:
ARM_SHR_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
break;
case OP_INOT:
ARM_MVN_REG_REG (code, ins->dreg, ins->sreg1);
break;
case OP_INEG:
ARM_RSB_REG_IMM8 (code, ins->dreg, ins->sreg1, 0);
break;
case OP_IMUL:
if (ins->dreg == ins->sreg2)
ARM_MUL_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
else
ARM_MUL_REG_REG (code, ins->dreg, ins->sreg2, ins->sreg1);
break;
case OP_MUL_IMM:
g_assert_not_reached ();
break;
case OP_IMUL_OVF:
/* FIXME: handle ovf/ sreg2 != dreg */
ARM_MUL_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
/* FIXME: MUL doesn't set the C/O flags on ARM */
break;
case OP_IMUL_OVF_UN:
/* FIXME: handle ovf/ sreg2 != dreg */
ARM_MUL_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
/* FIXME: MUL doesn't set the C/O flags on ARM */
break;
case OP_ICONST:
code = mono_arm_emit_load_imm (code, ins->dreg, ins->inst_c0);
break;
case OP_AOTCONST:
/* Load the GOT offset */
mono_add_patch_info (cfg, offset, (MonoJumpInfoType)(gsize)ins->inst_i1, ins->inst_p0);
ARM_LDR_IMM (code, ins->dreg, ARMREG_PC, 0);
ARM_B (code, 0);
*(gpointer*)code = NULL;
code += 4;
/* Load the value from the GOT */
ARM_LDR_REG_REG (code, ins->dreg, ARMREG_PC, ins->dreg);
break;
case OP_OBJC_GET_SELECTOR:
mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_OBJC_SELECTOR_REF, ins->inst_p0);
ARM_LDR_IMM (code, ins->dreg, ARMREG_PC, 0);
ARM_B (code, 0);
*(gpointer*)code = NULL;
code += 4;
ARM_LDR_REG_REG (code, ins->dreg, ARMREG_PC, ins->dreg);
break;
case OP_ICONV_TO_I4:
case OP_ICONV_TO_U4:
case OP_MOVE:
if (ins->dreg != ins->sreg1)
ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
break;
case OP_SETLRET: {
int saved = ins->sreg2;
if (ins->sreg2 == ARM_LSW_REG) {
ARM_MOV_REG_REG (code, ARMREG_LR, ins->sreg2);
saved = ARMREG_LR;
}
if (ins->sreg1 != ARM_LSW_REG)
ARM_MOV_REG_REG (code, ARM_LSW_REG, ins->sreg1);
if (saved != ARM_MSW_REG)
ARM_MOV_REG_REG (code, ARM_MSW_REG, saved);
break;
}
case OP_FMOVE:
if (IS_VFP && ins->dreg != ins->sreg1)
ARM_CPYD (code, ins->dreg, ins->sreg1);
break;
case OP_RMOVE:
if (IS_VFP && ins->dreg != ins->sreg1)
ARM_CPYS (code, ins->dreg, ins->sreg1);
break;
case OP_MOVE_F_TO_I4:
ARM_FMRS (code, ins->dreg, ins->sreg1);
break;
case OP_MOVE_I4_TO_F:
ARM_FMSR (code, ins->dreg, ins->sreg1);
break;
case OP_FCONV_TO_R4:
if (IS_VFP)
ARM_CVTD (code, ins->dreg, ins->sreg1);
break;
case OP_TAILCALL_PARAMETER:
// This opcode helps compute sizes, i.e.
// of the subsequent OP_TAILCALL, but contributes no code.
g_assert (ins->next);
break;
case OP_TAILCALL:
case OP_TAILCALL_MEMBASE:
case OP_TAILCALL_REG: {
gboolean const tailcall_membase = ins->opcode == OP_TAILCALL_MEMBASE;
gboolean const tailcall_reg = ins->opcode == OP_TAILCALL_REG;
MonoCallInst *call = (MonoCallInst*)ins;
max_len += call->stack_usage / sizeof (target_mgreg_t) * ins_get_size (OP_TAILCALL_PARAMETER);
if (IS_HARD_FLOAT)
code = emit_float_args (cfg, call, code, &max_len, &offset);
code = realloc_code (cfg, max_len);
// For reg and membase, get destination in IP.
if (tailcall_reg) {
g_assert (ins->sreg1 > -1);
if (ins->sreg1 != ARMREG_IP)
ARM_MOV_REG_REG (code, ARMREG_IP, ins->sreg1);
} else if (tailcall_membase) {
g_assert (ins->sreg1 > -1);
if (!arm_is_imm12 (ins->inst_offset)) {
g_assert (ins->sreg1 != ARMREG_IP); // temp in emit_big_add
code = emit_big_add (code, ARMREG_IP, ins->sreg1, ins->inst_offset);
ARM_LDR_IMM (code, ARMREG_IP, ARMREG_IP, 0);
} else {
ARM_LDR_IMM (code, ARMREG_IP, ins->sreg1, ins->inst_offset);
}
}
/*
* The stack looks like the following:
* <caller argument area>
* <saved regs etc>
* <rest of frame>
* <callee argument area>
* <optionally saved IP> (about to be)
* Need to copy the arguments from the callee argument area to
* the caller argument area, and pop the frame.
*/
if (call->stack_usage) {
int i, prev_sp_offset = 0;
// When we get here, the parameters to the tailcall are already formed,
// in registers and at the bottom of the grow-down stack.
//
// Our goal is generally to preserve parameters and trim the stack;
// before trimming the stack, move parameters from the bottom of the
// frame to the bottom of the trimmed frame.
// For the case of large frames, and presently therefore always,
// IP is used as an adjusted frame_reg.
// Be conservative and save IP around the movement
// of parameters from the bottom of frame to top of the frame.
const gboolean save_ip = tailcall_membase || tailcall_reg;
if (save_ip)
ARM_PUSH (code, 1 << ARMREG_IP);
// When moving stacked parameters from the bottom
// of the frame (sp) to the top of the frame (ip),
// account (by 0 or 4 bytes) for the conditional save of IP.
const int offset_sp = save_ip ? 4 : 0;
const int offset_ip = (save_ip && (cfg->frame_reg == ARMREG_SP)) ? 4 : 0;
/* Compute size of saved registers restored below */
if (iphone_abi)
prev_sp_offset = 2 * 4;
else
prev_sp_offset = 1 * 4;
for (i = 0; i < 16; ++i) {
if (cfg->used_int_regs & (1 << i))
prev_sp_offset += 4;
}
// Point IP at the start of where the parameters will go after trimming stack.
// After locals and saved registers.
code = emit_big_add (code, ARMREG_IP, cfg->frame_reg, cfg->stack_usage + prev_sp_offset);
/* Copy arguments on the stack to our argument area */
// FIXME a fixed size memcpy is desirable here,
// at least for larger values of stack_usage.
//
// FIXME For most functions, with frames < 4K, we can use frame_reg directly here instead of IP.
// See https://github.com/mono/mono/pull/12079
// See https://github.com/mono/mono/pull/12079/commits/93e7007a9567b78fa8152ce404b372b26e735516
for (i = 0; i < call->stack_usage; i += sizeof (target_mgreg_t)) {
ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, i + offset_sp);
ARM_STR_IMM (code, ARMREG_LR, ARMREG_IP, i + offset_ip);
}
if (save_ip)
ARM_POP (code, 1 << ARMREG_IP);
}
/*
* Keep in sync with mono_arch_emit_epilog
*/
g_assert (!cfg->method->save_lmf);
code = emit_big_add_temp (code, ARMREG_SP, cfg->frame_reg, cfg->stack_usage, ARMREG_LR);
if (iphone_abi) {
if (cfg->used_int_regs)
ARM_POP (code, cfg->used_int_regs);
ARM_POP (code, (1 << ARMREG_R7) | (1 << ARMREG_LR));
} else {
ARM_POP (code, cfg->used_int_regs | (1 << ARMREG_LR));
}
if (tailcall_reg || tailcall_membase) {
code = emit_jmp_reg (code, ARMREG_IP);
} else {
mono_add_patch_info (cfg, (guint8*) code - cfg->native_code, MONO_PATCH_INFO_METHOD_JUMP, call->method);
if (cfg->compile_aot) {
ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
ARM_B (code, 0);
*(gpointer*)code = NULL;
code += 4;
ARM_LDR_REG_REG (code, ARMREG_PC, ARMREG_PC, ARMREG_IP);
} else {
code = mono_arm_patchable_b (code, ARMCOND_AL);
cfg->thunk_area += THUNK_SIZE;
}
}
break;
}
case OP_CHECK_THIS:
/* ensure ins->sreg1 is not NULL */
ARM_LDRB_IMM (code, ARMREG_LR, ins->sreg1, 0);
break;
case OP_ARGLIST: {
g_assert (cfg->sig_cookie < 128);
ARM_LDR_IMM (code, ARMREG_IP, cfg->frame_reg, cfg->sig_cookie);
ARM_STR_IMM (code, ARMREG_IP, ins->sreg1, 0);
break;
}
case OP_FCALL:
case OP_RCALL:
case OP_LCALL:
case OP_VCALL:
case OP_VCALL2:
case OP_VOIDCALL:
case OP_CALL:
call = (MonoCallInst*)ins;
if (IS_HARD_FLOAT)
code = emit_float_args (cfg, call, code, &max_len, &offset);
mono_call_add_patch_info (cfg, call, code - cfg->native_code);
code = emit_call_seq (cfg, code);
ins->flags |= MONO_INST_GC_CALLSITE;
ins->backend.pc_offset = code - cfg->native_code;
code = emit_move_return_value (cfg, ins, code);
break;
case OP_FCALL_REG:
case OP_RCALL_REG:
case OP_LCALL_REG:
case OP_VCALL_REG:
case OP_VCALL2_REG:
case OP_VOIDCALL_REG:
case OP_CALL_REG:
if (IS_HARD_FLOAT)
code = emit_float_args (cfg, (MonoCallInst *)ins, code, &max_len, &offset);
code = emit_call_reg (code, ins->sreg1);
ins->flags |= MONO_INST_GC_CALLSITE;
ins->backend.pc_offset = code - cfg->native_code;
code = emit_move_return_value (cfg, ins, code);
break;
case OP_FCALL_MEMBASE:
case OP_RCALL_MEMBASE:
case OP_LCALL_MEMBASE:
case OP_VCALL_MEMBASE:
case OP_VCALL2_MEMBASE:
case OP_VOIDCALL_MEMBASE:
case OP_CALL_MEMBASE: {
g_assert (ins->sreg1 != ARMREG_LR);
call = (MonoCallInst*)ins;
if (IS_HARD_FLOAT)
code = emit_float_args (cfg, call, code, &max_len, &offset);
if (!arm_is_imm12 (ins->inst_offset)) {
/* sreg1 might be IP */
ARM_MOV_REG_REG (code, ARMREG_LR, ins->sreg1);
code = mono_arm_emit_load_imm (code, ARMREG_IP, ins->inst_offset);
ARM_ADD_REG_REG (code, ARMREG_IP, ARMREG_IP, ARMREG_LR);
ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
ARM_LDR_IMM (code, ARMREG_PC, ARMREG_IP, 0);
} else {
ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
ARM_LDR_IMM (code, ARMREG_PC, ins->sreg1, ins->inst_offset);
}
ins->flags |= MONO_INST_GC_CALLSITE;
ins->backend.pc_offset = code - cfg->native_code;
code = emit_move_return_value (cfg, ins, code);
break;
}
case OP_GENERIC_CLASS_INIT: {
int byte_offset;
guint8 *jump;
byte_offset = MONO_STRUCT_OFFSET (MonoVTable, initialized);
g_assert (arm_is_imm8 (byte_offset));
ARM_LDRSB_IMM (code, ARMREG_IP, ins->sreg1, byte_offset);
ARM_CMP_REG_IMM (code, ARMREG_IP, 0, 0);
jump = code;
ARM_B_COND (code, ARMCOND_NE, 0);
/* Uninitialized case */
g_assert (ins->sreg1 == ARMREG_R0);
mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_JIT_ICALL_ID,
GUINT_TO_POINTER (MONO_JIT_ICALL_mono_generic_class_init));
code = emit_call_seq (cfg, code);
/* Initialized case */
arm_patch (jump, code);
break;
}
case OP_LOCALLOC: {
/* round the size up to MONO_ARCH_FRAME_ALIGNMENT bytes */
ARM_ADD_REG_IMM8 (code, ins->dreg, ins->sreg1, (MONO_ARCH_FRAME_ALIGNMENT - 1));
ARM_BIC_REG_IMM8 (code, ins->dreg, ins->dreg, (MONO_ARCH_FRAME_ALIGNMENT - 1));
ARM_SUB_REG_REG (code, ARMREG_SP, ARMREG_SP, ins->dreg);
/* memzero the area: dreg holds the size, sp is the pointer */
if (ins->flags & MONO_INST_INIT) {
guint8 *start_loop, *branch_to_cond;
ARM_MOV_REG_IMM8 (code, ARMREG_LR, 0);
branch_to_cond = code;
ARM_B (code, 0);
start_loop = code;
ARM_STR_REG_REG (code, ARMREG_LR, ARMREG_SP, ins->dreg);
arm_patch (branch_to_cond, code);
/* decrement by 4 and set flags */
ARM_SUBS_REG_IMM8 (code, ins->dreg, ins->dreg, sizeof (target_mgreg_t));
ARM_B_COND (code, ARMCOND_GE, 0);
arm_patch (code - 4, start_loop);
}
ARM_MOV_REG_REG (code, ins->dreg, ARMREG_SP);
if (cfg->param_area)
code = emit_sub_imm (code, ARMREG_SP, ARMREG_SP, ALIGN_TO (cfg->param_area, MONO_ARCH_FRAME_ALIGNMENT));
break;
}
case OP_DYN_CALL: {
int i;
MonoInst *var = cfg->dyn_call_var;
guint8 *labels [16];
g_assert (var->opcode == OP_REGOFFSET);
g_assert (arm_is_imm12 (var->inst_offset));
/* lr = args buffer filled by mono_arch_get_dyn_call_args () */
ARM_MOV_REG_REG (code, ARMREG_LR, ins->sreg1);
/* ip = ftn */
ARM_MOV_REG_REG (code, ARMREG_IP, ins->sreg2);
/* Save args buffer */
ARM_STR_IMM (code, ARMREG_LR, var->inst_basereg, var->inst_offset);
/* Set fp argument registers */
if (IS_HARD_FLOAT) {
ARM_LDR_IMM (code, ARMREG_R0, ARMREG_LR, MONO_STRUCT_OFFSET (DynCallArgs, has_fpregs));
ARM_CMP_REG_IMM (code, ARMREG_R0, 0, 0);
labels [0] = code;
ARM_B_COND (code, ARMCOND_EQ, 0);
for (i = 0; i < FP_PARAM_REGS; ++i) {
const int offset = MONO_STRUCT_OFFSET (DynCallArgs, fpregs) + (i * sizeof (double));
g_assert (arm_is_fpimm8 (offset));
ARM_FLDD (code, i * 2, ARMREG_LR, offset);
}
arm_patch (labels [0], code);
}
/* Allocate callee area */
ARM_LDR_IMM (code, ARMREG_R1, ARMREG_LR, MONO_STRUCT_OFFSET (DynCallArgs, n_stackargs));
ARM_SHL_IMM (code, ARMREG_R1, ARMREG_R1, 2);
ARM_SUB_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_R1);
/* Set stack args */
/* R1 = limit */
ARM_LDR_IMM (code, ARMREG_R1, ARMREG_LR, MONO_STRUCT_OFFSET (DynCallArgs, n_stackargs));
/* R2 = pointer into regs */
code = emit_big_add (code, ARMREG_R2, ARMREG_LR, MONO_STRUCT_OFFSET (DynCallArgs, regs) + (PARAM_REGS * sizeof (target_mgreg_t)));
/* R3 = pointer to stack */
ARM_MOV_REG_REG (code, ARMREG_R3, ARMREG_SP);
/* Loop */
labels [0] = code;
ARM_B_COND (code, ARMCOND_AL, 0);
labels [1] = code;
ARM_LDR_IMM (code, ARMREG_R0, ARMREG_R2, 0);
ARM_STR_IMM (code, ARMREG_R0, ARMREG_R3, 0);
ARM_ADD_REG_IMM (code, ARMREG_R2, ARMREG_R2, sizeof (target_mgreg_t), 0);
ARM_ADD_REG_IMM (code, ARMREG_R3, ARMREG_R3, sizeof (target_mgreg_t), 0);
ARM_SUB_REG_IMM (code, ARMREG_R1, ARMREG_R1, 1, 0);
arm_patch (labels [0], code);
ARM_CMP_REG_IMM (code, ARMREG_R1, 0, 0);
labels [2] = code;
ARM_B_COND (code, ARMCOND_GT, 0);
arm_patch (labels [2], labels [1]);
/* Set argument registers */
for (i = 0; i < PARAM_REGS; ++i)
ARM_LDR_IMM (code, i, ARMREG_LR, MONO_STRUCT_OFFSET (DynCallArgs, regs) + (i * sizeof (target_mgreg_t)));
/* Make the call */
ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
/* Save result */
ARM_LDR_IMM (code, ARMREG_IP, var->inst_basereg, var->inst_offset);
ARM_STR_IMM (code, ARMREG_R0, ARMREG_IP, MONO_STRUCT_OFFSET (DynCallArgs, res));
ARM_STR_IMM (code, ARMREG_R1, ARMREG_IP, MONO_STRUCT_OFFSET (DynCallArgs, res2));
if (IS_HARD_FLOAT)
ARM_FSTD (code, ARM_VFP_D0, ARMREG_IP, MONO_STRUCT_OFFSET (DynCallArgs, fpregs));
break;
}
case OP_THROW: {
if (ins->sreg1 != ARMREG_R0)
ARM_MOV_REG_REG (code, ARMREG_R0, ins->sreg1);
mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_JIT_ICALL_ID,
GUINT_TO_POINTER (MONO_JIT_ICALL_mono_arch_throw_exception));
code = emit_call_seq (cfg, code);
break;
}
case OP_RETHROW: {
if (ins->sreg1 != ARMREG_R0)
ARM_MOV_REG_REG (code, ARMREG_R0, ins->sreg1);
mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_JIT_ICALL_ID,
GUINT_TO_POINTER (MONO_JIT_ICALL_mono_arch_rethrow_exception));
code = emit_call_seq (cfg, code);
break;
}
case OP_START_HANDLER: {
MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
int param_area = ALIGN_TO (cfg->param_area, MONO_ARCH_FRAME_ALIGNMENT);
int i, rot_amount;
/* Reserve a param area, see filter-stack.exe */
if (param_area) {
if ((i = mono_arm_is_rotated_imm8 (param_area, &rot_amount)) >= 0) {
ARM_SUB_REG_IMM (code, ARMREG_SP, ARMREG_SP, i, rot_amount);
} else {
code = mono_arm_emit_load_imm (code, ARMREG_IP, param_area);
ARM_SUB_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
}
}
if (arm_is_imm12 (spvar->inst_offset)) {
ARM_STR_IMM (code, ARMREG_LR, spvar->inst_basereg, spvar->inst_offset);
} else {
code = mono_arm_emit_load_imm (code, ARMREG_IP, spvar->inst_offset);
ARM_STR_REG_REG (code, ARMREG_LR, spvar->inst_basereg, ARMREG_IP);
}
break;
}
case OP_ENDFILTER: {
MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
int param_area = ALIGN_TO (cfg->param_area, MONO_ARCH_FRAME_ALIGNMENT);
int i, rot_amount;
/* Free the param area */
if (param_area) {
if ((i = mono_arm_is_rotated_imm8 (param_area, &rot_amount)) >= 0) {
ARM_ADD_REG_IMM (code, ARMREG_SP, ARMREG_SP, i, rot_amount);
} else {
code = mono_arm_emit_load_imm (code, ARMREG_IP, param_area);
ARM_ADD_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
}
}
if (ins->sreg1 != ARMREG_R0)
ARM_MOV_REG_REG (code, ARMREG_R0, ins->sreg1);
if (arm_is_imm12 (spvar->inst_offset)) {
ARM_LDR_IMM (code, ARMREG_IP, spvar->inst_basereg, spvar->inst_offset);
} else {
g_assert (ARMREG_IP != spvar->inst_basereg);
code = mono_arm_emit_load_imm (code, ARMREG_IP, spvar->inst_offset);
ARM_LDR_REG_REG (code, ARMREG_IP, spvar->inst_basereg, ARMREG_IP);
}
ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
break;
}
case OP_ENDFINALLY: {
MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
int param_area = ALIGN_TO (cfg->param_area, MONO_ARCH_FRAME_ALIGNMENT);
int i, rot_amount;
/* Free the param area */
if (param_area) {
if ((i = mono_arm_is_rotated_imm8 (param_area, &rot_amount)) >= 0) {
ARM_ADD_REG_IMM (code, ARMREG_SP, ARMREG_SP, i, rot_amount);
} else {
code = mono_arm_emit_load_imm (code, ARMREG_IP, param_area);
ARM_ADD_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
}
}
if (arm_is_imm12 (spvar->inst_offset)) {
ARM_LDR_IMM (code, ARMREG_IP, spvar->inst_basereg, spvar->inst_offset);
} else {
g_assert (ARMREG_IP != spvar->inst_basereg);
code = mono_arm_emit_load_imm (code, ARMREG_IP, spvar->inst_offset);
ARM_LDR_REG_REG (code, ARMREG_IP, spvar->inst_basereg, ARMREG_IP);
}
ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
break;
}
case OP_CALL_HANDLER:
mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_target_bb);
code = mono_arm_patchable_bl (code, ARMCOND_AL);
cfg->thunk_area += THUNK_SIZE;
for (GList *tmp = ins->inst_eh_blocks; tmp != bb->clause_holes; tmp = tmp->prev)
mono_cfg_add_try_hole (cfg, ((MonoLeaveClause *) tmp->data)->clause, code, bb);
break;
case OP_GET_EX_OBJ:
if (ins->dreg != ARMREG_R0)
ARM_MOV_REG_REG (code, ins->dreg, ARMREG_R0);
break;
case OP_LABEL:
ins->inst_c0 = code - cfg->native_code;
break;
case OP_BR:
/*if (ins->inst_target_bb->native_offset) {
ARM_B (code, 0);
//x86_jump_code (code, cfg->native_code + ins->inst_target_bb->native_offset);
} else*/ {
mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_target_bb);
code = mono_arm_patchable_b (code, ARMCOND_AL);
}
break;
case OP_BR_REG:
ARM_MOV_REG_REG (code, ARMREG_PC, ins->sreg1);
break;
case OP_SWITCH:
/*
* In the normal case we have:
* ldr pc, [pc, ins->sreg1 << 2]
* nop
* If aot, we have:
* ldr lr, [pc, ins->sreg1 << 2]
* add pc, pc, lr
* After follows the data.
* FIXME: add aot support.
*/
mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_SWITCH, ins->inst_p0);
max_len += 4 * GPOINTER_TO_INT (ins->klass);
code = realloc_code (cfg, max_len);
ARM_LDR_REG_REG_SHIFT (code, ARMREG_PC, ARMREG_PC, ins->sreg1, ARMSHIFT_LSL, 2);
ARM_NOP (code);
code += 4 * GPOINTER_TO_INT (ins->klass);
break;
case OP_CEQ:
case OP_ICEQ:
ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_NE);
ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_EQ);
break;
case OP_CLT:
case OP_ICLT:
ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_LT);
break;
case OP_CLT_UN:
case OP_ICLT_UN:
ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_LO);
break;
case OP_CGT:
case OP_ICGT:
ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_GT);
break;
case OP_CGT_UN:
case OP_ICGT_UN:
ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_HI);
break;
case OP_ICNEQ:
ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_NE);
ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_EQ);
break;
case OP_ICGE:
ARM_MOV_REG_IMM8 (code, ins->dreg, 1);
ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_LT);
break;
case OP_ICLE:
ARM_MOV_REG_IMM8 (code, ins->dreg, 1);
ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_GT);
break;
case OP_ICGE_UN:
ARM_MOV_REG_IMM8 (code, ins->dreg, 1);
ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_LO);
break;
case OP_ICLE_UN:
ARM_MOV_REG_IMM8 (code, ins->dreg, 1);
ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_HI);
break;
case OP_COND_EXC_EQ:
case OP_COND_EXC_NE_UN:
case OP_COND_EXC_LT:
case OP_COND_EXC_LT_UN:
case OP_COND_EXC_GT:
case OP_COND_EXC_GT_UN:
case OP_COND_EXC_GE:
case OP_COND_EXC_GE_UN:
case OP_COND_EXC_LE:
case OP_COND_EXC_LE_UN:
EMIT_COND_SYSTEM_EXCEPTION (ins->opcode - OP_COND_EXC_EQ, ins->inst_p1);
break;
case OP_COND_EXC_IEQ:
case OP_COND_EXC_INE_UN:
case OP_COND_EXC_ILT:
case OP_COND_EXC_ILT_UN:
case OP_COND_EXC_IGT:
case OP_COND_EXC_IGT_UN:
case OP_COND_EXC_IGE:
case OP_COND_EXC_IGE_UN:
case OP_COND_EXC_ILE:
case OP_COND_EXC_ILE_UN:
EMIT_COND_SYSTEM_EXCEPTION (ins->opcode - OP_COND_EXC_IEQ, ins->inst_p1);
break;
case OP_COND_EXC_C:
case OP_COND_EXC_IC:
EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_CS, ins->inst_p1);
break;
case OP_COND_EXC_OV:
case OP_COND_EXC_IOV:
EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_VS, ins->inst_p1);
break;
case OP_COND_EXC_NC:
case OP_COND_EXC_INC:
EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_CC, ins->inst_p1);
break;
case OP_COND_EXC_NO:
case OP_COND_EXC_INO:
EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_VC, ins->inst_p1);
break;
case OP_IBEQ:
case OP_IBNE_UN:
case OP_IBLT:
case OP_IBLT_UN:
case OP_IBGT:
case OP_IBGT_UN:
case OP_IBGE:
case OP_IBGE_UN:
case OP_IBLE:
case OP_IBLE_UN:
EMIT_COND_BRANCH (ins, ins->opcode - OP_IBEQ);
break;
/* floating point opcodes */
case OP_R8CONST:
if (cfg->compile_aot) {
ARM_FLDD (code, ins->dreg, ARMREG_PC, 0);
ARM_B (code, 1);
*(guint32*)code = ((guint32*)(ins->inst_p0))[0];
code += 4;
*(guint32*)code = ((guint32*)(ins->inst_p0))[1];
code += 4;
} else {
/* FIXME: we can optimize the imm load by dealing with part of
* the displacement in LDFD (aligning to 512).
*/
code = mono_arm_emit_load_imm (code, ARMREG_LR, (guint32)(gsize)ins->inst_p0);
ARM_FLDD (code, ins->dreg, ARMREG_LR, 0);
}
break;
case OP_R4CONST:
if (cfg->compile_aot) {
ARM_FLDS (code, ins->dreg, ARMREG_PC, 0);
ARM_B (code, 0);
*(guint32*)code = ((guint32*)(ins->inst_p0))[0];
code += 4;
} else {
code = mono_arm_emit_load_imm (code, ARMREG_LR, (guint32)(gsize)ins->inst_p0);
ARM_FLDS (code, ins->dreg, ARMREG_LR, 0);
}
break;
case OP_STORER8_MEMBASE_REG:
/* This is generated by the local regalloc pass which runs after the lowering pass */
if (!arm_is_fpimm8 (ins->inst_offset)) {
code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
ARM_ADD_REG_REG (code, ARMREG_LR, ARMREG_LR, ins->inst_destbasereg);
ARM_FSTD (code, ins->sreg1, ARMREG_LR, 0);
} else {
ARM_FSTD (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
}
break;
case OP_LOADR8_MEMBASE:
/* This is generated by the local regalloc pass which runs after the lowering pass */
if (!arm_is_fpimm8 (ins->inst_offset)) {
code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
ARM_ADD_REG_REG (code, ARMREG_LR, ARMREG_LR, ins->inst_basereg);
ARM_FLDD (code, ins->dreg, ARMREG_LR, 0);
} else {
ARM_FLDD (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
}
break;
case OP_STORER4_MEMBASE_REG:
g_assert (arm_is_fpimm8 (ins->inst_offset));
ARM_FSTS (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
break;
case OP_LOADR4_MEMBASE:
ARM_FLDS (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
break;
case OP_ICONV_TO_R_UN: {
g_assert_not_reached ();
break;
}
case OP_ICONV_TO_R4:
ARM_FMSR (code, ins->dreg, ins->sreg1);
ARM_FSITOS (code, ins->dreg, ins->dreg);
break;
case OP_ICONV_TO_R8:
code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
ARM_FMSR (code, vfp_scratch1, ins->sreg1);
ARM_FSITOD (code, ins->dreg, vfp_scratch1);
code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
break;
case OP_SETFRET: {
MonoType *sig_ret = mini_get_underlying_type (mono_method_signature_internal (cfg->method)->ret);
if (sig_ret->type == MONO_TYPE_R4) {
if (IS_HARD_FLOAT) {
if (ins->sreg1 != ARM_VFP_D0)
ARM_CPYS (code, ARM_VFP_D0, ins->sreg1);
} else {
ARM_FMRS (code, ARMREG_R0, ins->sreg1);
}
} else {
if (IS_HARD_FLOAT)
ARM_CPYD (code, ARM_VFP_D0, ins->sreg1);
else
ARM_FMRRD (code, ARMREG_R0, ARMREG_R1, ins->sreg1);
}
break;
}
case OP_FCONV_TO_I1:
code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 1, TRUE);
break;
case OP_FCONV_TO_U1:
code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 1, FALSE);
break;
case OP_FCONV_TO_I2:
code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 2, TRUE);
break;
case OP_FCONV_TO_U2:
code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 2, FALSE);
break;
case OP_FCONV_TO_I4:
case OP_FCONV_TO_I:
code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 4, TRUE);
break;
case OP_FCONV_TO_U4:
code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 4, FALSE);
break;
case OP_FCONV_TO_I8:
case OP_FCONV_TO_U8:
g_assert_not_reached ();
/* Implemented as helper calls */
break;
case OP_LCONV_TO_R_UN:
g_assert_not_reached ();
/* Implemented as helper calls */
break;
case OP_LCONV_TO_OVF_I4_2: {
guint8 *high_bit_not_set, *valid_negative, *invalid_negative, *valid_positive;
/*
* Valid int32s: 0xFFFFFFFF:80000000 to 0x00000000:7FFFFFFF
*/
ARM_CMP_REG_IMM8 (code, ins->sreg1, 0);
high_bit_not_set = code;
ARM_B_COND (code, ARMCOND_GE, 0); /*branch if bit 31 of the lower part is not set*/
ARM_CMN_REG_IMM8 (code, ins->sreg2, 1); /* This has the same effect as CMP reg, 0xFFFFFFFF */
valid_negative = code;
ARM_B_COND (code, ARMCOND_EQ, 0); /*branch if upper part == 0xFFFFFFFF (lower part has bit 31 set) */
invalid_negative = code;
ARM_B_COND (code, ARMCOND_AL, 0);
arm_patch (high_bit_not_set, code);
ARM_CMP_REG_IMM8 (code, ins->sreg2, 0);
valid_positive = code;
ARM_B_COND (code, ARMCOND_EQ, 0); /*branch if upper part == 0 (lower part has bit 31 clear)*/
arm_patch (invalid_negative, code);
EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_AL, "OverflowException");
arm_patch (valid_negative, code);
arm_patch (valid_positive, code);
if (ins->dreg != ins->sreg1)
ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
break;
}
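/*
 * A C sketch of the range check above (illustrative, not emitted):
 *
 *   if (lo & 0x80000000) {            // candidate is a negative int32
 *       if (hi != 0xFFFFFFFF)
 *           throw OverflowException;  // not a proper sign extension
 *   } else {
 *       if (hi != 0)
 *           throw OverflowException;
 *   }
 *   result = (gint32) lo;             // sreg1 holds the low word
 */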
case OP_FADD:
ARM_VFP_ADDD (code, ins->dreg, ins->sreg1, ins->sreg2);
break;
case OP_FSUB:
ARM_VFP_SUBD (code, ins->dreg, ins->sreg1, ins->sreg2);
break;
case OP_FMUL:
ARM_VFP_MULD (code, ins->dreg, ins->sreg1, ins->sreg2);
break;
case OP_FDIV:
ARM_VFP_DIVD (code, ins->dreg, ins->sreg1, ins->sreg2);
break;
case OP_FNEG:
ARM_NEGD (code, ins->dreg, ins->sreg1);
break;
case OP_FREM:
/* emulated */
g_assert_not_reached ();
break;
case OP_FCOMPARE:
if (IS_VFP) {
ARM_CMPD (code, ins->sreg1, ins->sreg2);
ARM_FMSTAT (code);
}
break;
case OP_RCOMPARE:
g_assert (IS_VFP);
ARM_CMPS (code, ins->sreg1, ins->sreg2);
ARM_FMSTAT (code);
break;
case OP_FCEQ:
if (IS_VFP) {
ARM_CMPD (code, ins->sreg1, ins->sreg2);
ARM_FMSTAT (code);
}
ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_NE);
ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_EQ);
break;
case OP_FCLT:
if (IS_VFP) {
ARM_CMPD (code, ins->sreg1, ins->sreg2);
ARM_FMSTAT (code);
}
ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
break;
case OP_FCLT_UN:
if (IS_VFP) {
ARM_CMPD (code, ins->sreg1, ins->sreg2);
ARM_FMSTAT (code);
}
ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_VS);
break;
case OP_FCGT:
if (IS_VFP) {
ARM_CMPD (code, ins->sreg2, ins->sreg1);
ARM_FMSTAT (code);
}
ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
break;
case OP_FCGT_UN:
if (IS_VFP) {
ARM_CMPD (code, ins->sreg2, ins->sreg1);
ARM_FMSTAT (code);
}
ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_VS);
break;
case OP_FCNEQ:
if (IS_VFP) {
ARM_CMPD (code, ins->sreg1, ins->sreg2);
ARM_FMSTAT (code);
}
ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_NE);
ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_EQ);
break;
case OP_FCGE:
if (IS_VFP) {
ARM_CMPD (code, ins->sreg1, ins->sreg2);
ARM_FMSTAT (code);
}
ARM_MOV_REG_IMM8 (code, ins->dreg, 1);
ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_MI);
break;
case OP_FCLE:
if (IS_VFP) {
ARM_CMPD (code, ins->sreg2, ins->sreg1);
ARM_FMSTAT (code);
}
ARM_MOV_REG_IMM8 (code, ins->dreg, 1);
ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_MI);
break;
/* ARM FPA flags table:
* N Less than ARMCOND_MI
* Z Equal ARMCOND_EQ
* C Greater Than or Equal ARMCOND_CS
* V Unordered ARMCOND_VS
*/
case OP_FBEQ:
EMIT_COND_BRANCH (ins, OP_IBEQ - OP_IBEQ);
break;
case OP_FBNE_UN:
EMIT_COND_BRANCH (ins, OP_IBNE_UN - OP_IBEQ);
break;
case OP_FBLT:
EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_MI); /* N set */
break;
case OP_FBLT_UN:
EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_VS); /* V set */
EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_MI); /* N set */
break;
case OP_FBGT:
case OP_FBGT_UN:
case OP_FBLE:
case OP_FBLE_UN:
g_assert_not_reached ();
break;
case OP_FBGE:
if (IS_VFP) {
EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_GE);
} else {
/* FPA requires EQ even though the docs suggest that just CS is enough */
EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_EQ);
EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_CS);
}
break;
case OP_FBGE_UN:
EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_VS); /* V set */
EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_GE);
break;
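/*
 * Note on the _UN branches above: after ARM_CMPD + ARM_FMSTAT the V flag
 * is set exactly when the comparison was unordered (a NaN operand). The
 * unordered variants therefore emit an extra branch on ARMCOND_VS first,
 * so the branch is also taken for NaN inputs, as ECMA semantics require.
 */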
case OP_CKFINITE: {
if (IS_VFP) {
code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch2);
ARM_ABSD (code, vfp_scratch2, ins->sreg1);
ARM_FLDD (code, vfp_scratch1, ARMREG_PC, 0);
ARM_B (code, 1);
*(guint32*)code = 0xffffffff;
code += 4;
*(guint32*)code = 0x7fefffff;
code += 4;
ARM_CMPD (code, vfp_scratch2, vfp_scratch1);
ARM_FMSTAT (code);
EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_GT, "OverflowException");
ARM_CMPD (code, ins->sreg1, ins->sreg1);
ARM_FMSTAT (code);
EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_VS, "OverflowException");
ARM_CPYD (code, ins->dreg, ins->sreg1);
code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch2);
}
break;
}
case OP_RCONV_TO_I1:
code = emit_r4_to_int (cfg, code, ins->dreg, ins->sreg1, 1, TRUE);
break;
case OP_RCONV_TO_U1:
code = emit_r4_to_int (cfg, code, ins->dreg, ins->sreg1, 1, FALSE);
break;
case OP_RCONV_TO_I2:
code = emit_r4_to_int (cfg, code, ins->dreg, ins->sreg1, 2, TRUE);
break;
case OP_RCONV_TO_U2:
code = emit_r4_to_int (cfg, code, ins->dreg, ins->sreg1, 2, FALSE);
break;
case OP_RCONV_TO_I4:
case OP_RCONV_TO_I:
code = emit_r4_to_int (cfg, code, ins->dreg, ins->sreg1, 4, TRUE);
break;
case OP_RCONV_TO_U4:
code = emit_r4_to_int (cfg, code, ins->dreg, ins->sreg1, 4, FALSE);
break;
case OP_RCONV_TO_R4:
g_assert (IS_VFP);
if (ins->dreg != ins->sreg1)
ARM_CPYS (code, ins->dreg, ins->sreg1);
break;
case OP_RCONV_TO_R8:
g_assert (IS_VFP);
ARM_CVTS (code, ins->dreg, ins->sreg1);
break;
case OP_RADD:
ARM_VFP_ADDS (code, ins->dreg, ins->sreg1, ins->sreg2);
break;
case OP_RSUB:
ARM_VFP_SUBS (code, ins->dreg, ins->sreg1, ins->sreg2);
break;
case OP_RMUL:
ARM_VFP_MULS (code, ins->dreg, ins->sreg1, ins->sreg2);
break;
case OP_RDIV:
ARM_VFP_DIVS (code, ins->dreg, ins->sreg1, ins->sreg2);
break;
case OP_RNEG:
ARM_NEGS (code, ins->dreg, ins->sreg1);
break;
case OP_RCEQ:
if (IS_VFP) {
ARM_CMPS (code, ins->sreg1, ins->sreg2);
ARM_FMSTAT (code);
}
ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_NE);
ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_EQ);
break;
case OP_RCLT:
if (IS_VFP) {
ARM_CMPS (code, ins->sreg1, ins->sreg2);
ARM_FMSTAT (code);
}
ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
break;
case OP_RCLT_UN:
if (IS_VFP) {
ARM_CMPS (code, ins->sreg1, ins->sreg2);
ARM_FMSTAT (code);
}
ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_VS);
break;
case OP_RCGT:
if (IS_VFP) {
ARM_CMPS (code, ins->sreg2, ins->sreg1);
ARM_FMSTAT (code);
}
ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
break;
case OP_RCGT_UN:
if (IS_VFP) {
ARM_CMPS (code, ins->sreg2, ins->sreg1);
ARM_FMSTAT (code);
}
ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_VS);
break;
case OP_RCNEQ:
if (IS_VFP) {
ARM_CMPS (code, ins->sreg1, ins->sreg2);
ARM_FMSTAT (code);
}
ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_NE);
ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_EQ);
break;
case OP_RCGE:
if (IS_VFP) {
ARM_CMPS (code, ins->sreg1, ins->sreg2);
ARM_FMSTAT (code);
}
ARM_MOV_REG_IMM8 (code, ins->dreg, 1);
ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_MI);
break;
case OP_RCLE:
if (IS_VFP) {
ARM_CMPS (code, ins->sreg2, ins->sreg1);
ARM_FMSTAT (code);
}
ARM_MOV_REG_IMM8 (code, ins->dreg, 1);
ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_MI);
break;
case OP_GC_LIVENESS_DEF:
case OP_GC_LIVENESS_USE:
case OP_GC_PARAM_SLOT_LIVENESS_DEF:
ins->backend.pc_offset = code - cfg->native_code;
break;
case OP_GC_SPILL_SLOT_LIVENESS_DEF:
ins->backend.pc_offset = code - cfg->native_code;
bb->spill_slot_defs = g_slist_prepend_mempool (cfg->mempool, bb->spill_slot_defs, ins);
break;
case OP_LIVERANGE_START: {
if (cfg->verbose_level > 1)
printf ("R%d START=0x%x\n", MONO_VARINFO (cfg, ins->inst_c0)->vreg, (int)(code - cfg->native_code));
MONO_VARINFO (cfg, ins->inst_c0)->live_range_start = code - cfg->native_code;
break;
}
case OP_LIVERANGE_END: {
if (cfg->verbose_level > 1)
printf ("R%d END=0x%x\n", MONO_VARINFO (cfg, ins->inst_c0)->vreg, (int)(code - cfg->native_code));
MONO_VARINFO (cfg, ins->inst_c0)->live_range_end = code - cfg->native_code;
break;
}
case OP_GC_SAFE_POINT: {
guint8 *buf [1];
ARM_LDR_IMM (code, ARMREG_IP, ins->sreg1, 0);
ARM_CMP_REG_IMM (code, ARMREG_IP, 0, 0);
buf [0] = code;
ARM_B_COND (code, ARMCOND_EQ, 0);
mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_JIT_ICALL_ID, GUINT_TO_POINTER (MONO_JIT_ICALL_mono_threads_state_poll));
code = emit_call_seq (cfg, code);
arm_patch (buf [0], code);
break;
}
case OP_FILL_PROF_CALL_CTX:
for (int i = 0; i < ARMREG_MAX; i++)
if ((MONO_ARCH_CALLEE_SAVED_REGS & (1 << i)) || i == ARMREG_SP || i == ARMREG_FP)
ARM_STR_IMM (code, i, ins->sreg1, MONO_STRUCT_OFFSET (MonoContext, regs) + i * sizeof (target_mgreg_t));
break;
default:
g_warning ("unknown opcode %s in %s()\n", mono_inst_name (ins->opcode), __FUNCTION__);
g_assert_not_reached ();
}
if ((cfg->opt & MONO_OPT_BRANCH) && ((code - cfg->native_code - offset) > max_len)) {
g_warning ("wrong maximal instruction length of instruction %s (expected %d, got %d)",
mono_inst_name (ins->opcode), max_len, code - cfg->native_code - offset);
g_assert_not_reached ();
}
cpos += max_len;
last_ins = ins;
}
set_code_cursor (cfg, code);
}
#endif /* DISABLE_JIT */
void
mono_arch_register_lowlevel_calls (void)
{
/* The signature doesn't matter */
mono_register_jit_icall (mono_arm_throw_exception, mono_icall_sig_void, TRUE);
mono_register_jit_icall (mono_arm_throw_exception_by_token, mono_icall_sig_void, TRUE);
mono_register_jit_icall (mono_arm_unaligned_stack, mono_icall_sig_void, TRUE);
}
#define patch_lis_ori(ip,val) do {\
guint16 *__lis_ori = (guint16*)(ip); \
__lis_ori [1] = (((guint32)(gsize)(val)) >> 16) & 0xffff; \
__lis_ori [3] = ((guint32)(gsize)(val)) & 0xffff; \
} while (0)
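/*
 * Note: lis/ori are PowerPC mnemonics, so this macro appears to be a
 * leftover from the PPC backend. Every use below sits behind a
 * g_assert_not_reached (), making it effectively dead code on ARM.
 */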
void
mono_arch_patch_code_new (MonoCompile *cfg, guint8 *code, MonoJumpInfo *ji, gpointer target)
{
unsigned char *ip = ji->ip.i + code;
switch (ji->type) {
case MONO_PATCH_INFO_SWITCH: {
gpointer *jt = (gpointer*)(ip + 8);
int i;
/* jt is the inlined jump table, 2 instructions after ip
* In the normal case we store the absolute addresses,
* otherwise the displacements.
*/
for (i = 0; i < ji->data.table->table_size; i++)
jt [i] = code + (int)(gsize)ji->data.table->table [i];
break;
}
case MONO_PATCH_INFO_IP:
g_assert_not_reached ();
patch_lis_ori (ip, ip);
break;
case MONO_PATCH_INFO_METHODCONST:
case MONO_PATCH_INFO_CLASS:
case MONO_PATCH_INFO_IMAGE:
case MONO_PATCH_INFO_FIELD:
case MONO_PATCH_INFO_VTABLE:
case MONO_PATCH_INFO_IID:
case MONO_PATCH_INFO_SFLDA:
case MONO_PATCH_INFO_LDSTR:
case MONO_PATCH_INFO_TYPE_FROM_HANDLE:
case MONO_PATCH_INFO_LDTOKEN:
g_assert_not_reached ();
/* from OP_AOTCONST : lis + ori */
patch_lis_ori (ip, target);
break;
case MONO_PATCH_INFO_R4:
case MONO_PATCH_INFO_R8:
g_assert_not_reached ();
*((gconstpointer *)(ip + 2)) = target;
break;
case MONO_PATCH_INFO_EXC_NAME:
g_assert_not_reached ();
*((gconstpointer *)(ip + 1)) = target;
break;
case MONO_PATCH_INFO_NONE:
case MONO_PATCH_INFO_BB_OVF:
case MONO_PATCH_INFO_EXC_OVF:
/* everything is dealt with at epilog output time */
break;
default:
arm_patch_general (cfg, ip, (const guchar*)target);
break;
}
}
void
mono_arm_unaligned_stack (MonoMethod *method)
{
g_assert_not_reached ();
}
#ifndef DISABLE_JIT
/*
* Stack frame layout:
*
* ------------------- fp
* MonoLMF structure or saved registers
* -------------------
* locals
* -------------------
* spilled regs
* -------------------
* param area size is cfg->param_area
* ------------------- sp
*/
guint8 *
mono_arch_emit_prolog (MonoCompile *cfg)
{
MonoMethod *method = cfg->method;
MonoBasicBlock *bb;
MonoMethodSignature *sig;
MonoInst *inst;
int alloc_size, orig_alloc_size, pos, max_offset, i, rot_amount, part;
guint8 *code;
CallInfo *cinfo;
int lmf_offset = 0;
int prev_sp_offset, reg_offset;
sig = mono_method_signature_internal (method);
cfg->code_size = 256 + sig->param_count * 64;
code = cfg->native_code = g_malloc (cfg->code_size);
mono_emit_unwind_op_def_cfa (cfg, code, ARMREG_SP, 0);
alloc_size = cfg->stack_offset;
pos = 0;
prev_sp_offset = 0;
if (iphone_abi) {
/*
* The iphone uses R7 as the frame pointer, and it points at the saved
* r7+lr:
* <lr>
* r7 -> <r7>
* <rest of frame>
* We can't use r7 as a frame pointer since it points into the middle of
* the frame, so we keep using our own frame pointer.
* FIXME: Optimize this.
*/
ARM_PUSH (code, (1 << ARMREG_R7) | (1 << ARMREG_LR));
prev_sp_offset += 8; /* r7 and lr */
mono_emit_unwind_op_def_cfa_offset (cfg, code, prev_sp_offset);
mono_emit_unwind_op_offset (cfg, code, ARMREG_R7, (- prev_sp_offset) + 0);
ARM_MOV_REG_REG (code, ARMREG_R7, ARMREG_SP);
}
if (!method->save_lmf) {
if (iphone_abi) {
/* No need to push LR again */
if (cfg->used_int_regs)
ARM_PUSH (code, cfg->used_int_regs);
} else {
ARM_PUSH (code, cfg->used_int_regs | (1 << ARMREG_LR));
prev_sp_offset += 4;
}
for (i = 0; i < 16; ++i) {
if (cfg->used_int_regs & (1 << i))
prev_sp_offset += 4;
}
mono_emit_unwind_op_def_cfa_offset (cfg, code, prev_sp_offset);
reg_offset = 0;
for (i = 0; i < 16; ++i) {
if ((cfg->used_int_regs & (1 << i))) {
mono_emit_unwind_op_offset (cfg, code, i, (- prev_sp_offset) + reg_offset);
mini_gc_set_slot_type_from_cfa (cfg, (- prev_sp_offset) + reg_offset, SLOT_NOREF);
reg_offset += 4;
}
}
mono_emit_unwind_op_offset (cfg, code, ARMREG_LR, -4);
mini_gc_set_slot_type_from_cfa (cfg, -4, SLOT_NOREF);
} else {
ARM_MOV_REG_REG (code, ARMREG_IP, ARMREG_SP);
ARM_PUSH (code, 0x5ff0);
prev_sp_offset += 4 * 10; /* all but r0-r3, sp and pc */
mono_emit_unwind_op_def_cfa_offset (cfg, code, prev_sp_offset);
reg_offset = 0;
for (i = 0; i < 16; ++i) {
if ((i > ARMREG_R3) && (i != ARMREG_SP) && (i != ARMREG_PC)) {
/* The original r7 is saved at the start */
if (!(iphone_abi && i == ARMREG_R7))
mono_emit_unwind_op_offset (cfg, code, i, (- prev_sp_offset) + reg_offset);
reg_offset += 4;
}
}
g_assert (reg_offset == 4 * 10);
pos += MONO_ABI_SIZEOF (MonoLMF) - (4 * 10);
lmf_offset = pos;
}
alloc_size += pos;
orig_alloc_size = alloc_size;
// align to MONO_ARCH_FRAME_ALIGNMENT bytes
if (alloc_size & (MONO_ARCH_FRAME_ALIGNMENT - 1)) {
alloc_size += MONO_ARCH_FRAME_ALIGNMENT - 1;
alloc_size &= ~(MONO_ARCH_FRAME_ALIGNMENT - 1);
}
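/*
 * Example (a sketch, assuming MONO_ARCH_FRAME_ALIGNMENT is 8): an
 * alloc_size of 20 becomes (20 + 7) & ~7 == 24, i.e. the frame is padded
 * up to the next multiple of the alignment.
 */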
/* the stack used in the pushed regs */
alloc_size += ALIGN_TO (prev_sp_offset, MONO_ARCH_FRAME_ALIGNMENT) - prev_sp_offset;
cfg->stack_usage = alloc_size;
if (alloc_size) {
if ((i = mono_arm_is_rotated_imm8 (alloc_size, &rot_amount)) >= 0) {
ARM_SUB_REG_IMM (code, ARMREG_SP, ARMREG_SP, i, rot_amount);
} else {
code = mono_arm_emit_load_imm (code, ARMREG_IP, alloc_size);
ARM_SUB_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
}
mono_emit_unwind_op_def_cfa_offset (cfg, code, prev_sp_offset + alloc_size);
}
if (cfg->frame_reg != ARMREG_SP) {
ARM_MOV_REG_REG (code, cfg->frame_reg, ARMREG_SP);
mono_emit_unwind_op_def_cfa_reg (cfg, code, cfg->frame_reg);
}
//g_print ("prev_sp_offset: %d, alloc_size:%d\n", prev_sp_offset, alloc_size);
prev_sp_offset += alloc_size;
for (i = 0; i < alloc_size - orig_alloc_size; i += 4)
mini_gc_set_slot_type_from_cfa (cfg, (- prev_sp_offset) + orig_alloc_size + i, SLOT_NOREF);
/* compute max_offset in order to use short forward jumps;
 * we could skip doing this on ARM because the immediate displacement
 * for jumps is large enough, but it may be useful later for constant pools
 */
max_offset = 0;
for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
MonoInst *ins = bb->code;
bb->max_offset = max_offset;
MONO_BB_FOR_EACH_INS (bb, ins)
max_offset += ins_get_size (ins->opcode);
}
/* stack alignment check */
/*
{
guint8 *buf [16];
ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_SP);
code = mono_arm_emit_load_imm (code, ARMREG_IP, MONO_ARCH_FRAME_ALIGNMENT -1);
ARM_AND_REG_REG (code, ARMREG_LR, ARMREG_LR, ARMREG_IP);
ARM_CMP_REG_IMM (code, ARMREG_LR, 0, 0);
buf [0] = code;
ARM_B_COND (code, ARMCOND_EQ, 0);
if (cfg->compile_aot)
ARM_MOV_REG_IMM8 (code, ARMREG_R0, 0);
else
code = mono_arm_emit_load_imm (code, ARMREG_R0, (guint32)cfg->method);
mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_JIT_ICALL_ID, GUINT_TO_POINTER (MONO_JIT_ICALL_mono_arm_unaligned_stack));
code = emit_call_seq (cfg, code);
arm_patch (buf [0], code);
}
*/
/* store runtime generic context */
if (cfg->rgctx_var) {
MonoInst *ins = cfg->rgctx_var;
g_assert (ins->opcode == OP_REGOFFSET);
if (arm_is_imm12 (ins->inst_offset)) {
ARM_STR_IMM (code, MONO_ARCH_RGCTX_REG, ins->inst_basereg, ins->inst_offset);
} else {
code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
ARM_STR_REG_REG (code, MONO_ARCH_RGCTX_REG, ins->inst_basereg, ARMREG_LR);
}
mono_add_var_location (cfg, cfg->rgctx_var, TRUE, MONO_ARCH_RGCTX_REG, 0, 0, code - cfg->native_code);
mono_add_var_location (cfg, cfg->rgctx_var, FALSE, ins->inst_basereg, ins->inst_offset, code - cfg->native_code, 0);
}
/* load arguments allocated to register from the stack */
cinfo = get_call_info (NULL, sig);
if (cinfo->ret.storage == RegTypeStructByAddr) {
ArgInfo *ainfo = &cinfo->ret;
inst = cfg->vret_addr;
g_assert (arm_is_imm12 (inst->inst_offset));
ARM_STR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
}
if (sig->call_convention == MONO_CALL_VARARG) {
ArgInfo *cookie = &cinfo->sig_cookie;
/* Save the sig cookie address */
g_assert (cookie->storage == RegTypeBase);
g_assert (arm_is_imm12 (prev_sp_offset + cookie->offset));
g_assert (arm_is_imm12 (cfg->sig_cookie));
ARM_ADD_REG_IMM8 (code, ARMREG_IP, cfg->frame_reg, prev_sp_offset + cookie->offset);
ARM_STR_IMM (code, ARMREG_IP, cfg->frame_reg, cfg->sig_cookie);
}
for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
ArgInfo *ainfo = cinfo->args + i;
inst = cfg->args [i];
if (cfg->verbose_level > 2)
g_print ("Saving argument %d (type: %d)\n", i, ainfo->storage);
if (inst->opcode == OP_REGVAR) {
if (ainfo->storage == RegTypeGeneral)
ARM_MOV_REG_REG (code, inst->dreg, ainfo->reg);
else if (ainfo->storage == RegTypeFP) {
g_assert_not_reached ();
} else if (ainfo->storage == RegTypeBase) {
if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
ARM_LDR_IMM (code, inst->dreg, ARMREG_SP, (prev_sp_offset + ainfo->offset));
} else {
code = mono_arm_emit_load_imm (code, ARMREG_IP, prev_sp_offset + ainfo->offset);
ARM_LDR_REG_REG (code, inst->dreg, ARMREG_SP, ARMREG_IP);
}
} else
g_assert_not_reached ();
if (i == 0 && sig->hasthis) {
g_assert (ainfo->storage == RegTypeGeneral);
mono_add_var_location (cfg, inst, TRUE, ainfo->reg, 0, 0, code - cfg->native_code);
mono_add_var_location (cfg, inst, TRUE, inst->dreg, 0, code - cfg->native_code, 0);
}
if (cfg->verbose_level > 2)
g_print ("Argument %d assigned to register %s\n", i, mono_arch_regname (inst->dreg));
} else {
switch (ainfo->storage) {
case RegTypeHFA:
for (part = 0; part < ainfo->nregs; part ++) {
if (ainfo->esize == 4)
ARM_FSTS (code, ainfo->reg + part, inst->inst_basereg, inst->inst_offset + (part * ainfo->esize));
else
ARM_FSTD (code, ainfo->reg + (part * 2), inst->inst_basereg, inst->inst_offset + (part * ainfo->esize));
}
break;
case RegTypeGeneral:
case RegTypeIRegPair:
case RegTypeGSharedVtInReg:
case RegTypeStructByAddr:
switch (ainfo->size) {
case 1:
if (arm_is_imm12 (inst->inst_offset))
ARM_STRB_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
else {
code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
ARM_STRB_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
}
break;
case 2:
if (arm_is_imm8 (inst->inst_offset)) {
ARM_STRH_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
} else {
code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
ARM_STRH_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
}
break;
case 8:
if (arm_is_imm12 (inst->inst_offset)) {
ARM_STR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
} else {
code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
ARM_STR_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
}
if (arm_is_imm12 (inst->inst_offset + 4)) {
ARM_STR_IMM (code, ainfo->reg + 1, inst->inst_basereg, inst->inst_offset + 4);
} else {
code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset + 4);
ARM_STR_REG_REG (code, ainfo->reg + 1, inst->inst_basereg, ARMREG_IP);
}
break;
default:
if (arm_is_imm12 (inst->inst_offset)) {
ARM_STR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
} else {
code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
ARM_STR_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
}
break;
}
if (i == 0 && sig->hasthis) {
g_assert (ainfo->storage == RegTypeGeneral);
mono_add_var_location (cfg, inst, TRUE, ainfo->reg, 0, 0, code - cfg->native_code);
mono_add_var_location (cfg, inst, FALSE, inst->inst_basereg, inst->inst_offset, code - cfg->native_code, 0);
}
break;
case RegTypeBaseGen:
if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, (prev_sp_offset + ainfo->offset));
} else {
code = mono_arm_emit_load_imm (code, ARMREG_IP, prev_sp_offset + ainfo->offset);
ARM_LDR_REG_REG (code, ARMREG_LR, ARMREG_SP, ARMREG_IP);
}
if (arm_is_imm12 (inst->inst_offset + 4)) {
ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset + 4);
ARM_STR_IMM (code, ARMREG_R3, inst->inst_basereg, inst->inst_offset);
} else {
code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset + 4);
ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
ARM_STR_REG_REG (code, ARMREG_R3, inst->inst_basereg, ARMREG_IP);
}
break;
case RegTypeBase:
case RegTypeGSharedVtOnStack:
case RegTypeStructByAddrOnStack:
if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, (prev_sp_offset + ainfo->offset));
} else {
code = mono_arm_emit_load_imm (code, ARMREG_IP, prev_sp_offset + ainfo->offset);
ARM_LDR_REG_REG (code, ARMREG_LR, ARMREG_SP, ARMREG_IP);
}
switch (ainfo->size) {
case 1:
if (arm_is_imm8 (inst->inst_offset)) {
ARM_STRB_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
} else {
code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
ARM_STRB_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
}
break;
case 2:
if (arm_is_imm8 (inst->inst_offset)) {
ARM_STRH_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
} else {
code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
ARM_STRH_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
}
break;
case 8:
if (arm_is_imm12 (inst->inst_offset)) {
ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
} else {
code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
}
if (arm_is_imm12 (prev_sp_offset + ainfo->offset + 4)) {
ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, (prev_sp_offset + ainfo->offset + 4));
} else {
code = mono_arm_emit_load_imm (code, ARMREG_IP, prev_sp_offset + ainfo->offset + 4);
ARM_LDR_REG_REG (code, ARMREG_LR, ARMREG_SP, ARMREG_IP);
}
if (arm_is_imm12 (inst->inst_offset + 4)) {
ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset + 4);
} else {
code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset + 4);
ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
}
break;
default:
if (arm_is_imm12 (inst->inst_offset)) {
ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
} else {
code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
}
break;
}
break;
case RegTypeFP: {
int imm8, rot_amount;
if ((imm8 = mono_arm_is_rotated_imm8 (inst->inst_offset, &rot_amount)) == -1) {
code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
ARM_ADD_REG_REG (code, ARMREG_IP, ARMREG_IP, inst->inst_basereg);
} else
ARM_ADD_REG_IMM (code, ARMREG_IP, inst->inst_basereg, imm8, rot_amount);
if (ainfo->size == 8)
ARM_FSTD (code, ainfo->reg, ARMREG_IP, 0);
else
ARM_FSTS (code, ainfo->reg, ARMREG_IP, 0);
break;
}
case RegTypeStructByVal: {
int doffset = inst->inst_offset;
int soffset = 0;
int cur_reg;
int size = 0;
size = mini_type_stack_size_full (inst->inst_vtype, NULL, sig->pinvoke && !sig->marshalling_disabled);
for (cur_reg = 0; cur_reg < ainfo->size; ++cur_reg) {
if (arm_is_imm12 (doffset)) {
ARM_STR_IMM (code, ainfo->reg + cur_reg, inst->inst_basereg, doffset);
} else {
code = mono_arm_emit_load_imm (code, ARMREG_IP, doffset);
ARM_STR_REG_REG (code, ainfo->reg + cur_reg, inst->inst_basereg, ARMREG_IP);
}
soffset += sizeof (target_mgreg_t);
doffset += sizeof (target_mgreg_t);
}
if (ainfo->vtsize) {
/* FIXME: handle overrun! with struct sizes not multiple of 4 */
//g_print ("emit_memcpy (prev_sp_ofs: %d, ainfo->offset: %d, soffset: %d)\n", prev_sp_offset, ainfo->offset, soffset);
code = emit_memcpy (code, ainfo->vtsize * sizeof (target_mgreg_t), inst->inst_basereg, doffset, ARMREG_SP, prev_sp_offset + ainfo->offset);
}
break;
}
default:
g_assert_not_reached ();
break;
}
}
}
if (method->save_lmf)
code = emit_save_lmf (cfg, code, alloc_size - lmf_offset);
if (cfg->arch.seq_point_info_var) {
MonoInst *ins = cfg->arch.seq_point_info_var;
/* Initialize the variable from a GOT slot */
mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_SEQ_POINT_INFO, cfg->method);
ARM_LDR_IMM (code, ARMREG_R0, ARMREG_PC, 0);
ARM_B (code, 0);
*(gpointer*)code = NULL;
code += 4;
ARM_LDR_REG_REG (code, ARMREG_R0, ARMREG_PC, ARMREG_R0);
g_assert (ins->opcode == OP_REGOFFSET);
if (arm_is_imm12 (ins->inst_offset)) {
ARM_STR_IMM (code, ARMREG_R0, ins->inst_basereg, ins->inst_offset);
} else {
code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
ARM_STR_REG_REG (code, ARMREG_R0, ins->inst_basereg, ARMREG_LR);
}
}
/* Initialize ss_trigger_page_var */
if (!cfg->soft_breakpoints) {
MonoInst *info_var = cfg->arch.seq_point_info_var;
MonoInst *ss_trigger_page_var = cfg->arch.ss_trigger_page_var;
int dreg = ARMREG_LR;
if (info_var) {
g_assert (info_var->opcode == OP_REGOFFSET);
code = emit_ldr_imm (code, dreg, info_var->inst_basereg, info_var->inst_offset);
/* Load the trigger page addr */
ARM_LDR_IMM (code, dreg, dreg, MONO_STRUCT_OFFSET (SeqPointInfo, ss_trigger_page));
ARM_STR_IMM (code, dreg, ss_trigger_page_var->inst_basereg, ss_trigger_page_var->inst_offset);
}
}
if (cfg->arch.seq_point_ss_method_var) {
MonoInst *ss_method_ins = cfg->arch.seq_point_ss_method_var;
MonoInst *bp_method_ins = cfg->arch.seq_point_bp_method_var;
g_assert (ss_method_ins->opcode == OP_REGOFFSET);
g_assert (arm_is_imm12 (ss_method_ins->inst_offset));
if (cfg->compile_aot) {
MonoInst *info_var = cfg->arch.seq_point_info_var;
int dreg = ARMREG_LR;
g_assert (info_var->opcode == OP_REGOFFSET);
g_assert (arm_is_imm12 (info_var->inst_offset));
ARM_LDR_IMM (code, dreg, info_var->inst_basereg, info_var->inst_offset);
ARM_LDR_IMM (code, dreg, dreg, MONO_STRUCT_OFFSET (SeqPointInfo, ss_tramp_addr));
ARM_STR_IMM (code, dreg, ss_method_ins->inst_basereg, ss_method_ins->inst_offset);
} else {
g_assert (bp_method_ins->opcode == OP_REGOFFSET);
g_assert (arm_is_imm12 (bp_method_ins->inst_offset));
ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
ARM_B (code, 1);
*(gpointer*)code = &single_step_tramp;
code += 4;
*(gpointer*)code = breakpoint_tramp;
code += 4;
ARM_LDR_IMM (code, ARMREG_IP, ARMREG_LR, 0);
ARM_STR_IMM (code, ARMREG_IP, ss_method_ins->inst_basereg, ss_method_ins->inst_offset);
ARM_LDR_IMM (code, ARMREG_IP, ARMREG_LR, 4);
ARM_STR_IMM (code, ARMREG_IP, bp_method_ins->inst_basereg, bp_method_ins->inst_offset);
}
}
set_code_cursor (cfg, code);
g_free (cinfo);
return code;
}
void
mono_arch_emit_epilog (MonoCompile *cfg)
{
MonoMethod *method = cfg->method;
int pos, i, rot_amount;
int max_epilog_size = 16 + 20*4;
guint8 *code;
CallInfo *cinfo;
if (cfg->method->save_lmf)
max_epilog_size += 128;
code = realloc_code (cfg, max_epilog_size);
/* Save the unwind state which is needed by the out-of-line code */
mono_emit_unwind_op_remember_state (cfg, code);
pos = 0;
/* Load returned vtypes into registers if needed */
cinfo = cfg->arch.cinfo;
switch (cinfo->ret.storage) {
case RegTypeStructByVal: {
MonoInst *ins = cfg->ret;
if (cinfo->ret.nregs == 1) {
if (arm_is_imm12 (ins->inst_offset)) {
ARM_LDR_IMM (code, ARMREG_R0, ins->inst_basereg, ins->inst_offset);
} else {
code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
ARM_LDR_REG_REG (code, ARMREG_R0, ins->inst_basereg, ARMREG_LR);
}
} else {
for (i = 0; i < cinfo->ret.nregs; ++i) {
int offset = ins->inst_offset + (i * 4);
if (arm_is_imm12 (offset)) {
ARM_LDR_IMM (code, i, ins->inst_basereg, offset);
} else {
code = mono_arm_emit_load_imm (code, ARMREG_LR, offset);
ARM_LDR_REG_REG (code, i, ins->inst_basereg, ARMREG_LR);
}
}
}
break;
}
case RegTypeHFA: {
MonoInst *ins = cfg->ret;
for (i = 0; i < cinfo->ret.nregs; ++i) {
if (cinfo->ret.esize == 4)
ARM_FLDS (code, cinfo->ret.reg + i, ins->inst_basereg, ins->inst_offset + (i * cinfo->ret.esize));
else
ARM_FLDD (code, cinfo->ret.reg + (i * 2), ins->inst_basereg, ins->inst_offset + (i * cinfo->ret.esize));
}
break;
}
default:
break;
}
if (method->save_lmf) {
int lmf_offset, reg, sp_adj, regmask, nused_int_regs = 0;
/* all but r0-r3, sp and pc */
pos += MONO_ABI_SIZEOF (MonoLMF) - (MONO_ARM_NUM_SAVED_REGS * sizeof (target_mgreg_t));
lmf_offset = pos;
code = emit_restore_lmf (cfg, code, cfg->stack_usage - lmf_offset);
/* This points to r4 inside MonoLMF->iregs */
sp_adj = (MONO_ABI_SIZEOF (MonoLMF) - MONO_ARM_NUM_SAVED_REGS * sizeof (target_mgreg_t));
reg = ARMREG_R4;
regmask = 0x9ff0; /* restore lr to pc */
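/*
 * 0x9ff0 selects r4-r12 and pc (unused caller-saved registers are masked
 * out just below). The LMF prolog pushed r4-r12 and lr (mask 0x5ff0);
 * since LDM loads registers in ascending order from ascending addresses,
 * the slot that held lr is popped straight into pc, returning to the
 * caller in the same instruction.
 */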
/* Skip caller saved registers not used by the method */
while (!(cfg->used_int_regs & (1 << reg)) && reg < ARMREG_FP) {
regmask &= ~(1 << reg);
sp_adj += 4;
reg ++;
}
if (iphone_abi)
/* Restored later */
regmask &= ~(1 << ARMREG_PC);
/* point sp at the registers to restore: 10 is 14 - 4, because we skip r0-r3 */
code = emit_big_add (code, ARMREG_SP, cfg->frame_reg, cfg->stack_usage - lmf_offset + sp_adj);
for (i = 0; i < 16; i++) {
if (regmask & (1 << i))
nused_int_regs ++;
}
mono_emit_unwind_op_def_cfa (cfg, code, ARMREG_SP, ((iphone_abi ? 3 : 0) + nused_int_regs) * 4);
/* restore iregs */
ARM_POP (code, regmask);
if (iphone_abi) {
for (i = 0; i < 16; i++) {
if (regmask & (1 << i))
mono_emit_unwind_op_same_value (cfg, code, i);
}
/* Restore saved r7, restore LR to PC */
/* Skip lr from the lmf */
mono_emit_unwind_op_def_cfa_offset (cfg, code, 3 * 4);
ARM_ADD_REG_IMM (code, ARMREG_SP, ARMREG_SP, sizeof (target_mgreg_t), 0);
mono_emit_unwind_op_def_cfa_offset (cfg, code, 2 * 4);
ARM_POP (code, (1 << ARMREG_R7) | (1 << ARMREG_PC));
}
} else {
int i, nused_int_regs = 0;
for (i = 0; i < 16; i++) {
if (cfg->used_int_regs & (1 << i))
nused_int_regs ++;
}
if ((i = mono_arm_is_rotated_imm8 (cfg->stack_usage, &rot_amount)) >= 0) {
ARM_ADD_REG_IMM (code, ARMREG_SP, cfg->frame_reg, i, rot_amount);
} else {
code = mono_arm_emit_load_imm (code, ARMREG_IP, cfg->stack_usage);
ARM_ADD_REG_REG (code, ARMREG_SP, cfg->frame_reg, ARMREG_IP);
}
if (cfg->frame_reg != ARMREG_SP) {
mono_emit_unwind_op_def_cfa_reg (cfg, code, ARMREG_SP);
}
if (iphone_abi) {
/* Restore saved gregs */
if (cfg->used_int_regs) {
mono_emit_unwind_op_def_cfa_offset (cfg, code, (2 + nused_int_regs) * 4);
ARM_POP (code, cfg->used_int_regs);
for (i = 0; i < 16; i++) {
if (cfg->used_int_regs & (1 << i))
mono_emit_unwind_op_same_value (cfg, code, i);
}
}
mono_emit_unwind_op_def_cfa_offset (cfg, code, 2 * 4);
/* Restore saved r7, restore LR to PC */
ARM_POP (code, (1 << ARMREG_R7) | (1 << ARMREG_PC));
} else {
mono_emit_unwind_op_def_cfa_offset (cfg, code, (nused_int_regs + 1) * 4);
ARM_POP (code, cfg->used_int_regs | (1 << ARMREG_PC));
}
}
/* Restore the unwind state to be the same as before the epilog */
mono_emit_unwind_op_restore_state (cfg, code);
set_code_cursor (cfg, code);
}
void
mono_arch_emit_exceptions (MonoCompile *cfg)
{
MonoJumpInfo *patch_info;
int i;
guint8 *code;
guint8* exc_throw_pos [MONO_EXC_INTRINS_NUM];
guint8 exc_throw_found [MONO_EXC_INTRINS_NUM];
int max_epilog_size = 50;
for (i = 0; i < MONO_EXC_INTRINS_NUM; i++) {
exc_throw_pos [i] = NULL;
exc_throw_found [i] = 0;
}
/*
 * Count the number of exception infos and make sure we have enough
 * space for the exception-raising code.
 */
for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
if (patch_info->type == MONO_PATCH_INFO_EXC) {
i = mini_exception_id_by_name ((const char*)patch_info->data.target);
if (!exc_throw_found [i]) {
max_epilog_size += 32;
exc_throw_found [i] = TRUE;
}
}
}
code = realloc_code (cfg, max_epilog_size);
/* add code to raise exceptions */
for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
switch (patch_info->type) {
case MONO_PATCH_INFO_EXC: {
MonoClass *exc_class;
unsigned char *ip = patch_info->ip.i + cfg->native_code;
i = mini_exception_id_by_name ((const char*)patch_info->data.target);
if (exc_throw_pos [i]) {
arm_patch (ip, exc_throw_pos [i]);
patch_info->type = MONO_PATCH_INFO_NONE;
break;
} else {
exc_throw_pos [i] = code;
}
arm_patch (ip, code);
exc_class = mono_class_load_from_name (mono_defaults.corlib, "System", patch_info->data.name);
ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_LR);
ARM_LDR_IMM (code, ARMREG_R0, ARMREG_PC, 0);
patch_info->type = MONO_PATCH_INFO_JIT_ICALL_ID;
patch_info->data.jit_icall_id = MONO_JIT_ICALL_mono_arch_throw_corlib_exception;
patch_info->ip.i = code - cfg->native_code;
ARM_BL (code, 0);
cfg->thunk_area += THUNK_SIZE;
*(guint32*)(gpointer)code = m_class_get_type_token (exc_class) - MONO_TOKEN_TYPE_DEF;
code += 4;
break;
}
default:
/* do nothing */
break;
}
}
set_code_cursor (cfg, code);
}
#endif /* #ifndef DISABLE_JIT */
void
mono_arch_finish_init (void)
{
}
MonoInst*
mono_arch_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
{
/* FIXME: */
return NULL;
}
#ifndef DISABLE_JIT
#endif
guint32
mono_arch_get_patch_offset (guint8 *code)
{
/* OP_AOTCONST */
return 8;
}
void
mono_arch_flush_register_windows (void)
{
}
MonoMethod*
mono_arch_find_imt_method (host_mgreg_t *regs, guint8 *code)
{
return (MonoMethod*)regs [MONO_ARCH_IMT_REG];
}
MonoVTable*
mono_arch_find_static_call_vtable (host_mgreg_t *regs, guint8 *code)
{
return (MonoVTable*)(gsize)regs [MONO_ARCH_RGCTX_REG];
}
GSList*
mono_arch_get_cie_program (void)
{
GSList *l = NULL;
mono_add_unwind_op_def_cfa (l, (guint8*)NULL, (guint8*)NULL, ARMREG_SP, 0);
return l;
}
/* #define ENABLE_WRONG_METHOD_CHECK 1 */
#define BASE_SIZE (6 * 4)
#define BSEARCH_ENTRY_SIZE (4 * 4)
#define CMP_SIZE (3 * 4)
#define BRANCH_SIZE (1 * 4)
#define CALL_SIZE (2 * 4)
#define WMC_SIZE (8 * 4)
#define DISTANCE(A, B) (((gint32)(gssize)(B)) - ((gint32)(gssize)(A)))
static arminstr_t *
arm_emit_value_and_patch_ldr (arminstr_t *code, arminstr_t *target, guint32 value)
{
/* signed so the range assert below is meaningful */
gint32 delta = DISTANCE (target, code);
delta -= 8;
g_assert (delta >= 0 && delta <= 0xFFF);
*target = *target | delta;
*code = value;
return code + 1;
}
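/*
 * The helper above implements a small inline constant pool: 'target'
 * points at a previously emitted "ldr rX, [pc, #0]" placeholder, and the
 * computed distance (minus 8 for the ARM pc-read-ahead) is or'ed into the
 * ldr's 12-bit immediate so that it loads 'value' from the word written
 * at 'code'.
 */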
#ifdef ENABLE_WRONG_METHOD_CHECK
static void
mini_dump_bad_imt (int input_imt, int compared_imt, int pc)
{
g_print ("BAD IMT comparing %x with expected %x at ip %x", input_imt, compared_imt, pc);
g_assert (0);
}
#endif
gpointer
mono_arch_build_imt_trampoline (MonoVTable *vtable, MonoIMTCheckItem **imt_entries, int count,
gpointer fail_tramp)
{
int size, i;
arminstr_t *code, *start;
gboolean large_offsets = FALSE;
guint32 **constant_pool_starts;
arminstr_t *vtable_target = NULL;
int extra_space = 0;
#ifdef ENABLE_WRONG_METHOD_CHECK
char * cond;
#endif
GSList *unwind_ops;
MonoMemoryManager *mem_manager = m_class_get_mem_manager (vtable->klass);
size = BASE_SIZE;
constant_pool_starts = g_new0 (guint32*, count);
for (i = 0; i < count; ++i) {
MonoIMTCheckItem *item = imt_entries [i];
if (item->is_equals) {
gboolean fail_case = !item->check_target_idx && fail_tramp;
if (item->has_target_code || !arm_is_imm12 (DISTANCE (vtable, &vtable->vtable[item->value.vtable_slot]))) {
item->chunk_size += 32;
large_offsets = TRUE;
}
if (item->check_target_idx || fail_case) {
if (!item->compare_done || fail_case)
item->chunk_size += CMP_SIZE;
item->chunk_size += BRANCH_SIZE;
} else {
#ifdef ENABLE_WRONG_METHOD_CHECK
item->chunk_size += WMC_SIZE;
#endif
}
if (fail_case) {
item->chunk_size += 16;
large_offsets = TRUE;
}
item->chunk_size += CALL_SIZE;
} else {
item->chunk_size += BSEARCH_ENTRY_SIZE;
imt_entries [item->check_target_idx]->compare_done = TRUE;
}
size += item->chunk_size;
}
if (large_offsets)
size += 4 * count; /* The ARM_ADD_REG_IMM to pop the stack */
if (fail_tramp) {
code = (arminstr_t *)mini_alloc_generic_virtual_trampoline (vtable, size);
} else {
code = mono_mem_manager_code_reserve (mem_manager, size);
}
start = code;
unwind_ops = mono_arch_get_cie_program ();
#ifdef DEBUG_IMT
g_print ("Building IMT trampoline for class %s %s entries %d code size %d code at %p end %p vtable %p fail_tramp %p\n", m_class_get_name_space (vtable->klass), m_class_get_name (vtable->klass), count, size, start, ((guint8*)start) + size, vtable, fail_tramp);
for (i = 0; i < count; ++i) {
MonoIMTCheckItem *item = imt_entries [i];
g_print ("method %d (%p) %s vtable slot %p is_equals %d chunk size %d\n", i, item->key, ((MonoMethod*)item->key)->name, &vtable->vtable [item->value.vtable_slot], item->is_equals, item->chunk_size);
}
#endif
if (large_offsets) {
ARM_PUSH4 (code, ARMREG_R0, ARMREG_R1, ARMREG_IP, ARMREG_PC);
mono_add_unwind_op_def_cfa_offset (unwind_ops, code, start, 4 * sizeof (target_mgreg_t));
} else {
ARM_PUSH2 (code, ARMREG_R0, ARMREG_R1);
mono_add_unwind_op_def_cfa_offset (unwind_ops, code, start, 2 * sizeof (target_mgreg_t));
}
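/*
 * Dispatch sketch for the large_offsets case: r0, r1, ip and pc are
 * pushed above; the pc slot (the fourth one) is later overwritten with
 * the resolved target address, so the closing ARM_POP4 restores r0, r1
 * and ip and branches by popping the target into pc in one instruction.
 */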
ARM_LDR_IMM (code, ARMREG_R0, ARMREG_LR, -4);
vtable_target = code;
ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
ARM_MOV_REG_REG (code, ARMREG_R0, ARMREG_V5);
for (i = 0; i < count; ++i) {
MonoIMTCheckItem *item = imt_entries [i];
arminstr_t *imt_method = NULL, *vtable_offset_ins = NULL, *target_code_ins = NULL;
gint32 vtable_offset;
item->code_target = (guint8*)code;
if (item->is_equals) {
gboolean fail_case = !item->check_target_idx && fail_tramp;
if (item->check_target_idx || fail_case) {
if (!item->compare_done || fail_case) {
imt_method = code;
ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
}
item->jmp_code = (guint8*)code;
ARM_B_COND (code, ARMCOND_NE, 0);
} else {
/* Enable ENABLE_WRONG_METHOD_CHECK to assert on a wrong method */
#ifdef ENABLE_WRONG_METHOD_CHECK
imt_method = code;
ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
cond = code;
ARM_B_COND (code, ARMCOND_EQ, 0);
/* Define this if your system is so bad that gdb is failing. */
#ifdef BROKEN_DEV_ENV
ARM_MOV_REG_REG (code, ARMREG_R2, ARMREG_PC);
ARM_BL (code, 0);
arm_patch (code - 1, mini_dump_bad_imt);
#else
ARM_DBRK (code);
#endif
arm_patch (cond, code);
#endif
}
if (item->has_target_code) {
/* Load target address */
target_code_ins = code;
ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
/* Save it to the fourth slot */
ARM_STR_IMM (code, ARMREG_R1, ARMREG_SP, 3 * sizeof (target_mgreg_t));
/* Restore registers and branch */
ARM_POP4 (code, ARMREG_R0, ARMREG_R1, ARMREG_IP, ARMREG_PC);
code = arm_emit_value_and_patch_ldr (code, target_code_ins, (gsize)item->value.target_code);
} else {
vtable_offset = DISTANCE (vtable, &vtable->vtable[item->value.vtable_slot]);
if (!arm_is_imm12 (vtable_offset)) {
/*
* We need to branch to a computed address but we don't have
* a free register to store it, since IP must contain the
* vtable address. So we push the two values to the stack, and
* load them both using LDM.
*/
/* Compute target address */
vtable_offset_ins = code;
ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
ARM_LDR_REG_REG (code, ARMREG_R1, ARMREG_IP, ARMREG_R1);
/* Save it to the fourth slot */
ARM_STR_IMM (code, ARMREG_R1, ARMREG_SP, 3 * sizeof (target_mgreg_t));
/* Restore registers and branch */
ARM_POP4 (code, ARMREG_R0, ARMREG_R1, ARMREG_IP, ARMREG_PC);
code = arm_emit_value_and_patch_ldr (code, vtable_offset_ins, vtable_offset);
} else {
ARM_POP2 (code, ARMREG_R0, ARMREG_R1);
if (large_offsets) {
mono_add_unwind_op_def_cfa_offset (unwind_ops, code, start, 2 * sizeof (target_mgreg_t));
ARM_ADD_REG_IMM8 (code, ARMREG_SP, ARMREG_SP, 2 * sizeof (target_mgreg_t));
}
mono_add_unwind_op_def_cfa_offset (unwind_ops, code, start, 0);
ARM_LDR_IMM (code, ARMREG_PC, ARMREG_IP, vtable_offset);
}
}
if (fail_case) {
arm_patch (item->jmp_code, (guchar*)code);
target_code_ins = code;
/* Load target address */
ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
/* Save it to the fourth slot */
ARM_STR_IMM (code, ARMREG_R1, ARMREG_SP, 3 * sizeof (target_mgreg_t));
/* Restore registers and branch */
ARM_POP4 (code, ARMREG_R0, ARMREG_R1, ARMREG_IP, ARMREG_PC);
code = arm_emit_value_and_patch_ldr (code, target_code_ins, (gsize)fail_tramp);
item->jmp_code = NULL;
}
if (imt_method)
code = arm_emit_value_and_patch_ldr (code, imt_method, (guint32)(gsize)item->key);
/*must emit after unconditional branch*/
if (vtable_target) {
code = arm_emit_value_and_patch_ldr (code, vtable_target, (guint32)(gsize)vtable);
item->chunk_size += 4;
vtable_target = NULL;
}
/*We reserve the space for bsearch IMT values after the first entry with an absolute jump*/
constant_pool_starts [i] = code;
if (extra_space) {
code += extra_space;
extra_space = 0;
}
} else {
ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
item->jmp_code = (guint8*)code;
ARM_B_COND (code, ARMCOND_HS, 0);
++extra_space;
}
}
for (i = 0; i < count; ++i) {
MonoIMTCheckItem *item = imt_entries [i];
if (item->jmp_code) {
if (item->check_target_idx)
arm_patch (item->jmp_code, imt_entries [item->check_target_idx]->code_target);
}
if (i > 0 && item->is_equals) {
int j;
arminstr_t *space_start = constant_pool_starts [i];
for (j = i - 1; j >= 0 && !imt_entries [j]->is_equals; --j) {
space_start = arm_emit_value_and_patch_ldr (space_start, (arminstr_t*)imt_entries [j]->code_target, (guint32)(gsize)imt_entries [j]->key);
}
}
}
#ifdef DEBUG_IMT
{
char *buff = g_strdup_printf ("thunk_for_class_%s_%s_entries_%d", m_class_get_name_space (vtable->klass), m_class_get_name (vtable->klass), count);
mono_disassemble_code (NULL, (guint8*)start, size, buff);
g_free (buff);
}
#endif
g_free (constant_pool_starts);
mono_arch_flush_icache ((guint8*)start, size);
MONO_PROFILER_RAISE (jit_code_buffer, ((guint8*)start, code - start, MONO_PROFILER_CODE_BUFFER_IMT_TRAMPOLINE, NULL));
UnlockedAdd (&mono_stats.imt_trampolines_size, code - start);
g_assert (DISTANCE (start, code) <= size);
mono_tramp_info_register (mono_tramp_info_create (NULL, (guint8*)start, DISTANCE (start, code), NULL, unwind_ops), mem_manager);
return start;
}
host_mgreg_t
mono_arch_context_get_int_reg (MonoContext *ctx, int reg)
{
return ctx->regs [reg];
}
host_mgreg_t*
mono_arch_context_get_int_reg_address (MonoContext *ctx, int reg)
{
return &ctx->regs [reg];
}
void
mono_arch_context_set_int_reg (MonoContext *ctx, int reg, host_mgreg_t val)
{
ctx->regs [reg] = val;
}
/*
* mono_arch_get_trampolines:
*
* Return a list of MonoTrampInfo structures describing arch specific trampolines
* for AOT.
*/
GSList *
mono_arch_get_trampolines (gboolean aot)
{
return mono_arm_get_exception_trampolines (aot);
}
#if defined(MONO_ARCH_SOFT_DEBUG_SUPPORTED)
/*
* mono_arch_set_breakpoint:
*
* Set a breakpoint at the native code corresponding to JI at NATIVE_OFFSET.
* The location should contain code emitted by OP_SEQ_POINT.
*/
void
mono_arch_set_breakpoint (MonoJitInfo *ji, guint8 *ip)
{
guint8 *code = ip;
guint32 native_offset = ip - (guint8*)ji->code_start;
if (ji->from_aot) {
SeqPointInfo *info = mono_arch_get_seq_point_info ((guint8*)ji->code_start);
if (!breakpoint_tramp)
breakpoint_tramp = mini_get_breakpoint_trampoline ();
g_assert (native_offset % 4 == 0);
g_assert (info->bp_addrs [native_offset / 4] == 0);
info->bp_addrs [native_offset / 4] = (guint8*)(mini_debug_options.soft_breakpoints ? breakpoint_tramp : bp_trigger_page);
} else if (mini_debug_options.soft_breakpoints) {
code += 4;
ARM_BLX_REG (code, ARMREG_LR);
mono_arch_flush_icache (code - 4, 4);
} else {
int dreg = ARMREG_LR;
/* Read from another trigger page */
ARM_LDR_IMM (code, dreg, ARMREG_PC, 0);
ARM_B (code, 0);
*(int*)code = (int)(gssize)bp_trigger_page;
code += 4;
ARM_LDR_IMM (code, dreg, dreg, 0);
mono_arch_flush_icache (code - 16, 16);
#if 0
/* This is currently implemented by emitting an SWI instruction, which
* qemu/linux seems to convert to a SIGILL.
*/
*(int*)code = (0xef << 24) | 8;
code += 4;
mono_arch_flush_icache (code - 4, 4);
#endif
}
}
/*
* mono_arch_clear_breakpoint:
*
* Clear the breakpoint at IP.
*/
void
mono_arch_clear_breakpoint (MonoJitInfo *ji, guint8 *ip)
{
guint8 *code = ip;
int i;
if (ji->from_aot) {
guint32 native_offset = ip - (guint8*)ji->code_start;
SeqPointInfo *info = mono_arch_get_seq_point_info ((guint8*)ji->code_start);
if (!breakpoint_tramp)
breakpoint_tramp = mini_get_breakpoint_trampoline ();
g_assert (native_offset % 4 == 0);
g_assert (info->bp_addrs [native_offset / 4] == (guint8*)(mini_debug_options.soft_breakpoints ? breakpoint_tramp : bp_trigger_page));
info->bp_addrs [native_offset / 4] = 0;
} else if (mini_debug_options.soft_breakpoints) {
code += 4;
ARM_NOP (code);
mono_arch_flush_icache (code - 4, 4);
} else {
for (i = 0; i < 4; ++i)
ARM_NOP (code);
mono_arch_flush_icache (ip, code - ip);
}
}
/*
* mono_arch_start_single_stepping:
*
* Start single stepping.
*/
void
mono_arch_start_single_stepping (void)
{
if (ss_trigger_page)
mono_mprotect (ss_trigger_page, mono_pagesize (), 0);
else
single_step_tramp = mini_get_single_step_trampoline ();
}
/*
* mono_arch_stop_single_stepping:
*
* Stop single stepping.
*/
void
mono_arch_stop_single_stepping (void)
{
if (ss_trigger_page)
mono_mprotect (ss_trigger_page, mono_pagesize (), MONO_MMAP_READ);
else
single_step_tramp = NULL;
}
#if __APPLE__
#define DBG_SIGNAL SIGBUS
#else
#define DBG_SIGNAL SIGSEGV
#endif
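/*
 * A fault on a read-protected trigger page is reported as SIGBUS on
 * Darwin but as SIGSEGV on Linux, hence the per-platform DBG_SIGNAL.
 */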
/*
* mono_arch_is_single_step_event:
*
* Return whether the machine state in SIGCTX corresponds to a single
* step event.
*/
gboolean
mono_arch_is_single_step_event (void *info, void *sigctx)
{
siginfo_t *sinfo = (siginfo_t*)info;
if (!ss_trigger_page)
return FALSE;
/* Sometimes the address is off by 4 */
if (sinfo->si_addr >= ss_trigger_page && (guint8*)sinfo->si_addr <= (guint8*)ss_trigger_page + 128)
return TRUE;
else
return FALSE;
}
/*
* mono_arch_is_breakpoint_event:
*
* Return whether the machine state in SIGCTX corresponds to a breakpoint event.
*/
gboolean
mono_arch_is_breakpoint_event (void *info, void *sigctx)
{
siginfo_t *sinfo = (siginfo_t*)info;
if (!ss_trigger_page)
return FALSE;
if (sinfo->si_signo == DBG_SIGNAL) {
/* Sometimes the address is off by 4 */
if (sinfo->si_addr >= bp_trigger_page && (guint8*)sinfo->si_addr <= (guint8*)bp_trigger_page + 128)
return TRUE;
else
return FALSE;
} else {
return FALSE;
}
}
/*
* mono_arch_skip_breakpoint:
*
* See mini-amd64.c for docs.
*/
void
mono_arch_skip_breakpoint (MonoContext *ctx, MonoJitInfo *ji)
{
MONO_CONTEXT_SET_IP (ctx, (guint8*)MONO_CONTEXT_GET_IP (ctx) + 4);
}
/*
* mono_arch_skip_single_step:
*
* See mini-amd64.c for docs.
*/
void
mono_arch_skip_single_step (MonoContext *ctx)
{
MONO_CONTEXT_SET_IP (ctx, (guint8*)MONO_CONTEXT_GET_IP (ctx) + 4);
}
/*
* mono_arch_get_seq_point_info:
*
* See mini-amd64.c for docs.
*/
SeqPointInfo*
mono_arch_get_seq_point_info (guint8 *code)
{
SeqPointInfo *info;
MonoJitInfo *ji;
MonoJitMemoryManager *jit_mm;
jit_mm = get_default_jit_mm ();
// FIXME: Add a free function
jit_mm_lock (jit_mm);
info = (SeqPointInfo *)g_hash_table_lookup (jit_mm->arch_seq_points, code);
jit_mm_unlock (jit_mm);
if (!info) {
ji = mini_jit_info_table_find (code);
g_assert (ji);
info = g_malloc0 (sizeof (SeqPointInfo) + ji->code_size);
info->ss_trigger_page = ss_trigger_page;
info->bp_trigger_page = bp_trigger_page;
info->ss_tramp_addr = &single_step_tramp;
jit_mm_lock (jit_mm);
g_hash_table_insert (jit_mm->arch_seq_points, code, info);
jit_mm_unlock (jit_mm);
}
return info;
}
#endif /* MONO_ARCH_SOFT_DEBUG_SUPPORTED */
/*
* mono_arch_set_target:
*
* Set the target architecture the JIT backend should generate code for, in the form
* of a GNU target triplet. Only used in AOT mode.
*/
void
mono_arch_set_target (char *mtriple)
{
/* The GNU target triple format is not very well documented */
if (strstr (mtriple, "armv7")) {
v5_supported = TRUE;
v6_supported = TRUE;
v7_supported = TRUE;
}
if (strstr (mtriple, "armv6")) {
v5_supported = TRUE;
v6_supported = TRUE;
}
if (strstr (mtriple, "armv7s")) {
v7s_supported = TRUE;
}
if (strstr (mtriple, "armv7k")) {
v7k_supported = TRUE;
}
if (strstr (mtriple, "thumbv7s")) {
v5_supported = TRUE;
v6_supported = TRUE;
v7_supported = TRUE;
v7s_supported = TRUE;
thumb_supported = TRUE;
thumb2_supported = TRUE;
}
if (strstr (mtriple, "darwin") || strstr (mtriple, "ios")) {
v5_supported = TRUE;
v6_supported = TRUE;
thumb_supported = TRUE;
iphone_abi = TRUE;
}
if (strstr (mtriple, "gnueabi"))
eabi_supported = TRUE;
}
gboolean
mono_arch_opcode_supported (int opcode)
{
switch (opcode) {
case OP_ATOMIC_ADD_I4:
case OP_ATOMIC_EXCHANGE_I4:
case OP_ATOMIC_CAS_I4:
case OP_ATOMIC_LOAD_I1:
case OP_ATOMIC_LOAD_I2:
case OP_ATOMIC_LOAD_I4:
case OP_ATOMIC_LOAD_U1:
case OP_ATOMIC_LOAD_U2:
case OP_ATOMIC_LOAD_U4:
case OP_ATOMIC_STORE_I1:
case OP_ATOMIC_STORE_I2:
case OP_ATOMIC_STORE_I4:
case OP_ATOMIC_STORE_U1:
case OP_ATOMIC_STORE_U2:
case OP_ATOMIC_STORE_U4:
return v7_supported;
case OP_ATOMIC_LOAD_R4:
case OP_ATOMIC_LOAD_R8:
case OP_ATOMIC_STORE_R4:
case OP_ATOMIC_STORE_R8:
return v7_supported && IS_VFP;
default:
return FALSE;
}
}
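/*
 * The v7 requirement above is presumably because these opcodes are
 * implemented with ldrex/strex loops plus dmb barriers; dmb is an ARMv7
 * instruction (ARMv6 only has the CP15 barrier encodings).
 */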
CallInfo*
mono_arch_get_call_info (MonoMemPool *mp, MonoMethodSignature *sig)
{
return get_call_info (mp, sig);
}
gpointer
mono_arch_get_get_tls_tramp (void)
{
return NULL;
}
static G_GNUC_UNUSED guint8*
emit_aotconst (MonoCompile *cfg, guint8 *code, int dreg, int patch_type, gpointer data)
{
/* OP_AOTCONST */
mono_add_patch_info (cfg, code - cfg->native_code, (MonoJumpInfoType)patch_type, data);
ARM_LDR_IMM (code, dreg, ARMREG_PC, 0);
ARM_B (code, 0);
*(gpointer*)code = NULL;
code += 4;
/* Load the value from the GOT */
ARM_LDR_REG_REG (code, dreg, ARMREG_PC, dreg);
return code;
}
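/*
 * Pattern sketch: the ldr above reads a literal word placed immediately
 * after a branch that jumps over it; the AOT compiler later patches that
 * word with a GOT offset, and the final pc-relative ldr fetches the real
 * value from the GOT slot at runtime.
 */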
guint8*
mono_arm_emit_aotconst (gpointer ji_list, guint8 *code, guint8 *buf, int dreg, int patch_type, gconstpointer data)
{
MonoJumpInfo **ji = (MonoJumpInfo**)ji_list;
*ji = mono_patch_info_list_prepend (*ji, code - buf, (MonoJumpInfoType)patch_type, data);
ARM_LDR_IMM (code, dreg, ARMREG_PC, 0);
ARM_B (code, 0);
*(gpointer*)code = NULL;
code += 4;
ARM_LDR_REG_REG (code, dreg, ARMREG_PC, dreg);
return code;
}
gpointer
mono_arch_load_function (MonoJitICallId jit_icall_id)
{
gpointer target = NULL;
switch (jit_icall_id) {
#undef MONO_AOT_ICALL
#define MONO_AOT_ICALL(x) case MONO_JIT_ICALL_ ## x: target = (gpointer)x; break;
MONO_AOT_ICALL (mono_arm_resume_unwind)
MONO_AOT_ICALL (mono_arm_start_gsharedvt_call)
MONO_AOT_ICALL (mono_arm_throw_exception)
MONO_AOT_ICALL (mono_arm_throw_exception_by_token)
MONO_AOT_ICALL (mono_arm_unaligned_stack)
}
return target;
}
/**
* \file
* ARM backend for the Mono code generator
*
* Authors:
* Paolo Molaro ([email protected])
* Dietmar Maurer ([email protected])
*
* (C) 2003 Ximian, Inc.
* Copyright 2003-2011 Novell, Inc (http://www.novell.com)
* Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
* Licensed under the MIT license. See LICENSE file in the project root for full license information.
*/
#include "mini.h"
#include <string.h>
#include <mono/metadata/abi-details.h>
#include <mono/metadata/appdomain.h>
#include <mono/metadata/profiler-private.h>
#include <mono/metadata/debug-helpers.h>
#include <mono/metadata/tokentype.h>
#include <mono/utils/mono-mmap.h>
#include <mono/utils/mono-hwcap.h>
#include <mono/utils/mono-memory-model.h>
#include <mono/utils/mono-threads-coop.h>
#include <mono/utils/unlocked.h>
#include "interp/interp.h"
#include "mini-arm.h"
#include "cpu-arm.h"
#include "ir-emit.h"
#include "mini-gc.h"
#include "mini-runtime.h"
#include "aot-runtime.h"
#include "mono/arch/arm/arm-vfp-codegen.h"
#include "mono/utils/mono-tls-inline.h"
/* Sanity check: This makes no sense */
#if defined(ARM_FPU_NONE) && (defined(ARM_FPU_VFP) || defined(ARM_FPU_VFP_HARD))
#error "ARM_FPU_NONE is defined while one of ARM_FPU_VFP/ARM_FPU_VFP_HARD is defined"
#endif
/*
* IS_SOFT_FLOAT: Is full software floating point used?
* IS_HARD_FLOAT: Is full hardware floating point used?
* IS_VFP: Is hardware floating point with software ABI used?
*
* These are not necessarily constants, e.g. IS_SOFT_FLOAT and
* IS_VFP may delegate to mono_arch_is_soft_float ().
*/
#if defined(ARM_FPU_VFP_HARD)
#define IS_SOFT_FLOAT (FALSE)
#define IS_HARD_FLOAT (TRUE)
#define IS_VFP (TRUE)
#elif defined(ARM_FPU_NONE)
#define IS_SOFT_FLOAT (mono_arch_is_soft_float ())
#define IS_HARD_FLOAT (FALSE)
#define IS_VFP (!mono_arch_is_soft_float ())
#else
#define IS_SOFT_FLOAT (FALSE)
#define IS_HARD_FLOAT (FALSE)
#define IS_VFP (TRUE)
#endif
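/*
 * Summary of the three configurations above (informational):
 *   ARM_FPU_VFP_HARD -> hard float: VFP registers are part of the ABI
 *   ARM_FPU_NONE     -> decided at runtime, may fall back to soft float
 *   ARM_FPU_VFP      -> VFP instructions with the soft-float ABI
 */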
#define THUNK_SIZE (3 * 4)
#if __APPLE__
G_BEGIN_DECLS
void sys_icache_invalidate (void *start, size_t len);
G_END_DECLS
#endif
/* This mutex protects architecture specific caches */
#define mono_mini_arch_lock() mono_os_mutex_lock (&mini_arch_mutex)
#define mono_mini_arch_unlock() mono_os_mutex_unlock (&mini_arch_mutex)
static mono_mutex_t mini_arch_mutex;
static gboolean v5_supported = FALSE;
static gboolean v6_supported = FALSE;
static gboolean v7_supported = FALSE;
static gboolean v7s_supported = FALSE;
static gboolean v7k_supported = FALSE;
static gboolean thumb_supported = FALSE;
static gboolean thumb2_supported = FALSE;
/*
 * Whether to use the ARM EABI
*/
static gboolean eabi_supported = FALSE;
/*
 * Whether to use the iPhone ABI extensions:
* http://developer.apple.com/library/ios/documentation/Xcode/Conceptual/iPhoneOSABIReference/index.html
* Basically, r7 is used as a frame pointer and it should point to the saved r7 + lr.
* This is required for debugging/profiling tools to work, but it has some overhead so it should
* only be turned on in debug builds.
*/
static gboolean iphone_abi = FALSE;
/*
* The FPU we are generating code for. This is NOT runtime configurable right now,
* since some things like MONO_ARCH_CALLEE_FREGS still depend on defines.
*/
static MonoArmFPU arm_fpu;
#if defined(ARM_FPU_VFP_HARD)
/*
* On armhf, d0-d7 are used for argument passing and d8-d15
* must be preserved across calls, which leaves us no room
* for scratch registers. So we use d14-d15 but back up their
* previous contents to a stack slot before using them - see
* mono_arm_emit_vfp_scratch_save/_restore ().
*/
static int vfp_scratch1 = ARM_VFP_D14;
static int vfp_scratch2 = ARM_VFP_D15;
#else
/*
* On armel, d0-d7 do not need to be preserved, so we can
* freely make use of them as scratch registers.
*/
static int vfp_scratch1 = ARM_VFP_D0;
static int vfp_scratch2 = ARM_VFP_D1;
#endif
static int i8_align;
static gpointer single_step_tramp, breakpoint_tramp;
/*
* The code generated for sequence points reads from this location, which is
* made read-only when single stepping is enabled.
*/
static gpointer ss_trigger_page;
/* Enabled breakpoints read from this trigger page */
static gpointer bp_trigger_page;
/*
* TODO:
 * floating point support: on ARM it is a mess, there are at least 3
 * different setups, each of which is binary-incompatible with the others.
 * 1) FPA: old and ugly, but unfortunately what current distros use;
 *    the double binary format has the two words swapped. 8 double registers.
 *    Usually implemented by kernel emulation.
 * 2) softfloat: the compiler emulates all the fp ops. Usually uses the
 *    ugly swapped double format (I guess a softfloat-vfp exists, too, though).
 * 3) VFP: the new and actually sensible and useful FP support. Implemented
 *    in HW or kernel-emulated, requires new tools. I think this is what Symbian uses.
*
* We do not care about FPA. We will support soft float and VFP.
*/
#define arm_is_imm12(v) ((v) > -4096 && (v) < 4096)
#define arm_is_imm8(v) ((v) > -256 && (v) < 256)
#define arm_is_fpimm8(v) ((v) >= -1020 && (v) <= 1020)
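/*
 * Worked examples for the ranges above (illustrative): arm_is_imm12 (4095)
 * is TRUE but arm_is_imm12 (4096) is FALSE, matching the 12-bit LDR/STR
 * word offset; arm_is_fpimm8 allows +-1020 because VFP load/store offsets
 * are 8-bit word multiples (255 * 4).
 */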
#define LDR_MASK ((0xf << ARMCOND_SHIFT) | (3 << 26) | (1 << 22) | (1 << 20) | (15 << 12))
#define LDR_PC_VAL ((ARMCOND_AL << ARMCOND_SHIFT) | (1 << 26) | (0 << 22) | (1 << 20) | (15 << 12))
#define IS_LDR_PC(val) (((val) & LDR_MASK) == LDR_PC_VAL)
//#define DEBUG_IMT 0
#ifndef DISABLE_JIT
static void mono_arch_compute_omit_fp (MonoCompile *cfg);
#endif
static guint8*
emit_aotconst (MonoCompile *cfg, guint8 *code, int dreg, int patch_type, gpointer data);
const char*
mono_arch_regname (int reg)
{
static const char * rnames[] = {
"arm_r0", "arm_r1", "arm_r2", "arm_r3", "arm_v1",
"arm_v2", "arm_v3", "arm_v4", "arm_v5", "arm_v6",
"arm_v7", "arm_fp", "arm_ip", "arm_sp", "arm_lr",
"arm_pc"
};
if (reg >= 0 && reg < 16)
return rnames [reg];
return "unknown";
}
const char*
mono_arch_fregname (int reg)
{
static const char * rnames[] = {
"arm_f0", "arm_f1", "arm_f2", "arm_f3", "arm_f4",
"arm_f5", "arm_f6", "arm_f7", "arm_f8", "arm_f9",
"arm_f10", "arm_f11", "arm_f12", "arm_f13", "arm_f14",
"arm_f15", "arm_f16", "arm_f17", "arm_f18", "arm_f19",
"arm_f20", "arm_f21", "arm_f22", "arm_f23", "arm_f24",
"arm_f25", "arm_f26", "arm_f27", "arm_f28", "arm_f29",
"arm_f30", "arm_f31"
};
if (reg >= 0 && reg < 32)
return rnames [reg];
return "unknown";
}
#ifndef DISABLE_JIT
static guint8*
emit_big_add_temp (guint8 *code, int dreg, int sreg, int imm, int temp)
{
int imm8, rot_amount;
g_assert (temp == ARMREG_IP || temp == ARMREG_LR);
	if (imm == 0) {
		if (sreg != dreg)
			ARM_MOV_REG_REG (code, dreg, sreg);
		/* Nothing to add; returning here avoids emitting a redundant add below */
		return code;
	} else if ((imm8 = mono_arm_is_rotated_imm8 (imm, &rot_amount)) >= 0) {
		ARM_ADD_REG_IMM (code, dreg, sreg, imm8, rot_amount);
		return code;
	}
if (dreg == sreg) {
code = mono_arm_emit_load_imm (code, temp, imm);
ARM_ADD_REG_REG (code, dreg, sreg, temp);
} else {
code = mono_arm_emit_load_imm (code, dreg, imm);
ARM_ADD_REG_REG (code, dreg, dreg, sreg);
}
return code;
}
static guint8*
emit_big_add (guint8 *code, int dreg, int sreg, int imm)
{
return emit_big_add_temp (code, dreg, sreg, imm, ARMREG_IP);
}
static guint8*
emit_ldr_imm (guint8 *code, int dreg, int sreg, int imm)
{
if (!arm_is_imm12 (imm)) {
g_assert (dreg != sreg);
code = emit_big_add (code, dreg, sreg, imm);
ARM_LDR_IMM (code, dreg, dreg, 0);
} else {
ARM_LDR_IMM (code, dreg, sreg, imm);
}
return code;
}
/* If dreg == sreg, this clobbers IP */
static guint8*
emit_sub_imm (guint8 *code, int dreg, int sreg, int imm)
{
int imm8, rot_amount;
if ((imm8 = mono_arm_is_rotated_imm8 (imm, &rot_amount)) >= 0) {
ARM_SUB_REG_IMM (code, dreg, sreg, imm8, rot_amount);
return code;
}
if (dreg == sreg) {
code = mono_arm_emit_load_imm (code, ARMREG_IP, imm);
ARM_SUB_REG_REG (code, dreg, sreg, ARMREG_IP);
} else {
code = mono_arm_emit_load_imm (code, dreg, imm);
ARM_SUB_REG_REG (code, dreg, dreg, sreg);
}
return code;
}
static guint8*
emit_memcpy (guint8 *code, int size, int dreg, int doffset, int sreg, int soffset)
{
/* we can use r0-r3, since this is called only for incoming args on the stack */
if (size > sizeof (target_mgreg_t) * 4) {
guint8 *start_loop;
code = emit_big_add (code, ARMREG_R0, sreg, soffset);
code = emit_big_add (code, ARMREG_R1, dreg, doffset);
start_loop = code = mono_arm_emit_load_imm (code, ARMREG_R2, size);
ARM_LDR_IMM (code, ARMREG_R3, ARMREG_R0, 0);
ARM_STR_IMM (code, ARMREG_R3, ARMREG_R1, 0);
ARM_ADD_REG_IMM8 (code, ARMREG_R0, ARMREG_R0, 4);
ARM_ADD_REG_IMM8 (code, ARMREG_R1, ARMREG_R1, 4);
ARM_SUBS_REG_IMM8 (code, ARMREG_R2, ARMREG_R2, 4);
ARM_B_COND (code, ARMCOND_NE, 0);
arm_patch (code - 4, start_loop);
return code;
}
if (arm_is_imm12 (doffset) && arm_is_imm12 (doffset + size) &&
arm_is_imm12 (soffset) && arm_is_imm12 (soffset + size)) {
while (size >= 4) {
ARM_LDR_IMM (code, ARMREG_LR, sreg, soffset);
ARM_STR_IMM (code, ARMREG_LR, dreg, doffset);
doffset += 4;
soffset += 4;
size -= 4;
}
} else if (size) {
code = emit_big_add (code, ARMREG_R0, sreg, soffset);
code = emit_big_add (code, ARMREG_R1, dreg, doffset);
doffset = soffset = 0;
while (size >= 4) {
ARM_LDR_IMM (code, ARMREG_LR, ARMREG_R0, soffset);
ARM_STR_IMM (code, ARMREG_LR, ARMREG_R1, doffset);
doffset += 4;
soffset += 4;
size -= 4;
}
}
g_assert (size == 0);
return code;
}
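/*
 * Illustrative behavior of emit_memcpy () above: a copy of up to 4 machine
 * words (e.g. 16 bytes) is fully unrolled into ldr/str pairs, while larger
 * copies go through the decrementing-counter loop.
 */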
static guint8*
emit_jmp_reg (guint8 *code, int reg)
{
if (thumb_supported)
ARM_BX (code, reg);
else
ARM_MOV_REG_REG (code, ARMREG_PC, reg);
return code;
}
static guint8*
emit_call_reg (guint8 *code, int reg)
{
if (v5_supported) {
ARM_BLX_REG (code, reg);
} else {
ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
return emit_jmp_reg (code, reg);
}
return code;
}
static guint8*
emit_call_seq (MonoCompile *cfg, guint8 *code)
{
if (cfg->method->dynamic) {
ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
ARM_B (code, 0);
*(gpointer*)code = NULL;
code += 4;
code = emit_call_reg (code, ARMREG_IP);
} else {
ARM_BL (code, 0);
}
cfg->thunk_area += THUNK_SIZE;
return code;
}
guint8*
mono_arm_patchable_b (guint8 *code, int cond)
{
ARM_B_COND (code, cond, 0);
return code;
}
guint8*
mono_arm_patchable_bl (guint8 *code, int cond)
{
ARM_BL_COND (code, cond, 0);
return code;
}
#if defined(__ARM_EABI__) && defined(__linux__) && !defined(HOST_ANDROID) && !defined(MONO_CROSS_COMPILE)
#define HAVE_AEABI_READ_TP 1
#endif
#ifdef HAVE_AEABI_READ_TP
G_BEGIN_DECLS
gpointer __aeabi_read_tp (void);
G_END_DECLS
#endif
gboolean
mono_arch_have_fast_tls (void)
{
#ifdef HAVE_AEABI_READ_TP
static gboolean have_fast_tls = FALSE;
static gboolean inited = FALSE;
if (mini_debug_options.use_fallback_tls)
return FALSE;
if (inited)
return have_fast_tls;
if (v7_supported) {
gpointer tp1, tp2;
tp1 = __aeabi_read_tp ();
asm volatile("mrc p15, 0, %0, c13, c0, 3" : "=r" (tp2));
have_fast_tls = tp1 && tp1 == tp2;
}
inited = TRUE;
return have_fast_tls;
#else
return FALSE;
#endif
}
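/*
 * Note (informational): the MRC reads below access c13/c0/3, the user
 * read-only thread ID register (TPIDRURO), which the kernel/libc set to
 * the thread pointer; tls_offset is then a fixed displacement from it.
 */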
static guint8*
emit_tls_get (guint8 *code, int dreg, int tls_offset)
{
g_assert (v7_supported);
ARM_MRC (code, 15, 0, dreg, 13, 0, 3);
ARM_LDR_IMM (code, dreg, dreg, tls_offset);
return code;
}
static guint8*
emit_tls_set (guint8 *code, int sreg, int tls_offset)
{
int tp_reg = (sreg != ARMREG_R0) ? ARMREG_R0 : ARMREG_R1;
g_assert (v7_supported);
ARM_MRC (code, 15, 0, tp_reg, 13, 0, 3);
ARM_STR_IMM (code, sreg, tp_reg, tls_offset);
return code;
}
/*
* emit_save_lmf:
*
* Emit code to push an LMF structure on the LMF stack.
* On arm, this is intermixed with the initialization of other fields of the structure.
*/
static guint8*
emit_save_lmf (MonoCompile *cfg, guint8 *code, gint32 lmf_offset)
{
int i;
if (mono_arch_have_fast_tls () && mono_tls_get_tls_offset (TLS_KEY_LMF_ADDR) != -1) {
code = emit_tls_get (code, ARMREG_R0, mono_tls_get_tls_offset (TLS_KEY_LMF_ADDR));
} else {
mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_JIT_ICALL_ID,
GUINT_TO_POINTER (MONO_JIT_ICALL_mono_tls_get_lmf_addr_extern));
code = emit_call_seq (cfg, code);
}
/* we build the MonoLMF structure on the stack - see mini-arm.h */
/* lmf_offset is the offset from the previous stack pointer,
* alloc_size is the total stack space allocated, so the offset
* of MonoLMF from the current stack ptr is alloc_size - lmf_offset.
* The pointer to the struct is put in r1 (new_lmf).
* ip is used as scratch
* The callee-saved registers are already in the MonoLMF structure
*/
code = emit_big_add (code, ARMREG_R1, ARMREG_SP, lmf_offset);
/* r0 is the result from mono_get_lmf_addr () */
ARM_STR_IMM (code, ARMREG_R0, ARMREG_R1, MONO_STRUCT_OFFSET (MonoLMF, lmf_addr));
/* new_lmf->previous_lmf = *lmf_addr */
ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R0, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
ARM_STR_IMM (code, ARMREG_IP, ARMREG_R1, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
/* *(lmf_addr) = r1 */
ARM_STR_IMM (code, ARMREG_R1, ARMREG_R0, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
/* Skip method (only needed for trampoline LMF frames) */
ARM_STR_IMM (code, ARMREG_SP, ARMREG_R1, MONO_STRUCT_OFFSET (MonoLMF, sp));
ARM_STR_IMM (code, ARMREG_FP, ARMREG_R1, MONO_STRUCT_OFFSET (MonoLMF, fp));
/* save the current IP */
ARM_MOV_REG_REG (code, ARMREG_IP, ARMREG_PC);
ARM_STR_IMM (code, ARMREG_IP, ARMREG_R1, MONO_STRUCT_OFFSET (MonoLMF, ip));
for (i = 0; i < MONO_ABI_SIZEOF (MonoLMF); i += sizeof (target_mgreg_t))
mini_gc_set_slot_type_from_fp (cfg, lmf_offset + i, SLOT_NOREF);
return code;
}
typedef struct {
gint32 vreg;
gint32 hreg;
} FloatArgData;
static guint8 *
emit_float_args (MonoCompile *cfg, MonoCallInst *inst, guint8 *code, int *max_len, guint *offset)
{
GSList *list;
set_code_cursor (cfg, code);
for (list = inst->float_args; list; list = list->next) {
FloatArgData *fad = (FloatArgData*)list->data;
MonoInst *var = get_vreg_to_inst (cfg, fad->vreg);
gboolean imm = arm_is_fpimm8 (var->inst_offset);
/* 4+1 insns for emit_big_add () and 1 for FLDS. */
if (!imm)
*max_len += 20 + 4;
*max_len += 4;
code = realloc_code (cfg, *max_len);
if (!imm) {
code = emit_big_add (code, ARMREG_LR, var->inst_basereg, var->inst_offset);
ARM_FLDS (code, fad->hreg, ARMREG_LR, 0);
} else
ARM_FLDS (code, fad->hreg, var->inst_basereg, var->inst_offset);
set_code_cursor (cfg, code);
*offset = code - cfg->native_code;
}
return code;
}
static guint8 *
mono_arm_emit_vfp_scratch_save (MonoCompile *cfg, guint8 *code, int reg)
{
MonoInst *inst;
g_assert (reg == vfp_scratch1 || reg == vfp_scratch2);
inst = cfg->arch.vfp_scratch_slots [reg == vfp_scratch1 ? 0 : 1];
if (IS_HARD_FLOAT) {
if (!arm_is_fpimm8 (inst->inst_offset)) {
code = emit_big_add (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
ARM_FSTD (code, reg, ARMREG_LR, 0);
} else
ARM_FSTD (code, reg, inst->inst_basereg, inst->inst_offset);
}
return code;
}
static guint8 *
mono_arm_emit_vfp_scratch_restore (MonoCompile *cfg, guint8 *code, int reg)
{
MonoInst *inst;
g_assert (reg == vfp_scratch1 || reg == vfp_scratch2);
inst = cfg->arch.vfp_scratch_slots [reg == vfp_scratch1 ? 0 : 1];
if (IS_HARD_FLOAT) {
if (!arm_is_fpimm8 (inst->inst_offset)) {
code = emit_big_add (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
ARM_FLDD (code, reg, ARMREG_LR, 0);
} else
ARM_FLDD (code, reg, inst->inst_basereg, inst->inst_offset);
}
return code;
}
/*
* emit_restore_lmf:
*
* Emit code to pop an LMF structure from the LMF stack.
*/
static guint8*
emit_restore_lmf (MonoCompile *cfg, guint8 *code, gint32 lmf_offset)
{
int basereg, offset;
if (lmf_offset < 32) {
basereg = cfg->frame_reg;
offset = lmf_offset;
} else {
basereg = ARMREG_R2;
offset = 0;
code = emit_big_add (code, ARMREG_R2, cfg->frame_reg, lmf_offset);
}
/* ip = previous_lmf */
ARM_LDR_IMM (code, ARMREG_IP, basereg, offset + MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
/* lr = lmf_addr */
ARM_LDR_IMM (code, ARMREG_LR, basereg, offset + MONO_STRUCT_OFFSET (MonoLMF, lmf_addr));
/* *(lmf_addr) = previous_lmf */
ARM_STR_IMM (code, ARMREG_IP, ARMREG_LR, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
return code;
}
#endif /* #ifndef DISABLE_JIT */
/*
* mono_arch_get_argument_info:
* @csig: a method signature
* @param_count: the number of parameters to consider
* @arg_info: an array to store the result infos
*
* Gathers information on parameters such as size, alignment and
 * padding. arg_info should be large enough to hold param_count + 1 entries.
*
* Returns the size of the activation frame.
*/
int
mono_arch_get_argument_info (MonoMethodSignature *csig, int param_count, MonoJitArgumentInfo *arg_info)
{
int k, frame_size = 0;
guint32 size, align, pad;
int offset = 8;
MonoType *t;
t = mini_get_underlying_type (csig->ret);
if (MONO_TYPE_ISSTRUCT (t)) {
frame_size += sizeof (target_mgreg_t);
offset += 4;
}
arg_info [0].offset = offset;
if (csig->hasthis) {
frame_size += sizeof (target_mgreg_t);
offset += 4;
}
arg_info [0].size = frame_size;
for (k = 0; k < param_count; k++) {
size = mini_type_stack_size_full (csig->params [k], &align, csig->pinvoke && !csig->marshalling_disabled);
/* ignore alignment for now */
align = 1;
frame_size += pad = (align - (frame_size & (align - 1))) & (align - 1);
arg_info [k].pad = pad;
frame_size += size;
arg_info [k + 1].pad = 0;
arg_info [k + 1].size = size;
offset += pad;
arg_info [k + 1].offset = offset;
offset += size;
}
align = MONO_ARCH_FRAME_ALIGNMENT;
frame_size += pad = (align - (frame_size & (align - 1))) & (align - 1);
arg_info [k].pad = pad;
return frame_size;
}
#define MAX_ARCH_DELEGATE_PARAMS 3
static guint8*
get_delegate_invoke_impl (MonoTrampInfo **info, gboolean has_target, int param_count)
{
guint8 *code, *start;
GSList *unwind_ops = mono_arch_get_cie_program ();
if (has_target) {
start = code = mono_global_codeman_reserve (12);
/* Replace the this argument with the target */
ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R0, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr));
ARM_LDR_IMM (code, ARMREG_R0, ARMREG_R0, MONO_STRUCT_OFFSET (MonoDelegate, target));
ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
g_assert ((code - start) <= 12);
mono_arch_flush_icache (start, 12);
MONO_PROFILER_RAISE (jit_code_buffer, (start, code - start, MONO_PROFILER_CODE_BUFFER_DELEGATE_INVOKE, NULL));
} else {
int size, i;
size = 8 + param_count * 4;
start = code = mono_global_codeman_reserve (size);
ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R0, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr));
/* slide down the arguments */
for (i = 0; i < param_count; ++i) {
ARM_MOV_REG_REG (code, (ARMREG_R0 + i), (ARMREG_R0 + i + 1));
}
ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
g_assert ((code - start) <= size);
mono_arch_flush_icache (start, size);
MONO_PROFILER_RAISE (jit_code_buffer, (start, code - start, MONO_PROFILER_CODE_BUFFER_DELEGATE_INVOKE, NULL));
}
if (has_target) {
*info = mono_tramp_info_create ("delegate_invoke_impl_has_target", start, code - start, NULL, unwind_ops);
} else {
char *name = g_strdup_printf ("delegate_invoke_impl_target_%d", param_count);
*info = mono_tramp_info_create (name, start, code - start, NULL, unwind_ops);
g_free (name);
}
return start;
}
/*
* mono_arch_get_delegate_invoke_impls:
*
* Return a list of MonoAotTrampInfo structures for the delegate invoke impl
* trampolines.
*/
GSList*
mono_arch_get_delegate_invoke_impls (void)
{
GSList *res = NULL;
MonoTrampInfo *info;
int i;
get_delegate_invoke_impl (&info, TRUE, 0);
res = g_slist_prepend (res, info);
for (i = 0; i <= MAX_ARCH_DELEGATE_PARAMS; ++i) {
get_delegate_invoke_impl (&info, FALSE, i);
res = g_slist_prepend (res, info);
}
return res;
}
gpointer
mono_arch_get_delegate_invoke_impl (MonoMethodSignature *sig, gboolean has_target)
{
guint8 *code, *start;
MonoType *sig_ret;
/* FIXME: Support more cases */
sig_ret = mini_get_underlying_type (sig->ret);
if (MONO_TYPE_ISSTRUCT (sig_ret))
return NULL;
if (has_target) {
static guint8* cached = NULL;
mono_mini_arch_lock ();
if (cached) {
mono_mini_arch_unlock ();
return cached;
}
if (mono_ee_features.use_aot_trampolines) {
start = (guint8*)mono_aot_get_trampoline ("delegate_invoke_impl_has_target");
} else {
MonoTrampInfo *info;
start = get_delegate_invoke_impl (&info, TRUE, 0);
mono_tramp_info_register (info, NULL);
}
cached = start;
mono_mini_arch_unlock ();
return cached;
} else {
static guint8* cache [MAX_ARCH_DELEGATE_PARAMS + 1] = {NULL};
int i;
if (sig->param_count > MAX_ARCH_DELEGATE_PARAMS)
return NULL;
for (i = 0; i < sig->param_count; ++i)
if (!mono_is_regsize_var (sig->params [i]))
return NULL;
mono_mini_arch_lock ();
code = cache [sig->param_count];
if (code) {
mono_mini_arch_unlock ();
return code;
}
if (mono_ee_features.use_aot_trampolines) {
char *name = g_strdup_printf ("delegate_invoke_impl_target_%d", sig->param_count);
start = (guint8*)mono_aot_get_trampoline (name);
g_free (name);
} else {
MonoTrampInfo *info;
start = get_delegate_invoke_impl (&info, FALSE, sig->param_count);
mono_tramp_info_register (info, NULL);
}
cache [sig->param_count] = start;
mono_mini_arch_unlock ();
return start;
}
return NULL;
}
gpointer
mono_arch_get_delegate_virtual_invoke_impl (MonoMethodSignature *sig, MonoMethod *method, int offset, gboolean load_imt_reg)
{
return NULL;
}
gpointer
mono_arch_get_this_arg_from_call (host_mgreg_t *regs, guint8 *code)
{
return (gpointer)regs [ARMREG_R0];
}
/*
* Initialize the cpu to execute managed code.
*/
void
mono_arch_cpu_init (void)
{
i8_align = MONO_ABI_ALIGNOF (gint64);
#ifdef MONO_CROSS_COMPILE
	/* Need to set the alignment of i8 since it can be different on the target */
#ifdef TARGET_ANDROID
/* linux gnueabi */
mono_type_set_alignment (MONO_TYPE_I8, i8_align);
#endif
#endif
}
/*
* Initialize architecture specific code.
*/
void
mono_arch_init (void)
{
char *cpu_arch;
#ifdef TARGET_WATCHOS
mini_debug_options.soft_breakpoints = TRUE;
#endif
mono_os_mutex_init_recursive (&mini_arch_mutex);
if (mini_debug_options.soft_breakpoints) {
if (!mono_aot_only)
breakpoint_tramp = mini_get_breakpoint_trampoline ();
} else {
ss_trigger_page = mono_valloc (NULL, mono_pagesize (), MONO_MMAP_READ, MONO_MEM_ACCOUNT_OTHER);
bp_trigger_page = mono_valloc (NULL, mono_pagesize (), MONO_MMAP_READ, MONO_MEM_ACCOUNT_OTHER);
mono_mprotect (bp_trigger_page, mono_pagesize (), 0);
}
#if defined(__ARM_EABI__)
eabi_supported = TRUE;
#endif
#if defined(ARM_FPU_VFP_HARD)
arm_fpu = MONO_ARM_FPU_VFP_HARD;
#else
arm_fpu = MONO_ARM_FPU_VFP;
#if defined(ARM_FPU_NONE) && !defined(TARGET_IOS)
/*
* If we're compiling with a soft float fallback and it
* turns out that no VFP unit is available, we need to
* switch to soft float. We don't do this for iOS, since
* iOS devices always have a VFP unit.
*/
if (!mono_hwcap_arm_has_vfp)
arm_fpu = MONO_ARM_FPU_NONE;
/*
* This environment variable can be useful in testing
* environments to make sure the soft float fallback
* works. Most ARM devices have VFP units these days, so
* normally soft float code would not be exercised much.
*/
char *soft = g_getenv ("MONO_ARM_FORCE_SOFT_FLOAT");
if (soft && !strncmp (soft, "1", 1))
arm_fpu = MONO_ARM_FPU_NONE;
g_free (soft);
#endif
#endif
v5_supported = mono_hwcap_arm_is_v5;
v6_supported = mono_hwcap_arm_is_v6;
v7_supported = mono_hwcap_arm_is_v7;
/*
* On weird devices, the hwcap code may fail to detect
* the ARM version. In that case, we can at least safely
* assume the version the runtime was compiled for.
*/
#ifdef HAVE_ARMV5
v5_supported = TRUE;
#endif
#ifdef HAVE_ARMV6
v6_supported = TRUE;
#endif
#ifdef HAVE_ARMV7
v7_supported = TRUE;
#endif
#if defined(TARGET_IOS)
/* iOS is special-cased here because we don't yet
have a way to properly detect CPU features on it. */
thumb_supported = TRUE;
iphone_abi = TRUE;
#elif defined(TARGET_ANDROID)
thumb_supported = TRUE;
#else
thumb_supported = mono_hwcap_arm_has_thumb;
thumb2_supported = mono_hwcap_arm_has_thumb2;
#endif
/* Format: armv(5|6|7[s])[-thumb[2]] */
cpu_arch = g_getenv ("MONO_CPU_ARCH");
/* Do this here so it overrides any detection. */
if (cpu_arch) {
if (strncmp (cpu_arch, "armv", 4) == 0) {
v5_supported = cpu_arch [4] >= '5';
v6_supported = cpu_arch [4] >= '6';
v7_supported = cpu_arch [4] >= '7';
v7s_supported = strncmp (cpu_arch, "armv7s", 6) == 0;
v7k_supported = strncmp (cpu_arch, "armv7k", 6) == 0;
}
thumb_supported = strstr (cpu_arch, "thumb") != NULL;
thumb2_supported = strstr (cpu_arch, "thumb2") != NULL;
g_free (cpu_arch);
}
}
/*
* Cleanup architecture specific code.
*/
void
mono_arch_cleanup (void)
{
}
/*
* This function returns the optimizations supported on this cpu.
*/
guint32
mono_arch_cpu_optimizations (guint32 *exclude_mask)
{
/* no arm-specific optimizations yet */
*exclude_mask = 0;
return 0;
}
gboolean
mono_arm_is_hard_float (void)
{
return arm_fpu == MONO_ARM_FPU_VFP_HARD;
}
#ifndef DISABLE_JIT
gboolean
mono_arch_opcode_needs_emulation (MonoCompile *cfg, int opcode)
{
if (v7s_supported || v7k_supported) {
switch (opcode) {
case OP_IDIV:
case OP_IREM:
case OP_IDIV_UN:
case OP_IREM_UN:
return FALSE;
default:
break;
}
}
return TRUE;
}
#ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
gboolean
mono_arch_is_soft_float (void)
{
return arm_fpu == MONO_ARM_FPU_NONE;
}
#endif
static gboolean
is_regsize_var (MonoType *t)
{
if (m_type_is_byref (t))
return TRUE;
t = mini_get_underlying_type (t);
switch (t->type) {
case MONO_TYPE_I4:
case MONO_TYPE_U4:
case MONO_TYPE_I:
case MONO_TYPE_U:
case MONO_TYPE_PTR:
case MONO_TYPE_FNPTR:
return TRUE;
case MONO_TYPE_OBJECT:
return TRUE;
case MONO_TYPE_GENERICINST:
if (!mono_type_generic_inst_is_valuetype (t))
return TRUE;
return FALSE;
case MONO_TYPE_VALUETYPE:
return FALSE;
}
return FALSE;
}
GList *
mono_arch_get_allocatable_int_vars (MonoCompile *cfg)
{
GList *vars = NULL;
int i;
for (i = 0; i < cfg->num_varinfo; i++) {
MonoInst *ins = cfg->varinfo [i];
MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
/* unused vars */
if (vmv->range.first_use.abs_pos >= vmv->range.last_use.abs_pos)
continue;
if (ins->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT) || (ins->opcode != OP_LOCAL && ins->opcode != OP_ARG))
continue;
/* we can only allocate 32 bit values */
if (is_regsize_var (ins->inst_vtype)) {
g_assert (MONO_VARINFO (cfg, i)->reg == -1);
g_assert (i == vmv->idx);
vars = mono_varlist_insert_sorted (cfg, vars, vmv, FALSE);
}
}
return vars;
}
GList *
mono_arch_get_global_int_regs (MonoCompile *cfg)
{
GList *regs = NULL;
mono_arch_compute_omit_fp (cfg);
/*
* FIXME: Interface calls might go through a static rgctx trampoline which
* sets V5, but it doesn't save it, so we need to save it ourselves, and
* avoid using it.
*/
if (cfg->flags & MONO_CFG_HAS_CALLS)
cfg->uses_rgctx_reg = TRUE;
if (cfg->arch.omit_fp)
regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_FP));
regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V1));
regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V2));
regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V3));
if (iphone_abi)
/* V4=R7 is used as a frame pointer, but V7=R10 is preserved */
regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V7));
else
regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V4));
if (!(cfg->compile_aot || cfg->uses_rgctx_reg || COMPILE_LLVM (cfg)))
/* V5 is reserved for passing the vtable/rgctx/IMT method */
regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V5));
/*regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V6));*/
/*regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V7));*/
return regs;
}
/*
* mono_arch_regalloc_cost:
*
* Return the cost, in number of memory references, of the action of
* allocating the variable VMV into a register during global register
* allocation.
*/
guint32
mono_arch_regalloc_cost (MonoCompile *cfg, MonoMethodVar *vmv)
{
/* FIXME: */
return 2;
}
#endif /* #ifndef DISABLE_JIT */
void
mono_arch_flush_icache (guint8 *code, gint size)
{
#if defined(MONO_CROSS_COMPILE)
#elif __APPLE__
sys_icache_invalidate (code, size);
#else
__builtin___clear_cache ((char*)code, (char*)code + size);
#endif
}
#define DEBUG(a)
static inline void
add_general (guint *gr, guint *stack_size, ArgInfo *ainfo, gboolean simple)
{
if (simple) {
if (*gr > ARMREG_R3) {
ainfo->size = 4;
ainfo->offset = *stack_size;
ainfo->reg = ARMREG_SP; /* in the caller */
ainfo->storage = RegTypeBase;
*stack_size += 4;
} else {
ainfo->storage = RegTypeGeneral;
ainfo->reg = *gr;
}
} else {
gboolean split;
if (eabi_supported)
split = i8_align == 4;
else
split = TRUE;
ainfo->size = 8;
if (*gr == ARMREG_R3 && split) {
/* first word in r3 and the second on the stack */
ainfo->offset = *stack_size;
ainfo->reg = ARMREG_SP; /* in the caller */
ainfo->storage = RegTypeBaseGen;
*stack_size += 4;
} else if (*gr >= ARMREG_R3) {
if (eabi_supported) {
				/* Darwin aligns longs to 4 bytes only */
if (i8_align == 8) {
*stack_size += 7;
*stack_size &= ~7;
}
}
ainfo->offset = *stack_size;
ainfo->reg = ARMREG_SP; /* in the caller */
ainfo->storage = RegTypeBase;
*stack_size += 8;
} else {
if (eabi_supported) {
if (i8_align == 8 && ((*gr) & 1))
(*gr) ++;
}
ainfo->storage = RegTypeIRegPair;
ainfo->reg = *gr;
}
(*gr) ++;
}
(*gr) ++;
}
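/*
 * Worked example (illustrative): for foo (int a, int b, int c, gint64 d),
 * a/b/c take r0-r2; with split == TRUE, d is split between r3 and the stack
 * (RegTypeBaseGen), while with split == FALSE it goes entirely on the
 * 8-byte-aligned stack (RegTypeBase).
 */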
static inline void
add_float (guint *fpr, guint *stack_size, ArgInfo *ainfo, gboolean is_double, gint *float_spare)
{
/*
* If we're calling a function like this:
*
* void foo(float a, double b, float c)
*
* We pass a in s0 and b in d1. That leaves us
* with s1 being unused. The armhf ABI recognizes
* this and requires register assignment to then
* use that for the next single-precision arg,
* i.e. c in this example. So float_spare either
* tells us which reg to use for the next single-
* precision arg, or it's -1, meaning use *fpr.
*
* Note that even though most of the JIT speaks
* double-precision, fpr represents single-
* precision registers.
*
* See parts 5.5 and 6.1.2 of the AAPCS for how
* this all works.
*/
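	/*
	 * Worked example (illustrative) for foo () above: a -> s0 (*fpr = 1);
	 * b -> d1 = s2/s3, with s1 recorded in *float_spare; c -> s1, taken
	 * from *float_spare, leaving *fpr = 4 for the next argument.
	 */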
if (*fpr < ARM_VFP_F16 || (!is_double && *float_spare >= 0)) {
ainfo->storage = RegTypeFP;
if (is_double) {
/*
* If we're passing a double-precision value
* and *fpr is odd (e.g. it's s1, s3, ...)
* we need to use the next even register. So
* we mark the current *fpr as a spare that
* can be used for the next single-precision
* value.
*/
if (*fpr % 2) {
*float_spare = *fpr;
(*fpr)++;
}
/*
* At this point, we have an even register
* so we assign that and move along.
*/
ainfo->reg = *fpr;
*fpr += 2;
} else if (*float_spare >= 0) {
/*
* We're passing a single-precision value
* and it looks like a spare single-
* precision register is available. Let's
* use it.
*/
ainfo->reg = *float_spare;
*float_spare = -1;
} else {
/*
* If we hit this branch, we're passing a
* single-precision value and we can simply
* use the next available register.
*/
ainfo->reg = *fpr;
(*fpr)++;
}
} else {
/*
* We've exhausted available floating point
* regs, so pass the rest on the stack.
*/
if (is_double) {
*stack_size += 7;
*stack_size &= ~7;
}
ainfo->offset = *stack_size;
ainfo->reg = ARMREG_SP;
ainfo->storage = RegTypeBase;
*stack_size += is_double ? 8 : 4;
}
}
static gboolean
is_hfa (MonoType *t, int *out_nfields, int *out_esize)
{
MonoClass *klass;
gpointer iter;
MonoClassField *field;
MonoType *ftype, *prev_ftype = NULL;
int nfields = 0;
klass = mono_class_from_mono_type_internal (t);
iter = NULL;
while ((field = mono_class_get_fields_internal (klass, &iter))) {
if (field->type->attrs & FIELD_ATTRIBUTE_STATIC)
continue;
ftype = mono_field_get_type_internal (field);
ftype = mini_get_underlying_type (ftype);
if (MONO_TYPE_ISSTRUCT (ftype)) {
int nested_nfields, nested_esize;
if (!is_hfa (ftype, &nested_nfields, &nested_esize))
return FALSE;
if (nested_esize == 4)
ftype = m_class_get_byval_arg (mono_defaults.single_class);
else
ftype = m_class_get_byval_arg (mono_defaults.double_class);
if (prev_ftype && prev_ftype->type != ftype->type)
return FALSE;
prev_ftype = ftype;
nfields += nested_nfields;
} else {
if (!(!m_type_is_byref (ftype) && (ftype->type == MONO_TYPE_R4 || ftype->type == MONO_TYPE_R8)))
return FALSE;
if (prev_ftype && prev_ftype->type != ftype->type)
return FALSE;
prev_ftype = ftype;
nfields ++;
}
}
if (nfields == 0 || nfields > 4)
return FALSE;
*out_nfields = nfields;
*out_esize = prev_ftype->type == MONO_TYPE_R4 ? 4 : 8;
return TRUE;
}
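/*
 * Illustrative examples: struct { float x, y; } is an HFA with
 * *out_nfields = 2 and *out_esize = 4, while struct { float x; double y; }
 * is rejected because the element types differ.
 */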
static CallInfo*
get_call_info (MonoMemPool *mp, MonoMethodSignature *sig)
{
guint i, gr, fpr, pstart;
gint float_spare;
int n = sig->hasthis + sig->param_count;
int nfields, esize;
guint32 align;
MonoType *t;
guint32 stack_size = 0;
CallInfo *cinfo;
gboolean is_pinvoke = sig->pinvoke;
gboolean vtype_retaddr = FALSE;
if (mp)
cinfo = mono_mempool_alloc0 (mp, sizeof (CallInfo) + (sizeof (ArgInfo) * n));
else
cinfo = g_malloc0 (sizeof (CallInfo) + (sizeof (ArgInfo) * n));
cinfo->nargs = n;
gr = ARMREG_R0;
fpr = ARM_VFP_F0;
float_spare = -1;
t = mini_get_underlying_type (sig->ret);
switch (t->type) {
case MONO_TYPE_I1:
case MONO_TYPE_U1:
case MONO_TYPE_I2:
case MONO_TYPE_U2:
case MONO_TYPE_I4:
case MONO_TYPE_U4:
case MONO_TYPE_I:
case MONO_TYPE_U:
case MONO_TYPE_PTR:
case MONO_TYPE_FNPTR:
case MONO_TYPE_OBJECT:
cinfo->ret.storage = RegTypeGeneral;
cinfo->ret.reg = ARMREG_R0;
break;
case MONO_TYPE_U8:
case MONO_TYPE_I8:
cinfo->ret.storage = RegTypeIRegPair;
cinfo->ret.reg = ARMREG_R0;
break;
case MONO_TYPE_R4:
case MONO_TYPE_R8:
cinfo->ret.storage = RegTypeFP;
if (t->type == MONO_TYPE_R4)
cinfo->ret.size = 4;
else
cinfo->ret.size = 8;
if (IS_HARD_FLOAT) {
cinfo->ret.reg = ARM_VFP_F0;
} else {
cinfo->ret.reg = ARMREG_R0;
}
break;
case MONO_TYPE_GENERICINST:
if (!mono_type_generic_inst_is_valuetype (t)) {
cinfo->ret.storage = RegTypeGeneral;
cinfo->ret.reg = ARMREG_R0;
break;
}
if (mini_is_gsharedvt_variable_type (t)) {
cinfo->ret.storage = RegTypeStructByAddr;
break;
}
/* Fall through */
case MONO_TYPE_VALUETYPE:
case MONO_TYPE_TYPEDBYREF:
if (IS_HARD_FLOAT && sig->pinvoke && is_hfa (t, &nfields, &esize)) {
cinfo->ret.storage = RegTypeHFA;
cinfo->ret.reg = 0;
cinfo->ret.nregs = nfields;
cinfo->ret.esize = esize;
} else {
if (sig->pinvoke && !sig->marshalling_disabled) {
int native_size = mono_class_native_size (mono_class_from_mono_type_internal (t), &align);
int max_size;
#ifdef TARGET_WATCHOS
max_size = 16;
#else
max_size = 4;
#endif
if (native_size <= max_size) {
cinfo->ret.storage = RegTypeStructByVal;
cinfo->ret.struct_size = native_size;
cinfo->ret.nregs = ALIGN_TO (native_size, 4) / 4;
} else {
cinfo->ret.storage = RegTypeStructByAddr;
}
} else {
cinfo->ret.storage = RegTypeStructByAddr;
}
}
break;
case MONO_TYPE_VAR:
case MONO_TYPE_MVAR:
g_assert (mini_is_gsharedvt_type (t));
cinfo->ret.storage = RegTypeStructByAddr;
break;
case MONO_TYPE_VOID:
break;
default:
g_error ("Can't handle as return value 0x%x", sig->ret->type);
}
vtype_retaddr = cinfo->ret.storage == RegTypeStructByAddr;
pstart = 0;
n = 0;
/*
* To simplify get_this_arg_reg () and LLVM integration, emit the vret arg after
* the first argument, allowing 'this' to be always passed in the first arg reg.
* Also do this if the first argument is a reference type, since virtual calls
* are sometimes made using calli without sig->hasthis set, like in the delegate
* invoke wrappers.
*/
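	/*
	 * Example (illustrative): for an instance method returning a large
	 * struct, 'this' stays in r0 and the hidden return-address argument
	 * is assigned r1, so vret_arg_index below ends up as 1.
	 */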
if (vtype_retaddr && !is_pinvoke && (sig->hasthis || (sig->param_count > 0 && MONO_TYPE_IS_REFERENCE (mini_get_underlying_type (sig->params [0]))))) {
if (sig->hasthis) {
add_general (&gr, &stack_size, cinfo->args + 0, TRUE);
} else {
add_general (&gr, &stack_size, &cinfo->args [sig->hasthis + 0], TRUE);
pstart = 1;
}
n ++;
cinfo->ret.reg = gr;
gr ++;
cinfo->vret_arg_index = 1;
} else {
/* this */
if (sig->hasthis) {
add_general (&gr, &stack_size, cinfo->args + 0, TRUE);
n ++;
}
if (vtype_retaddr) {
cinfo->ret.reg = gr;
gr ++;
}
}
DEBUG(g_print("params: %d\n", sig->param_count));
for (i = pstart; i < sig->param_count; ++i) {
ArgInfo *ainfo = &cinfo->args [n];
if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
/* Prevent implicit arguments and sig_cookie from
being passed in registers */
gr = ARMREG_R3 + 1;
fpr = ARM_VFP_F16;
/* Emit the signature cookie just before the implicit arguments */
add_general (&gr, &stack_size, &cinfo->sig_cookie, TRUE);
}
DEBUG(g_print("param %d: ", i));
if (m_type_is_byref (sig->params [i])) {
DEBUG(g_print("byref\n"));
add_general (&gr, &stack_size, ainfo, TRUE);
n++;
continue;
}
t = mini_get_underlying_type (sig->params [i]);
switch (t->type) {
case MONO_TYPE_I1:
cinfo->args [n].is_signed = 1;
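		/* Fall through */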
case MONO_TYPE_U1:
cinfo->args [n].size = 1;
add_general (&gr, &stack_size, ainfo, TRUE);
break;
case MONO_TYPE_I2:
cinfo->args [n].is_signed = 1;
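		/* Fall through */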
case MONO_TYPE_U2:
cinfo->args [n].size = 2;
add_general (&gr, &stack_size, ainfo, TRUE);
break;
case MONO_TYPE_I4:
case MONO_TYPE_U4:
cinfo->args [n].size = 4;
add_general (&gr, &stack_size, ainfo, TRUE);
break;
case MONO_TYPE_I:
case MONO_TYPE_U:
case MONO_TYPE_PTR:
case MONO_TYPE_FNPTR:
case MONO_TYPE_OBJECT:
cinfo->args [n].size = sizeof (target_mgreg_t);
add_general (&gr, &stack_size, ainfo, TRUE);
break;
case MONO_TYPE_GENERICINST:
if (!mono_type_generic_inst_is_valuetype (t)) {
cinfo->args [n].size = sizeof (target_mgreg_t);
add_general (&gr, &stack_size, ainfo, TRUE);
break;
}
if (mini_is_gsharedvt_variable_type (t)) {
/* gsharedvt arguments are passed by ref */
g_assert (mini_is_gsharedvt_type (t));
add_general (&gr, &stack_size, ainfo, TRUE);
switch (ainfo->storage) {
case RegTypeGeneral:
ainfo->storage = RegTypeGSharedVtInReg;
break;
case RegTypeBase:
ainfo->storage = RegTypeGSharedVtOnStack;
break;
default:
g_assert_not_reached ();
}
break;
}
/* Fall through */
case MONO_TYPE_TYPEDBYREF:
case MONO_TYPE_VALUETYPE: {
gint size;
int align_size;
int nwords, nfields, esize;
guint32 align;
if (IS_HARD_FLOAT && sig->pinvoke && is_hfa (t, &nfields, &esize)) {
if (fpr + nfields < ARM_VFP_F16) {
ainfo->storage = RegTypeHFA;
ainfo->reg = fpr;
ainfo->nregs = nfields;
ainfo->esize = esize;
if (esize == 4)
fpr += nfields;
else
fpr += nfields * 2;
break;
} else {
fpr = ARM_VFP_F16;
}
}
if (t->type == MONO_TYPE_TYPEDBYREF) {
size = MONO_ABI_SIZEOF (MonoTypedRef);
align = sizeof (target_mgreg_t);
} else {
MonoClass *klass = mono_class_from_mono_type_internal (sig->params [i]);
if (sig->pinvoke && !sig->marshalling_disabled)
size = mono_class_native_size (klass, &align);
else
size = mini_type_stack_size_full (t, &align, FALSE);
}
DEBUG(g_print ("load %d bytes struct\n", size));
#ifdef TARGET_WATCHOS
		/* WatchOS passes large structures by ref */
/* We only do this for pinvoke to make gsharedvt/dyncall simpler */
if (sig->pinvoke && size > 16) {
add_general (&gr, &stack_size, ainfo, TRUE);
switch (ainfo->storage) {
case RegTypeGeneral:
ainfo->storage = RegTypeStructByAddr;
break;
case RegTypeBase:
ainfo->storage = RegTypeStructByAddrOnStack;
break;
default:
g_assert_not_reached ();
break;
}
break;
}
#endif
align_size = size;
nwords = 0;
align_size += (sizeof (target_mgreg_t) - 1);
align_size &= ~(sizeof (target_mgreg_t) - 1);
			nwords = (align_size + sizeof (target_mgreg_t) - 1) / sizeof (target_mgreg_t);
ainfo->storage = RegTypeStructByVal;
ainfo->struct_size = size;
ainfo->align = align;
if (eabi_supported) {
if (align >= 8 && (gr & 1))
gr ++;
}
if (gr > ARMREG_R3) {
ainfo->size = 0;
ainfo->vtsize = nwords;
} else {
int rest = ARMREG_R3 - gr + 1;
				int n_in_regs = rest >= nwords ? nwords : rest;
ainfo->size = n_in_regs;
ainfo->vtsize = nwords - n_in_regs;
ainfo->reg = gr;
gr += n_in_regs;
nwords -= n_in_regs;
}
stack_size = ALIGN_TO (stack_size, align);
ainfo->offset = stack_size;
/*g_print ("offset for arg %d at %d\n", n, stack_size);*/
stack_size += nwords * sizeof (target_mgreg_t);
break;
}
case MONO_TYPE_U8:
case MONO_TYPE_I8:
ainfo->size = 8;
add_general (&gr, &stack_size, ainfo, FALSE);
break;
case MONO_TYPE_R4:
ainfo->size = 4;
if (IS_HARD_FLOAT)
add_float (&fpr, &stack_size, ainfo, FALSE, &float_spare);
else
add_general (&gr, &stack_size, ainfo, TRUE);
break;
case MONO_TYPE_R8:
ainfo->size = 8;
if (IS_HARD_FLOAT)
add_float (&fpr, &stack_size, ainfo, TRUE, &float_spare);
else
add_general (&gr, &stack_size, ainfo, FALSE);
break;
case MONO_TYPE_VAR:
case MONO_TYPE_MVAR:
/* gsharedvt arguments are passed by ref */
g_assert (mini_is_gsharedvt_type (t));
add_general (&gr, &stack_size, ainfo, TRUE);
switch (ainfo->storage) {
case RegTypeGeneral:
ainfo->storage = RegTypeGSharedVtInReg;
break;
case RegTypeBase:
ainfo->storage = RegTypeGSharedVtOnStack;
break;
default:
g_assert_not_reached ();
}
break;
default:
g_error ("Can't handle 0x%x", sig->params [i]->type);
}
n ++;
}
/* Handle the case where there are no implicit arguments */
if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
/* Prevent implicit arguments and sig_cookie from
being passed in registers */
gr = ARMREG_R3 + 1;
fpr = ARM_VFP_F16;
/* Emit the signature cookie just before the implicit arguments */
add_general (&gr, &stack_size, &cinfo->sig_cookie, TRUE);
}
DEBUG (g_print (" stack size: %d (%d)\n", (stack_size + 15) & ~15, stack_size));
stack_size = ALIGN_TO (stack_size, MONO_ARCH_FRAME_ALIGNMENT);
cinfo->stack_usage = stack_size;
return cinfo;
}
/*
* We need to create a temporary value if the argument is not stored in
* a linear memory range in the ccontext (this normally happens for
* value types if they are passed both by stack and regs).
*/
static int
arg_need_temp (ArgInfo *ainfo)
{
if (ainfo->storage == RegTypeStructByVal && ainfo->vtsize)
return ainfo->struct_size;
return 0;
}
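/*
 * Example (illustrative): a 12-byte struct passed with two words in r2/r3
 * and one on the stack has ainfo->size == 2 and ainfo->vtsize == 1, so
 * arg_need_temp () returns 12 and the value is reassembled in a temporary.
 */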
static gpointer
arg_get_storage (CallContext *ccontext, ArgInfo *ainfo)
{
switch (ainfo->storage) {
case RegTypeIRegPair:
case RegTypeGeneral:
case RegTypeStructByVal:
return &ccontext->gregs [ainfo->reg];
case RegTypeHFA:
case RegTypeFP:
if (IS_HARD_FLOAT)
return &ccontext->fregs [ainfo->reg];
else
return &ccontext->gregs [ainfo->reg];
case RegTypeBase:
return ccontext->stack + ainfo->offset;
default:
g_error ("Arg storage type not yet supported");
}
}
static void
arg_get_val (CallContext *ccontext, ArgInfo *ainfo, gpointer dest)
{
int reg_size = ainfo->size * sizeof (host_mgreg_t);
g_assert (arg_need_temp (ainfo));
memcpy (dest, &ccontext->gregs [ainfo->reg], reg_size);
memcpy ((host_mgreg_t*)dest + ainfo->size, ccontext->stack + ainfo->offset, ainfo->struct_size - reg_size);
}
static void
arg_set_val (CallContext *ccontext, ArgInfo *ainfo, gpointer src)
{
int reg_size = ainfo->size * sizeof (host_mgreg_t);
g_assert (arg_need_temp (ainfo));
memcpy (&ccontext->gregs [ainfo->reg], src, reg_size);
memcpy (ccontext->stack + ainfo->offset, (host_mgreg_t*)src + ainfo->size, ainfo->struct_size - reg_size);
}
/* Set arguments in the ccontext (for i2n entry) */
void
mono_arch_set_native_call_context_args (CallContext *ccontext, gpointer frame, MonoMethodSignature *sig)
{
const MonoEECallbacks *interp_cb = mini_get_interp_callbacks ();
CallInfo *cinfo = get_call_info (NULL, sig);
gpointer storage;
ArgInfo *ainfo;
memset (ccontext, 0, sizeof (CallContext));
ccontext->stack_size = ALIGN_TO (cinfo->stack_usage, MONO_ARCH_FRAME_ALIGNMENT);
if (ccontext->stack_size)
ccontext->stack = (guint8*)g_calloc (1, ccontext->stack_size);
if (sig->ret->type != MONO_TYPE_VOID) {
ainfo = &cinfo->ret;
if (ainfo->storage == RegTypeStructByAddr) {
storage = interp_cb->frame_arg_to_storage ((MonoInterpFrameHandle)frame, sig, -1);
ccontext->gregs [cinfo->ret.reg] = (host_mgreg_t)(gsize)storage;
}
}
g_assert (!sig->hasthis);
for (int i = 0; i < sig->param_count; i++) {
ainfo = &cinfo->args [i];
int temp_size = arg_need_temp (ainfo);
if (temp_size)
storage = alloca (temp_size); // FIXME? alloca in a loop
else
storage = arg_get_storage (ccontext, ainfo);
interp_cb->frame_arg_to_data ((MonoInterpFrameHandle)frame, sig, i, storage);
if (temp_size)
arg_set_val (ccontext, ainfo, storage);
}
g_free (cinfo);
}
/* Set return value in the ccontext (for n2i return) */
void
mono_arch_set_native_call_context_ret (CallContext *ccontext, gpointer frame, MonoMethodSignature *sig, gpointer retp)
{
const MonoEECallbacks *interp_cb;
CallInfo *cinfo;
gpointer storage;
ArgInfo *ainfo;
if (sig->ret->type == MONO_TYPE_VOID)
return;
interp_cb = mini_get_interp_callbacks ();
cinfo = get_call_info (NULL, sig);
ainfo = &cinfo->ret;
if (retp) {
g_assert (ainfo->storage == RegTypeStructByAddr);
interp_cb->frame_arg_to_data ((MonoInterpFrameHandle)frame, sig, -1, retp);
} else {
g_assert (ainfo->storage != RegTypeStructByAddr);
g_assert (!arg_need_temp (ainfo));
storage = arg_get_storage (ccontext, ainfo);
memset (ccontext, 0, sizeof (CallContext)); // FIXME
interp_cb->frame_arg_to_data ((MonoInterpFrameHandle)frame, sig, -1, storage);
}
g_free (cinfo);
}
/* Gets the arguments from ccontext (for n2i entry) */
gpointer
mono_arch_get_native_call_context_args (CallContext *ccontext, gpointer frame, MonoMethodSignature *sig)
{
const MonoEECallbacks *interp_cb = mini_get_interp_callbacks ();
CallInfo *cinfo = get_call_info (NULL, sig);
gpointer storage;
ArgInfo *ainfo;
for (int i = 0; i < sig->param_count + sig->hasthis; i++) {
ainfo = &cinfo->args [i];
int temp_size = arg_need_temp (ainfo);
if (temp_size) {
storage = alloca (temp_size); // FIXME? alloca in a loop
arg_get_val (ccontext, ainfo, storage);
} else {
storage = arg_get_storage (ccontext, ainfo);
}
interp_cb->data_to_frame_arg ((MonoInterpFrameHandle)frame, sig, i, storage);
}
storage = NULL;
if (sig->ret->type != MONO_TYPE_VOID) {
ainfo = &cinfo->ret;
if (ainfo->storage == RegTypeStructByAddr)
storage = (gpointer)(gsize)ccontext->gregs [cinfo->ret.reg];
}
g_free (cinfo);
return storage;
}
/* Gets the return value from ccontext (for i2n exit) */
void
mono_arch_get_native_call_context_ret (CallContext *ccontext, gpointer frame, MonoMethodSignature *sig)
{
const MonoEECallbacks *interp_cb;
CallInfo *cinfo;
ArgInfo *ainfo;
gpointer storage;
if (sig->ret->type == MONO_TYPE_VOID)
return;
interp_cb = mini_get_interp_callbacks ();
cinfo = get_call_info (NULL, sig);
ainfo = &cinfo->ret;
if (ainfo->storage != RegTypeStructByAddr) {
g_assert (!arg_need_temp (ainfo));
storage = arg_get_storage (ccontext, ainfo);
interp_cb->data_to_frame_arg ((MonoInterpFrameHandle)frame, sig, -1, storage);
}
g_free (cinfo);
}
#ifndef DISABLE_JIT
gboolean
mono_arch_tailcall_supported (MonoCompile *cfg, MonoMethodSignature *caller_sig, MonoMethodSignature *callee_sig, gboolean virtual_)
{
g_assert (caller_sig);
g_assert (callee_sig);
CallInfo *caller_info = get_call_info (NULL, caller_sig);
CallInfo *callee_info = get_call_info (NULL, callee_sig);
/*
* Tailcalls with more callee stack usage than the caller cannot be supported, since
* the extra stack space would be left on the stack after the tailcall.
*/
gboolean res = IS_SUPPORTED_TAILCALL (callee_info->stack_usage <= caller_info->stack_usage)
&& IS_SUPPORTED_TAILCALL (caller_info->ret.storage == callee_info->ret.storage);
// FIXME The limit here is that moving the parameters requires addressing the parameters
// with 12bit (4K) immediate offsets. - 4 for TAILCALL_REG/MEMBASE
res &= IS_SUPPORTED_TAILCALL (callee_info->stack_usage < (4096 - 4));
res &= IS_SUPPORTED_TAILCALL (caller_info->stack_usage < (4096 - 4));
g_free (caller_info);
g_free (callee_info);
return res;
}
static gboolean
debug_omit_fp (void)
{
#if 0
return mono_debug_count ();
#else
return TRUE;
#endif
}
/**
* mono_arch_compute_omit_fp:
* Determine whether the frame pointer can be eliminated.
*/
static void
mono_arch_compute_omit_fp (MonoCompile *cfg)
{
MonoMethodSignature *sig;
MonoMethodHeader *header;
int i, locals_size;
CallInfo *cinfo;
if (cfg->arch.omit_fp_computed)
return;
header = cfg->header;
sig = mono_method_signature_internal (cfg->method);
if (!cfg->arch.cinfo)
cfg->arch.cinfo = get_call_info (cfg->mempool, sig);
cinfo = cfg->arch.cinfo;
/*
* FIXME: Remove some of the restrictions.
*/
cfg->arch.omit_fp = TRUE;
cfg->arch.omit_fp_computed = TRUE;
if (cfg->disable_omit_fp)
cfg->arch.omit_fp = FALSE;
if (!debug_omit_fp ())
cfg->arch.omit_fp = FALSE;
/*
if (cfg->method->save_lmf)
cfg->arch.omit_fp = FALSE;
*/
if (cfg->flags & MONO_CFG_HAS_ALLOCA)
cfg->arch.omit_fp = FALSE;
if (header->num_clauses)
cfg->arch.omit_fp = FALSE;
if (cfg->param_area)
cfg->arch.omit_fp = FALSE;
if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG))
cfg->arch.omit_fp = FALSE;
if ((mono_jit_trace_calls != NULL && mono_trace_eval (cfg->method)))
cfg->arch.omit_fp = FALSE;
for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
ArgInfo *ainfo = &cinfo->args [i];
if (ainfo->storage == RegTypeBase || ainfo->storage == RegTypeBaseGen || ainfo->storage == RegTypeStructByVal) {
/*
* The stack offset can only be determined when the frame
* size is known.
*/
cfg->arch.omit_fp = FALSE;
}
}
locals_size = 0;
for (i = cfg->locals_start; i < cfg->num_varinfo; i++) {
MonoInst *ins = cfg->varinfo [i];
int ialign;
locals_size += mono_type_size (ins->inst_vtype, &ialign);
}
}
/*
* Set var information according to the calling convention. arm version.
* The locals var stuff should most likely be split in another method.
*/
void
mono_arch_allocate_vars (MonoCompile *cfg)
{
MonoMethodSignature *sig;
MonoMethodHeader *header;
MonoInst *ins;
MonoType *sig_ret;
int i, offset, size, align, curinst;
CallInfo *cinfo;
ArgInfo *ainfo;
guint32 ualign;
sig = mono_method_signature_internal (cfg->method);
if (!cfg->arch.cinfo)
cfg->arch.cinfo = get_call_info (cfg->mempool, sig);
cinfo = cfg->arch.cinfo;
sig_ret = mini_get_underlying_type (sig->ret);
mono_arch_compute_omit_fp (cfg);
if (cfg->arch.omit_fp)
cfg->frame_reg = ARMREG_SP;
else
cfg->frame_reg = ARMREG_FP;
cfg->flags |= MONO_CFG_HAS_SPILLUP;
/* allow room for the vararg method args: void* and long/double */
if (mono_jit_trace_calls != NULL && mono_trace_eval (cfg->method))
cfg->param_area = MAX (cfg->param_area, sizeof (target_mgreg_t)*8);
header = cfg->header;
/* See mono_arch_get_global_int_regs () */
if (cfg->flags & MONO_CFG_HAS_CALLS)
cfg->uses_rgctx_reg = TRUE;
if (cfg->frame_reg != ARMREG_SP)
cfg->used_int_regs |= 1 << cfg->frame_reg;
if (cfg->compile_aot || cfg->uses_rgctx_reg || COMPILE_LLVM (cfg))
/* V5 is reserved for passing the vtable/rgctx/IMT method */
cfg->used_int_regs |= (1 << MONO_ARCH_IMT_REG);
offset = 0;
curinst = 0;
if (!MONO_TYPE_ISSTRUCT (sig_ret) && cinfo->ret.storage != RegTypeStructByAddr) {
if (sig_ret->type != MONO_TYPE_VOID) {
cfg->ret->opcode = OP_REGVAR;
cfg->ret->inst_c0 = ARMREG_R0;
}
}
/* local vars are at a positive offset from the stack pointer */
/*
* also note that if the function uses alloca, we use FP
* to point at the local variables.
*/
offset = 0; /* linkage area */
/* align the offset to 16 bytes: not sure this is needed here */
//offset += 8 - 1;
//offset &= ~(8 - 1);
/* add parameter area size for called functions */
offset += cfg->param_area;
offset += 8 - 1;
offset &= ~(8 - 1);
if (cfg->flags & MONO_CFG_HAS_FPOUT)
offset += 8;
/* allow room to save the return value */
if (mono_jit_trace_calls != NULL && mono_trace_eval (cfg->method))
offset += 8;
switch (cinfo->ret.storage) {
case RegTypeStructByVal:
case RegTypeHFA:
/* Allocate a local to hold the result, the epilog will copy it to the correct place */
offset = ALIGN_TO (offset, 8);
cfg->ret->opcode = OP_REGOFFSET;
cfg->ret->inst_basereg = cfg->frame_reg;
cfg->ret->inst_offset = offset;
if (cinfo->ret.storage == RegTypeStructByVal)
offset += cinfo->ret.nregs * sizeof (target_mgreg_t);
else
offset += 32;
break;
case RegTypeStructByAddr:
ins = cfg->vret_addr;
offset += sizeof (target_mgreg_t) - 1;
offset &= ~(sizeof (target_mgreg_t) - 1);
ins->inst_offset = offset;
ins->opcode = OP_REGOFFSET;
ins->inst_basereg = cfg->frame_reg;
if (G_UNLIKELY (cfg->verbose_level > 1)) {
g_print ("vret_addr =");
mono_print_ins (cfg->vret_addr);
}
offset += sizeof (target_mgreg_t);
break;
default:
break;
}
/* Allocate these first so they have a small offset, OP_SEQ_POINT depends on this */
if (cfg->arch.seq_point_info_var) {
MonoInst *ins;
ins = cfg->arch.seq_point_info_var;
size = 4;
align = 4;
offset += align - 1;
offset &= ~(align - 1);
ins->opcode = OP_REGOFFSET;
ins->inst_basereg = cfg->frame_reg;
ins->inst_offset = offset;
offset += size;
}
if (cfg->arch.ss_trigger_page_var) {
MonoInst *ins;
ins = cfg->arch.ss_trigger_page_var;
size = 4;
align = 4;
offset += align - 1;
offset &= ~(align - 1);
ins->opcode = OP_REGOFFSET;
ins->inst_basereg = cfg->frame_reg;
ins->inst_offset = offset;
offset += size;
}
if (cfg->arch.seq_point_ss_method_var) {
MonoInst *ins;
ins = cfg->arch.seq_point_ss_method_var;
size = 4;
align = 4;
offset += align - 1;
offset &= ~(align - 1);
ins->opcode = OP_REGOFFSET;
ins->inst_basereg = cfg->frame_reg;
ins->inst_offset = offset;
offset += size;
}
if (cfg->arch.seq_point_bp_method_var) {
MonoInst *ins;
ins = cfg->arch.seq_point_bp_method_var;
size = 4;
align = 4;
offset += align - 1;
offset &= ~(align - 1);
ins->opcode = OP_REGOFFSET;
ins->inst_basereg = cfg->frame_reg;
ins->inst_offset = offset;
offset += size;
}
if (cfg->has_atomic_exchange_i4 || cfg->has_atomic_cas_i4 || cfg->has_atomic_add_i4) {
/* Allocate a temporary used by the atomic ops */
size = 4;
align = 4;
offset += align - 1;
offset &= ~(align - 1);
cfg->arch.atomic_tmp_offset = offset;
offset += size;
} else {
cfg->arch.atomic_tmp_offset = -1;
}
cfg->locals_min_stack_offset = offset;
curinst = cfg->locals_start;
for (i = curinst; i < cfg->num_varinfo; ++i) {
MonoType *t;
ins = cfg->varinfo [i];
if ((ins->flags & MONO_INST_IS_DEAD) || ins->opcode == OP_REGVAR || ins->opcode == OP_REGOFFSET)
continue;
t = ins->inst_vtype;
if (cfg->gsharedvt && mini_is_gsharedvt_variable_type (t))
continue;
		/* inst->backend.is_pinvoke indicates native-sized value types; this is used by the
		 * pinvoke wrappers when they call functions returning structures */
if (ins->backend.is_pinvoke && MONO_TYPE_ISSTRUCT (t) && t->type != MONO_TYPE_TYPEDBYREF) {
size = mono_class_native_size (mono_class_from_mono_type_internal (t), &ualign);
align = ualign;
}
else
size = mono_type_size (t, &align);
/* FIXME: if a structure is misaligned, our memcpy doesn't work,
* since it loads/stores misaligned words, which don't do the right thing.
*/
if (align < 4 && size >= 4)
align = 4;
if (ALIGN_TO (offset, align) > ALIGN_TO (offset, 4))
mini_gc_set_slot_type_from_fp (cfg, ALIGN_TO (offset, 4), SLOT_NOREF);
offset += align - 1;
offset &= ~(align - 1);
ins->opcode = OP_REGOFFSET;
ins->inst_offset = offset;
ins->inst_basereg = cfg->frame_reg;
offset += size;
//g_print ("allocating local %d to %d\n", i, inst->inst_offset);
}
cfg->locals_max_stack_offset = offset;
curinst = 0;
if (sig->hasthis) {
ins = cfg->args [curinst];
if (ins->opcode != OP_REGVAR) {
ins->opcode = OP_REGOFFSET;
ins->inst_basereg = cfg->frame_reg;
offset += sizeof (target_mgreg_t) - 1;
offset &= ~(sizeof (target_mgreg_t) - 1);
ins->inst_offset = offset;
offset += sizeof (target_mgreg_t);
}
curinst++;
}
if (sig->call_convention == MONO_CALL_VARARG) {
size = 4;
align = 4;
/* Allocate a local slot to hold the sig cookie address */
offset += align - 1;
offset &= ~(align - 1);
cfg->sig_cookie = offset;
offset += size;
}
for (i = 0; i < sig->param_count; ++i) {
ainfo = cinfo->args + i;
ins = cfg->args [curinst];
switch (ainfo->storage) {
case RegTypeHFA:
offset = ALIGN_TO (offset, 8);
ins->opcode = OP_REGOFFSET;
ins->inst_basereg = cfg->frame_reg;
/* These arguments are saved to the stack in the prolog */
ins->inst_offset = offset;
if (cfg->verbose_level >= 2)
g_print ("arg %d allocated to %s+0x%0x.\n", i, mono_arch_regname (ins->inst_basereg), (int)ins->inst_offset);
// FIXME:
offset += 32;
break;
default:
break;
}
if (ins->opcode != OP_REGVAR) {
ins->opcode = OP_REGOFFSET;
ins->inst_basereg = cfg->frame_reg;
size = mini_type_stack_size_full (sig->params [i], &ualign, sig->pinvoke && !sig->marshalling_disabled);
align = ualign;
/* FIXME: if a structure is misaligned, our memcpy doesn't work,
* since it loads/stores misaligned words, which don't do the right thing.
*/
if (align < 4 && size >= 4)
align = 4;
/* The code in the prolog () stores words when storing vtypes received in a register */
if (MONO_TYPE_ISSTRUCT (sig->params [i]))
align = 4;
if (ALIGN_TO (offset, align) > ALIGN_TO (offset, 4))
mini_gc_set_slot_type_from_fp (cfg, ALIGN_TO (offset, 4), SLOT_NOREF);
offset += align - 1;
offset &= ~(align - 1);
ins->inst_offset = offset;
offset += size;
}
curinst++;
}
/* align the offset to 8 bytes */
if (ALIGN_TO (offset, 8) > ALIGN_TO (offset, 4))
mini_gc_set_slot_type_from_fp (cfg, ALIGN_TO (offset, 4), SLOT_NOREF);
offset += 8 - 1;
offset &= ~(8 - 1);
/* change sign? */
cfg->stack_offset = offset;
}
void
mono_arch_create_vars (MonoCompile *cfg)
{
MonoMethodSignature *sig;
CallInfo *cinfo;
int i;
sig = mono_method_signature_internal (cfg->method);
if (!cfg->arch.cinfo)
cfg->arch.cinfo = get_call_info (cfg->mempool, sig);
cinfo = cfg->arch.cinfo;
if (IS_HARD_FLOAT) {
for (i = 0; i < 2; i++) {
MonoInst *inst = mono_compile_create_var (cfg, m_class_get_byval_arg (mono_defaults.double_class), OP_LOCAL);
inst->flags |= MONO_INST_VOLATILE;
cfg->arch.vfp_scratch_slots [i] = inst;
}
}
if (cinfo->ret.storage == RegTypeStructByVal)
cfg->ret_var_is_local = TRUE;
if (cinfo->ret.storage == RegTypeStructByAddr) {
cfg->vret_addr = mono_compile_create_var (cfg, mono_get_int_type (), OP_ARG);
if (G_UNLIKELY (cfg->verbose_level > 1)) {
g_print ("vret_addr = ");
mono_print_ins (cfg->vret_addr);
}
}
if (cfg->gen_sdb_seq_points) {
if (cfg->compile_aot) {
MonoInst *ins = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL);
ins->flags |= MONO_INST_VOLATILE;
cfg->arch.seq_point_info_var = ins;
if (!cfg->soft_breakpoints) {
/* Allocate a separate variable for this to save 1 load per seq point */
ins = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL);
ins->flags |= MONO_INST_VOLATILE;
cfg->arch.ss_trigger_page_var = ins;
}
}
if (cfg->soft_breakpoints) {
MonoInst *ins;
ins = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL);
ins->flags |= MONO_INST_VOLATILE;
cfg->arch.seq_point_ss_method_var = ins;
ins = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL);
ins->flags |= MONO_INST_VOLATILE;
cfg->arch.seq_point_bp_method_var = ins;
}
}
}
static void
emit_sig_cookie (MonoCompile *cfg, MonoCallInst *call, CallInfo *cinfo)
{
MonoMethodSignature *tmp_sig;
int sig_reg;
if (MONO_IS_TAILCALL_OPCODE (call))
NOT_IMPLEMENTED;
g_assert (cinfo->sig_cookie.storage == RegTypeBase);
/*
* mono_ArgIterator_Setup assumes the signature cookie is
* passed first and all the arguments which were before it are
* passed on the stack after the signature. So compensate by
* passing a different signature.
*/
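	/*
	 * Illustration: tmp_sig describes only the variadic tail, e.g. with
	 * sentinelpos == 2 and param_count == 5 it contains just the last 3
	 * params, which is the signature mono_ArgIterator_Setup expects to walk.
	 */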
tmp_sig = mono_metadata_signature_dup (call->signature);
tmp_sig->param_count -= call->signature->sentinelpos;
tmp_sig->sentinelpos = 0;
memcpy (tmp_sig->params, call->signature->params + call->signature->sentinelpos, tmp_sig->param_count * sizeof (MonoType*));
sig_reg = mono_alloc_ireg (cfg);
MONO_EMIT_NEW_SIGNATURECONST (cfg, sig_reg, tmp_sig);
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, cinfo->sig_cookie.offset, sig_reg);
}
#ifdef ENABLE_LLVM
LLVMCallInfo*
mono_arch_get_llvm_call_info (MonoCompile *cfg, MonoMethodSignature *sig)
{
int i, n;
CallInfo *cinfo;
ArgInfo *ainfo;
LLVMCallInfo *linfo;
n = sig->param_count + sig->hasthis;
cinfo = get_call_info (cfg->mempool, sig);
linfo = mono_mempool_alloc0 (cfg->mempool, sizeof (LLVMCallInfo) + (sizeof (LLVMArgInfo) * n));
/*
* LLVM always uses the native ABI while we use our own ABI, the
* only difference is the handling of vtypes:
* - we only pass/receive them in registers in some cases, and only
* in 1 or 2 integer registers.
*/
switch (cinfo->ret.storage) {
case RegTypeNone:
linfo->ret.storage = LLVMArgNone;
break;
case RegTypeGeneral:
case RegTypeFP:
case RegTypeIRegPair:
linfo->ret.storage = LLVMArgNormal;
break;
case RegTypeStructByAddr:
if (sig->pinvoke) {
linfo->ret.storage = LLVMArgVtypeByRef;
} else {
/* Vtype returned using a hidden argument */
linfo->ret.storage = LLVMArgVtypeRetAddr;
linfo->vret_arg_index = cinfo->vret_arg_index;
}
break;
#if TARGET_WATCHOS
case RegTypeStructByVal:
/* LLVM models this by returning an int array */
linfo->ret.storage = LLVMArgAsIArgs;
linfo->ret.nslots = cinfo->ret.nregs;
break;
#endif
case RegTypeHFA:
linfo->ret.storage = LLVMArgFpStruct;
linfo->ret.nslots = cinfo->ret.nregs;
linfo->ret.esize = cinfo->ret.esize;
break;
default:
cfg->exception_message = g_strdup_printf ("unknown ret conv (%d)", cinfo->ret.storage);
cfg->disable_llvm = TRUE;
return linfo;
}
for (i = 0; i < n; ++i) {
LLVMArgInfo *lainfo = &linfo->args [i];
ainfo = cinfo->args + i;
lainfo->storage = LLVMArgNone;
switch (ainfo->storage) {
case RegTypeGeneral:
case RegTypeIRegPair:
case RegTypeBase:
case RegTypeBaseGen:
case RegTypeFP:
lainfo->storage = LLVMArgNormal;
break;
case RegTypeStructByVal: {
lainfo->storage = LLVMArgAsIArgs;
int slotsize = eabi_supported && ainfo->align == 8 ? 8 : 4;
lainfo->nslots = ALIGN_TO (ainfo->struct_size, slotsize) / slotsize;
lainfo->esize = slotsize;
break;
}
case RegTypeStructByAddr:
case RegTypeStructByAddrOnStack:
lainfo->storage = LLVMArgVtypeByRef;
break;
case RegTypeHFA: {
int j;
lainfo->storage = LLVMArgAsFpArgs;
lainfo->nslots = ainfo->nregs;
lainfo->esize = ainfo->esize;
for (j = 0; j < ainfo->nregs; ++j)
lainfo->pair_storage [j] = LLVMArgInFPReg;
break;
}
default:
cfg->exception_message = g_strdup_printf ("ainfo->storage (%d)", ainfo->storage);
cfg->disable_llvm = TRUE;
break;
}
}
return linfo;
}
#endif
void
mono_arch_emit_call (MonoCompile *cfg, MonoCallInst *call)
{
MonoInst *in, *ins;
MonoMethodSignature *sig;
int i, n;
CallInfo *cinfo;
sig = call->signature;
n = sig->param_count + sig->hasthis;
cinfo = get_call_info (cfg->mempool, sig);
switch (cinfo->ret.storage) {
case RegTypeStructByVal:
case RegTypeHFA:
if (cinfo->ret.storage == RegTypeStructByVal && cinfo->ret.nregs == 1) {
/* The JIT will transform this into a normal call */
call->vret_in_reg = TRUE;
break;
}
if (MONO_IS_TAILCALL_OPCODE (call))
break;
/*
* The vtype is returned in registers, save the return area address in a local, and save the vtype into
* the location pointed to by it after call in emit_move_return_value ().
*/
if (!cfg->arch.vret_addr_loc) {
cfg->arch.vret_addr_loc = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL);
/* Prevent it from being register allocated or optimized away */
cfg->arch.vret_addr_loc->flags |= MONO_INST_VOLATILE;
}
MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->arch.vret_addr_loc->dreg, call->vret_var->dreg);
break;
case RegTypeStructByAddr: {
MonoInst *vtarg;
MONO_INST_NEW (cfg, vtarg, OP_MOVE);
vtarg->sreg1 = call->vret_var->dreg;
vtarg->dreg = mono_alloc_preg (cfg);
MONO_ADD_INS (cfg->cbb, vtarg);
mono_call_inst_add_outarg_reg (cfg, call, vtarg->dreg, cinfo->ret.reg, FALSE);
break;
}
default:
break;
}
for (i = 0; i < n; ++i) {
ArgInfo *ainfo = cinfo->args + i;
MonoType *t;
if (i >= sig->hasthis)
t = sig->params [i - sig->hasthis];
else
t = mono_get_int_type ();
t = mini_get_underlying_type (t);
if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
/* Emit the signature cookie just before the implicit arguments */
emit_sig_cookie (cfg, call, cinfo);
}
in = call->args [i];
switch (ainfo->storage) {
case RegTypeGeneral:
case RegTypeIRegPair:
if (!m_type_is_byref (t) && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
MONO_INST_NEW (cfg, ins, OP_MOVE);
ins->dreg = mono_alloc_ireg (cfg);
ins->sreg1 = MONO_LVREG_LS (in->dreg);
MONO_ADD_INS (cfg->cbb, ins);
mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
MONO_INST_NEW (cfg, ins, OP_MOVE);
ins->dreg = mono_alloc_ireg (cfg);
ins->sreg1 = MONO_LVREG_MS (in->dreg);
MONO_ADD_INS (cfg->cbb, ins);
mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg + 1, FALSE);
} else if (!m_type_is_byref (t) && ((t->type == MONO_TYPE_R8) || (t->type == MONO_TYPE_R4))) {
if (ainfo->size == 4) {
if (IS_SOFT_FLOAT) {
					/* mono_emit_call_args () has already done the r8->r4 conversion */
/* The converted value is in an int vreg */
MONO_INST_NEW (cfg, ins, OP_MOVE);
ins->dreg = mono_alloc_ireg (cfg);
ins->sreg1 = in->dreg;
MONO_ADD_INS (cfg->cbb, ins);
mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
} else {
int creg;
cfg->param_area = MAX (cfg->param_area, 8);
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG, ARMREG_SP, (cfg->param_area - 8), in->dreg);
creg = mono_alloc_ireg (cfg);
MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8));
mono_call_inst_add_outarg_reg (cfg, call, creg, ainfo->reg, FALSE);
}
} else {
if (IS_SOFT_FLOAT) {
MONO_INST_NEW (cfg, ins, OP_FGETLOW32);
ins->dreg = mono_alloc_ireg (cfg);
ins->sreg1 = in->dreg;
MONO_ADD_INS (cfg->cbb, ins);
mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
MONO_INST_NEW (cfg, ins, OP_FGETHIGH32);
ins->dreg = mono_alloc_ireg (cfg);
ins->sreg1 = in->dreg;
MONO_ADD_INS (cfg->cbb, ins);
mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg + 1, FALSE);
} else {
int creg;
cfg->param_area = MAX (cfg->param_area, 8);
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ARMREG_SP, (cfg->param_area - 8), in->dreg);
creg = mono_alloc_ireg (cfg);
MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8));
mono_call_inst_add_outarg_reg (cfg, call, creg, ainfo->reg, FALSE);
creg = mono_alloc_ireg (cfg);
MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8 + 4));
mono_call_inst_add_outarg_reg (cfg, call, creg, ainfo->reg + 1, FALSE);
}
}
cfg->flags |= MONO_CFG_HAS_FPOUT;
} else {
MONO_INST_NEW (cfg, ins, OP_MOVE);
ins->dreg = mono_alloc_ireg (cfg);
ins->sreg1 = in->dreg;
MONO_ADD_INS (cfg->cbb, ins);
mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
}
break;
case RegTypeStructByVal:
case RegTypeGSharedVtInReg:
case RegTypeGSharedVtOnStack:
case RegTypeHFA:
case RegTypeStructByAddr:
case RegTypeStructByAddrOnStack:
MONO_INST_NEW (cfg, ins, OP_OUTARG_VT);
ins->opcode = OP_OUTARG_VT;
ins->sreg1 = in->dreg;
ins->klass = in->klass;
ins->inst_p0 = call;
ins->inst_p1 = mono_mempool_alloc (cfg->mempool, sizeof (ArgInfo));
memcpy (ins->inst_p1, ainfo, sizeof (ArgInfo));
mono_call_inst_add_outarg_vt (cfg, call, ins);
MONO_ADD_INS (cfg->cbb, ins);
break;
case RegTypeBase:
if (!m_type_is_byref (t) && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
} else if (!m_type_is_byref (t) && ((t->type == MONO_TYPE_R4) || (t->type == MONO_TYPE_R8))) {
if (t->type == MONO_TYPE_R8) {
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
} else {
if (IS_SOFT_FLOAT)
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
else
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
}
} else {
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
}
break;
case RegTypeBaseGen:
if (!m_type_is_byref (t) && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, (G_BYTE_ORDER == G_BIG_ENDIAN) ? MONO_LVREG_LS (in->dreg) : MONO_LVREG_MS (in->dreg));
MONO_INST_NEW (cfg, ins, OP_MOVE);
ins->dreg = mono_alloc_ireg (cfg);
ins->sreg1 = G_BYTE_ORDER == G_BIG_ENDIAN ? MONO_LVREG_MS (in->dreg) : MONO_LVREG_LS (in->dreg);
MONO_ADD_INS (cfg->cbb, ins);
mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ARMREG_R3, FALSE);
} else if (!m_type_is_byref (t) && (t->type == MONO_TYPE_R8)) {
int creg;
/* This should work for soft-float as well */
cfg->param_area = MAX (cfg->param_area, 8);
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ARMREG_SP, (cfg->param_area - 8), in->dreg);
creg = mono_alloc_ireg (cfg);
mono_call_inst_add_outarg_reg (cfg, call, creg, ARMREG_R3, FALSE);
MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8));
creg = mono_alloc_ireg (cfg);
MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 4));
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, creg);
cfg->flags |= MONO_CFG_HAS_FPOUT;
} else {
g_assert_not_reached ();
}
break;
case RegTypeFP: {
int fdreg = mono_alloc_freg (cfg);
if (ainfo->size == 8) {
MONO_INST_NEW (cfg, ins, OP_FMOVE);
ins->sreg1 = in->dreg;
ins->dreg = fdreg;
MONO_ADD_INS (cfg->cbb, ins);
mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, TRUE);
} else {
FloatArgData *fad;
/*
				 * Mono's register allocator doesn't understand single-precision registers that
				 * overlap double-precision registers (as on armhf). So we have to work around
* the register allocator and load the value from memory manually.
*
* So we create a variable for the float argument and an instruction to store
* the argument into the variable. We then store the list of these arguments
* in call->float_args. This list is then used by emit_float_args later to
* pass the arguments in the various call opcodes.
*
* This is not very nice, and we should really try to fix the allocator.
*/
MonoInst *float_arg = mono_compile_create_var (cfg, m_class_get_byval_arg (mono_defaults.single_class), OP_LOCAL);
/* Make sure the instruction isn't seen as pointless and removed.
*/
float_arg->flags |= MONO_INST_VOLATILE;
MONO_EMIT_NEW_UNALU (cfg, OP_FMOVE, float_arg->dreg, in->dreg);
/* We use the dreg to look up the instruction later. The hreg is used to
* emit the instruction that loads the value into the FP reg.
*/
fad = mono_mempool_alloc0 (cfg->mempool, sizeof (FloatArgData));
fad->vreg = float_arg->dreg;
fad->hreg = ainfo->reg;
call->float_args = g_slist_append_mempool (cfg->mempool, call->float_args, fad);
}
call->used_iregs |= 1 << ainfo->reg;
cfg->flags |= MONO_CFG_HAS_FPOUT;
break;
}
default:
g_assert_not_reached ();
}
}
/* Handle the case where there are no implicit arguments */
if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (n == sig->sentinelpos))
emit_sig_cookie (cfg, call, cinfo);
call->call_info = cinfo;
call->stack_usage = cinfo->stack_usage;
}
static void
add_outarg_reg (MonoCompile *cfg, MonoCallInst *call, ArgStorage storage, int reg, MonoInst *arg)
{
MonoInst *ins;
switch (storage) {
case RegTypeFP:
MONO_INST_NEW (cfg, ins, OP_FMOVE);
ins->dreg = mono_alloc_freg (cfg);
ins->sreg1 = arg->dreg;
MONO_ADD_INS (cfg->cbb, ins);
mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, reg, TRUE);
break;
default:
g_assert_not_reached ();
break;
}
}
void
mono_arch_emit_outarg_vt (MonoCompile *cfg, MonoInst *ins, MonoInst *src)
{
MonoCallInst *call = (MonoCallInst*)ins->inst_p0;
MonoInst *load;
ArgInfo *ainfo = (ArgInfo*)ins->inst_p1;
int ovf_size = ainfo->vtsize;
int doffset = ainfo->offset;
int struct_size = ainfo->struct_size;
int i, soffset, dreg, tmpreg;
switch (ainfo->storage) {
case RegTypeGSharedVtInReg:
case RegTypeStructByAddr:
/* Pass by addr */
mono_call_inst_add_outarg_reg (cfg, call, src->dreg, ainfo->reg, FALSE);
break;
case RegTypeGSharedVtOnStack:
case RegTypeStructByAddrOnStack:
/* Pass by addr on stack */
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, src->dreg);
break;
case RegTypeHFA:
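		/* Homogeneous floating-point aggregate (AAPCS VFP): each element of the
		 * vtype is loaded from memory and passed in consecutive VFP registers. */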
for (i = 0; i < ainfo->nregs; ++i) {
if (ainfo->esize == 4)
MONO_INST_NEW (cfg, load, OP_LOADR4_MEMBASE);
else
MONO_INST_NEW (cfg, load, OP_LOADR8_MEMBASE);
load->dreg = mono_alloc_freg (cfg);
load->inst_basereg = src->dreg;
load->inst_offset = i * ainfo->esize;
MONO_ADD_INS (cfg->cbb, load);
if (ainfo->esize == 4) {
FloatArgData *fad;
/* See RegTypeFP in mono_arch_emit_call () */
MonoInst *float_arg = mono_compile_create_var (cfg, m_class_get_byval_arg (mono_defaults.single_class), OP_LOCAL);
float_arg->flags |= MONO_INST_VOLATILE;
MONO_EMIT_NEW_UNALU (cfg, OP_FMOVE, float_arg->dreg, load->dreg);
fad = mono_mempool_alloc0 (cfg->mempool, sizeof (FloatArgData));
fad->vreg = float_arg->dreg;
fad->hreg = ainfo->reg + i;
call->float_args = g_slist_append_mempool (cfg->mempool, call->float_args, fad);
} else {
add_outarg_reg (cfg, call, RegTypeFP, ainfo->reg + (i * 2), load);
}
}
break;
default:
soffset = 0;
for (i = 0; i < ainfo->size; ++i) {
dreg = mono_alloc_ireg (cfg);
switch (struct_size) {
case 1:
MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, dreg, src->dreg, soffset);
break;
case 2:
MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, dreg, src->dreg, soffset);
break;
case 3:
tmpreg = mono_alloc_ireg (cfg);
MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, dreg, src->dreg, soffset);
MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, tmpreg, src->dreg, soffset + 1);
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, tmpreg, tmpreg, 8);
MONO_EMIT_NEW_BIALU (cfg, OP_IOR, dreg, dreg, tmpreg);
MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, tmpreg, src->dreg, soffset + 2);
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, tmpreg, tmpreg, 16);
MONO_EMIT_NEW_BIALU (cfg, OP_IOR, dreg, dreg, tmpreg);
break;
default:
MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, src->dreg, soffset);
break;
}
mono_call_inst_add_outarg_reg (cfg, call, dreg, ainfo->reg + i, FALSE);
soffset += sizeof (target_mgreg_t);
struct_size -= sizeof (target_mgreg_t);
}
//g_print ("vt size: %d at R%d + %d\n", doffset, vt->inst_basereg, vt->inst_offset);
if (ovf_size != 0)
mini_emit_memcpy (cfg, ARMREG_SP, doffset, src->dreg, soffset, MIN (ovf_size * sizeof (target_mgreg_t), struct_size), struct_size < 4 ? 1 : 4);
break;
}
}
void
mono_arch_emit_setret (MonoCompile *cfg, MonoMethod *method, MonoInst *val)
{
MonoType *ret = mini_get_underlying_type (mono_method_signature_internal (method)->ret);
if (!m_type_is_byref (ret)) {
if (ret->type == MONO_TYPE_I8 || ret->type == MONO_TYPE_U8) {
MonoInst *ins;
if (COMPILE_LLVM (cfg)) {
MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
} else {
MONO_INST_NEW (cfg, ins, OP_SETLRET);
ins->sreg1 = MONO_LVREG_LS (val->dreg);
ins->sreg2 = MONO_LVREG_MS (val->dreg);
MONO_ADD_INS (cfg->cbb, ins);
}
return;
}
switch (arm_fpu) {
case MONO_ARM_FPU_NONE:
if (ret->type == MONO_TYPE_R8) {
MonoInst *ins;
MONO_INST_NEW (cfg, ins, OP_SETFRET);
ins->dreg = cfg->ret->dreg;
ins->sreg1 = val->dreg;
MONO_ADD_INS (cfg->cbb, ins);
return;
}
if (ret->type == MONO_TYPE_R4) {
/* Already converted to an int in method_to_ir () */
MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
return;
}
break;
case MONO_ARM_FPU_VFP:
case MONO_ARM_FPU_VFP_HARD:
if (ret->type == MONO_TYPE_R8 || ret->type == MONO_TYPE_R4) {
MonoInst *ins;
MONO_INST_NEW (cfg, ins, OP_SETFRET);
ins->dreg = cfg->ret->dreg;
ins->sreg1 = val->dreg;
MONO_ADD_INS (cfg->cbb, ins);
return;
}
break;
default:
g_assert_not_reached ();
}
}
MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
}
#endif /* #ifndef DISABLE_JIT */
gboolean
mono_arch_is_inst_imm (int opcode, int imm_opcode, gint64 imm)
{
return TRUE;
}
typedef struct {
MonoMethodSignature *sig;
CallInfo *cinfo;
MonoType *rtype;
MonoType **param_types;
} ArchDynCallInfo;
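/*
 * Overview of the dynamic call machinery: mono_arch_dyn_call_prepare ()
 * precomputes per-signature info, mono_arch_start_dyn_call () marshals the
 * arguments into a DynCallArgs buffer, a generic trampoline (defined
 * elsewhere) transfers that buffer into registers/stack slots and performs
 * the call, and mono_arch_finish_dyn_call () extracts the return value.
 */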
static gboolean
dyn_call_supported (CallInfo *cinfo, MonoMethodSignature *sig)
{
int i;
switch (cinfo->ret.storage) {
case RegTypeNone:
case RegTypeGeneral:
case RegTypeIRegPair:
case RegTypeStructByAddr:
break;
case RegTypeFP:
if (IS_VFP)
break;
else
return FALSE;
default:
return FALSE;
}
for (i = 0; i < cinfo->nargs; ++i) {
ArgInfo *ainfo = &cinfo->args [i];
int last_slot;
switch (ainfo->storage) {
case RegTypeGeneral:
case RegTypeIRegPair:
case RegTypeBaseGen:
case RegTypeFP:
break;
case RegTypeBase:
break;
case RegTypeStructByVal:
if (ainfo->size == 0)
last_slot = PARAM_REGS + (ainfo->offset / 4) + ainfo->vtsize;
else
last_slot = ainfo->reg + ainfo->size + ainfo->vtsize;
break;
default:
return FALSE;
}
}
	// FIXME: Can't use cinfo alone, as it doesn't contain info about I8/float
for (i = 0; i < sig->param_count; ++i) {
MonoType *t = sig->params [i];
if (m_type_is_byref (t))
continue;
t = mini_get_underlying_type (t);
switch (t->type) {
case MONO_TYPE_R4:
case MONO_TYPE_R8:
if (IS_SOFT_FLOAT)
return FALSE;
else
break;
/*
case MONO_TYPE_I8:
case MONO_TYPE_U8:
return FALSE;
*/
default:
break;
}
}
return TRUE;
}
MonoDynCallInfo*
mono_arch_dyn_call_prepare (MonoMethodSignature *sig)
{
ArchDynCallInfo *info;
CallInfo *cinfo;
int i;
cinfo = get_call_info (NULL, sig);
if (!dyn_call_supported (cinfo, sig)) {
g_free (cinfo);
return NULL;
}
info = g_new0 (ArchDynCallInfo, 1);
// FIXME: Preprocess the info to speed up start_dyn_call ()
info->sig = sig;
info->cinfo = cinfo;
info->rtype = mini_get_underlying_type (sig->ret);
info->param_types = g_new0 (MonoType*, sig->param_count);
for (i = 0; i < sig->param_count; ++i)
info->param_types [i] = mini_get_underlying_type (sig->params [i]);
return (MonoDynCallInfo*)info;
}
void
mono_arch_dyn_call_free (MonoDynCallInfo *info)
{
ArchDynCallInfo *ainfo = (ArchDynCallInfo*)info;
g_free (ainfo->cinfo);
g_free (ainfo);
}
int
mono_arch_dyn_call_get_buf_size (MonoDynCallInfo *info)
{
ArchDynCallInfo *ainfo = (ArchDynCallInfo*)info;
g_assert (ainfo->cinfo->stack_usage % MONO_ARCH_FRAME_ALIGNMENT == 0);
return sizeof (DynCallArgs) + ainfo->cinfo->stack_usage;
}
void
mono_arch_start_dyn_call (MonoDynCallInfo *info, gpointer **args, guint8 *ret, guint8 *buf)
{
ArchDynCallInfo *dinfo = (ArchDynCallInfo*)info;
CallInfo *cinfo = dinfo->cinfo;
DynCallArgs *p = (DynCallArgs*)buf;
int arg_index, greg, i, j, pindex;
MonoMethodSignature *sig = dinfo->sig;
p->res = 0;
p->ret = ret;
p->has_fpregs = 0;
p->n_stackargs = cinfo->stack_usage / sizeof (host_mgreg_t);
arg_index = 0;
greg = 0;
pindex = 0;
if (sig->hasthis || dinfo->cinfo->vret_arg_index == 1) {
p->regs [greg ++] = (host_mgreg_t)(gsize)*(args [arg_index ++]);
if (!sig->hasthis)
pindex = 1;
}
if (dinfo->cinfo->ret.storage == RegTypeStructByAddr)
p->regs [greg ++] = (host_mgreg_t)(gsize)ret;
for (i = pindex; i < sig->param_count; i++) {
MonoType *t = dinfo->param_types [i];
gpointer *arg = args [arg_index ++];
ArgInfo *ainfo = &dinfo->cinfo->args [i + sig->hasthis];
int slot = -1;
if (ainfo->storage == RegTypeGeneral || ainfo->storage == RegTypeIRegPair || ainfo->storage == RegTypeStructByVal) {
slot = ainfo->reg;
} else if (ainfo->storage == RegTypeFP) {
} else if (ainfo->storage == RegTypeBase) {
slot = PARAM_REGS + (ainfo->offset / 4);
} else if (ainfo->storage == RegTypeBaseGen) {
/* slot + 1 is the first stack slot, so the code below will work */
slot = 3;
} else {
g_assert_not_reached ();
}
if (m_type_is_byref (t)) {
p->regs [slot] = (host_mgreg_t)(gsize)*arg;
continue;
}
switch (t->type) {
case MONO_TYPE_OBJECT:
case MONO_TYPE_PTR:
case MONO_TYPE_I:
case MONO_TYPE_U:
p->regs [slot] = (host_mgreg_t)(gsize)*arg;
break;
case MONO_TYPE_U1:
p->regs [slot] = *(guint8*)arg;
break;
case MONO_TYPE_I1:
p->regs [slot] = *(gint8*)arg;
break;
case MONO_TYPE_I2:
p->regs [slot] = *(gint16*)arg;
break;
case MONO_TYPE_U2:
p->regs [slot] = *(guint16*)arg;
break;
case MONO_TYPE_I4:
p->regs [slot] = *(gint32*)arg;
break;
case MONO_TYPE_U4:
p->regs [slot] = *(guint32*)arg;
break;
case MONO_TYPE_I8:
case MONO_TYPE_U8:
p->regs [slot ++] = (host_mgreg_t)(gsize)arg [0];
p->regs [slot] = (host_mgreg_t)(gsize)arg [1];
break;
case MONO_TYPE_R4:
if (ainfo->storage == RegTypeFP) {
float f = *(float*)arg;
p->fpregs [ainfo->reg / 2] = *(double*)&f;
p->has_fpregs = 1;
} else {
p->regs [slot] = *(host_mgreg_t*)arg;
}
break;
case MONO_TYPE_R8:
if (ainfo->storage == RegTypeFP) {
p->fpregs [ainfo->reg / 2] = *(double*)arg;
p->has_fpregs = 1;
} else {
p->regs [slot ++] = (host_mgreg_t)(gsize)arg [0];
p->regs [slot] = (host_mgreg_t)(gsize)arg [1];
}
break;
case MONO_TYPE_GENERICINST:
if (MONO_TYPE_IS_REFERENCE (t)) {
p->regs [slot] = (host_mgreg_t)(gsize)*arg;
break;
} else {
if (t->type == MONO_TYPE_GENERICINST && mono_class_is_nullable (mono_class_from_mono_type_internal (t))) {
MonoClass *klass = mono_class_from_mono_type_internal (t);
guint8 *nullable_buf;
int size;
size = mono_class_value_size (klass, NULL);
nullable_buf = g_alloca (size);
g_assert (nullable_buf);
/* The argument pointed to by arg is either a boxed vtype or null */
mono_nullable_init (nullable_buf, (MonoObject*)arg, klass);
arg = (gpointer*)nullable_buf;
				/* Fall through */
} else {
				/* Fall through */
}
}
case MONO_TYPE_VALUETYPE:
g_assert (ainfo->storage == RegTypeStructByVal);
if (ainfo->size == 0)
slot = PARAM_REGS + (ainfo->offset / 4);
else
slot = ainfo->reg;
for (j = 0; j < ainfo->size + ainfo->vtsize; ++j)
p->regs [slot ++] = ((host_mgreg_t*)arg) [j];
break;
default:
g_assert_not_reached ();
}
}
}
void
mono_arch_finish_dyn_call (MonoDynCallInfo *info, guint8 *buf)
{
ArchDynCallInfo *ainfo = (ArchDynCallInfo*)info;
DynCallArgs *p = (DynCallArgs*)buf;
MonoType *ptype = ainfo->rtype;
guint8 *ret = p->ret;
host_mgreg_t res = p->res;
host_mgreg_t res2 = p->res2;
switch (ptype->type) {
case MONO_TYPE_VOID:
*(gpointer*)ret = NULL;
break;
case MONO_TYPE_OBJECT:
case MONO_TYPE_I:
case MONO_TYPE_U:
case MONO_TYPE_PTR:
*(gpointer*)ret = (gpointer)(gsize)res;
break;
case MONO_TYPE_I1:
*(gint8*)ret = res;
break;
case MONO_TYPE_U1:
*(guint8*)ret = res;
break;
case MONO_TYPE_I2:
*(gint16*)ret = res;
break;
case MONO_TYPE_U2:
*(guint16*)ret = res;
break;
case MONO_TYPE_I4:
*(gint32*)ret = res;
break;
case MONO_TYPE_U4:
*(guint32*)ret = res;
break;
case MONO_TYPE_I8:
case MONO_TYPE_U8:
/* This handles endianness as well */
((gint32*)ret) [0] = res;
((gint32*)ret) [1] = res2;
break;
case MONO_TYPE_GENERICINST:
if (MONO_TYPE_IS_REFERENCE (ptype)) {
*(gpointer*)ret = (gpointer)res;
break;
} else {
			/* Fall through */
}
case MONO_TYPE_VALUETYPE:
g_assert (ainfo->cinfo->ret.storage == RegTypeStructByAddr);
/* Nothing to do */
break;
case MONO_TYPE_R4:
g_assert (IS_VFP);
if (IS_HARD_FLOAT)
*(float*)ret = *(float*)&p->fpregs [0];
else
*(float*)ret = *(float*)&res;
break;
case MONO_TYPE_R8: {
host_mgreg_t regs [2];
g_assert (IS_VFP);
if (IS_HARD_FLOAT) {
*(double*)ret = p->fpregs [0];
} else {
regs [0] = res;
regs [1] = res2;
			*(double*)ret = *(double*)&regs;
}
break;
}
default:
g_assert_not_reached ();
}
}
#ifndef DISABLE_JIT
/*
* The immediate field for cond branches is big enough for all reasonable methods
*/
#define EMIT_COND_BRANCH_FLAGS(ins,condcode) \
if (0 && ins->inst_true_bb->native_offset) { \
ARM_B_COND (code, (condcode), (code - cfg->native_code + ins->inst_true_bb->native_offset) & 0xffffff); \
} else { \
mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_true_bb); \
ARM_B_COND (code, (condcode), 0); \
}
#define EMIT_COND_BRANCH(ins,cond) EMIT_COND_BRANCH_FLAGS(ins, branch_cc_table [(cond)])
/* emit an exception if the condition fails
*
* We assign the extra code used to throw the implicit exceptions
* to cfg->bb_exit as far as the big branch handling is concerned
*/
#define EMIT_COND_SYSTEM_EXCEPTION_FLAGS(condcode,exc_name) \
do { \
mono_add_patch_info (cfg, code - cfg->native_code, \
MONO_PATCH_INFO_EXC, exc_name); \
ARM_BL_COND (code, (condcode), 0); \
} while (0);
#define EMIT_COND_SYSTEM_EXCEPTION(cond,exc_name) EMIT_COND_SYSTEM_EXCEPTION_FLAGS(branch_cc_table [(cond)], (exc_name))
void
mono_arch_peephole_pass_1 (MonoCompile *cfg, MonoBasicBlock *bb)
{
}
void
mono_arch_peephole_pass_2 (MonoCompile *cfg, MonoBasicBlock *bb)
{
MonoInst *ins, *n;
MONO_BB_FOR_EACH_INS_SAFE (bb, n, ins) {
MonoInst *last_ins = mono_inst_prev (ins, FILTER_IL_SEQ_POINT);
switch (ins->opcode) {
case OP_MUL_IMM:
case OP_IMUL_IMM:
/* Already done by an arch-independent pass */
break;
case OP_LOAD_MEMBASE:
case OP_LOADI4_MEMBASE:
/*
* OP_STORE_MEMBASE_REG reg, offset(basereg)
* OP_LOAD_MEMBASE offset(basereg), reg
*/
if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_REG
|| last_ins->opcode == OP_STORE_MEMBASE_REG) &&
ins->inst_basereg == last_ins->inst_destbasereg &&
ins->inst_offset == last_ins->inst_offset) {
if (ins->dreg == last_ins->sreg1) {
MONO_DELETE_INS (bb, ins);
continue;
} else {
//static int c = 0; g_print ("MATCHX %s %d\n", cfg->method->name,c++);
ins->opcode = OP_MOVE;
ins->sreg1 = last_ins->sreg1;
}
/*
* Note: reg1 must be different from the basereg in the second load
* OP_LOAD_MEMBASE offset(basereg), reg1
* OP_LOAD_MEMBASE offset(basereg), reg2
* -->
* OP_LOAD_MEMBASE offset(basereg), reg1
* OP_MOVE reg1, reg2
*/
			} else if (last_ins && (last_ins->opcode == OP_LOADI4_MEMBASE
|| last_ins->opcode == OP_LOAD_MEMBASE) &&
ins->inst_basereg != last_ins->dreg &&
ins->inst_basereg == last_ins->inst_basereg &&
ins->inst_offset == last_ins->inst_offset) {
if (ins->dreg == last_ins->dreg) {
MONO_DELETE_INS (bb, ins);
continue;
} else {
ins->opcode = OP_MOVE;
ins->sreg1 = last_ins->dreg;
}
//g_assert_not_reached ();
#if 0
/*
* OP_STORE_MEMBASE_IMM imm, offset(basereg)
* OP_LOAD_MEMBASE offset(basereg), reg
* -->
* OP_STORE_MEMBASE_IMM imm, offset(basereg)
* OP_ICONST reg, imm
*/
} else if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_IMM
|| last_ins->opcode == OP_STORE_MEMBASE_IMM) &&
ins->inst_basereg == last_ins->inst_destbasereg &&
ins->inst_offset == last_ins->inst_offset) {
//static int c = 0; g_print ("MATCHX %s %d\n", cfg->method->name,c++);
ins->opcode = OP_ICONST;
ins->inst_c0 = last_ins->inst_imm;
g_assert_not_reached (); // check this rule
#endif
}
break;
case OP_LOADU1_MEMBASE:
case OP_LOADI1_MEMBASE:
if (last_ins && (last_ins->opcode == OP_STOREI1_MEMBASE_REG) &&
ins->inst_basereg == last_ins->inst_destbasereg &&
ins->inst_offset == last_ins->inst_offset) {
ins->opcode = (ins->opcode == OP_LOADI1_MEMBASE) ? OP_ICONV_TO_I1 : OP_ICONV_TO_U1;
ins->sreg1 = last_ins->sreg1;
}
break;
case OP_LOADU2_MEMBASE:
case OP_LOADI2_MEMBASE:
if (last_ins && (last_ins->opcode == OP_STOREI2_MEMBASE_REG) &&
ins->inst_basereg == last_ins->inst_destbasereg &&
ins->inst_offset == last_ins->inst_offset) {
ins->opcode = (ins->opcode == OP_LOADI2_MEMBASE) ? OP_ICONV_TO_I2 : OP_ICONV_TO_U2;
ins->sreg1 = last_ins->sreg1;
}
break;
case OP_MOVE:
ins->opcode = OP_MOVE;
/*
* OP_MOVE reg, reg
*/
if (ins->dreg == ins->sreg1) {
MONO_DELETE_INS (bb, ins);
continue;
}
/*
* OP_MOVE sreg, dreg
* OP_MOVE dreg, sreg
*/
if (last_ins && last_ins->opcode == OP_MOVE &&
ins->sreg1 == last_ins->dreg &&
ins->dreg == last_ins->sreg1) {
MONO_DELETE_INS (bb, ins);
continue;
}
break;
}
}
}
/*
 * branch_cc_table must be kept in the same order as these
 * opcodes:
case CEE_BEQ:
case CEE_BGE:
case CEE_BGT:
case CEE_BLE:
case CEE_BLT:
case CEE_BNE_UN:
case CEE_BGE_UN:
case CEE_BGT_UN:
case CEE_BLE_UN:
case CEE_BLT_UN:
*/
static const guchar
branch_cc_table [] = {
ARMCOND_EQ,
ARMCOND_GE,
ARMCOND_GT,
ARMCOND_LE,
ARMCOND_LT,
ARMCOND_NE,
ARMCOND_HS,
ARMCOND_HI,
ARMCOND_LS,
ARMCOND_LO
};
#define ADD_NEW_INS(cfg,dest,op) do { \
MONO_INST_NEW ((cfg), (dest), (op)); \
mono_bblock_insert_before_ins (bb, ins, (dest)); \
} while (0)
static int
map_to_reg_reg_op (int op)
{
switch (op) {
case OP_ADD_IMM:
return OP_IADD;
case OP_SUB_IMM:
return OP_ISUB;
case OP_AND_IMM:
return OP_IAND;
case OP_COMPARE_IMM:
return OP_COMPARE;
case OP_ICOMPARE_IMM:
return OP_ICOMPARE;
case OP_ADDCC_IMM:
return OP_ADDCC;
case OP_ADC_IMM:
return OP_ADC;
case OP_SUBCC_IMM:
return OP_SUBCC;
case OP_SBB_IMM:
return OP_SBB;
case OP_OR_IMM:
return OP_IOR;
case OP_XOR_IMM:
return OP_IXOR;
case OP_LOAD_MEMBASE:
return OP_LOAD_MEMINDEX;
case OP_LOADI4_MEMBASE:
return OP_LOADI4_MEMINDEX;
case OP_LOADU4_MEMBASE:
return OP_LOADU4_MEMINDEX;
case OP_LOADU1_MEMBASE:
return OP_LOADU1_MEMINDEX;
case OP_LOADI2_MEMBASE:
return OP_LOADI2_MEMINDEX;
case OP_LOADU2_MEMBASE:
return OP_LOADU2_MEMINDEX;
case OP_LOADI1_MEMBASE:
return OP_LOADI1_MEMINDEX;
case OP_STOREI1_MEMBASE_REG:
return OP_STOREI1_MEMINDEX;
case OP_STOREI2_MEMBASE_REG:
return OP_STOREI2_MEMINDEX;
case OP_STOREI4_MEMBASE_REG:
return OP_STOREI4_MEMINDEX;
case OP_STORE_MEMBASE_REG:
return OP_STORE_MEMINDEX;
case OP_STORER4_MEMBASE_REG:
return OP_STORER4_MEMINDEX;
case OP_STORER8_MEMBASE_REG:
return OP_STORER8_MEMINDEX;
case OP_STORE_MEMBASE_IMM:
return OP_STORE_MEMBASE_REG;
case OP_STOREI1_MEMBASE_IMM:
return OP_STOREI1_MEMBASE_REG;
case OP_STOREI2_MEMBASE_IMM:
return OP_STOREI2_MEMBASE_REG;
case OP_STOREI4_MEMBASE_IMM:
return OP_STOREI4_MEMBASE_REG;
}
g_assert_not_reached ();
}
/*
 * Lower the instructions that can't be represented directly: replace them
 * with sequences of very simple instructions that have no special register
 * requirements.
 */
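/*
 * Illustrative example of the lowering below: an OP_ADD_IMM whose immediate
 * can't be encoded as a rotated imm8 becomes
 *
 *   OP_ICONST tmp <- imm
 *   OP_IADD   dreg <- sreg1, tmp
 *
 * i.e. the constant is materialized into a new vreg and the opcode switches
 * to its register-register form via mono_op_imm_to_op ().
 */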
void
mono_arch_lowering_pass (MonoCompile *cfg, MonoBasicBlock *bb)
{
MonoInst *ins, *temp, *last_ins = NULL;
int rot_amount, imm8, low_imm;
MONO_BB_FOR_EACH_INS (bb, ins) {
loop_start:
switch (ins->opcode) {
case OP_ADD_IMM:
case OP_SUB_IMM:
case OP_AND_IMM:
case OP_COMPARE_IMM:
case OP_ICOMPARE_IMM:
case OP_ADDCC_IMM:
case OP_ADC_IMM:
case OP_SUBCC_IMM:
case OP_SBB_IMM:
case OP_OR_IMM:
case OP_XOR_IMM:
case OP_IADD_IMM:
case OP_ISUB_IMM:
case OP_IAND_IMM:
case OP_IADC_IMM:
case OP_ISBB_IMM:
case OP_IOR_IMM:
case OP_IXOR_IMM:
if ((imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount)) < 0) {
int opcode2 = mono_op_imm_to_op (ins->opcode);
ADD_NEW_INS (cfg, temp, OP_ICONST);
temp->inst_c0 = ins->inst_imm;
temp->dreg = mono_alloc_ireg (cfg);
ins->sreg2 = temp->dreg;
if (opcode2 == -1)
g_error ("mono_op_imm_to_op failed for %s\n", mono_inst_name (ins->opcode));
ins->opcode = opcode2;
}
if (ins->opcode == OP_SBB || ins->opcode == OP_ISBB || ins->opcode == OP_SUBCC)
goto loop_start;
else
break;
case OP_MUL_IMM:
case OP_IMUL_IMM:
if (ins->inst_imm == 1) {
ins->opcode = OP_MOVE;
break;
}
if (ins->inst_imm == 0) {
ins->opcode = OP_ICONST;
ins->inst_c0 = 0;
break;
}
imm8 = (ins->inst_imm > 0) ? mono_is_power_of_two (ins->inst_imm) : -1;
if (imm8 > 0) {
ins->opcode = OP_SHL_IMM;
ins->inst_imm = imm8;
break;
}
ADD_NEW_INS (cfg, temp, OP_ICONST);
temp->inst_c0 = ins->inst_imm;
temp->dreg = mono_alloc_ireg (cfg);
ins->sreg2 = temp->dreg;
ins->opcode = OP_IMUL;
break;
case OP_SBB:
case OP_ISBB:
case OP_SUBCC:
case OP_ISUBCC: {
int try_count = 2;
MonoInst *current = ins;
		/* may require a look-ahead of a couple of instructions due to spilling */
while (try_count-- && current->next) {
if (current->next->opcode == OP_COND_EXC_C || current->next->opcode == OP_COND_EXC_IC) {
				/* ARM sets the C (carry) flag to 1 if the subtraction caused _no_ borrow, the opposite of x86 */
current->next->opcode = OP_COND_EXC_NC;
break;
}
current = current->next;
}
break;
}
case OP_IDIV_IMM:
case OP_IDIV_UN_IMM:
case OP_IREM_IMM:
case OP_IREM_UN_IMM: {
int opcode2 = mono_op_imm_to_op (ins->opcode);
ADD_NEW_INS (cfg, temp, OP_ICONST);
temp->inst_c0 = ins->inst_imm;
temp->dreg = mono_alloc_ireg (cfg);
ins->sreg2 = temp->dreg;
if (opcode2 == -1)
g_error ("mono_op_imm_to_op failed for %s\n", mono_inst_name (ins->opcode));
ins->opcode = opcode2;
break;
}
case OP_LOCALLOC_IMM:
ADD_NEW_INS (cfg, temp, OP_ICONST);
temp->inst_c0 = ins->inst_imm;
temp->dreg = mono_alloc_ireg (cfg);
ins->sreg1 = temp->dreg;
ins->opcode = OP_LOCALLOC;
break;
case OP_LOAD_MEMBASE:
case OP_LOADI4_MEMBASE:
case OP_LOADU4_MEMBASE:
case OP_LOADU1_MEMBASE:
		/* we can do two things: load the immediate in a register
		 * and use an indexed load, or see if the immediate can be
		 * represented as an add_imm + a load with a smaller offset
		 * that fits. We just do the first for now, optimize later.
		 */
if (arm_is_imm12 (ins->inst_offset))
break;
ADD_NEW_INS (cfg, temp, OP_ICONST);
temp->inst_c0 = ins->inst_offset;
temp->dreg = mono_alloc_ireg (cfg);
ins->sreg2 = temp->dreg;
ins->opcode = map_to_reg_reg_op (ins->opcode);
break;
case OP_LOADI2_MEMBASE:
case OP_LOADU2_MEMBASE:
case OP_LOADI1_MEMBASE:
if (arm_is_imm8 (ins->inst_offset))
break;
ADD_NEW_INS (cfg, temp, OP_ICONST);
temp->inst_c0 = ins->inst_offset;
temp->dreg = mono_alloc_ireg (cfg);
ins->sreg2 = temp->dreg;
ins->opcode = map_to_reg_reg_op (ins->opcode);
break;
case OP_LOADR4_MEMBASE:
case OP_LOADR8_MEMBASE:
if (arm_is_fpimm8 (ins->inst_offset))
break;
low_imm = ins->inst_offset & 0x1ff;
if ((imm8 = mono_arm_is_rotated_imm8 (ins->inst_offset & ~0x1ff, &rot_amount)) >= 0) {
ADD_NEW_INS (cfg, temp, OP_ADD_IMM);
temp->inst_imm = ins->inst_offset & ~0x1ff;
temp->sreg1 = ins->inst_basereg;
temp->dreg = mono_alloc_ireg (cfg);
ins->inst_basereg = temp->dreg;
ins->inst_offset = low_imm;
} else {
MonoInst *add_ins;
ADD_NEW_INS (cfg, temp, OP_ICONST);
temp->inst_c0 = ins->inst_offset;
temp->dreg = mono_alloc_ireg (cfg);
ADD_NEW_INS (cfg, add_ins, OP_IADD);
add_ins->sreg1 = ins->inst_basereg;
add_ins->sreg2 = temp->dreg;
add_ins->dreg = mono_alloc_ireg (cfg);
ins->inst_basereg = add_ins->dreg;
ins->inst_offset = 0;
}
break;
case OP_STORE_MEMBASE_REG:
case OP_STOREI4_MEMBASE_REG:
case OP_STOREI1_MEMBASE_REG:
if (arm_is_imm12 (ins->inst_offset))
break;
ADD_NEW_INS (cfg, temp, OP_ICONST);
temp->inst_c0 = ins->inst_offset;
temp->dreg = mono_alloc_ireg (cfg);
ins->sreg2 = temp->dreg;
ins->opcode = map_to_reg_reg_op (ins->opcode);
break;
case OP_STOREI2_MEMBASE_REG:
if (arm_is_imm8 (ins->inst_offset))
break;
ADD_NEW_INS (cfg, temp, OP_ICONST);
temp->inst_c0 = ins->inst_offset;
temp->dreg = mono_alloc_ireg (cfg);
ins->sreg2 = temp->dreg;
ins->opcode = map_to_reg_reg_op (ins->opcode);
break;
case OP_STORER4_MEMBASE_REG:
case OP_STORER8_MEMBASE_REG:
if (arm_is_fpimm8 (ins->inst_offset))
break;
low_imm = ins->inst_offset & 0x1ff;
if ((imm8 = mono_arm_is_rotated_imm8 (ins->inst_offset & ~ 0x1ff, &rot_amount)) >= 0 && arm_is_fpimm8 (low_imm)) {
ADD_NEW_INS (cfg, temp, OP_ADD_IMM);
temp->inst_imm = ins->inst_offset & ~0x1ff;
temp->sreg1 = ins->inst_destbasereg;
temp->dreg = mono_alloc_ireg (cfg);
ins->inst_destbasereg = temp->dreg;
ins->inst_offset = low_imm;
} else {
MonoInst *add_ins;
ADD_NEW_INS (cfg, temp, OP_ICONST);
temp->inst_c0 = ins->inst_offset;
temp->dreg = mono_alloc_ireg (cfg);
ADD_NEW_INS (cfg, add_ins, OP_IADD);
add_ins->sreg1 = ins->inst_destbasereg;
add_ins->sreg2 = temp->dreg;
add_ins->dreg = mono_alloc_ireg (cfg);
ins->inst_destbasereg = add_ins->dreg;
ins->inst_offset = 0;
}
break;
case OP_STORE_MEMBASE_IMM:
case OP_STOREI1_MEMBASE_IMM:
case OP_STOREI2_MEMBASE_IMM:
case OP_STOREI4_MEMBASE_IMM:
ADD_NEW_INS (cfg, temp, OP_ICONST);
temp->inst_c0 = ins->inst_imm;
temp->dreg = mono_alloc_ireg (cfg);
ins->sreg1 = temp->dreg;
ins->opcode = map_to_reg_reg_op (ins->opcode);
last_ins = temp;
goto loop_start; /* make it handle the possibly big ins->inst_offset */
case OP_FCOMPARE:
case OP_RCOMPARE: {
gboolean swap = FALSE;
int reg;
if (!ins->next) {
/* Optimized away */
NULLIFY_INS (ins);
break;
}
/* Some fp compares require swapped operands */
switch (ins->next->opcode) {
case OP_FBGT:
ins->next->opcode = OP_FBLT;
swap = TRUE;
break;
case OP_FBGT_UN:
ins->next->opcode = OP_FBLT_UN;
swap = TRUE;
break;
case OP_FBLE:
ins->next->opcode = OP_FBGE;
swap = TRUE;
break;
case OP_FBLE_UN:
ins->next->opcode = OP_FBGE_UN;
swap = TRUE;
break;
default:
break;
}
if (swap) {
reg = ins->sreg1;
ins->sreg1 = ins->sreg2;
ins->sreg2 = reg;
}
break;
}
}
last_ins = ins;
}
bb->last_ins = last_ins;
bb->max_vreg = cfg->next_vreg;
}
void
mono_arch_decompose_long_opts (MonoCompile *cfg, MonoInst *long_ins)
{
MonoInst *ins;
if (long_ins->opcode == OP_LNEG) {
ins = long_ins;
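		/* 64-bit negation (0 - sreg1): reverse-subtract the low word from 0
		 * setting the carry, then reverse-subtract-with-carry the high word
		 * to propagate the borrow. */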
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ARM_RSBS_IMM, MONO_LVREG_LS (ins->dreg), MONO_LVREG_LS (ins->sreg1), 0);
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ARM_RSC_IMM, MONO_LVREG_MS (ins->dreg), MONO_LVREG_MS (ins->sreg1), 0);
NULLIFY_INS (ins);
}
}
static guchar*
emit_float_to_int (MonoCompile *cfg, guchar *code, int dreg, int sreg, int size, gboolean is_signed)
{
/* sreg is a float, dreg is an integer reg */
if (IS_VFP) {
code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
if (is_signed)
ARM_TOSIZD (code, vfp_scratch1, sreg);
else
ARM_TOUIZD (code, vfp_scratch1, sreg);
ARM_FMRS (code, dreg, vfp_scratch1);
code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
}
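	/* Narrow the integer result to 'size': a shift-left/logical-shift-right
	 * pair zero-extends, a shift-left/arithmetic-shift-right pair sign-extends. */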
if (!is_signed) {
if (size == 1)
ARM_AND_REG_IMM8 (code, dreg, dreg, 0xff);
else if (size == 2) {
ARM_SHL_IMM (code, dreg, dreg, 16);
ARM_SHR_IMM (code, dreg, dreg, 16);
}
} else {
if (size == 1) {
ARM_SHL_IMM (code, dreg, dreg, 24);
ARM_SAR_IMM (code, dreg, dreg, 24);
} else if (size == 2) {
ARM_SHL_IMM (code, dreg, dreg, 16);
ARM_SAR_IMM (code, dreg, dreg, 16);
}
}
return code;
}
static guchar*
emit_r4_to_int (MonoCompile *cfg, guchar *code, int dreg, int sreg, int size, gboolean is_signed)
{
/* sreg is a float, dreg is an integer reg */
g_assert (IS_VFP);
code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
if (is_signed)
ARM_TOSIZS (code, vfp_scratch1, sreg);
else
ARM_TOUIZS (code, vfp_scratch1, sreg);
ARM_FMRS (code, dreg, vfp_scratch1);
code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
if (!is_signed) {
if (size == 1)
ARM_AND_REG_IMM8 (code, dreg, dreg, 0xff);
else if (size == 2) {
ARM_SHL_IMM (code, dreg, dreg, 16);
ARM_SHR_IMM (code, dreg, dreg, 16);
}
} else {
if (size == 1) {
ARM_SHL_IMM (code, dreg, dreg, 24);
ARM_SAR_IMM (code, dreg, dreg, 24);
} else if (size == 2) {
ARM_SHL_IMM (code, dreg, dreg, 16);
ARM_SAR_IMM (code, dreg, dreg, 16);
}
}
return code;
}
#endif /* #ifndef DISABLE_JIT */
#define is_call_imm(diff) ((gint)(diff) >= -33554432 && (gint)(diff) <= 33554431)
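/* The ARM B/BL encoding holds a signed 24-bit word offset, i.e. a branch
 * range of +/- 32 MB (2^25 bytes); calls outside this range go through a
 * thunk emitted by the code below. */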
static void
emit_thunk (guint8 *code, gconstpointer target)
{
guint8 *p = code;
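	/* Thunk layout: a pc-relative load of the target address into ip followed
	 * by an indirect jump; the literal word is placed right after the jump,
	 * which is where the ldr (reading from pc + 8) finds it. */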
ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
if (thumb_supported)
ARM_BX (code, ARMREG_IP);
else
ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
*(guint32*)code = (guint32)(gsize)target;
code += 4;
mono_arch_flush_icache (p, code - p);
}
static void
handle_thunk (MonoCompile *cfg, guchar *code, const guchar *target)
{
MonoJitInfo *ji = NULL;
MonoThunkJitInfo *info;
guint8 *thunks, *p;
int thunks_size;
guint8 *orig_target;
guint8 *target_thunk;
if (cfg) {
/*
* This can be called multiple times during JITting,
* save the current position in cfg->arch to avoid
		 * doing an O(n^2) search.
*/
if (!cfg->arch.thunks) {
cfg->arch.thunks = cfg->thunks;
cfg->arch.thunks_size = cfg->thunk_area;
}
thunks = cfg->arch.thunks;
thunks_size = cfg->arch.thunks_size;
if (!thunks_size) {
g_print ("thunk failed %p->%p, thunk space=%d method %s", code, target, thunks_size, mono_method_full_name (cfg->method, TRUE));
g_assert_not_reached ();
}
g_assert (*(guint32*)thunks == 0);
emit_thunk (thunks, target);
arm_patch (code, thunks);
cfg->arch.thunks += THUNK_SIZE;
cfg->arch.thunks_size -= THUNK_SIZE;
} else {
ji = mini_jit_info_table_find (code);
g_assert (ji);
info = mono_jit_info_get_thunk_info (ji);
g_assert (info);
thunks = (guint8*)ji->code_start + info->thunks_offset;
thunks_size = info->thunks_size;
orig_target = mono_arch_get_call_target (code + 4);
mono_mini_arch_lock ();
target_thunk = NULL;
if (orig_target >= thunks && orig_target < thunks + thunks_size) {
/* The call already points to a thunk, because of trampolines etc. */
target_thunk = orig_target;
} else {
for (p = thunks; p < thunks + thunks_size; p += THUNK_SIZE) {
if (((guint32*)p) [0] == 0) {
/* Free entry */
target_thunk = p;
break;
} else if (((guint32*)p) [2] == (guint32)(gsize)target) {
/* Thunk already points to target */
target_thunk = p;
break;
}
}
}
//g_print ("THUNK: %p %p %p\n", code, target, target_thunk);
if (!target_thunk) {
mono_mini_arch_unlock ();
g_print ("thunk failed %p->%p, thunk space=%d method %s", code, target, thunks_size, cfg ? mono_method_full_name (cfg->method, TRUE) : mono_method_full_name (jinfo_get_method (ji), TRUE));
g_assert_not_reached ();
}
emit_thunk (target_thunk, target);
arm_patch (code, target_thunk);
mono_arch_flush_icache (code, 4);
mono_mini_arch_unlock ();
}
}
static void
arm_patch_general (MonoCompile *cfg, guchar *code, const guchar *target)
{
guint32 *code32 = (guint32*)code;
guint32 ins = *code32;
guint32 prim = (ins >> 25) & 7;
guint32 tval = GPOINTER_TO_UINT (target);
//g_print ("patching 0x%08x (0x%08x) to point to 0x%08x\n", code, ins, target);
if (prim == 5) { /* 101b */
/* the diff starts 8 bytes from the branch opcode */
gint diff = target - code - 8;
gint tbits;
gint tmask = 0xffffffff;
if (tval & 1) { /* entering thumb mode */
diff = target - 1 - code - 8;
g_assert (thumb_supported);
tbits = 0xf << 28; /* bl->blx bit pattern */
g_assert ((ins & (1 << 24))); /* it must be a bl, not b instruction */
/* this low bit of the displacement is moved to bit 24 in the instruction encoding */
if (diff & 2) {
tbits |= 1 << 24;
}
tmask = ~(1 << 24); /* clear the link bit */
/*g_print ("blx to thumb: target: %p, code: %p, diff: %d, mask: %x\n", target, code, diff, tmask);*/
} else {
tbits = 0;
}
if (diff >= 0) {
if (diff <= 33554431) {
diff >>= 2;
ins = (ins & 0xff000000) | diff;
ins &= tmask;
*code32 = ins | tbits;
return;
}
} else {
/* diff between 0 and -33554432 */
if (diff >= -33554432) {
diff >>= 2;
ins = (ins & 0xff000000) | (diff & ~0xff000000);
ins &= tmask;
*code32 = ins | tbits;
return;
}
}
handle_thunk (cfg, code, target);
return;
}
/*
 * The alternative call sequence looks like this:
*
* ldr ip, [pc] // loads the address constant
* b 1f // jumps around the constant
* address constant embedded in the code
* 1f:
* mov lr, pc
* mov pc, ip
*
* There are two cases for patching:
* a) at the end of method emission: in this case code points to the start
* of the call sequence
* b) during runtime patching of the call site: in this case code points
* to the mov pc, ip instruction
*
* We have to handle also the thunk jump code sequence:
*
* ldr ip, [pc]
* mov pc, ip
* address constant // execution never reaches here
*/
if ((ins & 0x0ffffff0) == 0x12fff10) {
/* Branch and exchange: the address is constructed in a reg
* We can patch BX when the code sequence is the following:
* ldr ip, [pc, #0] ; 0x8
* b 0xc
* .word code_ptr
* mov lr, pc
		 * bx ip
* */
guint32 ccode [4];
guint8 *emit = (guint8*)ccode;
ARM_LDR_IMM (emit, ARMREG_IP, ARMREG_PC, 0);
ARM_B (emit, 0);
ARM_MOV_REG_REG (emit, ARMREG_LR, ARMREG_PC);
ARM_BX (emit, ARMREG_IP);
/*patching from magic trampoline*/
if (ins == ccode [3]) {
g_assert (code32 [-4] == ccode [0]);
g_assert (code32 [-3] == ccode [1]);
g_assert (code32 [-1] == ccode [2]);
code32 [-2] = (guint32)(gsize)target;
return;
}
/*patching from JIT*/
if (ins == ccode [0]) {
g_assert (code32 [1] == ccode [1]);
g_assert (code32 [3] == ccode [2]);
g_assert (code32 [4] == ccode [3]);
code32 [2] = (guint32)(gsize)target;
return;
}
g_assert_not_reached ();
} else if ((ins & 0x0ffffff0) == 0x12fff30) {
/*
* ldr ip, [pc, #0]
* b 0xc
* .word code_ptr
* blx ip
*/
guint32 ccode [4];
guint8 *emit = (guint8*)ccode;
ARM_LDR_IMM (emit, ARMREG_IP, ARMREG_PC, 0);
ARM_B (emit, 0);
ARM_BLX_REG (emit, ARMREG_IP);
g_assert (code32 [-3] == ccode [0]);
g_assert (code32 [-2] == ccode [1]);
g_assert (code32 [0] == ccode [2]);
code32 [-1] = (guint32)(gsize)target;
} else {
guint32 ccode [4];
guint32 *tmp = ccode;
guint8 *emit = (guint8*)tmp;
ARM_LDR_IMM (emit, ARMREG_IP, ARMREG_PC, 0);
ARM_MOV_REG_REG (emit, ARMREG_LR, ARMREG_PC);
ARM_MOV_REG_REG (emit, ARMREG_PC, ARMREG_IP);
ARM_BX (emit, ARMREG_IP);
if (ins == ccode [2]) {
g_assert_not_reached (); // should be -2 ...
code32 [-1] = (guint32)(gsize)target;
return;
}
if (ins == ccode [0]) {
/* handles both thunk jump code and the far call sequence */
code32 [2] = (guint32)(gsize)target;
return;
}
g_assert_not_reached ();
}
// g_print ("patched with 0x%08x\n", ins);
}
void
arm_patch (guchar *code, const guchar *target)
{
arm_patch_general (NULL, code, target);
}
/*
 * Return the >= 0 uimm8 value if val can be represented as a byte + rotation
 * (with the rotation amount stored in *rot_amount; rot_amount is already
 * adjusted to be used with the emit macros).
 * Return -1 otherwise.
 */
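/*
 * Example (illustrative): 0xFF00 is the byte 0xFF rotated right by 24 bit
 * positions, so mono_arm_is_rotated_imm8 (0xFF00, &rot) returns 0xFF with
 * rot == 24; 0x1234 spans more than 8 significant bits at every even
 * rotation, so it yields -1.
 */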
int
mono_arm_is_rotated_imm8 (guint32 val, gint *rot_amount)
{
guint32 res, i;
for (i = 0; i < 31; i+= 2) {
if (i == 0)
res = val;
else
res = (val << (32 - i)) | (val >> i);
if (res & ~0xff)
continue;
*rot_amount = i? 32 - i: 0;
return res;
}
return -1;
}
/*
* Emits in code a sequence of instructions that load the value 'val'
* into the dreg register. Uses at most 4 instructions.
*/
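/*
 * For example (illustrative): on pre-v7 cores 0x12345678 is built as
 *   mov dreg, #0x78
 *   add dreg, dreg, #0x5600
 *   add dreg, dreg, #0x340000
 *   add dreg, dreg, #0x12000000
 * while on v7 a movw/movt pair suffices.
 */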
guint8*
mono_arm_emit_load_imm (guint8 *code, int dreg, guint32 val)
{
int imm8, rot_amount;
#if 0
ARM_LDR_IMM (code, dreg, ARMREG_PC, 0);
/* skip the constant pool */
ARM_B (code, 0);
*(int*)code = val;
code += 4;
return code;
#endif
if (mini_debug_options.single_imm_size && v7_supported) {
ARM_MOVW_REG_IMM (code, dreg, val & 0xffff);
ARM_MOVT_REG_IMM (code, dreg, (val >> 16) & 0xffff);
return code;
}
if ((imm8 = mono_arm_is_rotated_imm8 (val, &rot_amount)) >= 0) {
ARM_MOV_REG_IMM (code, dreg, imm8, rot_amount);
} else if ((imm8 = mono_arm_is_rotated_imm8 (~val, &rot_amount)) >= 0) {
ARM_MVN_REG_IMM (code, dreg, imm8, rot_amount);
} else {
if (v7_supported) {
ARM_MOVW_REG_IMM (code, dreg, val & 0xffff);
if (val >> 16)
ARM_MOVT_REG_IMM (code, dreg, (val >> 16) & 0xffff);
return code;
}
if (val & 0xFF) {
ARM_MOV_REG_IMM8 (code, dreg, (val & 0xFF));
if (val & 0xFF00) {
ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF00) >> 8, 24);
}
if (val & 0xFF0000) {
ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF0000) >> 16, 16);
}
if (val & 0xFF000000) {
ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF000000) >> 24, 8);
}
} else if (val & 0xFF00) {
ARM_MOV_REG_IMM (code, dreg, (val & 0xFF00) >> 8, 24);
if (val & 0xFF0000) {
ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF0000) >> 16, 16);
}
if (val & 0xFF000000) {
ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF000000) >> 24, 8);
}
} else if (val & 0xFF0000) {
ARM_MOV_REG_IMM (code, dreg, (val & 0xFF0000) >> 16, 16);
if (val & 0xFF000000) {
ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF000000) >> 24, 8);
}
}
//g_assert_not_reached ();
}
return code;
}
gboolean
mono_arm_thumb_supported (void)
{
return thumb_supported;
}
gboolean
mono_arm_eabi_supported (void)
{
return eabi_supported;
}
int
mono_arm_i8_align (void)
{
return i8_align;
}
#ifndef DISABLE_JIT
static guint8*
emit_move_return_value (MonoCompile *cfg, MonoInst *ins, guint8 *code)
{
CallInfo *cinfo;
MonoCallInst *call;
call = (MonoCallInst*)ins;
cinfo = call->call_info;
switch (cinfo->ret.storage) {
case RegTypeStructByVal:
case RegTypeHFA: {
MonoInst *loc = cfg->arch.vret_addr_loc;
int i;
if (cinfo->ret.storage == RegTypeStructByVal && cinfo->ret.nregs == 1) {
/* The JIT treats this as a normal call */
break;
}
/* Load the destination address */
g_assert (loc && loc->opcode == OP_REGOFFSET);
if (arm_is_imm12 (loc->inst_offset)) {
ARM_LDR_IMM (code, ARMREG_LR, loc->inst_basereg, loc->inst_offset);
} else {
code = mono_arm_emit_load_imm (code, ARMREG_LR, loc->inst_offset);
ARM_LDR_REG_REG (code, ARMREG_LR, loc->inst_basereg, ARMREG_LR);
}
if (cinfo->ret.storage == RegTypeStructByVal) {
int rsize = cinfo->ret.struct_size;
for (i = 0; i < cinfo->ret.nregs; ++i) {
g_assert (rsize >= 0);
switch (rsize) {
case 0:
break;
case 1:
ARM_STRB_IMM (code, i, ARMREG_LR, i * 4);
break;
case 2:
ARM_STRH_IMM (code, i, ARMREG_LR, i * 4);
break;
default:
ARM_STR_IMM (code, i, ARMREG_LR, i * 4);
break;
}
rsize -= 4;
}
} else {
for (i = 0; i < cinfo->ret.nregs; ++i) {
if (cinfo->ret.esize == 4)
ARM_FSTS (code, cinfo->ret.reg + i, ARMREG_LR, i * 4);
else
ARM_FSTD (code, cinfo->ret.reg + (i * 2), ARMREG_LR, i * 8);
}
}
return code;
}
default:
break;
}
switch (ins->opcode) {
case OP_FCALL:
case OP_FCALL_REG:
case OP_FCALL_MEMBASE:
if (IS_VFP) {
MonoType *sig_ret = mini_get_underlying_type (((MonoCallInst*)ins)->signature->ret);
if (sig_ret->type == MONO_TYPE_R4) {
if (IS_HARD_FLOAT) {
ARM_CVTS (code, ins->dreg, ARM_VFP_F0);
} else {
ARM_FMSR (code, ins->dreg, ARMREG_R0);
ARM_CVTS (code, ins->dreg, ins->dreg);
}
} else {
if (IS_HARD_FLOAT) {
ARM_CPYD (code, ins->dreg, ARM_VFP_D0);
} else {
ARM_FMDRR (code, ARMREG_R0, ARMREG_R1, ins->dreg);
}
}
}
break;
case OP_RCALL:
case OP_RCALL_REG:
case OP_RCALL_MEMBASE: {
MonoType *sig_ret;
g_assert (IS_VFP);
sig_ret = mini_get_underlying_type (((MonoCallInst*)ins)->signature->ret);
g_assert (sig_ret->type == MONO_TYPE_R4);
if (IS_HARD_FLOAT) {
ARM_CPYS (code, ins->dreg, ARM_VFP_F0);
} else {
ARM_FMSR (code, ins->dreg, ARMREG_R0);
ARM_CPYS (code, ins->dreg, ins->dreg);
}
break;
}
default:
break;
}
return code;
}
void
mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
{
MonoInst *ins;
MonoCallInst *call;
guint8 *code = cfg->native_code + cfg->code_len;
MonoInst *last_ins = NULL;
int max_len, cpos;
int imm8, rot_amount;
/* we don't align basic blocks of loops on arm */
if (cfg->verbose_level > 2)
g_print ("Basic block %d starting at offset 0x%x\n", bb->block_num, bb->native_offset);
cpos = bb->max_offset;
if (mono_break_at_bb_method && mono_method_desc_full_match (mono_break_at_bb_method, cfg->method) && bb->block_num == mono_break_at_bb_bb_num) {
mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_JIT_ICALL_ID,
GUINT_TO_POINTER (MONO_JIT_ICALL_mono_break));
code = emit_call_seq (cfg, code);
}
MONO_BB_FOR_EACH_INS (bb, ins) {
guint offset = code - cfg->native_code;
set_code_cursor (cfg, code);
max_len = ins_get_size (ins->opcode);
code = realloc_code (cfg, max_len);
// if (ins->cil_code)
// g_print ("cil code\n");
mono_debug_record_line_number (cfg, ins, offset);
switch (ins->opcode) {
case OP_MEMORY_BARRIER:
if (v7_supported) {
ARM_DMB (code, ARM_DMB_ISH);
} else if (v6_supported) {
ARM_MOV_REG_IMM8 (code, ARMREG_R0, 0);
ARM_MCR (code, 15, 0, ARMREG_R0, 7, 10, 5);
}
break;
case OP_TLS_GET:
code = emit_tls_get (code, ins->dreg, ins->inst_offset);
break;
case OP_TLS_SET:
code = emit_tls_set (code, ins->sreg1, ins->inst_offset);
break;
case OP_ATOMIC_EXCHANGE_I4:
case OP_ATOMIC_CAS_I4:
case OP_ATOMIC_ADD_I4: {
int tmpreg;
guint8 *buf [16];
g_assert (v7_supported);
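		/* These are implemented with ldrex/strex retry loops: the exclusive
		 * store writes a non-zero status to tmpreg if another observer touched
		 * the location, in which case we branch back and retry. */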
/* Free up a reg */
if (ins->sreg1 != ARMREG_IP && ins->sreg2 != ARMREG_IP && ins->sreg3 != ARMREG_IP)
tmpreg = ARMREG_IP;
else if (ins->sreg1 != ARMREG_R0 && ins->sreg2 != ARMREG_R0 && ins->sreg3 != ARMREG_R0)
tmpreg = ARMREG_R0;
else if (ins->sreg1 != ARMREG_R1 && ins->sreg2 != ARMREG_R1 && ins->sreg3 != ARMREG_R1)
tmpreg = ARMREG_R1;
else
tmpreg = ARMREG_R2;
g_assert (cfg->arch.atomic_tmp_offset != -1);
ARM_STR_IMM (code, tmpreg, cfg->frame_reg, cfg->arch.atomic_tmp_offset);
switch (ins->opcode) {
case OP_ATOMIC_EXCHANGE_I4:
buf [0] = code;
ARM_DMB (code, ARM_DMB_ISH);
ARM_LDREX_REG (code, ARMREG_LR, ins->sreg1);
ARM_STREX_REG (code, tmpreg, ins->sreg2, ins->sreg1);
ARM_CMP_REG_IMM (code, tmpreg, 0, 0);
buf [1] = code;
ARM_B_COND (code, ARMCOND_NE, 0);
arm_patch (buf [1], buf [0]);
break;
case OP_ATOMIC_CAS_I4:
ARM_DMB (code, ARM_DMB_ISH);
buf [0] = code;
ARM_LDREX_REG (code, ARMREG_LR, ins->sreg1);
ARM_CMP_REG_REG (code, ARMREG_LR, ins->sreg3);
buf [1] = code;
ARM_B_COND (code, ARMCOND_NE, 0);
ARM_STREX_REG (code, tmpreg, ins->sreg2, ins->sreg1);
ARM_CMP_REG_IMM (code, tmpreg, 0, 0);
buf [2] = code;
ARM_B_COND (code, ARMCOND_NE, 0);
arm_patch (buf [2], buf [0]);
arm_patch (buf [1], code);
break;
case OP_ATOMIC_ADD_I4:
buf [0] = code;
ARM_DMB (code, ARM_DMB_ISH);
ARM_LDREX_REG (code, ARMREG_LR, ins->sreg1);
ARM_ADD_REG_REG (code, ARMREG_LR, ARMREG_LR, ins->sreg2);
ARM_STREX_REG (code, tmpreg, ARMREG_LR, ins->sreg1);
ARM_CMP_REG_IMM (code, tmpreg, 0, 0);
buf [1] = code;
ARM_B_COND (code, ARMCOND_NE, 0);
arm_patch (buf [1], buf [0]);
break;
default:
g_assert_not_reached ();
}
ARM_DMB (code, ARM_DMB_ISH);
if (tmpreg != ins->dreg)
ARM_LDR_IMM (code, tmpreg, cfg->frame_reg, cfg->arch.atomic_tmp_offset);
ARM_MOV_REG_REG (code, ins->dreg, ARMREG_LR);
break;
}
case OP_ATOMIC_LOAD_I1:
case OP_ATOMIC_LOAD_U1:
case OP_ATOMIC_LOAD_I2:
case OP_ATOMIC_LOAD_U2:
case OP_ATOMIC_LOAD_I4:
case OP_ATOMIC_LOAD_U4:
case OP_ATOMIC_LOAD_R4:
case OP_ATOMIC_LOAD_R8: {
if (ins->backend.memory_barrier_kind == MONO_MEMORY_BARRIER_SEQ)
ARM_DMB (code, ARM_DMB_ISH);
code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
switch (ins->opcode) {
case OP_ATOMIC_LOAD_I1:
ARM_LDRSB_REG_REG (code, ins->dreg, ins->inst_basereg, ARMREG_LR);
break;
case OP_ATOMIC_LOAD_U1:
ARM_LDRB_REG_REG (code, ins->dreg, ins->inst_basereg, ARMREG_LR);
break;
case OP_ATOMIC_LOAD_I2:
ARM_LDRSH_REG_REG (code, ins->dreg, ins->inst_basereg, ARMREG_LR);
break;
case OP_ATOMIC_LOAD_U2:
ARM_LDRH_REG_REG (code, ins->dreg, ins->inst_basereg, ARMREG_LR);
break;
case OP_ATOMIC_LOAD_I4:
case OP_ATOMIC_LOAD_U4:
ARM_LDR_REG_REG (code, ins->dreg, ins->inst_basereg, ARMREG_LR);
break;
case OP_ATOMIC_LOAD_R4:
if (cfg->r4fp) {
ARM_ADD_REG_REG (code, ARMREG_LR, ins->inst_basereg, ARMREG_LR);
ARM_FLDS (code, ins->dreg, ARMREG_LR, 0);
} else {
code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
ARM_ADD_REG_REG (code, ARMREG_LR, ins->inst_basereg, ARMREG_LR);
ARM_FLDS (code, vfp_scratch1, ARMREG_LR, 0);
ARM_CVTS (code, ins->dreg, vfp_scratch1);
code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
}
break;
case OP_ATOMIC_LOAD_R8:
ARM_ADD_REG_REG (code, ARMREG_LR, ins->inst_basereg, ARMREG_LR);
ARM_FLDD (code, ins->dreg, ARMREG_LR, 0);
break;
}
if (ins->backend.memory_barrier_kind != MONO_MEMORY_BARRIER_NONE)
ARM_DMB (code, ARM_DMB_ISH);
break;
}
case OP_ATOMIC_STORE_I1:
case OP_ATOMIC_STORE_U1:
case OP_ATOMIC_STORE_I2:
case OP_ATOMIC_STORE_U2:
case OP_ATOMIC_STORE_I4:
case OP_ATOMIC_STORE_U4:
case OP_ATOMIC_STORE_R4:
case OP_ATOMIC_STORE_R8: {
if (ins->backend.memory_barrier_kind != MONO_MEMORY_BARRIER_NONE)
ARM_DMB (code, ARM_DMB_ISH);
code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
switch (ins->opcode) {
case OP_ATOMIC_STORE_I1:
case OP_ATOMIC_STORE_U1:
ARM_STRB_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ARMREG_LR);
break;
case OP_ATOMIC_STORE_I2:
case OP_ATOMIC_STORE_U2:
ARM_STRH_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ARMREG_LR);
break;
case OP_ATOMIC_STORE_I4:
case OP_ATOMIC_STORE_U4:
ARM_STR_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ARMREG_LR);
break;
case OP_ATOMIC_STORE_R4:
if (cfg->r4fp) {
ARM_ADD_REG_REG (code, ARMREG_LR, ins->inst_destbasereg, ARMREG_LR);
ARM_FSTS (code, ins->sreg1, ARMREG_LR, 0);
} else {
code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
ARM_ADD_REG_REG (code, ARMREG_LR, ins->inst_destbasereg, ARMREG_LR);
ARM_CVTD (code, vfp_scratch1, ins->sreg1);
ARM_FSTS (code, vfp_scratch1, ARMREG_LR, 0);
code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
}
break;
case OP_ATOMIC_STORE_R8:
ARM_ADD_REG_REG (code, ARMREG_LR, ins->inst_destbasereg, ARMREG_LR);
ARM_FSTD (code, ins->sreg1, ARMREG_LR, 0);
break;
}
if (ins->backend.memory_barrier_kind == MONO_MEMORY_BARRIER_SEQ)
ARM_DMB (code, ARM_DMB_ISH);
break;
}
case OP_BIGMUL:
ARM_SMULL_REG_REG (code, ins->backend.reg3, ins->dreg, ins->sreg1, ins->sreg2);
break;
case OP_BIGMUL_UN:
ARM_UMULL_REG_REG (code, ins->backend.reg3, ins->dreg, ins->sreg1, ins->sreg2);
break;
case OP_STOREI1_MEMBASE_IMM:
code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_imm & 0xFF);
g_assert (arm_is_imm12 (ins->inst_offset));
ARM_STRB_IMM (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
break;
case OP_STOREI2_MEMBASE_IMM:
code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_imm & 0xFFFF);
g_assert (arm_is_imm8 (ins->inst_offset));
ARM_STRH_IMM (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
break;
case OP_STORE_MEMBASE_IMM:
case OP_STOREI4_MEMBASE_IMM:
code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_imm);
g_assert (arm_is_imm12 (ins->inst_offset));
ARM_STR_IMM (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
break;
case OP_STOREI1_MEMBASE_REG:
g_assert (arm_is_imm12 (ins->inst_offset));
ARM_STRB_IMM (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
break;
case OP_STOREI2_MEMBASE_REG:
g_assert (arm_is_imm8 (ins->inst_offset));
ARM_STRH_IMM (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
break;
case OP_STORE_MEMBASE_REG:
case OP_STOREI4_MEMBASE_REG:
/* this case is special, since it happens for spill code after lowering has been called */
if (arm_is_imm12 (ins->inst_offset)) {
ARM_STR_IMM (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
} else {
code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
ARM_STR_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ARMREG_LR);
}
break;
case OP_STOREI1_MEMINDEX:
ARM_STRB_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2);
break;
case OP_STOREI2_MEMINDEX:
ARM_STRH_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2);
break;
case OP_STORE_MEMINDEX:
case OP_STOREI4_MEMINDEX:
ARM_STR_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2);
break;
case OP_LOADU4_MEM:
g_assert_not_reached ();
break;
case OP_LOAD_MEMINDEX:
case OP_LOADI4_MEMINDEX:
case OP_LOADU4_MEMINDEX:
ARM_LDR_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
break;
case OP_LOADI1_MEMINDEX:
ARM_LDRSB_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
break;
case OP_LOADU1_MEMINDEX:
ARM_LDRB_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
break;
case OP_LOADI2_MEMINDEX:
ARM_LDRSH_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
break;
case OP_LOADU2_MEMINDEX:
ARM_LDRH_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
break;
case OP_LOAD_MEMBASE:
case OP_LOADI4_MEMBASE:
case OP_LOADU4_MEMBASE:
/* this case is special, since it happens for spill code after lowering has been called */
if (arm_is_imm12 (ins->inst_offset)) {
ARM_LDR_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
} else {
code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
ARM_LDR_REG_REG (code, ins->dreg, ins->inst_basereg, ARMREG_LR);
}
break;
case OP_LOADI1_MEMBASE:
g_assert (arm_is_imm8 (ins->inst_offset));
ARM_LDRSB_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
break;
case OP_LOADU1_MEMBASE:
g_assert (arm_is_imm12 (ins->inst_offset));
ARM_LDRB_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
break;
case OP_LOADU2_MEMBASE:
g_assert (arm_is_imm8 (ins->inst_offset));
ARM_LDRH_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
break;
case OP_LOADI2_MEMBASE:
g_assert (arm_is_imm8 (ins->inst_offset));
ARM_LDRSH_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
break;
case OP_ICONV_TO_I1:
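/* Pre-v6 cores lack sxtb/sxth, so sign-extend with a shift pair */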
ARM_SHL_IMM (code, ins->dreg, ins->sreg1, 24);
ARM_SAR_IMM (code, ins->dreg, ins->dreg, 24);
break;
case OP_ICONV_TO_I2:
ARM_SHL_IMM (code, ins->dreg, ins->sreg1, 16);
ARM_SAR_IMM (code, ins->dreg, ins->dreg, 16);
break;
case OP_ICONV_TO_U1:
ARM_AND_REG_IMM8 (code, ins->dreg, ins->sreg1, 0xff);
break;
case OP_ICONV_TO_U2:
ARM_SHL_IMM (code, ins->dreg, ins->sreg1, 16);
ARM_SHR_IMM (code, ins->dreg, ins->dreg, 16);
break;
case OP_COMPARE:
case OP_ICOMPARE:
ARM_CMP_REG_REG (code, ins->sreg1, ins->sreg2);
break;
case OP_COMPARE_IMM:
case OP_ICOMPARE_IMM:
imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
g_assert (imm8 >= 0);
ARM_CMP_REG_IMM (code, ins->sreg1, imm8, rot_amount);
break;
case OP_BREAK:
/*
* gdb does not like encountering the hw breakpoint ins in the debugged code.
* So instead of emitting a trap, we emit a call to a C function and place a
* breakpoint there.
*/
//*(int*)code = 0xef9f0001;
//code += 4;
//ARM_DBRK (code);
mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_JIT_ICALL_ID,
GUINT_TO_POINTER (MONO_JIT_ICALL_mono_break));
code = emit_call_seq (cfg, code);
break;
case OP_RELAXED_NOP:
ARM_NOP (code);
break;
case OP_NOP:
case OP_DUMMY_USE:
case OP_DUMMY_ICONST:
case OP_DUMMY_R8CONST:
case OP_DUMMY_R4CONST:
case OP_NOT_REACHED:
case OP_NOT_NULL:
break;
case OP_IL_SEQ_POINT:
mono_add_seq_point (cfg, bb, ins, code - cfg->native_code);
break;
case OP_SEQ_POINT: {
int i;
MonoInst *info_var = cfg->arch.seq_point_info_var;
MonoInst *ss_trigger_page_var = cfg->arch.ss_trigger_page_var;
MonoInst *ss_method_var = cfg->arch.seq_point_ss_method_var;
MonoInst *bp_method_var = cfg->arch.seq_point_bp_method_var;
MonoInst *var;
int dreg = ARMREG_LR;
#if 0
if (cfg->soft_breakpoints) {
g_assert (!cfg->compile_aot);
}
#endif
/*
* For AOT, we use one got slot per method, which will point to a
* SeqPointInfo structure, containing all the information required
* by the code below.
*/
if (cfg->compile_aot) {
g_assert (info_var);
g_assert (info_var->opcode == OP_REGOFFSET);
}
if (!cfg->soft_breakpoints && !cfg->compile_aot) {
/*
* Read from the single stepping trigger page. This will cause a
* SIGSEGV when single stepping is enabled.
* We do this _before_ the breakpoint, so single stepping after
* a breakpoint is hit will step to the next IL offset.
*/
g_assert (((guint64)(gsize)ss_trigger_page >> 32) == 0);
}
/* Single step check */
if (ins->flags & MONO_INST_SINGLE_STEP_LOC) {
if (cfg->soft_breakpoints) {
/* Load the address of the sequence point method variable. */
var = ss_method_var;
g_assert (var);
g_assert (var->opcode == OP_REGOFFSET);
code = emit_ldr_imm (code, dreg, var->inst_basereg, var->inst_offset);
/* Read the value and check whether it is non-zero. */
ARM_LDR_IMM (code, dreg, dreg, 0);
ARM_CMP_REG_IMM (code, dreg, 0, 0);
/* Call it conditionally. */
ARM_BLX_REG_COND (code, ARMCOND_NE, dreg);
} else {
if (cfg->compile_aot) {
/* Load the trigger page addr from the variable initialized in the prolog */
var = ss_trigger_page_var;
g_assert (var);
g_assert (var->opcode == OP_REGOFFSET);
code = emit_ldr_imm (code, dreg, var->inst_basereg, var->inst_offset);
} else {
ARM_LDR_IMM (code, dreg, ARMREG_PC, 0);
ARM_B (code, 0);
*(int*)code = (int)(gsize)ss_trigger_page;
code += 4;
}
ARM_LDR_IMM (code, dreg, dreg, 0);
}
}
mono_add_seq_point (cfg, bb, ins, code - cfg->native_code);
/* Breakpoint check */
if (cfg->compile_aot) {
const guint32 offset = code - cfg->native_code;
guint32 val;
var = info_var;
code = emit_ldr_imm (code, dreg, var->inst_basereg, var->inst_offset);
/* Add the offset */
val = ((offset / 4) * sizeof (target_mgreg_t)) + MONO_STRUCT_OFFSET (SeqPointInfo, bp_addrs);
/* Load the info->bp_addrs [offset], which is either 0 or the address of a trigger page */
if (arm_is_imm12 ((int)val)) {
ARM_LDR_IMM (code, dreg, dreg, val);
} else {
ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF), 0);
if (val & 0xFF00)
ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF00) >> 8, 24);
if (val & 0xFF0000)
ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF0000) >> 16, 16);
g_assert (!(val & 0xFF000000));
ARM_LDR_IMM (code, dreg, dreg, 0);
}
/* What is faster, a branch or a load ? */
ARM_CMP_REG_IMM (code, dreg, 0, 0);
/* The breakpoint instruction */
if (cfg->soft_breakpoints)
ARM_BLX_REG_COND (code, ARMCOND_NE, dreg);
else
ARM_LDR_IMM_COND (code, dreg, dreg, 0, ARMCOND_NE);
} else if (cfg->soft_breakpoints) {
/* Load the address of the breakpoint method into ip. */
var = bp_method_var;
g_assert (var);
g_assert (var->opcode == OP_REGOFFSET);
g_assert (arm_is_imm12 (var->inst_offset));
ARM_LDR_IMM (code, dreg, var->inst_basereg, var->inst_offset);
/*
* A placeholder for a possible breakpoint inserted by
* mono_arch_set_breakpoint ().
*/
ARM_NOP (code);
} else {
/*
* A placeholder for a possible breakpoint inserted by
* mono_arch_set_breakpoint ().
*/
for (i = 0; i < 4; ++i)
ARM_NOP (code);
}
/*
* Add an additional nop so skipping the bp doesn't cause the ip to point
* to another IL offset.
*/
ARM_NOP (code);
break;
}
case OP_ADDCC:
case OP_IADDCC:
ARM_ADDS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
break;
case OP_IADD:
ARM_ADD_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
break;
case OP_ADC:
case OP_IADC:
ARM_ADCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
break;
case OP_ADDCC_IMM:
imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
g_assert (imm8 >= 0);
ARM_ADDS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
break;
case OP_ADD_IMM:
case OP_IADD_IMM:
imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
g_assert (imm8 >= 0);
ARM_ADD_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
break;
case OP_ADC_IMM:
case OP_IADC_IMM:
imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
g_assert (imm8 >= 0);
ARM_ADCS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
break;
case OP_IADD_OVF:
ARM_ADD_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
//EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
break;
case OP_IADD_OVF_UN:
ARM_ADD_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
//EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
break;
case OP_ISUB_OVF:
ARM_SUB_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
//EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
break;
case OP_ISUB_OVF_UN:
ARM_SUB_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
//EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_TRUE, PPC_BR_EQ, "OverflowException");
break;
case OP_ADD_OVF_CARRY:
ARM_ADCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
//EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
break;
case OP_ADD_OVF_UN_CARRY:
ARM_ADCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
//EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
break;
case OP_SUB_OVF_CARRY:
ARM_SBCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
//EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
break;
case OP_SUB_OVF_UN_CARRY:
ARM_SBCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
//EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_TRUE, PPC_BR_EQ, "OverflowException");
break;
case OP_SUBCC:
case OP_ISUBCC:
ARM_SUBS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
break;
case OP_SUBCC_IMM:
imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
g_assert (imm8 >= 0);
ARM_SUBS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
break;
case OP_ISUB:
ARM_SUB_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
break;
case OP_SBB:
case OP_ISBB:
ARM_SBCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
break;
case OP_SUB_IMM:
case OP_ISUB_IMM:
imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
g_assert (imm8 >= 0);
ARM_SUB_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
break;
case OP_SBB_IMM:
case OP_ISBB_IMM:
imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
g_assert (imm8 >= 0);
ARM_SBCS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
break;
case OP_ARM_RSBS_IMM:
imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
g_assert (imm8 >= 0);
ARM_RSBS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
break;
case OP_ARM_RSC_IMM:
imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
g_assert (imm8 >= 0);
ARM_RSC_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
break;
case OP_IAND:
ARM_AND_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
break;
case OP_AND_IMM:
case OP_IAND_IMM:
imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
g_assert (imm8 >= 0);
ARM_AND_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
break;
case OP_IDIV:
g_assert (v7s_supported || v7k_supported);
ARM_SDIV (code, ins->dreg, ins->sreg1, ins->sreg2);
break;
case OP_IDIV_UN:
g_assert (v7s_supported || v7k_supported);
ARM_UDIV (code, ins->dreg, ins->sreg1, ins->sreg2);
break;
case OP_IREM:
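/* dreg = sreg1 - (sreg1 / sreg2) * sreg2, computed with sdiv + mls */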
g_assert (v7s_supported || v7k_supported);
ARM_SDIV (code, ARMREG_LR, ins->sreg1, ins->sreg2);
ARM_MLS (code, ins->dreg, ARMREG_LR, ins->sreg2, ins->sreg1);
break;
case OP_IREM_UN:
g_assert (v7s_supported || v7k_supported);
ARM_UDIV (code, ARMREG_LR, ins->sreg1, ins->sreg2);
ARM_MLS (code, ins->dreg, ARMREG_LR, ins->sreg2, ins->sreg1);
break;
case OP_DIV_IMM:
case OP_REM_IMM:
g_assert_not_reached ();
case OP_IOR:
ARM_ORR_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
break;
case OP_OR_IMM:
case OP_IOR_IMM:
imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
g_assert (imm8 >= 0);
ARM_ORR_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
break;
case OP_IXOR:
ARM_EOR_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
break;
case OP_XOR_IMM:
case OP_IXOR_IMM:
imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
g_assert (imm8 >= 0);
ARM_EOR_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
break;
case OP_ISHL:
ARM_SHL_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
break;
case OP_SHL_IMM:
case OP_ISHL_IMM:
if (ins->inst_imm)
ARM_SHL_IMM (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f));
else if (ins->dreg != ins->sreg1)
ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
break;
case OP_ISHR:
ARM_SAR_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
break;
case OP_SHR_IMM:
case OP_ISHR_IMM:
if (ins->inst_imm)
ARM_SAR_IMM (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f));
else if (ins->dreg != ins->sreg1)
ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
break;
case OP_SHR_UN_IMM:
case OP_ISHR_UN_IMM:
if (ins->inst_imm)
ARM_SHR_IMM (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f));
else if (ins->dreg != ins->sreg1)
ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
break;
case OP_ISHR_UN:
ARM_SHR_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
break;
case OP_INOT:
ARM_MVN_REG_REG (code, ins->dreg, ins->sreg1);
break;
case OP_INEG:
ARM_RSB_REG_IMM8 (code, ins->dreg, ins->sreg1, 0);
break;
case OP_IMUL:
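/* Pick the operand order so that Rd != Rm: MUL with Rd == Rm is unpredictable on older cores */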
if (ins->dreg == ins->sreg2)
ARM_MUL_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
else
ARM_MUL_REG_REG (code, ins->dreg, ins->sreg2, ins->sreg1);
break;
case OP_MUL_IMM:
g_assert_not_reached ();
break;
case OP_IMUL_OVF:
/* FIXME: handle ovf/ sreg2 != dreg */
ARM_MUL_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
/* FIXME: MUL doesn't set the C/O flags on ARM */
break;
case OP_IMUL_OVF_UN:
/* FIXME: handle ovf/ sreg2 != dreg */
ARM_MUL_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
/* FIXME: MUL doesn't set the C/O flags on ARM */
break;
case OP_ICONST:
code = mono_arm_emit_load_imm (code, ins->dreg, ins->inst_c0);
break;
case OP_AOTCONST:
/* Load the GOT offset */
mono_add_patch_info (cfg, offset, (MonoJumpInfoType)(gsize)ins->inst_i1, ins->inst_p0);
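/* PC reads as the current instruction + 8, so this ldr fetches the literal word placed right after the branch */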
ARM_LDR_IMM (code, ins->dreg, ARMREG_PC, 0);
ARM_B (code, 0);
*(gpointer*)code = NULL;
code += 4;
/* Load the value from the GOT */
ARM_LDR_REG_REG (code, ins->dreg, ARMREG_PC, ins->dreg);
break;
case OP_OBJC_GET_SELECTOR:
mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_OBJC_SELECTOR_REF, ins->inst_p0);
ARM_LDR_IMM (code, ins->dreg, ARMREG_PC, 0);
ARM_B (code, 0);
*(gpointer*)code = NULL;
code += 4;
ARM_LDR_REG_REG (code, ins->dreg, ARMREG_PC, ins->dreg);
break;
case OP_ICONV_TO_I4:
case OP_ICONV_TO_U4:
case OP_MOVE:
if (ins->dreg != ins->sreg1)
ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
break;
case OP_SETLRET: {
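/* Shuffle the 64-bit return value into ARM_LSW_REG/ARM_MSW_REG; if sreg2 already lives in ARM_LSW_REG, park it in LR first so it isn't clobbered */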
int saved = ins->sreg2;
if (ins->sreg2 == ARM_LSW_REG) {
ARM_MOV_REG_REG (code, ARMREG_LR, ins->sreg2);
saved = ARMREG_LR;
}
if (ins->sreg1 != ARM_LSW_REG)
ARM_MOV_REG_REG (code, ARM_LSW_REG, ins->sreg1);
if (saved != ARM_MSW_REG)
ARM_MOV_REG_REG (code, ARM_MSW_REG, saved);
break;
}
case OP_FMOVE:
if (IS_VFP && ins->dreg != ins->sreg1)
ARM_CPYD (code, ins->dreg, ins->sreg1);
break;
case OP_RMOVE:
if (IS_VFP && ins->dreg != ins->sreg1)
ARM_CPYS (code, ins->dreg, ins->sreg1);
break;
case OP_MOVE_F_TO_I4:
if (cfg->r4fp) {
ARM_FMRS (code, ins->dreg, ins->sreg1);
} else {
code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
ARM_CVTD (code, vfp_scratch1, ins->sreg1);
ARM_FMRS (code, ins->dreg, vfp_scratch1);
code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
}
break;
case OP_MOVE_I4_TO_F:
if (cfg->r4fp) {
ARM_FMSR (code, ins->dreg, ins->sreg1);
} else {
ARM_FMSR (code, ins->dreg, ins->sreg1);
ARM_CVTS (code, ins->dreg, ins->dreg);
}
break;
case OP_FCONV_TO_R4:
if (IS_VFP) {
if (cfg->r4fp) {
ARM_CVTD (code, ins->dreg, ins->sreg1);
} else {
ARM_CVTD (code, ins->dreg, ins->sreg1);
ARM_CVTS (code, ins->dreg, ins->dreg);
}
}
break;
case OP_TAILCALL_PARAMETER:
// This opcode helps compute sizes, i.e.
// of the subsequent OP_TAILCALL, but contributes no code.
g_assert (ins->next);
break;
case OP_TAILCALL:
case OP_TAILCALL_MEMBASE:
case OP_TAILCALL_REG: {
gboolean const tailcall_membase = ins->opcode == OP_TAILCALL_MEMBASE;
gboolean const tailcall_reg = ins->opcode == OP_TAILCALL_REG;
MonoCallInst *call = (MonoCallInst*)ins;
max_len += call->stack_usage / sizeof (target_mgreg_t) * ins_get_size (OP_TAILCALL_PARAMETER);
if (IS_HARD_FLOAT)
code = emit_float_args (cfg, call, code, &max_len, &offset);
code = realloc_code (cfg, max_len);
// For reg and membase, get destination in IP.
if (tailcall_reg) {
g_assert (ins->sreg1 > -1);
if (ins->sreg1 != ARMREG_IP)
ARM_MOV_REG_REG (code, ARMREG_IP, ins->sreg1);
} else if (tailcall_membase) {
g_assert (ins->sreg1 > -1);
if (!arm_is_imm12 (ins->inst_offset)) {
g_assert (ins->sreg1 != ARMREG_IP); // temp in emit_big_add
code = emit_big_add (code, ARMREG_IP, ins->sreg1, ins->inst_offset);
ARM_LDR_IMM (code, ARMREG_IP, ARMREG_IP, 0);
} else {
ARM_LDR_IMM (code, ARMREG_IP, ins->sreg1, ins->inst_offset);
}
}
/*
* The stack looks like the following:
* <caller argument area>
* <saved regs etc>
* <rest of frame>
* <callee argument area>
* <optionally saved IP> (about to be)
* Need to copy the arguments from the callee argument area to
* the caller argument area, and pop the frame.
*/
if (call->stack_usage) {
int i, prev_sp_offset = 0;
// When we get here, the parameters to the tailcall are already formed,
// in registers and at the bottom of the grow-down stack.
//
// Our goal is generally to preserve parameters and trim the stack;
// before trimming the stack, move parameters from the bottom of the
// frame to the bottom of the trimmed frame.
// For the case of large frames, and presently therefore always,
// IP is used as an adjusted frame_reg.
// Be conservative and save IP around the movement
// of parameters from the bottom of the frame to the top of the frame.
const gboolean save_ip = tailcall_membase || tailcall_reg;
if (save_ip)
ARM_PUSH (code, 1 << ARMREG_IP);
// When moving stacked parameters from the bottom
// of the frame (sp) to the top of the frame (ip),
// account (by 0 or 4 bytes) for the conditional save of IP.
const int offset_sp = save_ip ? 4 : 0;
const int offset_ip = (save_ip && (cfg->frame_reg == ARMREG_SP)) ? 4 : 0;
/* Compute size of saved registers restored below */
if (iphone_abi)
prev_sp_offset = 2 * 4;
else
prev_sp_offset = 1 * 4;
for (i = 0; i < 16; ++i) {
if (cfg->used_int_regs & (1 << i))
prev_sp_offset += 4;
}
// Point IP at the start of where the parameters will go after trimming stack.
// After locals and saved registers.
code = emit_big_add (code, ARMREG_IP, cfg->frame_reg, cfg->stack_usage + prev_sp_offset);
/* Copy arguments on the stack to our argument area */
// FIXME a fixed size memcpy is desirable here,
// at least for larger values of stack_usage.
//
// FIXME For most functions, with frames < 4K, we can use frame_reg directly here instead of IP.
// See https://github.com/mono/mono/pull/12079
// See https://github.com/mono/mono/pull/12079/commits/93e7007a9567b78fa8152ce404b372b26e735516
for (i = 0; i < call->stack_usage; i += sizeof (target_mgreg_t)) {
ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, i + offset_sp);
ARM_STR_IMM (code, ARMREG_LR, ARMREG_IP, i + offset_ip);
}
if (save_ip)
ARM_POP (code, 1 << ARMREG_IP);
}
/*
* Keep in sync with mono_arch_emit_epilog
*/
g_assert (!cfg->method->save_lmf);
code = emit_big_add_temp (code, ARMREG_SP, cfg->frame_reg, cfg->stack_usage, ARMREG_LR);
if (iphone_abi) {
if (cfg->used_int_regs)
ARM_POP (code, cfg->used_int_regs);
ARM_POP (code, (1 << ARMREG_R7) | (1 << ARMREG_LR));
} else {
ARM_POP (code, cfg->used_int_regs | (1 << ARMREG_LR));
}
if (tailcall_reg || tailcall_membase) {
code = emit_jmp_reg (code, ARMREG_IP);
} else {
mono_add_patch_info (cfg, (guint8*) code - cfg->native_code, MONO_PATCH_INFO_METHOD_JUMP, call->method);
if (cfg->compile_aot) {
ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
ARM_B (code, 0);
*(gpointer*)code = NULL;
code += 4;
ARM_LDR_REG_REG (code, ARMREG_PC, ARMREG_PC, ARMREG_IP);
} else {
code = mono_arm_patchable_b (code, ARMCOND_AL);
cfg->thunk_area += THUNK_SIZE;
}
}
break;
}
case OP_CHECK_THIS:
/* ensure ins->sreg1 is not NULL */
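/* A byte load through a NULL pointer faults; the SIGSEGV handler converts the fault into a NullReferenceException */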
ARM_LDRB_IMM (code, ARMREG_LR, ins->sreg1, 0);
break;
case OP_ARGLIST: {
g_assert (cfg->sig_cookie < 128);
ARM_LDR_IMM (code, ARMREG_IP, cfg->frame_reg, cfg->sig_cookie);
ARM_STR_IMM (code, ARMREG_IP, ins->sreg1, 0);
break;
}
case OP_FCALL:
case OP_RCALL:
case OP_LCALL:
case OP_VCALL:
case OP_VCALL2:
case OP_VOIDCALL:
case OP_CALL:
call = (MonoCallInst*)ins;
if (IS_HARD_FLOAT)
code = emit_float_args (cfg, call, code, &max_len, &offset);
mono_call_add_patch_info (cfg, call, code - cfg->native_code);
code = emit_call_seq (cfg, code);
ins->flags |= MONO_INST_GC_CALLSITE;
ins->backend.pc_offset = code - cfg->native_code;
code = emit_move_return_value (cfg, ins, code);
break;
case OP_FCALL_REG:
case OP_RCALL_REG:
case OP_LCALL_REG:
case OP_VCALL_REG:
case OP_VCALL2_REG:
case OP_VOIDCALL_REG:
case OP_CALL_REG:
if (IS_HARD_FLOAT)
code = emit_float_args (cfg, (MonoCallInst *)ins, code, &max_len, &offset);
code = emit_call_reg (code, ins->sreg1);
ins->flags |= MONO_INST_GC_CALLSITE;
ins->backend.pc_offset = code - cfg->native_code;
code = emit_move_return_value (cfg, ins, code);
break;
case OP_FCALL_MEMBASE:
case OP_RCALL_MEMBASE:
case OP_LCALL_MEMBASE:
case OP_VCALL_MEMBASE:
case OP_VCALL2_MEMBASE:
case OP_VOIDCALL_MEMBASE:
case OP_CALL_MEMBASE: {
g_assert (ins->sreg1 != ARMREG_LR);
call = (MonoCallInst*)ins;
if (IS_HARD_FLOAT)
code = emit_float_args (cfg, call, code, &max_len, &offset);
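/* 'mov lr, pc' reads PC as the current instruction + 8, i.e. the instruction after the following load into pc, which is exactly the return address */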
if (!arm_is_imm12 (ins->inst_offset)) {
/* sreg1 might be IP */
ARM_MOV_REG_REG (code, ARMREG_LR, ins->sreg1);
code = mono_arm_emit_load_imm (code, ARMREG_IP, ins->inst_offset);
ARM_ADD_REG_REG (code, ARMREG_IP, ARMREG_IP, ARMREG_LR);
ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
ARM_LDR_IMM (code, ARMREG_PC, ARMREG_IP, 0);
} else {
ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
ARM_LDR_IMM (code, ARMREG_PC, ins->sreg1, ins->inst_offset);
}
ins->flags |= MONO_INST_GC_CALLSITE;
ins->backend.pc_offset = code - cfg->native_code;
code = emit_move_return_value (cfg, ins, code);
break;
}
case OP_GENERIC_CLASS_INIT: {
int byte_offset;
guint8 *jump;
byte_offset = MONO_STRUCT_OFFSET (MonoVTable, initialized);
g_assert (arm_is_imm8 (byte_offset));
ARM_LDRSB_IMM (code, ARMREG_IP, ins->sreg1, byte_offset);
ARM_CMP_REG_IMM (code, ARMREG_IP, 0, 0);
jump = code;
ARM_B_COND (code, ARMCOND_NE, 0);
/* Uninitialized case */
g_assert (ins->sreg1 == ARMREG_R0);
mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_JIT_ICALL_ID,
GUINT_TO_POINTER (MONO_JIT_ICALL_mono_generic_class_init));
code = emit_call_seq (cfg, code);
/* Initialized case */
arm_patch (jump, code);
break;
}
case OP_LOCALLOC: {
/* round the size to 8 bytes */
ARM_ADD_REG_IMM8 (code, ins->dreg, ins->sreg1, (MONO_ARCH_FRAME_ALIGNMENT - 1));
ARM_BIC_REG_IMM8 (code, ins->dreg, ins->dreg, (MONO_ARCH_FRAME_ALIGNMENT - 1));
ARM_SUB_REG_REG (code, ARMREG_SP, ARMREG_SP, ins->dreg);
/* memzero the area: dreg holds the size, sp is the pointer */
if (ins->flags & MONO_INST_INIT) {
guint8 *start_loop, *branch_to_cond;
ARM_MOV_REG_IMM8 (code, ARMREG_LR, 0);
branch_to_cond = code;
ARM_B (code, 0);
start_loop = code;
ARM_STR_REG_REG (code, ARMREG_LR, ARMREG_SP, ins->dreg);
arm_patch (branch_to_cond, code);
/* decrement by 4 and set flags */
ARM_SUBS_REG_IMM8 (code, ins->dreg, ins->dreg, sizeof (target_mgreg_t));
ARM_B_COND (code, ARMCOND_GE, 0);
arm_patch (code - 4, start_loop);
}
ARM_MOV_REG_REG (code, ins->dreg, ARMREG_SP);
if (cfg->param_area)
code = emit_sub_imm (code, ARMREG_SP, ARMREG_SP, ALIGN_TO (cfg->param_area, MONO_ARCH_FRAME_ALIGNMENT));
break;
}
case OP_DYN_CALL: {
int i;
MonoInst *var = cfg->dyn_call_var;
guint8 *labels [16];
g_assert (var->opcode == OP_REGOFFSET);
g_assert (arm_is_imm12 (var->inst_offset));
/* lr = args buffer filled by mono_arch_get_dyn_call_args () */
ARM_MOV_REG_REG (code, ARMREG_LR, ins->sreg1);
/* ip = ftn */
ARM_MOV_REG_REG (code, ARMREG_IP, ins->sreg2);
/* Save args buffer */
ARM_STR_IMM (code, ARMREG_LR, var->inst_basereg, var->inst_offset);
/* Set fp argument registers */
if (IS_HARD_FLOAT) {
ARM_LDR_IMM (code, ARMREG_R0, ARMREG_LR, MONO_STRUCT_OFFSET (DynCallArgs, has_fpregs));
ARM_CMP_REG_IMM (code, ARMREG_R0, 0, 0);
labels [0] = code;
ARM_B_COND (code, ARMCOND_EQ, 0);
for (i = 0; i < FP_PARAM_REGS; ++i) {
const int offset = MONO_STRUCT_OFFSET (DynCallArgs, fpregs) + (i * sizeof (double));
g_assert (arm_is_fpimm8 (offset));
ARM_FLDD (code, i * 2, ARMREG_LR, offset);
}
arm_patch (labels [0], code);
}
/* Allocate callee area */
ARM_LDR_IMM (code, ARMREG_R1, ARMREG_LR, MONO_STRUCT_OFFSET (DynCallArgs, n_stackargs));
ARM_SHL_IMM (code, ARMREG_R1, ARMREG_R1, 2);
ARM_SUB_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_R1);
/* Set stack args */
/* R1 = limit */
ARM_LDR_IMM (code, ARMREG_R1, ARMREG_LR, MONO_STRUCT_OFFSET (DynCallArgs, n_stackargs));
/* R2 = pointer into regs */
code = emit_big_add (code, ARMREG_R2, ARMREG_LR, MONO_STRUCT_OFFSET (DynCallArgs, regs) + (PARAM_REGS * sizeof (target_mgreg_t)));
/* R3 = pointer to stack */
ARM_MOV_REG_REG (code, ARMREG_R3, ARMREG_SP);
/* Loop */
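/* Bottom-tested loop: branch to the test first, then copy one machine word per iteration while R1 > 0 */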
labels [0] = code;
ARM_B_COND (code, ARMCOND_AL, 0);
labels [1] = code;
ARM_LDR_IMM (code, ARMREG_R0, ARMREG_R2, 0);
ARM_STR_IMM (code, ARMREG_R0, ARMREG_R3, 0);
ARM_ADD_REG_IMM (code, ARMREG_R2, ARMREG_R2, sizeof (target_mgreg_t), 0);
ARM_ADD_REG_IMM (code, ARMREG_R3, ARMREG_R3, sizeof (target_mgreg_t), 0);
ARM_SUB_REG_IMM (code, ARMREG_R1, ARMREG_R1, 1, 0);
arm_patch (labels [0], code);
ARM_CMP_REG_IMM (code, ARMREG_R1, 0, 0);
labels [2] = code;
ARM_B_COND (code, ARMCOND_GT, 0);
arm_patch (labels [2], labels [1]);
/* Set argument registers */
for (i = 0; i < PARAM_REGS; ++i)
ARM_LDR_IMM (code, i, ARMREG_LR, MONO_STRUCT_OFFSET (DynCallArgs, regs) + (i * sizeof (target_mgreg_t)));
/* Make the call */
ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
/* Save result */
ARM_LDR_IMM (code, ARMREG_IP, var->inst_basereg, var->inst_offset);
ARM_STR_IMM (code, ARMREG_R0, ARMREG_IP, MONO_STRUCT_OFFSET (DynCallArgs, res));
ARM_STR_IMM (code, ARMREG_R1, ARMREG_IP, MONO_STRUCT_OFFSET (DynCallArgs, res2));
if (IS_HARD_FLOAT)
ARM_FSTD (code, ARM_VFP_D0, ARMREG_IP, MONO_STRUCT_OFFSET (DynCallArgs, fpregs));
break;
}
case OP_THROW: {
if (ins->sreg1 != ARMREG_R0)
ARM_MOV_REG_REG (code, ARMREG_R0, ins->sreg1);
mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_JIT_ICALL_ID,
GUINT_TO_POINTER (MONO_JIT_ICALL_mono_arch_throw_exception));
code = emit_call_seq (cfg, code);
break;
}
case OP_RETHROW: {
if (ins->sreg1 != ARMREG_R0)
ARM_MOV_REG_REG (code, ARMREG_R0, ins->sreg1);
mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_JIT_ICALL_ID,
GUINT_TO_POINTER (MONO_JIT_ICALL_mono_arch_rethrow_exception));
code = emit_call_seq (cfg, code);
break;
}
case OP_START_HANDLER: {
MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
int param_area = ALIGN_TO (cfg->param_area, MONO_ARCH_FRAME_ALIGNMENT);
int i, rot_amount;
/* Reserve a param area, see filter-stack.exe */
if (param_area) {
if ((i = mono_arm_is_rotated_imm8 (param_area, &rot_amount)) >= 0) {
ARM_SUB_REG_IMM (code, ARMREG_SP, ARMREG_SP, i, rot_amount);
} else {
code = mono_arm_emit_load_imm (code, ARMREG_IP, param_area);
ARM_SUB_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
}
}
if (arm_is_imm12 (spvar->inst_offset)) {
ARM_STR_IMM (code, ARMREG_LR, spvar->inst_basereg, spvar->inst_offset);
} else {
code = mono_arm_emit_load_imm (code, ARMREG_IP, spvar->inst_offset);
ARM_STR_REG_REG (code, ARMREG_LR, spvar->inst_basereg, ARMREG_IP);
}
break;
}
case OP_ENDFILTER: {
MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
int param_area = ALIGN_TO (cfg->param_area, MONO_ARCH_FRAME_ALIGNMENT);
int i, rot_amount;
/* Free the param area */
if (param_area) {
if ((i = mono_arm_is_rotated_imm8 (param_area, &rot_amount)) >= 0) {
ARM_ADD_REG_IMM (code, ARMREG_SP, ARMREG_SP, i, rot_amount);
} else {
code = mono_arm_emit_load_imm (code, ARMREG_IP, param_area);
ARM_ADD_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
}
}
if (ins->sreg1 != ARMREG_R0)
ARM_MOV_REG_REG (code, ARMREG_R0, ins->sreg1);
if (arm_is_imm12 (spvar->inst_offset)) {
ARM_LDR_IMM (code, ARMREG_IP, spvar->inst_basereg, spvar->inst_offset);
} else {
g_assert (ARMREG_IP != spvar->inst_basereg);
code = mono_arm_emit_load_imm (code, ARMREG_IP, spvar->inst_offset);
ARM_LDR_REG_REG (code, ARMREG_IP, spvar->inst_basereg, ARMREG_IP);
}
ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
break;
}
case OP_ENDFINALLY: {
MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
int param_area = ALIGN_TO (cfg->param_area, MONO_ARCH_FRAME_ALIGNMENT);
int i, rot_amount;
/* Free the param area */
if (param_area) {
if ((i = mono_arm_is_rotated_imm8 (param_area, &rot_amount)) >= 0) {
ARM_ADD_REG_IMM (code, ARMREG_SP, ARMREG_SP, i, rot_amount);
} else {
code = mono_arm_emit_load_imm (code, ARMREG_IP, param_area);
ARM_ADD_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
}
}
if (arm_is_imm12 (spvar->inst_offset)) {
ARM_LDR_IMM (code, ARMREG_IP, spvar->inst_basereg, spvar->inst_offset);
} else {
g_assert (ARMREG_IP != spvar->inst_basereg);
code = mono_arm_emit_load_imm (code, ARMREG_IP, spvar->inst_offset);
ARM_LDR_REG_REG (code, ARMREG_IP, spvar->inst_basereg, ARMREG_IP);
}
ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
break;
}
case OP_CALL_HANDLER:
mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_target_bb);
code = mono_arm_patchable_bl (code, ARMCOND_AL);
cfg->thunk_area += THUNK_SIZE;
for (GList *tmp = ins->inst_eh_blocks; tmp != bb->clause_holes; tmp = tmp->prev)
mono_cfg_add_try_hole (cfg, ((MonoLeaveClause *) tmp->data)->clause, code, bb);
break;
case OP_GET_EX_OBJ:
if (ins->dreg != ARMREG_R0)
ARM_MOV_REG_REG (code, ins->dreg, ARMREG_R0);
break;
case OP_LABEL:
ins->inst_c0 = code - cfg->native_code;
break;
case OP_BR:
/*if (ins->inst_target_bb->native_offset) {
ARM_B (code, 0);
//x86_jump_code (code, cfg->native_code + ins->inst_target_bb->native_offset);
} else*/ {
mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_target_bb);
code = mono_arm_patchable_b (code, ARMCOND_AL);
}
break;
case OP_BR_REG:
ARM_MOV_REG_REG (code, ARMREG_PC, ins->sreg1);
break;
case OP_SWITCH:
/*
* In the normal case we have:
* ldr pc, [pc, ins->sreg1 << 2]
* nop
* If aot, we have:
* ldr lr, [pc, ins->sreg1 << 2]
* add pc, pc, lr
* The jump table data follows.
* FIXME: add aot support.
*/
mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_SWITCH, ins->inst_p0);
max_len += 4 * GPOINTER_TO_INT (ins->klass);
code = realloc_code (cfg, max_len);
ARM_LDR_REG_REG_SHIFT (code, ARMREG_PC, ARMREG_PC, ins->sreg1, ARMSHIFT_LSL, 2);
ARM_NOP (code);
code += 4 * GPOINTER_TO_INT (ins->klass);
break;
case OP_CEQ:
case OP_ICEQ:
ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_NE);
ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_EQ);
break;
case OP_CLT:
case OP_ICLT:
ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_LT);
break;
case OP_CLT_UN:
case OP_ICLT_UN:
ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_LO);
break;
case OP_CGT:
case OP_ICGT:
ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_GT);
break;
case OP_CGT_UN:
case OP_ICGT_UN:
ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_HI);
break;
case OP_ICNEQ:
ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_NE);
ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_EQ);
break;
case OP_ICGE:
ARM_MOV_REG_IMM8 (code, ins->dreg, 1);
ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_LT);
break;
case OP_ICLE:
ARM_MOV_REG_IMM8 (code, ins->dreg, 1);
ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_GT);
break;
case OP_ICGE_UN:
ARM_MOV_REG_IMM8 (code, ins->dreg, 1);
ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_LO);
break;
case OP_ICLE_UN:
ARM_MOV_REG_IMM8 (code, ins->dreg, 1);
ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_HI);
break;
case OP_COND_EXC_EQ:
case OP_COND_EXC_NE_UN:
case OP_COND_EXC_LT:
case OP_COND_EXC_LT_UN:
case OP_COND_EXC_GT:
case OP_COND_EXC_GT_UN:
case OP_COND_EXC_GE:
case OP_COND_EXC_GE_UN:
case OP_COND_EXC_LE:
case OP_COND_EXC_LE_UN:
EMIT_COND_SYSTEM_EXCEPTION (ins->opcode - OP_COND_EXC_EQ, ins->inst_p1);
break;
case OP_COND_EXC_IEQ:
case OP_COND_EXC_INE_UN:
case OP_COND_EXC_ILT:
case OP_COND_EXC_ILT_UN:
case OP_COND_EXC_IGT:
case OP_COND_EXC_IGT_UN:
case OP_COND_EXC_IGE:
case OP_COND_EXC_IGE_UN:
case OP_COND_EXC_ILE:
case OP_COND_EXC_ILE_UN:
EMIT_COND_SYSTEM_EXCEPTION (ins->opcode - OP_COND_EXC_IEQ, ins->inst_p1);
break;
case OP_COND_EXC_C:
case OP_COND_EXC_IC:
EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_CS, ins->inst_p1);
break;
case OP_COND_EXC_OV:
case OP_COND_EXC_IOV:
EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_VS, ins->inst_p1);
break;
case OP_COND_EXC_NC:
case OP_COND_EXC_INC:
EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_CC, ins->inst_p1);
break;
case OP_COND_EXC_NO:
case OP_COND_EXC_INO:
EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_VC, ins->inst_p1);
break;
case OP_IBEQ:
case OP_IBNE_UN:
case OP_IBLT:
case OP_IBLT_UN:
case OP_IBGT:
case OP_IBGT_UN:
case OP_IBGE:
case OP_IBGE_UN:
case OP_IBLE:
case OP_IBLE_UN:
EMIT_COND_BRANCH (ins, ins->opcode - OP_IBEQ);
break;
/* floating point opcodes */
case OP_R8CONST:
if (cfg->compile_aot) {
ARM_FLDD (code, ins->dreg, ARMREG_PC, 0);
ARM_B (code, 1);
*(guint32*)code = ((guint32*)(ins->inst_p0))[0];
code += 4;
*(guint32*)code = ((guint32*)(ins->inst_p0))[1];
code += 4;
} else {
/* FIXME: we can optimize the imm load by dealing with part of
* the displacement in LDFD (aligning to 512).
*/
code = mono_arm_emit_load_imm (code, ARMREG_LR, (guint32)(gsize)ins->inst_p0);
ARM_FLDD (code, ins->dreg, ARMREG_LR, 0);
}
break;
case OP_R4CONST:
if (cfg->compile_aot) {
ARM_FLDS (code, ins->dreg, ARMREG_PC, 0);
ARM_B (code, 0);
*(guint32*)code = ((guint32*)(ins->inst_p0))[0];
code += 4;
if (!cfg->r4fp)
ARM_CVTS (code, ins->dreg, ins->dreg);
} else {
code = mono_arm_emit_load_imm (code, ARMREG_LR, (guint32)(gsize)ins->inst_p0);
ARM_FLDS (code, ins->dreg, ARMREG_LR, 0);
if (!cfg->r4fp)
ARM_CVTS (code, ins->dreg, ins->dreg);
}
break;
case OP_STORER8_MEMBASE_REG:
/* This is generated by the local regalloc pass which runs after the lowering pass */
if (!arm_is_fpimm8 (ins->inst_offset)) {
code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
ARM_ADD_REG_REG (code, ARMREG_LR, ARMREG_LR, ins->inst_destbasereg);
ARM_FSTD (code, ins->sreg1, ARMREG_LR, 0);
} else {
ARM_FSTD (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
}
break;
case OP_LOADR8_MEMBASE:
/* This is generated by the local regalloc pass which runs after the lowering pass */
if (!arm_is_fpimm8 (ins->inst_offset)) {
code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
ARM_ADD_REG_REG (code, ARMREG_LR, ARMREG_LR, ins->inst_basereg);
ARM_FLDD (code, ins->dreg, ARMREG_LR, 0);
} else {
ARM_FLDD (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
}
break;
case OP_STORER4_MEMBASE_REG:
g_assert (arm_is_fpimm8 (ins->inst_offset));
if (cfg->r4fp) {
ARM_FSTS (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
} else {
code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
ARM_CVTD (code, vfp_scratch1, ins->sreg1);
ARM_FSTS (code, vfp_scratch1, ins->inst_destbasereg, ins->inst_offset);
code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
}
break;
case OP_LOADR4_MEMBASE:
if (cfg->r4fp) {
ARM_FLDS (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
} else {
g_assert (arm_is_fpimm8 (ins->inst_offset));
code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
ARM_FLDS (code, vfp_scratch1, ins->inst_basereg, ins->inst_offset);
ARM_CVTS (code, ins->dreg, vfp_scratch1);
code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
}
break;
case OP_ICONV_TO_R_UN: {
g_assert_not_reached ();
break;
}
case OP_ICONV_TO_R4:
if (cfg->r4fp) {
ARM_FMSR (code, ins->dreg, ins->sreg1);
ARM_FSITOS (code, ins->dreg, ins->dreg);
} else {
code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
ARM_FMSR (code, vfp_scratch1, ins->sreg1);
ARM_FSITOS (code, vfp_scratch1, vfp_scratch1);
ARM_CVTS (code, ins->dreg, vfp_scratch1);
code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
}
break;
case OP_ICONV_TO_R8:
code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
ARM_FMSR (code, vfp_scratch1, ins->sreg1);
ARM_FSITOD (code, ins->dreg, vfp_scratch1);
code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
break;
case OP_SETFRET: {
MonoType *sig_ret = mini_get_underlying_type (mono_method_signature_internal (cfg->method)->ret);
if (sig_ret->type == MONO_TYPE_R4) {
if (cfg->r4fp) {
if (IS_HARD_FLOAT) {
if (ins->sreg1 != ARM_VFP_D0)
ARM_CPYS (code, ARM_VFP_D0, ins->sreg1);
} else {
ARM_FMRS (code, ARMREG_R0, ins->sreg1);
}
} else {
ARM_CVTD (code, ARM_VFP_F0, ins->sreg1);
if (!IS_HARD_FLOAT)
ARM_FMRS (code, ARMREG_R0, ARM_VFP_F0);
}
} else {
if (IS_HARD_FLOAT)
ARM_CPYD (code, ARM_VFP_D0, ins->sreg1);
else
ARM_FMRRD (code, ARMREG_R0, ARMREG_R1, ins->sreg1);
}
break;
}
case OP_FCONV_TO_I1:
code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 1, TRUE);
break;
case OP_FCONV_TO_U1:
code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 1, FALSE);
break;
case OP_FCONV_TO_I2:
code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 2, TRUE);
break;
case OP_FCONV_TO_U2:
code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 2, FALSE);
break;
case OP_FCONV_TO_I4:
case OP_FCONV_TO_I:
code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 4, TRUE);
break;
case OP_FCONV_TO_U4:
code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 4, FALSE);
break;
case OP_FCONV_TO_I8:
case OP_FCONV_TO_U8:
g_assert_not_reached ();
/* Implemented as helper calls */
break;
case OP_LCONV_TO_R_UN:
g_assert_not_reached ();
/* Implemented as helper calls */
break;
case OP_LCONV_TO_OVF_I4_2: {
guint8 *high_bit_not_set, *valid_negative, *invalid_negative, *valid_positive;
/*
* Valid ints (high:low): 0xFFFFFFFF:80000000 to 0x00000000:7FFFFFFF
*/
ARM_CMP_REG_IMM8 (code, ins->sreg1, 0);
high_bit_not_set = code;
ARM_B_COND (code, ARMCOND_GE, 0); /*branch if bit 31 of the lower part is not set*/
ARM_CMN_REG_IMM8 (code, ins->sreg2, 1); /* This has the same effect as CMP reg, 0xFFFFFFFF */
valid_negative = code;
ARM_B_COND (code, ARMCOND_EQ, 0); /*branch if upper part == 0xFFFFFFFF (lower part has bit 31 set) */
invalid_negative = code;
ARM_B_COND (code, ARMCOND_AL, 0);
arm_patch (high_bit_not_set, code);
ARM_CMP_REG_IMM8 (code, ins->sreg2, 0);
valid_positive = code;
ARM_B_COND (code, ARMCOND_EQ, 0); /*branch if upper part == 0 (lower part has bit 31 clear)*/
arm_patch (invalid_negative, code);
EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_AL, "OverflowException");
arm_patch (valid_negative, code);
arm_patch (valid_positive, code);
if (ins->dreg != ins->sreg1)
ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
break;
}
case OP_FADD:
ARM_VFP_ADDD (code, ins->dreg, ins->sreg1, ins->sreg2);
break;
case OP_FSUB:
ARM_VFP_SUBD (code, ins->dreg, ins->sreg1, ins->sreg2);
break;
case OP_FMUL:
ARM_VFP_MULD (code, ins->dreg, ins->sreg1, ins->sreg2);
break;
case OP_FDIV:
ARM_VFP_DIVD (code, ins->dreg, ins->sreg1, ins->sreg2);
break;
case OP_FNEG:
ARM_NEGD (code, ins->dreg, ins->sreg1);
break;
case OP_FREM:
/* emulated */
g_assert_not_reached ();
break;
case OP_FCOMPARE:
if (IS_VFP) {
ARM_CMPD (code, ins->sreg1, ins->sreg2);
ARM_FMSTAT (code);
}
break;
case OP_RCOMPARE:
g_assert (IS_VFP);
ARM_CMPS (code, ins->sreg1, ins->sreg2);
ARM_FMSTAT (code);
break;
case OP_FCEQ:
if (IS_VFP) {
ARM_CMPD (code, ins->sreg1, ins->sreg2);
ARM_FMSTAT (code);
}
ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_NE);
ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_EQ);
break;
case OP_FCLT:
if (IS_VFP) {
ARM_CMPD (code, ins->sreg1, ins->sreg2);
ARM_FMSTAT (code);
}
ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
break;
case OP_FCLT_UN:
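/* For the unordered (_UN) variants, VS (a NaN operand) also counts as true */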
if (IS_VFP) {
ARM_CMPD (code, ins->sreg1, ins->sreg2);
ARM_FMSTAT (code);
}
ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_VS);
break;
case OP_FCGT:
if (IS_VFP) {
ARM_CMPD (code, ins->sreg2, ins->sreg1);
ARM_FMSTAT (code);
}
ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
break;
case OP_FCGT_UN:
if (IS_VFP) {
ARM_CMPD (code, ins->sreg2, ins->sreg1);
ARM_FMSTAT (code);
}
ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_VS);
break;
case OP_FCNEQ:
if (IS_VFP) {
ARM_CMPD (code, ins->sreg1, ins->sreg2);
ARM_FMSTAT (code);
}
ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_NE);
ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_EQ);
break;
case OP_FCGE:
if (IS_VFP) {
ARM_CMPD (code, ins->sreg1, ins->sreg2);
ARM_FMSTAT (code);
}
ARM_MOV_REG_IMM8 (code, ins->dreg, 1);
ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_MI);
break;
case OP_FCLE:
if (IS_VFP) {
ARM_CMPD (code, ins->sreg2, ins->sreg1);
ARM_FMSTAT (code);
}
ARM_MOV_REG_IMM8 (code, ins->dreg, 1);
ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_MI);
break;
/* ARM FPA flags table:
* N Less than ARMCOND_MI
* Z Equal ARMCOND_EQ
* C Greater Than or Equal ARMCOND_CS
* V Unordered ARMCOND_VS
*/
case OP_FBEQ:
EMIT_COND_BRANCH (ins, OP_IBEQ - OP_IBEQ);
break;
case OP_FBNE_UN:
EMIT_COND_BRANCH (ins, OP_IBNE_UN - OP_IBEQ);
break;
case OP_FBLT:
EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_MI); /* N set */
break;
case OP_FBLT_UN:
EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_VS); /* V set */
EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_MI); /* N set */
break;
case OP_FBGT:
case OP_FBGT_UN:
case OP_FBLE:
case OP_FBLE_UN:
g_assert_not_reached ();
break;
case OP_FBGE:
if (IS_VFP) {
EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_GE);
} else {
/* FPA requires EQ even though the docs suggest that just CS is enough */
EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_EQ);
EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_CS);
}
break;
case OP_FBGE_UN:
EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_VS); /* V set */
EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_GE);
break;
case OP_CKFINITE: {
if (IS_VFP) {
code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch2);
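/* |sreg1| greater than the largest finite double (0x7FEFFFFF:FFFFFFFF, the inline literal below) means infinity; the later self-compare sets VS for NaN */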
ARM_ABSD (code, vfp_scratch2, ins->sreg1);
ARM_FLDD (code, vfp_scratch1, ARMREG_PC, 0);
ARM_B (code, 1);
*(guint32*)code = 0xffffffff;
code += 4;
*(guint32*)code = 0x7fefffff;
code += 4;
ARM_CMPD (code, vfp_scratch2, vfp_scratch1);
ARM_FMSTAT (code);
EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_GT, "OverflowException");
ARM_CMPD (code, ins->sreg1, ins->sreg1);
ARM_FMSTAT (code);
EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_VS, "OverflowException");
ARM_CPYD (code, ins->dreg, ins->sreg1);
code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch2);
}
break;
}
case OP_RCONV_TO_I1:
code = emit_r4_to_int (cfg, code, ins->dreg, ins->sreg1, 1, TRUE);
break;
case OP_RCONV_TO_U1:
code = emit_r4_to_int (cfg, code, ins->dreg, ins->sreg1, 1, FALSE);
break;
case OP_RCONV_TO_I2:
code = emit_r4_to_int (cfg, code, ins->dreg, ins->sreg1, 2, TRUE);
break;
case OP_RCONV_TO_U2:
code = emit_r4_to_int (cfg, code, ins->dreg, ins->sreg1, 2, FALSE);
break;
case OP_RCONV_TO_I4:
case OP_RCONV_TO_I:
code = emit_r4_to_int (cfg, code, ins->dreg, ins->sreg1, 4, TRUE);
break;
case OP_RCONV_TO_U4:
code = emit_r4_to_int (cfg, code, ins->dreg, ins->sreg1, 4, FALSE);
break;
case OP_RCONV_TO_R4:
g_assert (IS_VFP);
if (ins->dreg != ins->sreg1)
ARM_CPYS (code, ins->dreg, ins->sreg1);
break;
case OP_RCONV_TO_R8:
g_assert (IS_VFP);
ARM_CVTS (code, ins->dreg, ins->sreg1);
break;
case OP_RADD:
ARM_VFP_ADDS (code, ins->dreg, ins->sreg1, ins->sreg2);
break;
case OP_RSUB:
ARM_VFP_SUBS (code, ins->dreg, ins->sreg1, ins->sreg2);
break;
case OP_RMUL:
ARM_VFP_MULS (code, ins->dreg, ins->sreg1, ins->sreg2);
break;
case OP_RDIV:
ARM_VFP_DIVS (code, ins->dreg, ins->sreg1, ins->sreg2);
break;
case OP_RNEG:
ARM_NEGS (code, ins->dreg, ins->sreg1);
break;
case OP_RCEQ:
if (IS_VFP) {
ARM_CMPS (code, ins->sreg1, ins->sreg2);
ARM_FMSTAT (code);
}
ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_NE);
ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_EQ);
break;
case OP_RCLT:
if (IS_VFP) {
ARM_CMPS (code, ins->sreg1, ins->sreg2);
ARM_FMSTAT (code);
}
ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
break;
case OP_RCLT_UN:
if (IS_VFP) {
ARM_CMPS (code, ins->sreg1, ins->sreg2);
ARM_FMSTAT (code);
}
ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_VS);
break;
case OP_RCGT:
if (IS_VFP) {
ARM_CMPS (code, ins->sreg2, ins->sreg1);
ARM_FMSTAT (code);
}
ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
break;
case OP_RCGT_UN:
if (IS_VFP) {
ARM_CMPS (code, ins->sreg2, ins->sreg1);
ARM_FMSTAT (code);
}
ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_VS);
break;
case OP_RCNEQ:
if (IS_VFP) {
ARM_CMPS (code, ins->sreg1, ins->sreg2);
ARM_FMSTAT (code);
}
ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_NE);
ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_EQ);
break;
case OP_RCGE:
if (IS_VFP) {
ARM_CMPS (code, ins->sreg1, ins->sreg2);
ARM_FMSTAT (code);
}
ARM_MOV_REG_IMM8 (code, ins->dreg, 1);
ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_MI);
break;
case OP_RCLE:
if (IS_VFP) {
ARM_CMPS (code, ins->sreg2, ins->sreg1);
ARM_FMSTAT (code);
}
ARM_MOV_REG_IMM8 (code, ins->dreg, 1);
ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_MI);
break;
case OP_GC_LIVENESS_DEF:
case OP_GC_LIVENESS_USE:
case OP_GC_PARAM_SLOT_LIVENESS_DEF:
ins->backend.pc_offset = code - cfg->native_code;
break;
case OP_GC_SPILL_SLOT_LIVENESS_DEF:
ins->backend.pc_offset = code - cfg->native_code;
bb->spill_slot_defs = g_slist_prepend_mempool (cfg->mempool, bb->spill_slot_defs, ins);
break;
case OP_LIVERANGE_START: {
if (cfg->verbose_level > 1)
printf ("R%d START=0x%x\n", MONO_VARINFO (cfg, ins->inst_c0)->vreg, (int)(code - cfg->native_code));
MONO_VARINFO (cfg, ins->inst_c0)->live_range_start = code - cfg->native_code;
break;
}
case OP_LIVERANGE_END: {
if (cfg->verbose_level > 1)
printf ("R%d END=0x%x\n", MONO_VARINFO (cfg, ins->inst_c0)->vreg, (int)(code - cfg->native_code));
MONO_VARINFO (cfg, ins->inst_c0)->live_range_end = code - cfg->native_code;
break;
}
case OP_GC_SAFE_POINT: {
guint8 *buf [1];
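/* sreg1 holds the address of the thread's polling word: if it is non-zero, call mono_threads_state_poll () so the thread can be suspended */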
ARM_LDR_IMM (code, ARMREG_IP, ins->sreg1, 0);
ARM_CMP_REG_IMM (code, ARMREG_IP, 0, 0);
buf [0] = code;
ARM_B_COND (code, ARMCOND_EQ, 0);
mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_JIT_ICALL_ID, GUINT_TO_POINTER (MONO_JIT_ICALL_mono_threads_state_poll));
code = emit_call_seq (cfg, code);
arm_patch (buf [0], code);
break;
}
case OP_FILL_PROF_CALL_CTX:
for (int i = 0; i < ARMREG_MAX; i++)
if ((MONO_ARCH_CALLEE_SAVED_REGS & (1 << i)) || i == ARMREG_SP || i == ARMREG_FP)
ARM_STR_IMM (code, i, ins->sreg1, MONO_STRUCT_OFFSET (MonoContext, regs) + i * sizeof (target_mgreg_t));
break;
default:
g_warning ("unknown opcode %s in %s()\n", mono_inst_name (ins->opcode), __FUNCTION__);
g_assert_not_reached ();
}
if ((cfg->opt & MONO_OPT_BRANCH) && ((code - cfg->native_code - offset) > max_len)) {
g_warning ("wrong maximal instruction length of instruction %s (expected %d, got %d)",
mono_inst_name (ins->opcode), max_len, code - cfg->native_code - offset);
g_assert_not_reached ();
}
cpos += max_len;
last_ins = ins;
}
set_code_cursor (cfg, code);
}
#endif /* DISABLE_JIT */
void
mono_arch_register_lowlevel_calls (void)
{
/* The signature doesn't matter */
mono_register_jit_icall (mono_arm_throw_exception, mono_icall_sig_void, TRUE);
mono_register_jit_icall (mono_arm_throw_exception_by_token, mono_icall_sig_void, TRUE);
mono_register_jit_icall (mono_arm_unaligned_stack, mono_icall_sig_void, TRUE);
}
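/* Leftover from the PPC port: lis/ori are PowerPC opcodes. The patch cases below that still use this assert not-reached on ARM. */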
#define patch_lis_ori(ip,val) do {\
guint16 *__lis_ori = (guint16*)(ip); \
__lis_ori [1] = (((guint32)(gsize)(val)) >> 16) & 0xffff; \
__lis_ori [3] = ((guint32)(gsize)(val)) & 0xffff; \
} while (0)
void
mono_arch_patch_code_new (MonoCompile *cfg, guint8 *code, MonoJumpInfo *ji, gpointer target)
{
unsigned char *ip = ji->ip.i + code;
switch (ji->type) {
case MONO_PATCH_INFO_SWITCH: {
gpointer *jt = (gpointer*)(ip + 8);
int i;
/* jt is the inlined jump table, 2 instructions after ip
* In the normal case we store the absolute addresses,
* otherwise the displacements.
*/
for (i = 0; i < ji->data.table->table_size; i++)
jt [i] = code + (int)(gsize)ji->data.table->table [i];
break;
}
case MONO_PATCH_INFO_IP:
g_assert_not_reached ();
patch_lis_ori (ip, ip);
break;
case MONO_PATCH_INFO_METHODCONST:
case MONO_PATCH_INFO_CLASS:
case MONO_PATCH_INFO_IMAGE:
case MONO_PATCH_INFO_FIELD:
case MONO_PATCH_INFO_VTABLE:
case MONO_PATCH_INFO_IID:
case MONO_PATCH_INFO_SFLDA:
case MONO_PATCH_INFO_LDSTR:
case MONO_PATCH_INFO_TYPE_FROM_HANDLE:
case MONO_PATCH_INFO_LDTOKEN:
g_assert_not_reached ();
/* from OP_AOTCONST : lis + ori */
patch_lis_ori (ip, target);
break;
case MONO_PATCH_INFO_R4:
case MONO_PATCH_INFO_R8:
g_assert_not_reached ();
*((gconstpointer *)(ip + 2)) = target;
break;
case MONO_PATCH_INFO_EXC_NAME:
g_assert_not_reached ();
*((gconstpointer *)(ip + 1)) = target;
break;
case MONO_PATCH_INFO_NONE:
case MONO_PATCH_INFO_BB_OVF:
case MONO_PATCH_INFO_EXC_OVF:
/* everything is dealt with at epilog output time */
break;
default:
arm_patch_general (cfg, ip, (const guchar*)target);
break;
}
}
void
mono_arm_unaligned_stack (MonoMethod *method)
{
g_assert_not_reached ();
}
#ifndef DISABLE_JIT
/*
* Stack frame layout:
*
* ------------------- fp
* MonoLMF structure or saved registers
* -------------------
* locals
* -------------------
* spilled regs
* -------------------
* param area size is cfg->param_area
* ------------------- sp
*/
guint8 *
mono_arch_emit_prolog (MonoCompile *cfg)
{
MonoMethod *method = cfg->method;
MonoBasicBlock *bb;
MonoMethodSignature *sig;
MonoInst *inst;
int alloc_size, orig_alloc_size, pos, max_offset, i, rot_amount, part;
guint8 *code;
CallInfo *cinfo;
int lmf_offset = 0;
int prev_sp_offset, reg_offset;
sig = mono_method_signature_internal (method);
cfg->code_size = 256 + sig->param_count * 64;
code = cfg->native_code = g_malloc (cfg->code_size);
mono_emit_unwind_op_def_cfa (cfg, code, ARMREG_SP, 0);
alloc_size = cfg->stack_offset;
pos = 0;
prev_sp_offset = 0;
if (iphone_abi) {
/*
* The iphone uses R7 as the frame pointer, and it points at the saved
* r7+lr:
* <lr>
* r7 -> <r7>
* <rest of frame>
* We can't use r7 as a frame pointer since it points into the middle of
* the frame, so we keep using our own frame pointer.
* FIXME: Optimize this.
*/
ARM_PUSH (code, (1 << ARMREG_R7) | (1 << ARMREG_LR));
prev_sp_offset += 8; /* r7 and lr */
mono_emit_unwind_op_def_cfa_offset (cfg, code, prev_sp_offset);
mono_emit_unwind_op_offset (cfg, code, ARMREG_R7, (- prev_sp_offset) + 0);
ARM_MOV_REG_REG (code, ARMREG_R7, ARMREG_SP);
}
if (!method->save_lmf) {
if (iphone_abi) {
/* No need to push LR again */
if (cfg->used_int_regs)
ARM_PUSH (code, cfg->used_int_regs);
} else {
ARM_PUSH (code, cfg->used_int_regs | (1 << ARMREG_LR));
prev_sp_offset += 4;
}
for (i = 0; i < 16; ++i) {
if (cfg->used_int_regs & (1 << i))
prev_sp_offset += 4;
}
mono_emit_unwind_op_def_cfa_offset (cfg, code, prev_sp_offset);
reg_offset = 0;
for (i = 0; i < 16; ++i) {
if ((cfg->used_int_regs & (1 << i))) {
mono_emit_unwind_op_offset (cfg, code, i, (- prev_sp_offset) + reg_offset);
mini_gc_set_slot_type_from_cfa (cfg, (- prev_sp_offset) + reg_offset, SLOT_NOREF);
reg_offset += 4;
}
}
mono_emit_unwind_op_offset (cfg, code, ARMREG_LR, -4);
mini_gc_set_slot_type_from_cfa (cfg, -4, SLOT_NOREF);
} else {
ARM_MOV_REG_REG (code, ARMREG_IP, ARMREG_SP);
ARM_PUSH (code, 0x5ff0);
prev_sp_offset += 4 * 10; /* all but r0-r3, sp and pc */
mono_emit_unwind_op_def_cfa_offset (cfg, code, prev_sp_offset);
reg_offset = 0;
for (i = 0; i < 16; ++i) {
if ((i > ARMREG_R3) && (i != ARMREG_SP) && (i != ARMREG_PC)) {
/* The original r7 is saved at the start */
if (!(iphone_abi && i == ARMREG_R7))
mono_emit_unwind_op_offset (cfg, code, i, (- prev_sp_offset) + reg_offset);
reg_offset += 4;
}
}
g_assert (reg_offset == 4 * 10);
pos += MONO_ABI_SIZEOF (MonoLMF) - (4 * 10);
lmf_offset = pos;
}
alloc_size += pos;
orig_alloc_size = alloc_size;
// align to MONO_ARCH_FRAME_ALIGNMENT bytes
if (alloc_size & (MONO_ARCH_FRAME_ALIGNMENT - 1)) {
alloc_size += MONO_ARCH_FRAME_ALIGNMENT - 1;
alloc_size &= ~(MONO_ARCH_FRAME_ALIGNMENT - 1);
}
/* the stack used in the pushed regs */
alloc_size += ALIGN_TO (prev_sp_offset, MONO_ARCH_FRAME_ALIGNMENT) - prev_sp_offset;
cfg->stack_usage = alloc_size;
if (alloc_size) {
if ((i = mono_arm_is_rotated_imm8 (alloc_size, &rot_amount)) >= 0) {
ARM_SUB_REG_IMM (code, ARMREG_SP, ARMREG_SP, i, rot_amount);
} else {
code = mono_arm_emit_load_imm (code, ARMREG_IP, alloc_size);
ARM_SUB_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
}
mono_emit_unwind_op_def_cfa_offset (cfg, code, prev_sp_offset + alloc_size);
}
if (cfg->frame_reg != ARMREG_SP) {
ARM_MOV_REG_REG (code, cfg->frame_reg, ARMREG_SP);
mono_emit_unwind_op_def_cfa_reg (cfg, code, cfg->frame_reg);
}
//g_print ("prev_sp_offset: %d, alloc_size:%d\n", prev_sp_offset, alloc_size);
prev_sp_offset += alloc_size;
for (i = 0; i < alloc_size - orig_alloc_size; i += 4)
mini_gc_set_slot_type_from_cfa (cfg, (- prev_sp_offset) + orig_alloc_size + i, SLOT_NOREF);
/* Compute max_offset in order to use short forward jumps.
* We could skip doing it on ARM because the immediate displacement
* for jumps is large enough; it may be useful later for constant pools.
*/
max_offset = 0;
for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
MonoInst *ins = bb->code;
bb->max_offset = max_offset;
MONO_BB_FOR_EACH_INS (bb, ins)
max_offset += ins_get_size (ins->opcode);
}
/* stack alignment check */
/*
{
guint8 *buf [16];
ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_SP);
code = mono_arm_emit_load_imm (code, ARMREG_IP, MONO_ARCH_FRAME_ALIGNMENT -1);
ARM_AND_REG_REG (code, ARMREG_LR, ARMREG_LR, ARMREG_IP);
ARM_CMP_REG_IMM (code, ARMREG_LR, 0, 0);
buf [0] = code;
ARM_B_COND (code, ARMCOND_EQ, 0);
if (cfg->compile_aot)
ARM_MOV_REG_IMM8 (code, ARMREG_R0, 0);
else
code = mono_arm_emit_load_imm (code, ARMREG_R0, (guint32)cfg->method);
mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_JIT_ICALL_ID, GUINT_TO_POINTER (MONO_JIT_ICALL_mono_arm_unaligned_stack));
code = emit_call_seq (cfg, code);
arm_patch (buf [0], code);
}
*/
/* store runtime generic context */
if (cfg->rgctx_var) {
MonoInst *ins = cfg->rgctx_var;
g_assert (ins->opcode == OP_REGOFFSET);
if (arm_is_imm12 (ins->inst_offset)) {
ARM_STR_IMM (code, MONO_ARCH_RGCTX_REG, ins->inst_basereg, ins->inst_offset);
} else {
code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
ARM_STR_REG_REG (code, MONO_ARCH_RGCTX_REG, ins->inst_basereg, ARMREG_LR);
}
mono_add_var_location (cfg, cfg->rgctx_var, TRUE, MONO_ARCH_RGCTX_REG, 0, 0, code - cfg->native_code);
mono_add_var_location (cfg, cfg->rgctx_var, FALSE, ins->inst_basereg, ins->inst_offset, code - cfg->native_code, 0);
}
/* load arguments allocated to register from the stack */
cinfo = get_call_info (NULL, sig);
if (cinfo->ret.storage == RegTypeStructByAddr) {
ArgInfo *ainfo = &cinfo->ret;
inst = cfg->vret_addr;
g_assert (arm_is_imm12 (inst->inst_offset));
ARM_STR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
}
if (sig->call_convention == MONO_CALL_VARARG) {
ArgInfo *cookie = &cinfo->sig_cookie;
/* Save the sig cookie address */
g_assert (cookie->storage == RegTypeBase);
g_assert (arm_is_imm12 (prev_sp_offset + cookie->offset));
g_assert (arm_is_imm12 (cfg->sig_cookie));
ARM_ADD_REG_IMM8 (code, ARMREG_IP, cfg->frame_reg, prev_sp_offset + cookie->offset);
ARM_STR_IMM (code, ARMREG_IP, cfg->frame_reg, cfg->sig_cookie);
}
for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
ArgInfo *ainfo = cinfo->args + i;
inst = cfg->args [i];
if (cfg->verbose_level > 2)
g_print ("Saving argument %d (type: %d)\n", i, ainfo->storage);
if (inst->opcode == OP_REGVAR) {
if (ainfo->storage == RegTypeGeneral)
ARM_MOV_REG_REG (code, inst->dreg, ainfo->reg);
else if (ainfo->storage == RegTypeFP) {
g_assert_not_reached ();
} else if (ainfo->storage == RegTypeBase) {
if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
ARM_LDR_IMM (code, inst->dreg, ARMREG_SP, (prev_sp_offset + ainfo->offset));
} else {
code = mono_arm_emit_load_imm (code, ARMREG_IP, prev_sp_offset + ainfo->offset);
ARM_LDR_REG_REG (code, inst->dreg, ARMREG_SP, ARMREG_IP);
}
} else
g_assert_not_reached ();
if (i == 0 && sig->hasthis) {
g_assert (ainfo->storage == RegTypeGeneral);
mono_add_var_location (cfg, inst, TRUE, ainfo->reg, 0, 0, code - cfg->native_code);
mono_add_var_location (cfg, inst, TRUE, inst->dreg, 0, code - cfg->native_code, 0);
}
if (cfg->verbose_level > 2)
g_print ("Argument %d assigned to register %s\n", i, mono_arch_regname (inst->dreg));
} else {
switch (ainfo->storage) {
case RegTypeHFA:
for (part = 0; part < ainfo->nregs; part ++) {
if (ainfo->esize == 4)
ARM_FSTS (code, ainfo->reg + part, inst->inst_basereg, inst->inst_offset + (part * ainfo->esize));
else
ARM_FSTD (code, ainfo->reg + (part * 2), inst->inst_basereg, inst->inst_offset + (part * ainfo->esize));
}
break;
case RegTypeGeneral:
case RegTypeIRegPair:
case RegTypeGSharedVtInReg:
case RegTypeStructByAddr:
switch (ainfo->size) {
case 1:
if (arm_is_imm12 (inst->inst_offset))
ARM_STRB_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
else {
code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
ARM_STRB_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
}
break;
case 2:
if (arm_is_imm8 (inst->inst_offset)) {
ARM_STRH_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
} else {
code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
ARM_STRH_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
}
break;
case 8:
if (arm_is_imm12 (inst->inst_offset)) {
ARM_STR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
} else {
code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
ARM_STR_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
}
if (arm_is_imm12 (inst->inst_offset + 4)) {
ARM_STR_IMM (code, ainfo->reg + 1, inst->inst_basereg, inst->inst_offset + 4);
} else {
code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset + 4);
ARM_STR_REG_REG (code, ainfo->reg + 1, inst->inst_basereg, ARMREG_IP);
}
break;
default:
if (arm_is_imm12 (inst->inst_offset)) {
ARM_STR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
} else {
code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
ARM_STR_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
}
break;
}
if (i == 0 && sig->hasthis) {
g_assert (ainfo->storage == RegTypeGeneral);
mono_add_var_location (cfg, inst, TRUE, ainfo->reg, 0, 0, code - cfg->native_code);
mono_add_var_location (cfg, inst, FALSE, inst->inst_basereg, inst->inst_offset, code - cfg->native_code, 0);
}
break;
case RegTypeBaseGen:
if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, (prev_sp_offset + ainfo->offset));
} else {
code = mono_arm_emit_load_imm (code, ARMREG_IP, prev_sp_offset + ainfo->offset);
ARM_LDR_REG_REG (code, ARMREG_LR, ARMREG_SP, ARMREG_IP);
}
if (arm_is_imm12 (inst->inst_offset + 4)) {
ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset + 4);
ARM_STR_IMM (code, ARMREG_R3, inst->inst_basereg, inst->inst_offset);
} else {
code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset + 4);
ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
ARM_STR_REG_REG (code, ARMREG_R3, inst->inst_basereg, ARMREG_IP);
}
break;
case RegTypeBase:
case RegTypeGSharedVtOnStack:
case RegTypeStructByAddrOnStack:
if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, (prev_sp_offset + ainfo->offset));
} else {
code = mono_arm_emit_load_imm (code, ARMREG_IP, prev_sp_offset + ainfo->offset);
ARM_LDR_REG_REG (code, ARMREG_LR, ARMREG_SP, ARMREG_IP);
}
switch (ainfo->size) {
case 1:
if (arm_is_imm8 (inst->inst_offset)) {
ARM_STRB_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
} else {
code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
ARM_STRB_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
}
break;
case 2:
if (arm_is_imm8 (inst->inst_offset)) {
ARM_STRH_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
} else {
code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
ARM_STRH_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
}
break;
case 8:
if (arm_is_imm12 (inst->inst_offset)) {
ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
} else {
code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
}
if (arm_is_imm12 (prev_sp_offset + ainfo->offset + 4)) {
ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, (prev_sp_offset + ainfo->offset + 4));
} else {
code = mono_arm_emit_load_imm (code, ARMREG_IP, prev_sp_offset + ainfo->offset + 4);
ARM_LDR_REG_REG (code, ARMREG_LR, ARMREG_SP, ARMREG_IP);
}
if (arm_is_imm12 (inst->inst_offset + 4)) {
ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset + 4);
} else {
code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset + 4);
ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
}
break;
default:
if (arm_is_imm12 (inst->inst_offset)) {
ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
} else {
code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
}
break;
}
break;
case RegTypeFP: {
int imm8, rot_amount;
if ((imm8 = mono_arm_is_rotated_imm8 (inst->inst_offset, &rot_amount)) == -1) {
code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
ARM_ADD_REG_REG (code, ARMREG_IP, ARMREG_IP, inst->inst_basereg);
} else
ARM_ADD_REG_IMM (code, ARMREG_IP, inst->inst_basereg, imm8, rot_amount);
if (ainfo->size == 8)
ARM_FSTD (code, ainfo->reg, ARMREG_IP, 0);
else
ARM_FSTS (code, ainfo->reg, ARMREG_IP, 0);
break;
}
case RegTypeStructByVal: {
int doffset = inst->inst_offset;
int soffset = 0;
int cur_reg;
int size = 0;
size = mini_type_stack_size_full (inst->inst_vtype, NULL, sig->pinvoke && !sig->marshalling_disabled);
for (cur_reg = 0; cur_reg < ainfo->size; ++cur_reg) {
if (arm_is_imm12 (doffset)) {
ARM_STR_IMM (code, ainfo->reg + cur_reg, inst->inst_basereg, doffset);
} else {
code = mono_arm_emit_load_imm (code, ARMREG_IP, doffset);
ARM_STR_REG_REG (code, ainfo->reg + cur_reg, inst->inst_basereg, ARMREG_IP);
}
soffset += sizeof (target_mgreg_t);
doffset += sizeof (target_mgreg_t);
}
if (ainfo->vtsize) {
/* FIXME: handle overrun with struct sizes that are not a multiple of 4 */
//g_print ("emit_memcpy (prev_sp_ofs: %d, ainfo->offset: %d, soffset: %d)\n", prev_sp_offset, ainfo->offset, soffset);
code = emit_memcpy (code, ainfo->vtsize * sizeof (target_mgreg_t), inst->inst_basereg, doffset, ARMREG_SP, prev_sp_offset + ainfo->offset);
}
break;
}
default:
g_assert_not_reached ();
break;
}
}
}
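/* Arguments are now stored; set up the LMF frame if the method requires one */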
if (method->save_lmf)
code = emit_save_lmf (cfg, code, alloc_size - lmf_offset);
if (cfg->arch.seq_point_info_var) {
MonoInst *ins = cfg->arch.seq_point_info_var;
/* Initialize the variable from a GOT slot */
mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_SEQ_POINT_INFO, cfg->method);
ARM_LDR_IMM (code, ARMREG_R0, ARMREG_PC, 0);
ARM_B (code, 0);
*(gpointer*)code = NULL;
code += 4;
ARM_LDR_REG_REG (code, ARMREG_R0, ARMREG_PC, ARMREG_R0);
g_assert (ins->opcode == OP_REGOFFSET);
if (arm_is_imm12 (ins->inst_offset)) {
ARM_STR_IMM (code, ARMREG_R0, ins->inst_basereg, ins->inst_offset);
} else {
code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
ARM_STR_REG_REG (code, ARMREG_R0, ins->inst_basereg, ARMREG_LR);
}
}
/* Initialize ss_trigger_page_var */
if (!cfg->soft_breakpoints) {
MonoInst *info_var = cfg->arch.seq_point_info_var;
MonoInst *ss_trigger_page_var = cfg->arch.ss_trigger_page_var;
int dreg = ARMREG_LR;
if (info_var) {
g_assert (info_var->opcode == OP_REGOFFSET);
code = emit_ldr_imm (code, dreg, info_var->inst_basereg, info_var->inst_offset);
/* Load the trigger page addr */
ARM_LDR_IMM (code, dreg, dreg, MONO_STRUCT_OFFSET (SeqPointInfo, ss_trigger_page));
ARM_STR_IMM (code, dreg, ss_trigger_page_var->inst_basereg, ss_trigger_page_var->inst_offset);
}
}
if (cfg->arch.seq_point_ss_method_var) {
MonoInst *ss_method_ins = cfg->arch.seq_point_ss_method_var;
MonoInst *bp_method_ins = cfg->arch.seq_point_bp_method_var;
g_assert (ss_method_ins->opcode == OP_REGOFFSET);
g_assert (arm_is_imm12 (ss_method_ins->inst_offset));
if (cfg->compile_aot) {
MonoInst *info_var = cfg->arch.seq_point_info_var;
int dreg = ARMREG_LR;
g_assert (info_var->opcode == OP_REGOFFSET);
g_assert (arm_is_imm12 (info_var->inst_offset));
ARM_LDR_IMM (code, dreg, info_var->inst_basereg, info_var->inst_offset);
ARM_LDR_IMM (code, dreg, dreg, MONO_STRUCT_OFFSET (SeqPointInfo, ss_tramp_addr));
ARM_STR_IMM (code, dreg, ss_method_ins->inst_basereg, ss_method_ins->inst_offset);
} else {
g_assert (bp_method_ins->opcode == OP_REGOFFSET);
g_assert (arm_is_imm12 (bp_method_ins->inst_offset));
ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
ARM_B (code, 1);
*(gpointer*)code = &single_step_tramp;
code += 4;
*(gpointer*)code = breakpoint_tramp;
code += 4;
ARM_LDR_IMM (code, ARMREG_IP, ARMREG_LR, 0);
ARM_STR_IMM (code, ARMREG_IP, ss_method_ins->inst_basereg, ss_method_ins->inst_offset);
ARM_LDR_IMM (code, ARMREG_IP, ARMREG_LR, 4);
ARM_STR_IMM (code, ARMREG_IP, bp_method_ins->inst_basereg, bp_method_ins->inst_offset);
}
}
set_code_cursor (cfg, code);
g_free (cinfo);
return code;
}
void
mono_arch_emit_epilog (MonoCompile *cfg)
{
MonoMethod *method = cfg->method;
int pos, i, rot_amount;
int max_epilog_size = 16 + 20*4;
guint8 *code;
CallInfo *cinfo;
if (cfg->method->save_lmf)
max_epilog_size += 128;
code = realloc_code (cfg, max_epilog_size);
/* Save the unwind state which is needed by the out-of-line code */
mono_emit_unwind_op_remember_state (cfg, code);
pos = 0;
/* Load returned vtypes into registers if needed */
cinfo = cfg->arch.cinfo;
switch (cinfo->ret.storage) {
case RegTypeStructByVal: {
MonoInst *ins = cfg->ret;
if (cinfo->ret.nregs == 1) {
if (arm_is_imm12 (ins->inst_offset)) {
ARM_LDR_IMM (code, ARMREG_R0, ins->inst_basereg, ins->inst_offset);
} else {
code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
ARM_LDR_REG_REG (code, ARMREG_R0, ins->inst_basereg, ARMREG_LR);
}
} else {
for (i = 0; i < cinfo->ret.nregs; ++i) {
int offset = ins->inst_offset + (i * 4);
if (arm_is_imm12 (offset)) {
ARM_LDR_IMM (code, i, ins->inst_basereg, offset);
} else {
code = mono_arm_emit_load_imm (code, ARMREG_LR, offset);
ARM_LDR_REG_REG (code, i, ins->inst_basereg, ARMREG_LR);
}
}
}
break;
}
case RegTypeHFA: {
MonoInst *ins = cfg->ret;
for (i = 0; i < cinfo->ret.nregs; ++i) {
if (cinfo->ret.esize == 4)
ARM_FLDS (code, cinfo->ret.reg + i, ins->inst_basereg, ins->inst_offset + (i * cinfo->ret.esize));
else
ARM_FLDD (code, cinfo->ret.reg + (i * 2), ins->inst_basereg, ins->inst_offset + (i * cinfo->ret.esize));
}
break;
}
default:
break;
}
if (method->save_lmf) {
int lmf_offset, reg, sp_adj, regmask, nused_int_regs = 0;
/* all but r0-r3, sp and pc */
pos += MONO_ABI_SIZEOF (MonoLMF) - (MONO_ARM_NUM_SAVED_REGS * sizeof (target_mgreg_t));
lmf_offset = pos;
code = emit_restore_lmf (cfg, code, cfg->stack_usage - lmf_offset);
/* This points to r4 inside MonoLMF->iregs */
sp_adj = (MONO_ABI_SIZEOF (MonoLMF) - MONO_ARM_NUM_SAVED_REGS * sizeof (target_mgreg_t));
reg = ARMREG_R4;
regmask = 0x9ff0; /* restore lr to pc */
/* Skip caller saved registers not used by the method */
while (!(cfg->used_int_regs & (1 << reg)) && reg < ARMREG_FP) {
regmask &= ~(1 << reg);
sp_adj += 4;
reg ++;
}
if (iphone_abi)
/* Restored later */
regmask &= ~(1 << ARMREG_PC);
/* point sp at the registers to restore: 10 is 14 - 4, because we skip r0-r3 */
code = emit_big_add (code, ARMREG_SP, cfg->frame_reg, cfg->stack_usage - lmf_offset + sp_adj);
for (i = 0; i < 16; i++) {
if (regmask & (1 << i))
nused_int_regs ++;
}
mono_emit_unwind_op_def_cfa (cfg, code, ARMREG_SP, ((iphone_abi ? 3 : 0) + nused_int_regs) * 4);
/* restore iregs */
ARM_POP (code, regmask);
if (iphone_abi) {
for (i = 0; i < 16; i++) {
if (regmask & (1 << i))
mono_emit_unwind_op_same_value (cfg, code, i);
}
/* Restore saved r7, restore LR to PC */
/* Skip lr from the lmf */
mono_emit_unwind_op_def_cfa_offset (cfg, code, 3 * 4);
ARM_ADD_REG_IMM (code, ARMREG_SP, ARMREG_SP, sizeof (target_mgreg_t), 0);
mono_emit_unwind_op_def_cfa_offset (cfg, code, 2 * 4);
ARM_POP (code, (1 << ARMREG_R7) | (1 << ARMREG_PC));
}
} else {
int i, nused_int_regs = 0;
for (i = 0; i < 16; i++) {
if (cfg->used_int_regs & (1 << i))
nused_int_regs ++;
}
if ((i = mono_arm_is_rotated_imm8 (cfg->stack_usage, &rot_amount)) >= 0) {
ARM_ADD_REG_IMM (code, ARMREG_SP, cfg->frame_reg, i, rot_amount);
} else {
code = mono_arm_emit_load_imm (code, ARMREG_IP, cfg->stack_usage);
ARM_ADD_REG_REG (code, ARMREG_SP, cfg->frame_reg, ARMREG_IP);
}
if (cfg->frame_reg != ARMREG_SP) {
mono_emit_unwind_op_def_cfa_reg (cfg, code, ARMREG_SP);
}
if (iphone_abi) {
/* Restore saved gregs */
if (cfg->used_int_regs) {
mono_emit_unwind_op_def_cfa_offset (cfg, code, (2 + nused_int_regs) * 4);
ARM_POP (code, cfg->used_int_regs);
for (i = 0; i < 16; i++) {
if (cfg->used_int_regs & (1 << i))
mono_emit_unwind_op_same_value (cfg, code, i);
}
}
mono_emit_unwind_op_def_cfa_offset (cfg, code, 2 * 4);
/* Restore saved r7, restore LR to PC */
ARM_POP (code, (1 << ARMREG_R7) | (1 << ARMREG_PC));
} else {
mono_emit_unwind_op_def_cfa_offset (cfg, code, (nused_int_regs + 1) * 4);
ARM_POP (code, cfg->used_int_regs | (1 << ARMREG_PC));
}
}
/* Restore the unwind state to be the same as before the epilog */
mono_emit_unwind_op_restore_state (cfg, code);
set_code_cursor (cfg, code);
}
void
mono_arch_emit_exceptions (MonoCompile *cfg)
{
MonoJumpInfo *patch_info;
int i;
guint8 *code;
guint8* exc_throw_pos [MONO_EXC_INTRINS_NUM];
guint8 exc_throw_found [MONO_EXC_INTRINS_NUM];
int max_epilog_size = 50;
for (i = 0; i < MONO_EXC_INTRINS_NUM; i++) {
exc_throw_pos [i] = NULL;
exc_throw_found [i] = 0;
}
/* count the number of exception infos and
 * make sure we have enough space for exceptions
 */
for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
if (patch_info->type == MONO_PATCH_INFO_EXC) {
i = mini_exception_id_by_name ((const char*)patch_info->data.target);
if (!exc_throw_found [i]) {
max_epilog_size += 32;
exc_throw_found [i] = TRUE;
}
}
}
code = realloc_code (cfg, max_epilog_size);
/* add code to raise exceptions */
for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
switch (patch_info->type) {
case MONO_PATCH_INFO_EXC: {
MonoClass *exc_class;
unsigned char *ip = patch_info->ip.i + cfg->native_code;
i = mini_exception_id_by_name ((const char*)patch_info->data.target);
if (exc_throw_pos [i]) {
arm_patch (ip, exc_throw_pos [i]);
patch_info->type = MONO_PATCH_INFO_NONE;
break;
} else {
exc_throw_pos [i] = code;
}
arm_patch (ip, code);
exc_class = mono_class_load_from_name (mono_defaults.corlib, "System", patch_info->data.name);
ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_LR);
ARM_LDR_IMM (code, ARMREG_R0, ARMREG_PC, 0);
patch_info->type = MONO_PATCH_INFO_JIT_ICALL_ID;
patch_info->data.jit_icall_id = MONO_JIT_ICALL_mono_arch_throw_corlib_exception;
patch_info->ip.i = code - cfg->native_code;
ARM_BL (code, 0);
cfg->thunk_area += THUNK_SIZE;
*(guint32*)(gpointer)code = m_class_get_type_token (exc_class) - MONO_TOKEN_TYPE_DEF;
code += 4;
break;
}
default:
/* do nothing */
break;
}
}
set_code_cursor (cfg, code);
}
#endif /* #ifndef DISABLE_JIT */
void
mono_arch_finish_init (void)
{
}
MonoInst*
mono_arch_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
{
/* FIXME: */
return NULL;
}
#ifndef DISABLE_JIT
#endif
guint32
mono_arch_get_patch_offset (guint8 *code)
{
/* OP_AOTCONST */
return 8;
}
void
mono_arch_flush_register_windows (void)
{
}
MonoMethod*
mono_arch_find_imt_method (host_mgreg_t *regs, guint8 *code)
{
return (MonoMethod*)regs [MONO_ARCH_IMT_REG];
}
MonoVTable*
mono_arch_find_static_call_vtable (host_mgreg_t *regs, guint8 *code)
{
return (MonoVTable*)(gsize)regs [MONO_ARCH_RGCTX_REG];
}
GSList*
mono_arch_get_cie_program (void)
{
GSList *l = NULL;
mono_add_unwind_op_def_cfa (l, (guint8*)NULL, (guint8*)NULL, ARMREG_SP, 0);
return l;
}
/* #define ENABLE_WRONG_METHOD_CHECK 1 */
#define BASE_SIZE (6 * 4)
#define BSEARCH_ENTRY_SIZE (4 * 4)
#define CMP_SIZE (3 * 4)
#define BRANCH_SIZE (1 * 4)
#define CALL_SIZE (2 * 4)
#define WMC_SIZE (8 * 4)
#define DISTANCE(A, B) (((gint32)(gssize)(B)) - ((gint32)(gssize)(A)))
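/*
 * arm_emit_value_and_patch_ldr:
 *
 * Store VALUE as an inline constant at CODE and patch the pc-relative LDR at
 * TARGET with the distance to it. The 8 byte adjustment accounts for the ARM
 * pipeline: reading PC yields the address of the current instruction plus 8.
 */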
static arminstr_t *
arm_emit_value_and_patch_ldr (arminstr_t *code, arminstr_t *target, guint32 value)
{
guint32 delta = DISTANCE (target, code);
delta -= 8;
g_assert (delta >= 0 && delta <= 0xFFF);
*target = *target | delta;
*code = value;
return code + 1;
}
#ifdef ENABLE_WRONG_METHOD_CHECK
static void
mini_dump_bad_imt (int input_imt, int compared_imt, int pc)
{
g_print ("BAD IMT comparing %x with expected %x at ip %x", input_imt, compared_imt, pc);
g_assert (0);
}
#endif
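/*
 * mono_arch_build_imt_trampoline:
 *
 * Emit a search thunk over IMT_ENTRIES: bsearch entries compare the IMT key
 * and branch, while equality entries load the vtable slot or target code and
 * jump to it, optionally falling back to FAIL_TRAMP.
 */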
gpointer
mono_arch_build_imt_trampoline (MonoVTable *vtable, MonoIMTCheckItem **imt_entries, int count,
gpointer fail_tramp)
{
int size, i;
arminstr_t *code, *start;
gboolean large_offsets = FALSE;
guint32 **constant_pool_starts;
arminstr_t *vtable_target = NULL;
int extra_space = 0;
#ifdef ENABLE_WRONG_METHOD_CHECK
char * cond;
#endif
GSList *unwind_ops;
MonoMemoryManager *mem_manager = m_class_get_mem_manager (vtable->klass);
size = BASE_SIZE;
constant_pool_starts = g_new0 (guint32*, count);
for (i = 0; i < count; ++i) {
MonoIMTCheckItem *item = imt_entries [i];
if (item->is_equals) {
gboolean fail_case = !item->check_target_idx && fail_tramp;
if (item->has_target_code || !arm_is_imm12 (DISTANCE (vtable, &vtable->vtable[item->value.vtable_slot]))) {
item->chunk_size += 32;
large_offsets = TRUE;
}
if (item->check_target_idx || fail_case) {
if (!item->compare_done || fail_case)
item->chunk_size += CMP_SIZE;
item->chunk_size += BRANCH_SIZE;
} else {
#ifdef ENABLE_WRONG_METHOD_CHECK
item->chunk_size += WMC_SIZE;
#endif
}
if (fail_case) {
item->chunk_size += 16;
large_offsets = TRUE;
}
item->chunk_size += CALL_SIZE;
} else {
item->chunk_size += BSEARCH_ENTRY_SIZE;
imt_entries [item->check_target_idx]->compare_done = TRUE;
}
size += item->chunk_size;
}
if (large_offsets)
size += 4 * count; /* The ARM_ADD_REG_IMM to pop the stack */
if (fail_tramp) {
code = (arminstr_t *)mini_alloc_generic_virtual_trampoline (vtable, size);
} else {
code = mono_mem_manager_code_reserve (mem_manager, size);
}
start = code;
unwind_ops = mono_arch_get_cie_program ();
#ifdef DEBUG_IMT
g_print ("Building IMT trampoline for class %s %s entries %d code size %d code at %p end %p vtable %p fail_tramp %p\n", m_class_get_name_space (vtable->klass), m_class_get_name (vtable->klass), count, size, start, ((guint8*)start) + size, vtable, fail_tramp);
for (i = 0; i < count; ++i) {
MonoIMTCheckItem *item = imt_entries [i];
g_print ("method %d (%p) %s vtable slot %p is_equals %d chunk size %d\n", i, item->key, ((MonoMethod*)item->key)->name, &vtable->vtable [item->value.vtable_slot], item->is_equals, item->chunk_size);
}
#endif
if (large_offsets) {
ARM_PUSH4 (code, ARMREG_R0, ARMREG_R1, ARMREG_IP, ARMREG_PC);
mono_add_unwind_op_def_cfa_offset (unwind_ops, code, start, 4 * sizeof (target_mgreg_t));
} else {
ARM_PUSH2 (code, ARMREG_R0, ARMREG_R1);
mono_add_unwind_op_def_cfa_offset (unwind_ops, code, start, 2 * sizeof (target_mgreg_t));
}
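/* The IMT key arrives in MONO_ARCH_IMT_REG (v5) and is copied to r0; the LDR into IP is patched below with the vtable address */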
ARM_LDR_IMM (code, ARMREG_R0, ARMREG_LR, -4);
vtable_target = code;
ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
ARM_MOV_REG_REG (code, ARMREG_R0, ARMREG_V5);
for (i = 0; i < count; ++i) {
MonoIMTCheckItem *item = imt_entries [i];
arminstr_t *imt_method = NULL, *vtable_offset_ins = NULL, *target_code_ins = NULL;
gint32 vtable_offset;
item->code_target = (guint8*)code;
if (item->is_equals) {
gboolean fail_case = !item->check_target_idx && fail_tramp;
if (item->check_target_idx || fail_case) {
if (!item->compare_done || fail_case) {
imt_method = code;
ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
}
item->jmp_code = (guint8*)code;
ARM_B_COND (code, ARMCOND_NE, 0);
} else {
/* Enable the commented code to assert on a wrong method */
#ifdef ENABLE_WRONG_METHOD_CHECK
imt_method = code;
ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
cond = code;
ARM_B_COND (code, ARMCOND_EQ, 0);
/* Define this if your system is so bad that gdb is failing. */
#ifdef BROKEN_DEV_ENV
ARM_MOV_REG_REG (code, ARMREG_R2, ARMREG_PC);
ARM_BL (code, 0);
arm_patch (code - 1, mini_dump_bad_imt);
#else
ARM_DBRK (code);
#endif
arm_patch (cond, code);
#endif
}
if (item->has_target_code) {
/* Load target address */
target_code_ins = code;
ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
/* Save it to the fourth slot */
ARM_STR_IMM (code, ARMREG_R1, ARMREG_SP, 3 * sizeof (target_mgreg_t));
/* Restore registers and branch */
ARM_POP4 (code, ARMREG_R0, ARMREG_R1, ARMREG_IP, ARMREG_PC);
code = arm_emit_value_and_patch_ldr (code, target_code_ins, (gsize)item->value.target_code);
} else {
vtable_offset = DISTANCE (vtable, &vtable->vtable[item->value.vtable_slot]);
if (!arm_is_imm12 (vtable_offset)) {
/*
* We need to branch to a computed address but we don't have
* a free register to store it, since IP must contain the
* vtable address. So we push the two values to the stack, and
* load them both using LDM.
*/
/* Compute target address */
vtable_offset_ins = code;
ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
ARM_LDR_REG_REG (code, ARMREG_R1, ARMREG_IP, ARMREG_R1);
/* Save it to the fourth slot */
ARM_STR_IMM (code, ARMREG_R1, ARMREG_SP, 3 * sizeof (target_mgreg_t));
/* Restore registers and branch */
ARM_POP4 (code, ARMREG_R0, ARMREG_R1, ARMREG_IP, ARMREG_PC);
code = arm_emit_value_and_patch_ldr (code, vtable_offset_ins, vtable_offset);
} else {
ARM_POP2 (code, ARMREG_R0, ARMREG_R1);
if (large_offsets) {
mono_add_unwind_op_def_cfa_offset (unwind_ops, code, start, 2 * sizeof (target_mgreg_t));
ARM_ADD_REG_IMM8 (code, ARMREG_SP, ARMREG_SP, 2 * sizeof (target_mgreg_t));
}
mono_add_unwind_op_def_cfa_offset (unwind_ops, code, start, 0);
ARM_LDR_IMM (code, ARMREG_PC, ARMREG_IP, vtable_offset);
}
}
if (fail_case) {
arm_patch (item->jmp_code, (guchar*)code);
target_code_ins = code;
/* Load target address */
ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
/* Save it to the fourth slot */
ARM_STR_IMM (code, ARMREG_R1, ARMREG_SP, 3 * sizeof (target_mgreg_t));
/* Restore registers and branch */
ARM_POP4 (code, ARMREG_R0, ARMREG_R1, ARMREG_IP, ARMREG_PC);
code = arm_emit_value_and_patch_ldr (code, target_code_ins, (gsize)fail_tramp);
item->jmp_code = NULL;
}
if (imt_method)
code = arm_emit_value_and_patch_ldr (code, imt_method, (guint32)(gsize)item->key);
/* must be emitted after the unconditional branch */
if (vtable_target) {
code = arm_emit_value_and_patch_ldr (code, vtable_target, (guint32)(gsize)vtable);
item->chunk_size += 4;
vtable_target = NULL;
}
/* We reserve the space for the bsearch IMT values after the first entry with an absolute jump */
constant_pool_starts [i] = code;
if (extra_space) {
code += extra_space;
extra_space = 0;
}
} else {
ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
item->jmp_code = (guint8*)code;
ARM_B_COND (code, ARMCOND_HS, 0);
++extra_space;
}
}
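/* Patch the compare-branch targets and backfill the constant pools with the IMT keys */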
for (i = 0; i < count; ++i) {
MonoIMTCheckItem *item = imt_entries [i];
if (item->jmp_code) {
if (item->check_target_idx)
arm_patch (item->jmp_code, imt_entries [item->check_target_idx]->code_target);
}
if (i > 0 && item->is_equals) {
int j;
arminstr_t *space_start = constant_pool_starts [i];
for (j = i - 1; j >= 0 && !imt_entries [j]->is_equals; --j) {
space_start = arm_emit_value_and_patch_ldr (space_start, (arminstr_t*)imt_entries [j]->code_target, (guint32)(gsize)imt_entries [j]->key);
}
}
}
#ifdef DEBUG_IMT
{
char *buff = g_strdup_printf ("thunk_for_class_%s_%s_entries_%d", m_class_get_name_space (vtable->klass), m_class_get_name (vtable->klass), count);
mono_disassemble_code (NULL, (guint8*)start, size, buff);
g_free (buff);
}
#endif
g_free (constant_pool_starts);
mono_arch_flush_icache ((guint8*)start, size);
MONO_PROFILER_RAISE (jit_code_buffer, ((guint8*)start, code - start, MONO_PROFILER_CODE_BUFFER_IMT_TRAMPOLINE, NULL));
UnlockedAdd (&mono_stats.imt_trampolines_size, code - start);
g_assert (DISTANCE (start, code) <= size);
mono_tramp_info_register (mono_tramp_info_create (NULL, (guint8*)start, DISTANCE (start, code), NULL, unwind_ops), mem_manager);
return start;
}
host_mgreg_t
mono_arch_context_get_int_reg (MonoContext *ctx, int reg)
{
return ctx->regs [reg];
}
host_mgreg_t*
mono_arch_context_get_int_reg_address (MonoContext *ctx, int reg)
{
return &ctx->regs [reg];
}
void
mono_arch_context_set_int_reg (MonoContext *ctx, int reg, host_mgreg_t val)
{
ctx->regs [reg] = val;
}
/*
* mono_arch_get_trampolines:
*
* Return a list of MonoTrampInfo structures describing arch specific trampolines
* for AOT.
*/
GSList *
mono_arch_get_trampolines (gboolean aot)
{
return mono_arm_get_exception_trampolines (aot);
}
#if defined(MONO_ARCH_SOFT_DEBUG_SUPPORTED)
/*
* mono_arch_set_breakpoint:
*
* Set a breakpoint at the native code corresponding to JI at NATIVE_OFFSET.
* The location should contain code emitted by OP_SEQ_POINT.
*/
void
mono_arch_set_breakpoint (MonoJitInfo *ji, guint8 *ip)
{
guint8 *code = ip;
guint32 native_offset = ip - (guint8*)ji->code_start;
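/* For AOT code the seq point sequence reads from a per-offset table, so arm the breakpoint by filling in the slot for this offset */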
if (ji->from_aot) {
SeqPointInfo *info = mono_arch_get_seq_point_info ((guint8*)ji->code_start);
if (!breakpoint_tramp)
breakpoint_tramp = mini_get_breakpoint_trampoline ();
g_assert (native_offset % 4 == 0);
g_assert (info->bp_addrs [native_offset / 4] == 0);
info->bp_addrs [native_offset / 4] = (guint8*)(mini_debug_options.soft_breakpoints ? breakpoint_tramp : bp_trigger_page);
} else if (mini_debug_options.soft_breakpoints) {
code += 4;
ARM_BLX_REG (code, ARMREG_LR);
mono_arch_flush_icache (code - 4, 4);
} else {
int dreg = ARMREG_LR;
/* Read from another trigger page */
ARM_LDR_IMM (code, dreg, ARMREG_PC, 0);
ARM_B (code, 0);
*(int*)code = (int)(gssize)bp_trigger_page;
code += 4;
ARM_LDR_IMM (code, dreg, dreg, 0);
mono_arch_flush_icache (code - 16, 16);
#if 0
/* This is currently implemented by emitting an SWI instruction, which
* qemu/linux seems to convert to a SIGILL.
*/
*(int*)code = (0xef << 24) | 8;
code += 4;
mono_arch_flush_icache (code - 4, 4);
#endif
}
}
/*
* mono_arch_clear_breakpoint:
*
* Clear the breakpoint at IP.
*/
void
mono_arch_clear_breakpoint (MonoJitInfo *ji, guint8 *ip)
{
guint8 *code = ip;
int i;
if (ji->from_aot) {
guint32 native_offset = ip - (guint8*)ji->code_start;
SeqPointInfo *info = mono_arch_get_seq_point_info ((guint8*)ji->code_start);
if (!breakpoint_tramp)
breakpoint_tramp = mini_get_breakpoint_trampoline ();
g_assert (native_offset % 4 == 0);
g_assert (info->bp_addrs [native_offset / 4] == (guint8*)(mini_debug_options.soft_breakpoints ? breakpoint_tramp : bp_trigger_page));
info->bp_addrs [native_offset / 4] = 0;
} else if (mini_debug_options.soft_breakpoints) {
code += 4;
ARM_NOP (code);
mono_arch_flush_icache (code - 4, 4);
} else {
for (i = 0; i < 4; ++i)
ARM_NOP (code);
mono_arch_flush_icache (ip, code - ip);
}
}
/*
* mono_arch_start_single_stepping:
*
* Start single stepping.
*/
void
mono_arch_start_single_stepping (void)
{
if (ss_trigger_page)
mono_mprotect (ss_trigger_page, mono_pagesize (), 0);
else
single_step_tramp = mini_get_single_step_trampoline ();
}
/*
* mono_arch_stop_single_stepping:
*
* Stop single stepping.
*/
void
mono_arch_stop_single_stepping (void)
{
if (ss_trigger_page)
mono_mprotect (ss_trigger_page, mono_pagesize (), MONO_MMAP_READ);
else
single_step_tramp = NULL;
}
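/* Accesses to the trigger pages fault with SIGBUS on Darwin and SIGSEGV elsewhere */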
#if __APPLE__
#define DBG_SIGNAL SIGBUS
#else
#define DBG_SIGNAL SIGSEGV
#endif
/*
* mono_arch_is_single_step_event:
*
* Return whether the machine state in SIGCTX corresponds to a single
* step event.
*/
gboolean
mono_arch_is_single_step_event (void *info, void *sigctx)
{
siginfo_t *sinfo = (siginfo_t*)info;
if (!ss_trigger_page)
return FALSE;
/* Sometimes the address is off by 4 */
if (sinfo->si_addr >= ss_trigger_page && (guint8*)sinfo->si_addr <= (guint8*)ss_trigger_page + 128)
return TRUE;
else
return FALSE;
}
/*
* mono_arch_is_breakpoint_event:
*
* Return whether the machine state in SIGCTX corresponds to a breakpoint event.
*/
gboolean
mono_arch_is_breakpoint_event (void *info, void *sigctx)
{
siginfo_t *sinfo = (siginfo_t*)info;
if (!ss_trigger_page)
return FALSE;
if (sinfo->si_signo == DBG_SIGNAL) {
/* Sometimes the address is off by 4 */
if (sinfo->si_addr >= bp_trigger_page && (guint8*)sinfo->si_addr <= (guint8*)bp_trigger_page + 128)
return TRUE;
else
return FALSE;
} else {
return FALSE;
}
}
/*
* mono_arch_skip_breakpoint:
*
* See mini-amd64.c for docs.
*/
void
mono_arch_skip_breakpoint (MonoContext *ctx, MonoJitInfo *ji)
{
MONO_CONTEXT_SET_IP (ctx, (guint8*)MONO_CONTEXT_GET_IP (ctx) + 4);
}
/*
* mono_arch_skip_single_step:
*
* See mini-amd64.c for docs.
*/
void
mono_arch_skip_single_step (MonoContext *ctx)
{
MONO_CONTEXT_SET_IP (ctx, (guint8*)MONO_CONTEXT_GET_IP (ctx) + 4);
}
/*
* mono_arch_get_seq_point_info:
*
* See mini-amd64.c for docs.
*/
SeqPointInfo*
mono_arch_get_seq_point_info (guint8 *code)
{
SeqPointInfo *info;
MonoJitInfo *ji;
MonoJitMemoryManager *jit_mm;
jit_mm = get_default_jit_mm ();
// FIXME: Add a free function
jit_mm_lock (jit_mm);
info = (SeqPointInfo *)g_hash_table_lookup (jit_mm->arch_seq_points, code);
jit_mm_unlock (jit_mm);
if (!info) {
ji = mini_jit_info_table_find (code);
g_assert (ji);
info = g_malloc0 (sizeof (SeqPointInfo) + ji->code_size);
info->ss_trigger_page = ss_trigger_page;
info->bp_trigger_page = bp_trigger_page;
info->ss_tramp_addr = &single_step_tramp;
jit_mm_lock (jit_mm);
g_hash_table_insert (jit_mm->arch_seq_points, code, info);
jit_mm_unlock (jit_mm);
}
return info;
}
#endif /* MONO_ARCH_SOFT_DEBUG_SUPPORTED */
/*
* mono_arch_set_target:
*
* Set the target architecture the JIT backend should generate code for, in the form
* of a GNU target triplet. Only used in AOT mode.
*/
void
mono_arch_set_target (char *mtriple)
{
/* The GNU target triple format is not very well documented */
if (strstr (mtriple, "armv7")) {
v5_supported = TRUE;
v6_supported = TRUE;
v7_supported = TRUE;
}
if (strstr (mtriple, "armv6")) {
v5_supported = TRUE;
v6_supported = TRUE;
}
if (strstr (mtriple, "armv7s")) {
v7s_supported = TRUE;
}
if (strstr (mtriple, "armv7k")) {
v7k_supported = TRUE;
}
if (strstr (mtriple, "thumbv7s")) {
v5_supported = TRUE;
v6_supported = TRUE;
v7_supported = TRUE;
v7s_supported = TRUE;
thumb_supported = TRUE;
thumb2_supported = TRUE;
}
if (strstr (mtriple, "darwin") || strstr (mtriple, "ios")) {
v5_supported = TRUE;
v6_supported = TRUE;
thumb_supported = TRUE;
iphone_abi = TRUE;
}
if (strstr (mtriple, "gnueabi"))
eabi_supported = TRUE;
}
gboolean
mono_arch_opcode_supported (int opcode)
{
switch (opcode) {
case OP_ATOMIC_ADD_I4:
case OP_ATOMIC_EXCHANGE_I4:
case OP_ATOMIC_CAS_I4:
case OP_ATOMIC_LOAD_I1:
case OP_ATOMIC_LOAD_I2:
case OP_ATOMIC_LOAD_I4:
case OP_ATOMIC_LOAD_U1:
case OP_ATOMIC_LOAD_U2:
case OP_ATOMIC_LOAD_U4:
case OP_ATOMIC_STORE_I1:
case OP_ATOMIC_STORE_I2:
case OP_ATOMIC_STORE_I4:
case OP_ATOMIC_STORE_U1:
case OP_ATOMIC_STORE_U2:
case OP_ATOMIC_STORE_U4:
return v7_supported;
case OP_ATOMIC_LOAD_R4:
case OP_ATOMIC_LOAD_R8:
case OP_ATOMIC_STORE_R4:
case OP_ATOMIC_STORE_R8:
return v7_supported && IS_VFP;
default:
return FALSE;
}
}
CallInfo*
mono_arch_get_call_info (MonoMemPool *mp, MonoMethodSignature *sig)
{
return get_call_info (mp, sig);
}
gpointer
mono_arch_get_get_tls_tramp (void)
{
return NULL;
}
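/*
 * emit_aotconst:
 *
 * Emit a pc-relative load of a GOT offset, branching over the inline literal
 * which is filled in at patch time, then load the actual value from the GOT.
 */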
static G_GNUC_UNUSED guint8*
emit_aotconst (MonoCompile *cfg, guint8 *code, int dreg, int patch_type, gpointer data)
{
/* OP_AOTCONST */
mono_add_patch_info (cfg, code - cfg->native_code, (MonoJumpInfoType)patch_type, data);
ARM_LDR_IMM (code, dreg, ARMREG_PC, 0);
ARM_B (code, 0);
*(gpointer*)code = NULL;
code += 4;
/* Load the value from the GOT */
ARM_LDR_REG_REG (code, dreg, ARMREG_PC, dreg);
return code;
}
guint8*
mono_arm_emit_aotconst (gpointer ji_list, guint8 *code, guint8 *buf, int dreg, int patch_type, gconstpointer data)
{
MonoJumpInfo **ji = (MonoJumpInfo**)ji_list;
*ji = mono_patch_info_list_prepend (*ji, code - buf, (MonoJumpInfoType)patch_type, data);
ARM_LDR_IMM (code, dreg, ARMREG_PC, 0);
ARM_B (code, 0);
*(gpointer*)code = NULL;
code += 4;
ARM_LDR_REG_REG (code, dreg, ARMREG_PC, dreg);
return code;
}
gpointer
mono_arch_load_function (MonoJitICallId jit_icall_id)
{
gpointer target = NULL;
switch (jit_icall_id) {
#undef MONO_AOT_ICALL
#define MONO_AOT_ICALL(x) case MONO_JIT_ICALL_ ## x: target = (gpointer)x; break;
MONO_AOT_ICALL (mono_arm_resume_unwind)
MONO_AOT_ICALL (mono_arm_start_gsharedvt_call)
MONO_AOT_ICALL (mono_arm_throw_exception)
MONO_AOT_ICALL (mono_arm_throw_exception_by_token)
MONO_AOT_ICALL (mono_arm_unaligned_stack)
}
return target;
}
| 1 |
dotnet/runtime | 66,006 | Revert "[mono][jit] Remove support for -O=-float32, i.e. treating r4 values a…" | Reverts this as x86 still uses it.
| vargaz | 2022-03-01T15:15:53Z | 2022-03-01T20:09:47Z | af71404d9a3b38e63408af47cc847ac1a1fcf4e1 | 2dd232a53c38ac874b15fe504df275b660988294 | Revert "[mono][jit] Remove support for -O=-float32, i.e. treating r4 values a…". Reverts this as x86 still uses it.
| ./src/mono/mono/mini/mini-arm.h | /**
* \file
* Copyright 2011 Xamarin Inc
* Licensed under the MIT license. See LICENSE file in the project root for full license information.
*/
#ifndef __MONO_MINI_ARM_H__
#define __MONO_MINI_ARM_H__
#include <mono/arch/arm/arm-codegen.h>
#include <mono/utils/mono-context.h>
#include <glib.h>
#if defined(ARM_FPU_NONE)
#define MONO_ARCH_SOFT_FLOAT_FALLBACK 1
#endif
#if defined(__ARM_EABI__)
#if G_BYTE_ORDER == G_LITTLE_ENDIAN
#define ARM_ARCHITECTURE "armel"
#else
#define ARM_ARCHITECTURE "armeb"
#endif
#else
#define ARM_ARCHITECTURE "arm"
#endif
#if defined(ARM_FPU_VFP)
#define ARM_FP_MODEL "vfp"
#elif defined(ARM_FPU_NONE)
#define ARM_FP_MODEL "vfp+fallback"
#elif defined(ARM_FPU_VFP_HARD)
#define ARM_FP_MODEL "vfp+hard"
#else
#error "At least one of ARM_FPU_NONE, ARM_FPU_VFP or ARM_FPU_VFP_HARD must be defined."
#endif
#define MONO_ARCH_ARCHITECTURE ARM_ARCHITECTURE "," ARM_FP_MODEL
#define MONO_ARCH_CPU_SPEC mono_arm_cpu_desc
#if G_BYTE_ORDER == G_LITTLE_ENDIAN
#define ARM_LSW_REG ARMREG_R0
#define ARM_MSW_REG ARMREG_R1
#else
#define ARM_LSW_REG ARMREG_R1
#define ARM_MSW_REG ARMREG_R0
#endif
#define MONO_MAX_IREGS 16
#define MONO_SAVED_GREGS 10 /* r4-r11, ip, lr */
/* r4-r11, ip, lr: registers saved in the LMF */
#define MONO_ARM_REGSAVE_MASK 0x5ff0
#define MONO_ARM_FIRST_SAVED_REG ARMREG_R4
#define MONO_ARM_NUM_SAVED_REGS 10
/* Parameters used by the register allocator */
#define MONO_ARCH_CALLEE_REGS ((1<<ARMREG_R0) | (1<<ARMREG_R1) | (1<<ARMREG_R2) | (1<<ARMREG_R3) | (1<<ARMREG_IP))
#define MONO_ARCH_CALLEE_SAVED_REGS ((1<<ARMREG_V1) | (1<<ARMREG_V2) | (1<<ARMREG_V3) | (1<<ARMREG_V4) | (1<<ARMREG_V5) | (1<<ARMREG_V6) | (1<<ARMREG_V7))
/*
* TODO: Make use of VFP v3 registers d16-d31.
*/
/*
* TODO: We can't use registers d8-d15 in hard float mode because the
* register allocator doesn't allocate floating point registers globally.
*/
#if defined(ARM_FPU_VFP_HARD)
#define MONO_SAVED_FREGS 16
#define MONO_MAX_FREGS 32
/*
* d8-d15 must be preserved across function calls. We use d14-d15 as
* scratch registers in the JIT. The rest have no meaning tied to them.
*/
#define MONO_ARCH_CALLEE_FREGS 0x00005555
#define MONO_ARCH_CALLEE_SAVED_FREGS 0x55550000
#else
#define MONO_SAVED_FREGS 8
#define MONO_MAX_FREGS 16
/*
* No registers need to be preserved across function calls. We use d0-d1
* as scratch registers in the JIT. The rest have no meaning tied to them.
*/
#define MONO_ARCH_CALLEE_FREGS 0x55555550
#define MONO_ARCH_CALLEE_SAVED_FREGS 0x00000000
#endif
#define MONO_ARCH_USE_FPSTACK FALSE
#define MONO_ARCH_INST_SREG2_MASK(ins) (0)
#define MONO_ARCH_INST_FIXED_REG(desc) \
(mono_arch_is_soft_float () ? \
((desc) == 'l' || (desc) == 'f' || (desc) == 'g' ? ARM_LSW_REG : (desc) == 'a' ? ARMREG_R0 : -1) : \
((desc) == 'l' ? ARM_LSW_REG : (desc) == 'a' ? ARMREG_R0 : -1))
#define MONO_ARCH_INST_IS_REGPAIR(desc) \
(mono_arch_is_soft_float () ? \
((desc) == 'l' || (desc) == 'L' || (desc) == 'f' || (desc) == 'g') : \
((desc) == 'l' || (desc) == 'L'))
#define MONO_ARCH_INST_IS_FLOAT(desc) \
(mono_arch_is_soft_float () ? \
(FALSE) : \
((desc) == 'f' || (desc) == 'g'))
#define MONO_ARCH_INST_REGPAIR_REG2(desc,hreg1) ((desc) == 'l' || (desc) == 'f' || (desc) == 'g' ? ARM_MSW_REG : -1)
#ifdef TARGET_WATCHOS
#define MONO_ARCH_FRAME_ALIGNMENT 16
#else
#define MONO_ARCH_FRAME_ALIGNMENT 8
#endif
/* FIXME: align to 16 bytes instead of 32 bytes (we align to 32 bytes to get
* reproducible results for benchmarks) */
#define MONO_ARCH_CODE_ALIGNMENT 32
/* This needs to hold both a 32 bit int and a 64 bit double */
#define mono_unwind_reg_t guint64
/* Argument marshallings for calls between gsharedvt and normal code */
typedef enum {
GSHAREDVT_ARG_NONE = 0,
GSHAREDVT_ARG_BYVAL_TO_BYREF = 1,
GSHAREDVT_ARG_BYREF_TO_BYVAL = 2,
GSHAREDVT_ARG_BYREF_TO_BYVAL_I1 = 3,
GSHAREDVT_ARG_BYREF_TO_BYVAL_I2 = 4,
GSHAREDVT_ARG_BYREF_TO_BYVAL_U1 = 5,
GSHAREDVT_ARG_BYREF_TO_BYVAL_U2 = 6
} GSharedVtArgMarshal;
/* Return value marshalling for calls between gsharedvt and normal code */
typedef enum {
GSHAREDVT_RET_NONE = 0,
GSHAREDVT_RET_IREG = 1,
GSHAREDVT_RET_IREGS = 2,
GSHAREDVT_RET_I1 = 3,
GSHAREDVT_RET_U1 = 4,
GSHAREDVT_RET_I2 = 5,
GSHAREDVT_RET_U2 = 6,
GSHAREDVT_RET_VFP_R4 = 7,
GSHAREDVT_RET_VFP_R8 = 8
} GSharedVtRetMarshal;
typedef struct {
/* Method address to call */
gpointer addr;
/* The trampoline reads this, so keep the size explicit */
int ret_marshal;
/* If ret_marshal != NONE, this is the reg of the vret arg, else -1 */
int vret_arg_reg;
/* The stack slot where the return value will be stored */
int vret_slot;
int stack_usage, map_count;
/* If not -1, then make a virtual call using this vtable offset */
int vcall_offset;
/* If 1, make an indirect call to the address in the rgctx reg */
int calli;
/* Whether this is an in or an out call */
int gsharedvt_in;
/* Whether this call uses fp registers */
int have_fregs;
CallInfo *caller_cinfo;
CallInfo *callee_cinfo;
/* Maps stack slots/registers in the caller to the stack slots/registers in the callee */
/* A negative value means a register, i.e. -1=r0, -2=r1 etc. */
int map [MONO_ZERO_LEN_ARRAY];
} GSharedVtCallInfo;
typedef enum {
RegTypeNone,
/* Passed/returned in an ireg */
RegTypeGeneral,
/* Passed/returned in a pair of iregs */
RegTypeIRegPair,
/* Passed on the stack */
RegTypeBase,
/* First word in r3, second word on the stack */
RegTypeBaseGen,
/* FP value passed in either an ireg or a vfp reg */
RegTypeFP,
/* Struct passed/returned in gregs */
RegTypeStructByVal,
RegTypeStructByAddr,
RegTypeStructByAddrOnStack,
/* gsharedvt argument passed by addr in greg */
RegTypeGSharedVtInReg,
/* gsharedvt argument passed by addr on stack */
RegTypeGSharedVtOnStack,
RegTypeHFA
} ArgStorage;
typedef struct {
gint32 offset;
guint16 vtsize; /* in param area */
/* RegTypeHFA */
int esize;
/* RegTypeHFA/RegTypeStructByVal */
int nregs;
guint8 reg;
ArgStorage storage;
/* RegTypeStructByVal */
gint32 struct_size, align;
guint8 size : 4; /* 1, 2, 4, 8, or regs used by RegTypeStructByVal */
guint8 is_signed : 1;
} ArgInfo;
struct CallInfo {
int nargs;
guint32 stack_usage;
/* The index of the vret arg in the argument list for RegTypeStructByAddr */
int vret_arg_index;
ArgInfo ret;
ArgInfo sig_cookie;
ArgInfo args [1];
};
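/* Argument registers defined by the calling convention: r0-r3, plus d0-d7 with the hard-float ABI */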
#define PARAM_REGS 4
#define FP_PARAM_REGS 8
typedef struct {
/* General registers */
host_mgreg_t gregs [PARAM_REGS];
/* Floating registers */
float fregs [FP_PARAM_REGS * 2];
/* Stack usage, used for passing params on stack */
guint32 stack_size;
guint8 *stack;
} CallContext;
/* Structure used by the sequence points in AOTed code */
struct SeqPointInfo {
gpointer ss_trigger_page;
gpointer bp_trigger_page;
gpointer ss_tramp_addr;
guint8* bp_addrs [MONO_ZERO_LEN_ARRAY];
};
typedef struct {
double fpregs [FP_PARAM_REGS];
host_mgreg_t res, res2;
guint8 *ret;
guint32 has_fpregs;
guint32 n_stackargs;
/* This should come last as the structure is dynamically extended */
host_mgreg_t regs [PARAM_REGS];
} DynCallArgs;
void arm_patch (guchar *code, const guchar *target);
guint8* mono_arm_emit_load_imm (guint8 *code, int dreg, guint32 val);
int mono_arm_is_rotated_imm8 (guint32 val, gint *rot_amount);
void
mono_arm_throw_exception_by_token (guint32 type_token, host_mgreg_t pc, host_mgreg_t sp, host_mgreg_t *int_regs, gdouble *fp_regs);
gpointer
mono_arm_start_gsharedvt_call (GSharedVtCallInfo *info, gpointer *caller, gpointer *callee, gpointer mrgctx_reg, double *caller_fregs, double *callee_fregs);
typedef enum {
MONO_ARM_FPU_NONE = 0,
MONO_ARM_FPU_VFP = 1,
MONO_ARM_FPU_VFP_HARD = 2
} MonoArmFPU;
/* keep the size of the structure a multiple of 8 */
struct MonoLMF {
/*
* If the second lowest bit is set to 1, then this is a MonoLMFExt structure, and
* the other fields are not valid.
*/
gpointer previous_lmf;
gpointer lmf_addr;
/* This is only set in trampoline LMF frames */
MonoMethod *method;
host_mgreg_t sp;
host_mgreg_t ip;
host_mgreg_t fp;
/* Currently only used in trampolines on armhf to hold d0-d15. We don't really
* need to put d0-d7 in the LMF, but it simplifies the trampoline code.
*/
double fregs [16];
/* all but sp and pc: matches the PUSH instruction layout in the trampolines
* 0-4 should be considered undefined (except in the magic tramp)
* sp is saved at IP.
*/
host_mgreg_t iregs [14];
};
typedef struct MonoCompileArch {
MonoInst *seq_point_info_var;
MonoInst *ss_trigger_page_var;
MonoInst *seq_point_ss_method_var;
MonoInst *seq_point_bp_method_var;
MonoInst *vret_addr_loc;
gboolean omit_fp;
gboolean omit_fp_computed;
CallInfo *cinfo;
MonoInst *vfp_scratch_slots [2];
int atomic_tmp_offset;
guint8 *thunks;
int thunks_size;
} MonoCompileArch;
#define MONO_ARCH_EMULATE_FCONV_TO_U4 1
#define MONO_ARCH_EMULATE_FCONV_TO_I8 1
#define MONO_ARCH_EMULATE_FCONV_TO_U8 1
#define MONO_ARCH_EMULATE_LCONV_TO_R8 1
#define MONO_ARCH_EMULATE_LCONV_TO_R4 1
#define MONO_ARCH_EMULATE_LCONV_TO_R8_UN 1
#define MONO_ARCH_EMULATE_FREM 1
#define MONO_ARCH_EMULATE_DIV 1
#define MONO_ARCH_EMULATE_CONV_R8_UN 1
#define MONO_ARCH_EMULATE_MUL_OVF 1
#define ARM_FIRST_ARG_REG 0
#define ARM_LAST_ARG_REG 3
#define MONO_ARCH_USE_SIGACTION 1
#if defined(HOST_WATCHOS)
#undef MONO_ARCH_USE_SIGACTION
#endif
#define MONO_ARCH_NEED_DIV_CHECK 1
#define MONO_ARCH_HAVE_GENERALIZED_IMT_TRAMPOLINE 1
#define MONO_ARCH_HAVE_FULL_AOT_TRAMPOLINES 1
#define MONO_ARCH_HAVE_DECOMPOSE_LONG_OPTS 1
#define MONO_ARCH_INTERPRETER_SUPPORTED 1
#define MONO_ARCH_AOT_SUPPORTED 1
#define MONO_ARCH_LLVM_SUPPORTED 1
#define MONO_ARCH_GSHARED_SUPPORTED 1
#define MONO_ARCH_DYN_CALL_SUPPORTED 1
#define MONO_ARCH_DYN_CALL_PARAM_AREA 0
#define MONO_ARCH_HAVE_OP_TAILCALL_MEMBASE 1
#define MONO_ARCH_HAVE_OP_TAILCALL_REG 1
#if !(defined(TARGET_ANDROID) && defined(MONO_CROSS_COMPILE))
#define MONO_ARCH_SOFT_DEBUG_SUPPORTED 1
#endif
#define MONO_ARCH_HAVE_EXCEPTIONS_INIT 1
#define MONO_ARCH_HAVE_GET_TRAMPOLINES 1
#define MONO_ARCH_HAVE_CONTEXT_SET_INT_REG 1
#define MONO_ARCH_GC_MAPS_SUPPORTED 1
#define MONO_ARCH_HAVE_SETUP_ASYNC_CALLBACK 1
#define MONO_ARCH_HAVE_SETUP_RESUME_FROM_SIGNAL_HANDLER_CTX 1
#define MONO_ARCH_GSHAREDVT_SUPPORTED 1
#define MONO_ARCH_HAVE_GENERAL_RGCTX_LAZY_FETCH_TRAMPOLINE 1
#define MONO_ARCH_HAVE_OPCODE_NEEDS_EMULATION 1
#define MONO_ARCH_HAVE_OBJC_GET_SELECTOR 1
#define MONO_ARCH_HAVE_SDB_TRAMPOLINES 1
#define MONO_ARCH_HAVE_OP_GENERIC_CLASS_INIT 1
#define MONO_ARCH_LLVM_TARGET_LAYOUT "e-p:32:32-n32-S64"
#define MONO_ARCH_HAVE_INTERP_ENTRY_TRAMPOLINE 1
#define MONO_ARCH_HAVE_FTNPTR_ARG_TRAMPOLINE 1
#define MONO_ARCH_HAVE_INTERP_PINVOKE_TRAMP 1
#define MONO_ARCH_HAVE_INTERP_NATIVE_TO_MANAGED 1
#if defined(TARGET_WATCHOS) || (defined(__linux__) && !defined(TARGET_ANDROID))
#define MONO_ARCH_DISABLE_HW_TRAPS 1
#define MONO_ARCH_HAVE_UNWIND_BACKTRACE 1
#endif
/* ARM doesn't have too many registers, so we have to use a callee saved one */
#define MONO_ARCH_RGCTX_REG ARMREG_V5
#define MONO_ARCH_IMT_REG MONO_ARCH_RGCTX_REG
/* First argument reg */
#define MONO_ARCH_VTABLE_REG ARMREG_R0
// Does the ABI have a volatile non-parameter register, so tailcall
// can pass context to generics or interfaces?
#define MONO_ARCH_HAVE_VOLATILE_NON_PARAM_REGISTER 0
#define MONO_CONTEXT_SET_LLVM_EXC_REG(ctx, exc) do { (ctx)->regs [0] = (gsize)exc; } while (0)
#if defined(HOST_WIN32)
#define __builtin_extract_return_addr(x) x
#define __builtin_return_address(x) _ReturnAddress()
#define __builtin_frame_address(x) _AddressOfReturnAddress()
#endif
#define MONO_INIT_CONTEXT_FROM_FUNC(ctx,func) do { \
MONO_CONTEXT_SET_BP ((ctx), __builtin_frame_address (0)); \
MONO_CONTEXT_SET_SP ((ctx), __builtin_frame_address (0)); \
MONO_CONTEXT_SET_IP ((ctx), (func)); \
} while (0)
#define MONO_ARCH_INIT_TOP_LMF_ENTRY(lmf)
#if defined(TARGET_TVOS) || defined(TARGET_WATCHOS)
#define MONO_ARCH_EXPLICIT_NULL_CHECKS 1
#endif
void
mono_arm_throw_exception (MonoObject *exc, host_mgreg_t pc, host_mgreg_t sp, host_mgreg_t *int_regs, gdouble *fp_regs, gboolean preserve_ips);
void
mono_arm_throw_exception_by_token (guint32 type_token, host_mgreg_t pc, host_mgreg_t sp, host_mgreg_t *int_regs, gdouble *fp_regs);
void
mono_arm_resume_unwind (guint32 dummy1, host_mgreg_t pc, host_mgreg_t sp, host_mgreg_t *int_regs, gdouble *fp_regs);
gboolean
mono_arm_thumb_supported (void);
gboolean
mono_arm_eabi_supported (void);
int
mono_arm_i8_align (void);
GSList*
mono_arm_get_exception_trampolines (gboolean aot);
guint8*
mono_arm_get_thumb_plt_entry (guint8 *code);
guint8*
mono_arm_patchable_b (guint8 *code, int cond);
guint8*
mono_arm_patchable_bl (guint8 *code, int cond);
gboolean
mono_arm_is_hard_float (void);
void
mono_arm_unaligned_stack (MonoMethod *method);
/* MonoJumpInfo **ji */
guint8*
mono_arm_emit_aotconst (gpointer ji, guint8 *code, guint8 *buf, int dreg, int patch_type, gconstpointer data);
CallInfo*
mono_arch_get_call_info (MonoMemPool *mp, MonoMethodSignature *sig);
#endif /* __MONO_MINI_ARM_H__ */
| /**
* \file
* Copyright 2011 Xamarin Inc
* Licensed under the MIT license. See LICENSE file in the project root for full license information.
*/
#ifndef __MONO_MINI_ARM_H__
#define __MONO_MINI_ARM_H__
#include <mono/arch/arm/arm-codegen.h>
#include <mono/utils/mono-context.h>
#include <glib.h>
#if defined(ARM_FPU_NONE)
#define MONO_ARCH_SOFT_FLOAT_FALLBACK 1
#endif
#if defined(__ARM_EABI__)
#if G_BYTE_ORDER == G_LITTLE_ENDIAN
#define ARM_ARCHITECTURE "armel"
#else
#define ARM_ARCHITECTURE "armeb"
#endif
#else
#define ARM_ARCHITECTURE "arm"
#endif
#if defined(ARM_FPU_VFP)
#define ARM_FP_MODEL "vfp"
#elif defined(ARM_FPU_NONE)
#define ARM_FP_MODEL "vfp+fallback"
#elif defined(ARM_FPU_VFP_HARD)
#define ARM_FP_MODEL "vfp+hard"
#else
#error "At least one of ARM_FPU_NONE, ARM_FPU_VFP or ARM_FPU_VFP_HARD must be defined."
#endif
#define MONO_ARCH_ARCHITECTURE ARM_ARCHITECTURE "," ARM_FP_MODEL
#define MONO_ARCH_CPU_SPEC mono_arm_cpu_desc
#if G_BYTE_ORDER == G_LITTLE_ENDIAN
#define ARM_LSW_REG ARMREG_R0
#define ARM_MSW_REG ARMREG_R1
#else
#define ARM_LSW_REG ARMREG_R1
#define ARM_MSW_REG ARMREG_R0
#endif
#define MONO_MAX_IREGS 16
#define MONO_SAVED_GREGS 10 /* r4-r11, ip, lr */
/* r4-r11, ip, lr: registers saved in the LMF */
#define MONO_ARM_REGSAVE_MASK 0x5ff0
#define MONO_ARM_FIRST_SAVED_REG ARMREG_R4
#define MONO_ARM_NUM_SAVED_REGS 10
/* Parameters used by the register allocator */
#define MONO_ARCH_CALLEE_REGS ((1<<ARMREG_R0) | (1<<ARMREG_R1) | (1<<ARMREG_R2) | (1<<ARMREG_R3) | (1<<ARMREG_IP))
#define MONO_ARCH_CALLEE_SAVED_REGS ((1<<ARMREG_V1) | (1<<ARMREG_V2) | (1<<ARMREG_V3) | (1<<ARMREG_V4) | (1<<ARMREG_V5) | (1<<ARMREG_V6) | (1<<ARMREG_V7))
/*
* TODO: Make use of VFP v3 registers d16-d31.
*/
/*
* TODO: We can't use registers d8-d15 in hard float mode because the
* register allocator doesn't allocate floating point registers globally.
*/
#if defined(ARM_FPU_VFP_HARD)
#define MONO_SAVED_FREGS 16
#define MONO_MAX_FREGS 32
/*
* d8-d15 must be preserved across function calls. We use d14-d15 as
* scratch registers in the JIT. The rest have no meaning tied to them.
*/
#define MONO_ARCH_CALLEE_FREGS 0x00005555
#define MONO_ARCH_CALLEE_SAVED_FREGS 0x55550000
#else
#define MONO_SAVED_FREGS 8
#define MONO_MAX_FREGS 16
/*
* No registers need to be preserved across function calls. We use d0-d1
* as scratch registers in the JIT. The rest have no meaning tied to them.
*/
#define MONO_ARCH_CALLEE_FREGS 0x55555550
#define MONO_ARCH_CALLEE_SAVED_FREGS 0x00000000
#endif
#define MONO_ARCH_USE_FPSTACK FALSE
#define MONO_ARCH_INST_SREG2_MASK(ins) (0)
#define MONO_ARCH_INST_FIXED_REG(desc) \
(mono_arch_is_soft_float () ? \
((desc) == 'l' || (desc) == 'f' || (desc) == 'g' ? ARM_LSW_REG : (desc) == 'a' ? ARMREG_R0 : -1) : \
((desc) == 'l' ? ARM_LSW_REG : (desc) == 'a' ? ARMREG_R0 : -1))
#define MONO_ARCH_INST_IS_REGPAIR(desc) \
(mono_arch_is_soft_float () ? \
((desc) == 'l' || (desc) == 'L' || (desc) == 'f' || (desc) == 'g') : \
((desc) == 'l' || (desc) == 'L'))
#define MONO_ARCH_INST_IS_FLOAT(desc) \
(mono_arch_is_soft_float () ? \
(FALSE) : \
((desc) == 'f' || (desc) == 'g'))
#define MONO_ARCH_INST_REGPAIR_REG2(desc,hreg1) ((desc) == 'l' || (desc) == 'f' || (desc) == 'g' ? ARM_MSW_REG : -1)
#ifdef TARGET_WATCHOS
#define MONO_ARCH_FRAME_ALIGNMENT 16
#else
#define MONO_ARCH_FRAME_ALIGNMENT 8
#endif
/* FIXME: align to 16 bytes instead of 32 bytes (we align to 32 bytes to get
* reproducible results for benchmarks) */
#define MONO_ARCH_CODE_ALIGNMENT 32
/* This needs to hold both a 32 bit int and a 64 bit double */
#define mono_unwind_reg_t guint64
/* Argument marshallings for calls between gsharedvt and normal code */
typedef enum {
GSHAREDVT_ARG_NONE = 0,
GSHAREDVT_ARG_BYVAL_TO_BYREF = 1,
GSHAREDVT_ARG_BYREF_TO_BYVAL = 2,
GSHAREDVT_ARG_BYREF_TO_BYVAL_I1 = 3,
GSHAREDVT_ARG_BYREF_TO_BYVAL_I2 = 4,
GSHAREDVT_ARG_BYREF_TO_BYVAL_U1 = 5,
GSHAREDVT_ARG_BYREF_TO_BYVAL_U2 = 6
} GSharedVtArgMarshal;
/* Return value marshalling for calls between gsharedvt and normal code */
typedef enum {
GSHAREDVT_RET_NONE = 0,
GSHAREDVT_RET_IREG = 1,
GSHAREDVT_RET_IREGS = 2,
GSHAREDVT_RET_I1 = 3,
GSHAREDVT_RET_U1 = 4,
GSHAREDVT_RET_I2 = 5,
GSHAREDVT_RET_U2 = 6,
GSHAREDVT_RET_VFP_R4 = 7,
GSHAREDVT_RET_VFP_R8 = 8
} GSharedVtRetMarshal;
typedef struct {
/* Method address to call */
gpointer addr;
/* The trampoline reads this, so keep the size explicit */
int ret_marshal;
/* If ret_marshal != NONE, this is the reg of the vret arg, else -1 */
int vret_arg_reg;
/* The stack slot where the return value will be stored */
int vret_slot;
int stack_usage, map_count;
/* If not -1, then make a virtual call using this vtable offset */
int vcall_offset;
/* If 1, make an indirect call to the address in the rgctx reg */
int calli;
/* Whether this is an in or an out call */
int gsharedvt_in;
/* Whether this call uses fp registers */
int have_fregs;
CallInfo *caller_cinfo;
CallInfo *callee_cinfo;
/* Maps stack slots/registers in the caller to the stack slots/registers in the callee */
/* A negative value means a register, i.e. -1=r0, -2=r1 etc. */
int map [MONO_ZERO_LEN_ARRAY];
} GSharedVtCallInfo;
typedef enum {
RegTypeNone,
/* Passed/returned in an ireg */
RegTypeGeneral,
/* Passed/returned in a pair of iregs */
RegTypeIRegPair,
/* Passed on the stack */
RegTypeBase,
/* First word in r3, second word on the stack */
RegTypeBaseGen,
/* FP value passed in either an ireg or a vfp reg */
RegTypeFP,
/* Struct passed/returned in gregs */
RegTypeStructByVal,
RegTypeStructByAddr,
RegTypeStructByAddrOnStack,
/* gsharedvt argument passed by addr in greg */
RegTypeGSharedVtInReg,
/* gsharedvt argument passed by addr on stack */
RegTypeGSharedVtOnStack,
RegTypeHFA
} ArgStorage;
typedef struct {
gint32 offset;
guint16 vtsize; /* in param area */
/* RegTypeHFA */
int esize;
/* RegTypeHFA/RegTypeStructByVal */
int nregs;
guint8 reg;
ArgStorage storage;
/* RegTypeStructByVal */
gint32 struct_size, align;
guint8 size : 4; /* 1, 2, 4, 8, or regs used by RegTypeStructByVal */
guint8 is_signed : 1;
} ArgInfo;
struct CallInfo {
int nargs;
guint32 stack_usage;
/* The index of the vret arg in the argument list for RegTypeStructByAddr */
int vret_arg_index;
ArgInfo ret;
ArgInfo sig_cookie;
ArgInfo args [1];
};
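/* Argument registers defined by the calling convention: r0-r3, plus d0-d7 with the hard-float ABI */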
#define PARAM_REGS 4
#define FP_PARAM_REGS 8
typedef struct {
/* General registers */
host_mgreg_t gregs [PARAM_REGS];
/* Floating registers */
float fregs [FP_PARAM_REGS * 2];
/* Stack usage, used for passing params on stack */
guint32 stack_size;
guint8 *stack;
} CallContext;
/* Structure used by the sequence points in AOTed code */
struct SeqPointInfo {
gpointer ss_trigger_page;
gpointer bp_trigger_page;
gpointer ss_tramp_addr;
guint8* bp_addrs [MONO_ZERO_LEN_ARRAY];
};
typedef struct {
double fpregs [FP_PARAM_REGS];
host_mgreg_t res, res2;
guint8 *ret;
guint32 has_fpregs;
guint32 n_stackargs;
/* This should come last as the structure is dynamically extended */
host_mgreg_t regs [PARAM_REGS];
} DynCallArgs;
void arm_patch (guchar *code, const guchar *target);
guint8* mono_arm_emit_load_imm (guint8 *code, int dreg, guint32 val);
int mono_arm_is_rotated_imm8 (guint32 val, gint *rot_amount);
void
mono_arm_throw_exception_by_token (guint32 type_token, host_mgreg_t pc, host_mgreg_t sp, host_mgreg_t *int_regs, gdouble *fp_regs);
gpointer
mono_arm_start_gsharedvt_call (GSharedVtCallInfo *info, gpointer *caller, gpointer *callee, gpointer mrgctx_reg, double *caller_fregs, double *callee_fregs);
typedef enum {
MONO_ARM_FPU_NONE = 0,
MONO_ARM_FPU_VFP = 1,
MONO_ARM_FPU_VFP_HARD = 2
} MonoArmFPU;
/* keep the size of the structure a multiple of 8 */
struct MonoLMF {
/*
* If the second lowest bit is set to 1, then this is a MonoLMFExt structure, and
* the other fields are not valid.
*/
gpointer previous_lmf;
gpointer lmf_addr;
/* This is only set in trampoline LMF frames */
MonoMethod *method;
host_mgreg_t sp;
host_mgreg_t ip;
host_mgreg_t fp;
/* Currently only used in trampolines on armhf to hold d0-d15. We don't really
* need to put d0-d7 in the LMF, but it simplifies the trampoline code.
*/
double fregs [16];
/* all but sp and pc: matches the PUSH instruction layout in the trampolines
* 0-4 should be considered undefined (except in the magic tramp)
* sp is saved at IP.
*/
host_mgreg_t iregs [14];
};
typedef struct MonoCompileArch {
MonoInst *seq_point_info_var;
MonoInst *ss_trigger_page_var;
MonoInst *seq_point_ss_method_var;
MonoInst *seq_point_bp_method_var;
MonoInst *vret_addr_loc;
gboolean omit_fp;
gboolean omit_fp_computed;
CallInfo *cinfo;
MonoInst *vfp_scratch_slots [2];
int atomic_tmp_offset;
guint8 *thunks;
int thunks_size;
} MonoCompileArch;
#define MONO_ARCH_EMULATE_FCONV_TO_U4 1
#define MONO_ARCH_EMULATE_FCONV_TO_I8 1
#define MONO_ARCH_EMULATE_FCONV_TO_U8 1
#define MONO_ARCH_EMULATE_LCONV_TO_R8 1
#define MONO_ARCH_EMULATE_LCONV_TO_R4 1
#define MONO_ARCH_EMULATE_LCONV_TO_R8_UN 1
#define MONO_ARCH_EMULATE_FREM 1
#define MONO_ARCH_EMULATE_DIV 1
#define MONO_ARCH_EMULATE_CONV_R8_UN 1
#define MONO_ARCH_EMULATE_MUL_OVF 1
#define ARM_FIRST_ARG_REG 0
#define ARM_LAST_ARG_REG 3
#define MONO_ARCH_USE_SIGACTION 1
#if defined(HOST_WATCHOS)
#undef MONO_ARCH_USE_SIGACTION
#endif
#define MONO_ARCH_NEED_DIV_CHECK 1
#define MONO_ARCH_HAVE_GENERALIZED_IMT_TRAMPOLINE 1
#define MONO_ARCH_HAVE_FULL_AOT_TRAMPOLINES 1
#define MONO_ARCH_HAVE_DECOMPOSE_LONG_OPTS 1
#define MONO_ARCH_INTERPRETER_SUPPORTED 1
#define MONO_ARCH_AOT_SUPPORTED 1
#define MONO_ARCH_LLVM_SUPPORTED 1
#define MONO_ARCH_GSHARED_SUPPORTED 1
#define MONO_ARCH_DYN_CALL_SUPPORTED 1
#define MONO_ARCH_DYN_CALL_PARAM_AREA 0
#define MONO_ARCH_HAVE_OP_TAILCALL_MEMBASE 1
#define MONO_ARCH_HAVE_OP_TAILCALL_REG 1
#if !(defined(TARGET_ANDROID) && defined(MONO_CROSS_COMPILE))
#define MONO_ARCH_SOFT_DEBUG_SUPPORTED 1
#endif
#define MONO_ARCH_HAVE_EXCEPTIONS_INIT 1
#define MONO_ARCH_HAVE_GET_TRAMPOLINES 1
#define MONO_ARCH_HAVE_CONTEXT_SET_INT_REG 1
#define MONO_ARCH_GC_MAPS_SUPPORTED 1
#define MONO_ARCH_HAVE_SETUP_ASYNC_CALLBACK 1
#define MONO_ARCH_HAVE_SETUP_RESUME_FROM_SIGNAL_HANDLER_CTX 1
#define MONO_ARCH_GSHAREDVT_SUPPORTED 1
#define MONO_ARCH_HAVE_GENERAL_RGCTX_LAZY_FETCH_TRAMPOLINE 1
#define MONO_ARCH_HAVE_OPCODE_NEEDS_EMULATION 1
#define MONO_ARCH_HAVE_OBJC_GET_SELECTOR 1
#define MONO_ARCH_HAVE_SDB_TRAMPOLINES 1
#define MONO_ARCH_HAVE_OP_GENERIC_CLASS_INIT 1
#define MONO_ARCH_FLOAT32_SUPPORTED 1
#define MONO_ARCH_LLVM_TARGET_LAYOUT "e-p:32:32-n32-S64"
#define MONO_ARCH_HAVE_INTERP_ENTRY_TRAMPOLINE 1
#define MONO_ARCH_HAVE_FTNPTR_ARG_TRAMPOLINE 1
#define MONO_ARCH_HAVE_INTERP_PINVOKE_TRAMP 1
#define MONO_ARCH_HAVE_INTERP_NATIVE_TO_MANAGED 1
#if defined(TARGET_WATCHOS) || (defined(__linux__) && !defined(TARGET_ANDROID))
#define MONO_ARCH_DISABLE_HW_TRAPS 1
#define MONO_ARCH_HAVE_UNWIND_BACKTRACE 1
#endif
/* ARM doesn't have too many registers, so we have to use a callee saved one */
#define MONO_ARCH_RGCTX_REG ARMREG_V5
#define MONO_ARCH_IMT_REG MONO_ARCH_RGCTX_REG
/* First argument reg */
#define MONO_ARCH_VTABLE_REG ARMREG_R0
// Does the ABI have a volatile non-parameter register, so tailcall
// can pass context to generics or interfaces?
#define MONO_ARCH_HAVE_VOLATILE_NON_PARAM_REGISTER 0
#define MONO_CONTEXT_SET_LLVM_EXC_REG(ctx, exc) do { (ctx)->regs [0] = (gsize)exc; } while (0)
#if defined(HOST_WIN32)
#define __builtin_extract_return_addr(x) x
#define __builtin_return_address(x) _ReturnAddress()
#define __builtin_frame_address(x) _AddressOfReturnAddress()
#endif
#define MONO_INIT_CONTEXT_FROM_FUNC(ctx,func) do { \
MONO_CONTEXT_SET_BP ((ctx), __builtin_frame_address (0)); \
MONO_CONTEXT_SET_SP ((ctx), __builtin_frame_address (0)); \
MONO_CONTEXT_SET_IP ((ctx), (func)); \
} while (0)
#define MONO_ARCH_INIT_TOP_LMF_ENTRY(lmf)
#if defined(TARGET_TVOS) || defined(TARGET_WATCHOS)
#define MONO_ARCH_EXPLICIT_NULL_CHECKS 1
#endif
void
mono_arm_throw_exception (MonoObject *exc, host_mgreg_t pc, host_mgreg_t sp, host_mgreg_t *int_regs, gdouble *fp_regs, gboolean preserve_ips);
void
mono_arm_throw_exception_by_token (guint32 type_token, host_mgreg_t pc, host_mgreg_t sp, host_mgreg_t *int_regs, gdouble *fp_regs);
void
mono_arm_resume_unwind (guint32 dummy1, host_mgreg_t pc, host_mgreg_t sp, host_mgreg_t *int_regs, gdouble *fp_regs);
gboolean
mono_arm_thumb_supported (void);
gboolean
mono_arm_eabi_supported (void);
int
mono_arm_i8_align (void);
GSList*
mono_arm_get_exception_trampolines (gboolean aot);
guint8*
mono_arm_get_thumb_plt_entry (guint8 *code);
guint8*
mono_arm_patchable_b (guint8 *code, int cond);
guint8*
mono_arm_patchable_bl (guint8 *code, int cond);
gboolean
mono_arm_is_hard_float (void);
void
mono_arm_unaligned_stack (MonoMethod *method);
/* MonoJumpInfo **ji */
guint8*
mono_arm_emit_aotconst (gpointer ji, guint8 *code, guint8 *buf, int dreg, int patch_type, gconstpointer data);
CallInfo*
mono_arch_get_call_info (MonoMemPool *mp, MonoMethodSignature *sig);
#endif /* __MONO_MINI_ARM_H__ */
| 1 |
dotnet/runtime | 66,006 | Revert "[mono][jit] Remove support for -O=-float32, i.e. treating r4 values a…" | Reverts this as x86 still uses it.
| vargaz | 2022-03-01T15:15:53Z | 2022-03-01T20:09:47Z | af71404d9a3b38e63408af47cc847ac1a1fcf4e1 | 2dd232a53c38ac874b15fe504df275b660988294 | Revert "[mono][jit] Remove support for -O=-float32, i.e. treating r4 values a…". Reverts this as x86 still uses it.
| ./src/mono/mono/mini/mini-arm64.c | /**
* \file
* ARM64 backend for the Mono code generator
*
* Copyright 2013 Xamarin, Inc (http://www.xamarin.com)
*
* Based on mini-arm.c:
*
* Authors:
* Paolo Molaro ([email protected])
* Dietmar Maurer ([email protected])
*
* (C) 2003 Ximian, Inc.
* Copyright 2003-2011 Novell, Inc (http://www.novell.com)
* Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
* Licensed under the MIT license. See LICENSE file in the project root for full license information.
*/
#include "mini.h"
#include "cpu-arm64.h"
#include "ir-emit.h"
#include "aot-runtime.h"
#include "mini-runtime.h"
#include <mono/arch/arm64/arm64-codegen.h>
#include <mono/utils/mono-mmap.h>
#include <mono/utils/mono-memory-model.h>
#include <mono/metadata/abi-details.h>
#include <mono/metadata/tokentype.h>
#include "interp/interp.h"
/*
* Documentation:
*
* - ARM(R) Architecture Reference Manual, ARMv8, for ARMv8-A architecture profile (DDI0487A_a_armv8_arm.pdf)
* - Procedure Call Standard for the ARM 64-bit Architecture (AArch64) (IHI0055B_aapcs64.pdf)
* - ELF for the ARM 64-bit Architecture (IHI0056B_aaelf64.pdf)
*
* Register usage:
* - ip0/ip1/lr are used as temporary registers
* - r27 is used as the rgctx/imt register
* - r28 is used to access arguments passed on the stack
 * - d16/d17 are used as fp temporary registers
*/
#define FP_TEMP_REG ARMREG_D16
#define FP_TEMP_REG2 ARMREG_D17
#define THUNK_SIZE (4 * 4)
/* The single step trampoline */
static gpointer ss_trampoline;
/* The breakpoint trampoline */
static gpointer bp_trampoline;
static gboolean ios_abi;
static gboolean enable_ptrauth;
#if defined(HOST_WIN32)
#define WARN_UNUSED_RESULT _Check_return_
#else
#define WARN_UNUSED_RESULT __attribute__ ((__warn_unused_result__))
#endif
static WARN_UNUSED_RESULT guint8* emit_load_regset (guint8 *code, guint64 regs, int basereg, int offset);
static guint8* emit_brx (guint8 *code, int reg);
static guint8* emit_blrx (guint8 *code, int reg);
const char*
mono_arch_regname (int reg)
{
static const char * rnames[] = {
"r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9",
"r10", "r11", "r12", "r13", "r14", "r15", "r16", "r17", "r18", "r19",
"r20", "r21", "r22", "r23", "r24", "r25", "r26", "r27", "r28", "fp",
"lr", "sp"
};
if (reg >= 0 && reg < 32)
return rnames [reg];
return "unknown";
}
const char*
mono_arch_fregname (int reg)
{
static const char * rnames[] = {
"d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7", "d8", "d9",
"d10", "d11", "d12", "d13", "d14", "d15", "d16", "d17", "d18", "d19",
"d20", "d21", "d22", "d23", "d24", "d25", "d26", "d27", "d28", "d29",
"d30", "d31"
};
if (reg >= 0 && reg < 32)
return rnames [reg];
return "unknown fp";
}
const char *
mono_arch_xregname (int reg)
{
static const char * rnames[] = {
"v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9",
"v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19",
"v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29",
"v30", "v31"
};
if (reg >= 0 && reg < 32)
return rnames [reg];
return "unknown";
}
int
mono_arch_get_argument_info (MonoMethodSignature *csig, int param_count, MonoJitArgumentInfo *arg_info)
{
NOT_IMPLEMENTED;
return 0;
}
#define MAX_ARCH_DELEGATE_PARAMS 7
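/* For the no-target case the invoke impl below slides every argument down by
 * one register, so at most 7 parameters (plus the delegate itself) fit in the
 * eight r0-r7 argument registers. */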
static gpointer
get_delegate_invoke_impl (gboolean has_target, int param_count, guint32 *code_size)
{
guint8 *code, *start;
MINI_BEGIN_CODEGEN ();
if (has_target) {
start = code = mono_global_codeman_reserve (12);
/* Replace the this argument with the target */
arm_ldrx (code, ARMREG_IP0, ARMREG_R0, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr));
arm_ldrx (code, ARMREG_R0, ARMREG_R0, MONO_STRUCT_OFFSET (MonoDelegate, target));
code = mono_arm_emit_brx (code, ARMREG_IP0);
g_assert ((code - start) <= 12);
} else {
int size, i;
size = 8 + param_count * 4;
start = code = mono_global_codeman_reserve (size);
arm_ldrx (code, ARMREG_IP0, ARMREG_R0, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr));
/* slide down the arguments */
for (i = 0; i < param_count; ++i)
arm_movx (code, i, i + 1);
code = mono_arm_emit_brx (code, ARMREG_IP0);
g_assert ((code - start) <= size);
}
MINI_END_CODEGEN (start, code - start, MONO_PROFILER_CODE_BUFFER_DELEGATE_INVOKE, NULL);
if (code_size)
*code_size = code - start;
return MINI_ADDR_TO_FTNPTR (start);
}
/*
* mono_arch_get_delegate_invoke_impls:
*
* Return a list of MonoAotTrampInfo structures for the delegate invoke impl
* trampolines.
*/
GSList*
mono_arch_get_delegate_invoke_impls (void)
{
GSList *res = NULL;
guint8 *code;
guint32 code_len;
int i;
char *tramp_name;
code = (guint8*)get_delegate_invoke_impl (TRUE, 0, &code_len);
res = g_slist_prepend (res, mono_tramp_info_create ("delegate_invoke_impl_has_target", code, code_len, NULL, NULL));
for (i = 0; i <= MAX_ARCH_DELEGATE_PARAMS; ++i) {
code = (guint8*)get_delegate_invoke_impl (FALSE, i, &code_len);
tramp_name = g_strdup_printf ("delegate_invoke_impl_target_%d", i);
res = g_slist_prepend (res, mono_tramp_info_create (tramp_name, code, code_len, NULL, NULL));
g_free (tramp_name);
}
return res;
}
gpointer
mono_arch_get_delegate_invoke_impl (MonoMethodSignature *sig, gboolean has_target)
{
guint8 *code, *start;
/*
* vtypes are returned in registers, or using the dedicated r8 register, so
* they can be supported by delegate invokes.
*/
if (has_target) {
static guint8* cached = NULL;
if (cached)
return cached;
if (mono_ee_features.use_aot_trampolines)
start = (guint8*)mono_aot_get_trampoline ("delegate_invoke_impl_has_target");
else
start = (guint8*)get_delegate_invoke_impl (TRUE, 0, NULL);
mono_memory_barrier ();
cached = start;
return cached;
} else {
static guint8* cache [MAX_ARCH_DELEGATE_PARAMS + 1] = {NULL};
int i;
if (sig->param_count > MAX_ARCH_DELEGATE_PARAMS)
return NULL;
for (i = 0; i < sig->param_count; ++i)
if (!mono_is_regsize_var (sig->params [i]))
return NULL;
code = cache [sig->param_count];
if (code)
return code;
if (mono_ee_features.use_aot_trampolines) {
char *name = g_strdup_printf ("delegate_invoke_impl_target_%d", sig->param_count);
start = (guint8*)mono_aot_get_trampoline (name);
g_free (name);
} else {
start = (guint8*)get_delegate_invoke_impl (FALSE, sig->param_count, NULL);
}
mono_memory_barrier ();
cache [sig->param_count] = start;
return start;
}
return NULL;
}
gpointer
mono_arch_get_delegate_virtual_invoke_impl (MonoMethodSignature *sig, MonoMethod *method, int offset, gboolean load_imt_reg)
{
return NULL;
}
gpointer
mono_arch_get_this_arg_from_call (host_mgreg_t *regs, guint8 *code)
{
return (gpointer)regs [ARMREG_R0];
}
void
mono_arch_cpu_init (void)
{
}
void
mono_arch_init (void)
{
#if defined(TARGET_IOS) || defined(TARGET_WATCHOS) || defined(TARGET_OSX)
ios_abi = TRUE;
#endif
#ifdef MONO_ARCH_ENABLE_PTRAUTH
enable_ptrauth = TRUE;
#endif
if (!mono_aot_only)
bp_trampoline = mini_get_breakpoint_trampoline ();
mono_arm_gsharedvt_init ();
}
void
mono_arch_cleanup (void)
{
}
guint32
mono_arch_cpu_optimizations (guint32 *exclude_mask)
{
*exclude_mask = 0;
return 0;
}
void
mono_arch_register_lowlevel_calls (void)
{
}
void
mono_arch_finish_init (void)
{
}
/* The maximum length is 2 instructions */
static guint8*
emit_imm (guint8 *code, int dreg, int imm)
{
// FIXME: Optimize this
if (imm < 0) {
gint64 limm = imm;
arm_movnx (code, dreg, (~limm) & 0xffff, 0);
arm_movkx (code, dreg, (limm >> 16) & 0xffff, 16);
} else {
arm_movzx (code, dreg, imm & 0xffff, 0);
if (imm >> 16)
arm_movkx (code, dreg, (imm >> 16) & 0xffff, 16);
}
return code;
}
/* The maximum length is 4 instructions */
static guint8*
emit_imm64 (guint8 *code, int dreg, guint64 imm)
{
// FIXME: Optimize this
arm_movzx (code, dreg, imm & 0xffff, 0);
if ((imm >> 16) & 0xffff)
arm_movkx (code, dreg, (imm >> 16) & 0xffff, 16);
if ((imm >> 32) & 0xffff)
arm_movkx (code, dreg, (imm >> 32) & 0xffff, 32);
if ((imm >> 48) & 0xffff)
arm_movkx (code, dreg, (imm >> 48) & 0xffff, 48);
return code;
}
guint8*
mono_arm_emit_imm64 (guint8 *code, int dreg, gint64 imm)
{
return emit_imm64 (code, dreg, imm);
}
/*
 * emit_imm64_template:
 *
 * Emit a patchable code sequence for constructing a 64 bit immediate.
 * The four movz/movk instructions are rewritten in place by arm_patch_full ()
 * for MONO_R_ARM64_IMM relocations.
 */
static guint8*
emit_imm64_template (guint8 *code, int dreg)
{
arm_movzx (code, dreg, 0, 0);
arm_movkx (code, dreg, 0, 16);
arm_movkx (code, dreg, 0, 32);
arm_movkx (code, dreg, 0, 48);
return code;
}
static WARN_UNUSED_RESULT guint8*
emit_addw_imm (guint8 *code, int dreg, int sreg, int imm)
{
if (!arm_is_arith_imm (imm)) {
code = emit_imm (code, ARMREG_LR, imm);
arm_addw (code, dreg, sreg, ARMREG_LR);
} else {
arm_addw_imm (code, dreg, sreg, imm);
}
return code;
}
static WARN_UNUSED_RESULT guint8*
emit_addx_imm (guint8 *code, int dreg, int sreg, int imm)
{
if (!arm_is_arith_imm (imm)) {
code = emit_imm (code, ARMREG_LR, imm);
arm_addx (code, dreg, sreg, ARMREG_LR);
} else {
arm_addx_imm (code, dreg, sreg, imm);
}
return code;
}
static WARN_UNUSED_RESULT guint8*
emit_subw_imm (guint8 *code, int dreg, int sreg, int imm)
{
if (!arm_is_arith_imm (imm)) {
code = emit_imm (code, ARMREG_LR, imm);
arm_subw (code, dreg, sreg, ARMREG_LR);
} else {
arm_subw_imm (code, dreg, sreg, imm);
}
return code;
}
static WARN_UNUSED_RESULT guint8*
emit_subx_imm (guint8 *code, int dreg, int sreg, int imm)
{
if (!arm_is_arith_imm (imm)) {
code = emit_imm (code, ARMREG_LR, imm);
arm_subx (code, dreg, sreg, ARMREG_LR);
} else {
arm_subx_imm (code, dreg, sreg, imm);
}
return code;
}
/* Emit sp+=imm. Clobbers ip0/ip1 */
static WARN_UNUSED_RESULT guint8*
emit_addx_sp_imm (guint8 *code, int imm)
{
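/* The plain register-register add used below cannot encode SP directly, so
 * the stack pointer is shuttled through IP1 via mov (to/from SP) forms. */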
code = emit_imm (code, ARMREG_IP0, imm);
arm_movspx (code, ARMREG_IP1, ARMREG_SP);
arm_addx (code, ARMREG_IP1, ARMREG_IP1, ARMREG_IP0);
arm_movspx (code, ARMREG_SP, ARMREG_IP1);
return code;
}
/* Emit sp-=imm. Clobbers ip0/ip1 */
static WARN_UNUSED_RESULT guint8*
emit_subx_sp_imm (guint8 *code, int imm)
{
code = emit_imm (code, ARMREG_IP0, imm);
arm_movspx (code, ARMREG_IP1, ARMREG_SP);
arm_subx (code, ARMREG_IP1, ARMREG_IP1, ARMREG_IP0);
arm_movspx (code, ARMREG_SP, ARMREG_IP1);
return code;
}
static WARN_UNUSED_RESULT guint8*
emit_andw_imm (guint8 *code, int dreg, int sreg, int imm)
{
// FIXME:
code = emit_imm (code, ARMREG_LR, imm);
arm_andw (code, dreg, sreg, ARMREG_LR);
return code;
}
static WARN_UNUSED_RESULT guint8*
emit_andx_imm (guint8 *code, int dreg, int sreg, int imm)
{
// FIXME:
code = emit_imm (code, ARMREG_LR, imm);
arm_andx (code, dreg, sreg, ARMREG_LR);
return code;
}
static WARN_UNUSED_RESULT guint8*
emit_orrw_imm (guint8 *code, int dreg, int sreg, int imm)
{
// FIXME:
code = emit_imm (code, ARMREG_LR, imm);
arm_orrw (code, dreg, sreg, ARMREG_LR);
return code;
}
static WARN_UNUSED_RESULT guint8*
emit_orrx_imm (guint8 *code, int dreg, int sreg, int imm)
{
// FIXME:
code = emit_imm (code, ARMREG_LR, imm);
arm_orrx (code, dreg, sreg, ARMREG_LR);
return code;
}
static WARN_UNUSED_RESULT guint8*
emit_eorw_imm (guint8 *code, int dreg, int sreg, int imm)
{
// FIXME:
code = emit_imm (code, ARMREG_LR, imm);
arm_eorw (code, dreg, sreg, ARMREG_LR);
return code;
}
static WARN_UNUSED_RESULT guint8*
emit_eorx_imm (guint8 *code, int dreg, int sreg, int imm)
{
// FIXME:
code = emit_imm (code, ARMREG_LR, imm);
arm_eorx (code, dreg, sreg, ARMREG_LR);
return code;
}
static WARN_UNUSED_RESULT guint8*
emit_cmpw_imm (guint8 *code, int sreg, int imm)
{
if (imm == 0) {
arm_cmpw (code, sreg, ARMREG_RZR);
} else {
// FIXME:
code = emit_imm (code, ARMREG_LR, imm);
arm_cmpw (code, sreg, ARMREG_LR);
}
return code;
}
static WARN_UNUSED_RESULT guint8*
emit_cmpx_imm (guint8 *code, int sreg, int imm)
{
if (imm == 0) {
arm_cmpx (code, sreg, ARMREG_RZR);
} else {
// FIXME:
code = emit_imm (code, ARMREG_LR, imm);
arm_cmpx (code, sreg, ARMREG_LR);
}
return code;
}
static WARN_UNUSED_RESULT guint8*
emit_strb (guint8 *code, int rt, int rn, int imm)
{
if (arm_is_strb_imm (imm)) {
arm_strb (code, rt, rn, imm);
} else {
g_assert (rt != ARMREG_IP0);
g_assert (rn != ARMREG_IP0);
code = emit_imm (code, ARMREG_IP0, imm);
arm_strb_reg (code, rt, rn, ARMREG_IP0);
}
return code;
}
static WARN_UNUSED_RESULT guint8*
emit_strh (guint8 *code, int rt, int rn, int imm)
{
if (arm_is_strh_imm (imm)) {
arm_strh (code, rt, rn, imm);
} else {
g_assert (rt != ARMREG_IP0);
g_assert (rn != ARMREG_IP0);
code = emit_imm (code, ARMREG_IP0, imm);
arm_strh_reg (code, rt, rn, ARMREG_IP0);
}
return code;
}
static WARN_UNUSED_RESULT guint8*
emit_strw (guint8 *code, int rt, int rn, int imm)
{
if (arm_is_strw_imm (imm)) {
arm_strw (code, rt, rn, imm);
} else {
g_assert (rt != ARMREG_IP0);
g_assert (rn != ARMREG_IP0);
code = emit_imm (code, ARMREG_IP0, imm);
arm_strw_reg (code, rt, rn, ARMREG_IP0);
}
return code;
}
static WARN_UNUSED_RESULT guint8*
emit_strfpw (guint8 *code, int rt, int rn, int imm)
{
if (arm_is_strw_imm (imm)) {
arm_strfpw (code, rt, rn, imm);
} else {
g_assert (rn != ARMREG_IP0);
code = emit_imm (code, ARMREG_IP0, imm);
arm_addx (code, ARMREG_IP0, rn, ARMREG_IP0);
arm_strfpw (code, rt, ARMREG_IP0, 0);
}
return code;
}
static WARN_UNUSED_RESULT guint8*
emit_strfpx (guint8 *code, int rt, int rn, int imm)
{
if (arm_is_strx_imm (imm)) {
arm_strfpx (code, rt, rn, imm);
} else {
g_assert (rn != ARMREG_IP0);
code = emit_imm (code, ARMREG_IP0, imm);
arm_addx (code, ARMREG_IP0, rn, ARMREG_IP0);
arm_strfpx (code, rt, ARMREG_IP0, 0);
}
return code;
}
static WARN_UNUSED_RESULT guint8*
emit_strx (guint8 *code, int rt, int rn, int imm)
{
if (arm_is_strx_imm (imm)) {
arm_strx (code, rt, rn, imm);
} else {
g_assert (rt != ARMREG_IP0);
g_assert (rn != ARMREG_IP0);
code = emit_imm (code, ARMREG_IP0, imm);
arm_strx_reg (code, rt, rn, ARMREG_IP0);
}
return code;
}
static WARN_UNUSED_RESULT guint8*
emit_ldrb (guint8 *code, int rt, int rn, int imm)
{
if (arm_is_pimm12_scaled (imm, 1)) {
arm_ldrb (code, rt, rn, imm);
} else {
g_assert (rt != ARMREG_IP0);
g_assert (rn != ARMREG_IP0);
code = emit_imm (code, ARMREG_IP0, imm);
arm_ldrb_reg (code, rt, rn, ARMREG_IP0);
}
return code;
}
static WARN_UNUSED_RESULT guint8*
emit_ldrsbx (guint8 *code, int rt, int rn, int imm)
{
if (arm_is_pimm12_scaled (imm, 1)) {
arm_ldrsbx (code, rt, rn, imm);
} else {
g_assert (rt != ARMREG_IP0);
g_assert (rn != ARMREG_IP0);
code = emit_imm (code, ARMREG_IP0, imm);
arm_ldrsbx_reg (code, rt, rn, ARMREG_IP0);
}
return code;
}
static WARN_UNUSED_RESULT guint8*
emit_ldrh (guint8 *code, int rt, int rn, int imm)
{
if (arm_is_pimm12_scaled (imm, 2)) {
arm_ldrh (code, rt, rn, imm);
} else {
g_assert (rt != ARMREG_IP0);
g_assert (rn != ARMREG_IP0);
code = emit_imm (code, ARMREG_IP0, imm);
arm_ldrh_reg (code, rt, rn, ARMREG_IP0);
}
return code;
}
static WARN_UNUSED_RESULT guint8*
emit_ldrshx (guint8 *code, int rt, int rn, int imm)
{
if (arm_is_pimm12_scaled (imm, 2)) {
arm_ldrshx (code, rt, rn, imm);
} else {
g_assert (rt != ARMREG_IP0);
g_assert (rn != ARMREG_IP0);
code = emit_imm (code, ARMREG_IP0, imm);
arm_ldrshx_reg (code, rt, rn, ARMREG_IP0);
}
return code;
}
static WARN_UNUSED_RESULT guint8*
emit_ldrswx (guint8 *code, int rt, int rn, int imm)
{
if (arm_is_pimm12_scaled (imm, 4)) {
arm_ldrswx (code, rt, rn, imm);
} else {
g_assert (rt != ARMREG_IP0);
g_assert (rn != ARMREG_IP0);
code = emit_imm (code, ARMREG_IP0, imm);
arm_ldrswx_reg (code, rt, rn, ARMREG_IP0);
}
return code;
}
static WARN_UNUSED_RESULT guint8*
emit_ldrw (guint8 *code, int rt, int rn, int imm)
{
if (arm_is_pimm12_scaled (imm, 4)) {
arm_ldrw (code, rt, rn, imm);
} else {
g_assert (rn != ARMREG_IP0);
code = emit_imm (code, ARMREG_IP0, imm);
arm_ldrw_reg (code, rt, rn, ARMREG_IP0);
}
return code;
}
static WARN_UNUSED_RESULT guint8*
emit_ldrx (guint8 *code, int rt, int rn, int imm)
{
if (arm_is_pimm12_scaled (imm, 8)) {
arm_ldrx (code, rt, rn, imm);
} else {
g_assert (rn != ARMREG_IP0);
code = emit_imm (code, ARMREG_IP0, imm);
arm_ldrx_reg (code, rt, rn, ARMREG_IP0);
}
return code;
}
static WARN_UNUSED_RESULT guint8*
emit_ldrfpw (guint8 *code, int rt, int rn, int imm)
{
if (arm_is_pimm12_scaled (imm, 4)) {
arm_ldrfpw (code, rt, rn, imm);
} else {
g_assert (rn != ARMREG_IP0);
code = emit_imm (code, ARMREG_IP0, imm);
arm_addx (code, ARMREG_IP0, rn, ARMREG_IP0);
arm_ldrfpw (code, rt, ARMREG_IP0, 0);
}
return code;
}
static WARN_UNUSED_RESULT guint8*
emit_ldrfpx (guint8 *code, int rt, int rn, int imm)
{
if (arm_is_pimm12_scaled (imm, 8)) {
arm_ldrfpx (code, rt, rn, imm);
} else {
g_assert (rn != ARMREG_IP0);
code = emit_imm (code, ARMREG_IP0, imm);
arm_addx (code, ARMREG_IP0, rn, ARMREG_IP0);
arm_ldrfpx (code, rt, ARMREG_IP0, 0);
}
return code;
}
guint8*
mono_arm_emit_ldrx (guint8 *code, int rt, int rn, int imm)
{
return emit_ldrx (code, rt, rn, imm);
}
static guint8*
emit_call (MonoCompile *cfg, guint8* code, MonoJumpInfoType patch_type, gconstpointer data)
{
/*
mono_add_patch_info_rel (cfg, code - cfg->native_code, patch_type, data, MONO_R_ARM64_IMM);
code = emit_imm64_template (code, ARMREG_LR);
arm_blrx (code, ARMREG_LR);
*/
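/* Reserve a thunk for this call site in case the target turns out to be
 * outside the +/-128MB range of bl when it is patched. */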
mono_add_patch_info_rel (cfg, code - cfg->native_code, patch_type, data, MONO_R_ARM64_BL);
arm_bl (code, code);
cfg->thunk_area += THUNK_SIZE;
return code;
}
static guint8*
emit_aotconst_full (MonoCompile *cfg, MonoJumpInfo **ji, guint8 *code, guint8 *start, int dreg, guint32 patch_type, gconstpointer data)
{
if (cfg)
mono_add_patch_info (cfg, code - cfg->native_code, (MonoJumpInfoType)patch_type, data);
else
*ji = mono_patch_info_list_prepend (*ji, code - start, (MonoJumpInfoType)patch_type, data);
/* See arch_emit_got_access () in aot-compiler.c */
arm_ldrx_lit (code, dreg, 0);
arm_nop (code);
arm_nop (code);
return code;
}
static guint8*
emit_aotconst (MonoCompile *cfg, guint8 *code, int dreg, guint32 patch_type, gconstpointer data)
{
return emit_aotconst_full (cfg, NULL, code, NULL, dreg, patch_type, data);
}
/*
* mono_arm_emit_aotconst:
*
* Emit code to load an AOT constant into DREG. Usable from trampolines.
*/
guint8*
mono_arm_emit_aotconst (gpointer ji, guint8 *code, guint8 *code_start, int dreg, guint32 patch_type, gconstpointer data)
{
return emit_aotconst_full (NULL, (MonoJumpInfo**)ji, code, code_start, dreg, patch_type, data);
}
gboolean
mono_arch_have_fast_tls (void)
{
#ifdef TARGET_IOS
return FALSE;
#else
return TRUE;
#endif
}
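/* Load the TLS slot at TLS_OFFSET into DREG. TPIDR_EL0 holds the thread
 * pointer; small offsets can be encoded directly in the ldr immediate. */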
static guint8*
emit_tls_get (guint8 *code, int dreg, int tls_offset)
{
arm_mrs (code, dreg, ARM_MRS_REG_TPIDR_EL0);
if (tls_offset < 256) {
arm_ldrx (code, dreg, dreg, tls_offset);
} else {
code = emit_addx_imm (code, dreg, dreg, tls_offset);
arm_ldrx (code, dreg, dreg, 0);
}
return code;
}
static guint8*
emit_tls_set (guint8 *code, int sreg, int tls_offset)
{
int tmpreg = ARMREG_IP0;
g_assert (sreg != tmpreg);
arm_mrs (code, tmpreg, ARM_MRS_REG_TPIDR_EL0);
if (tls_offset < 256) {
arm_strx (code, sreg, tmpreg, tls_offset);
} else {
code = emit_addx_imm (code, tmpreg, tmpreg, tls_offset);
arm_strx (code, sreg, tmpreg, 0);
}
return code;
}
/*
* Emits
* - mov sp, fp
 * - ldp [fp, lr], [sp], !stack_offset
* Clobbers TEMP_REGS.
*/
WARN_UNUSED_RESULT guint8*
mono_arm_emit_destroy_frame (guint8 *code, int stack_offset, guint64 temp_regs)
{
// At least one of these registers must be available, or both.
gboolean const temp0 = (temp_regs & (1 << ARMREG_IP0)) != 0;
gboolean const temp1 = (temp_regs & (1 << ARMREG_IP1)) != 0;
g_assert (temp0 || temp1);
int const temp = temp0 ? ARMREG_IP0 : ARMREG_IP1;
arm_movspx (code, ARMREG_SP, ARMREG_FP);
if (arm_is_ldpx_imm (stack_offset)) {
arm_ldpx_post (code, ARMREG_FP, ARMREG_LR, ARMREG_SP, stack_offset);
} else {
arm_ldpx (code, ARMREG_FP, ARMREG_LR, ARMREG_SP, 0);
/* sp += stack_offset */
if (temp0 && temp1) {
code = emit_addx_sp_imm (code, stack_offset);
} else {
int imm = stack_offset;
/* Can't use addx_sp_imm () since we can't clobber both ip0/ip1 */
arm_addx_imm (code, temp, ARMREG_SP, 0);
while (imm > 256) {
arm_addx_imm (code, temp, temp, 256);
imm -= 256;
}
arm_addx_imm (code, ARMREG_SP, temp, imm);
}
}
return code;
}
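/* True if DIFF fits into the signed 26 bit (x4 scaled) displacement of a
 * b/bl instruction, i.e. +/-128MB. */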
#define is_call_imm(diff) ((gint)(diff) >= -33554432 && (gint)(diff) <= 33554431)
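/* A thunk is THUNK_SIZE (16) bytes: ldr ip0, <literal>; br ip0; followed by
 * the 8 byte target address. */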
static guint8*
emit_thunk (guint8 *code, gconstpointer target)
{
guint8 *p = code;
arm_ldrx_lit (code, ARMREG_IP0, code + 8);
arm_brx (code, ARMREG_IP0);
*(guint64*)code = (guint64)target;
code += sizeof (guint64);
mono_arch_flush_icache (p, code - p);
return code;
}
static gpointer
create_thunk (MonoCompile *cfg, guchar *code, const guchar *target)
{
MonoJitInfo *ji;
MonoThunkJitInfo *info;
guint8 *thunks, *p;
int thunks_size;
guint8 *orig_target;
guint8 *target_thunk;
MonoJitMemoryManager* jit_mm;
if (cfg) {
/*
* This can be called multiple times during JITting,
* save the current position in cfg->arch to avoid
* doing a O(n^2) search.
*/
if (!cfg->arch.thunks) {
cfg->arch.thunks = cfg->thunks;
cfg->arch.thunks_size = cfg->thunk_area;
}
thunks = cfg->arch.thunks;
thunks_size = cfg->arch.thunks_size;
if (!thunks_size) {
g_print ("thunk failed %p->%p, thunk space=%d method %s", code, target, thunks_size, mono_method_full_name (cfg->method, TRUE));
g_assert_not_reached ();
}
g_assert (*(guint32*)thunks == 0);
emit_thunk (thunks, target);
cfg->arch.thunks += THUNK_SIZE;
cfg->arch.thunks_size -= THUNK_SIZE;
return thunks;
} else {
ji = mini_jit_info_table_find (code);
g_assert (ji);
info = mono_jit_info_get_thunk_info (ji);
g_assert (info);
thunks = (guint8*)ji->code_start + info->thunks_offset;
thunks_size = info->thunks_size;
orig_target = mono_arch_get_call_target (code + 4);
/* Arbitrary lock */
jit_mm = get_default_jit_mm ();
jit_mm_lock (jit_mm);
target_thunk = NULL;
if (orig_target >= thunks && orig_target < thunks + thunks_size) {
/* The call already points to a thunk, because of trampolines etc. */
target_thunk = orig_target;
} else {
for (p = thunks; p < thunks + thunks_size; p += THUNK_SIZE) {
if (((guint32*)p) [0] == 0) {
/* Free entry */
target_thunk = p;
break;
} else if (((guint64*)p) [1] == (guint64)target) {
/* Thunk already points to target */
target_thunk = p;
break;
}
}
}
//printf ("THUNK: %p %p %p\n", code, target, target_thunk);
if (!target_thunk) {
jit_mm_unlock (jit_mm);
g_print ("thunk failed %p->%p, thunk space=%d method %s", code, target, thunks_size, cfg ? mono_method_full_name (cfg->method, TRUE) : mono_method_full_name (jinfo_get_method (ji), TRUE));
g_assert_not_reached ();
}
emit_thunk (target_thunk, target);
jit_mm_unlock (jit_mm);
return target_thunk;
}
}
static void
arm_patch_full (MonoCompile *cfg, guint8 *code, guint8 *target, int relocation)
{
switch (relocation) {
case MONO_R_ARM64_B:
target = MINI_FTNPTR_TO_ADDR (target);
if (arm_is_bl_disp (code, target)) {
arm_b (code, target);
} else {
gpointer thunk;
thunk = create_thunk (cfg, code, target);
g_assert (arm_is_bl_disp (code, thunk));
arm_b (code, thunk);
}
break;
case MONO_R_ARM64_BCC: {
int cond;
cond = arm_get_bcc_cond (code);
arm_bcc (code, cond, target);
break;
}
case MONO_R_ARM64_CBZ:
arm_set_cbz_target (code, target);
break;
case MONO_R_ARM64_IMM: {
guint64 imm = (guint64)target;
int dreg;
/* emit_imm64_template () */
dreg = arm_get_movzx_rd (code);
arm_movzx (code, dreg, imm & 0xffff, 0);
arm_movkx (code, dreg, (imm >> 16) & 0xffff, 16);
arm_movkx (code, dreg, (imm >> 32) & 0xffff, 32);
arm_movkx (code, dreg, (imm >> 48) & 0xffff, 48);
break;
}
case MONO_R_ARM64_BL:
target = MINI_FTNPTR_TO_ADDR (target);
if (arm_is_bl_disp (code, target)) {
arm_bl (code, target);
} else {
gpointer thunk;
thunk = create_thunk (cfg, code, target);
g_assert (arm_is_bl_disp (code, thunk));
arm_bl (code, thunk);
}
break;
default:
g_assert_not_reached ();
}
}
static void
arm_patch_rel (guint8 *code, guint8 *target, int relocation)
{
arm_patch_full (NULL, code, target, relocation);
}
void
mono_arm_patch (guint8 *code, guint8 *target, int relocation)
{
arm_patch_rel (code, target, relocation);
}
void
mono_arch_patch_code_new (MonoCompile *cfg, guint8 *code, MonoJumpInfo *ji, gpointer target)
{
guint8 *ip;
ip = ji->ip.i + code;
switch (ji->type) {
case MONO_PATCH_INFO_METHOD_JUMP:
/* ji->relocation is not set by the caller */
arm_patch_full (cfg, ip, (guint8*)target, MONO_R_ARM64_B);
mono_arch_flush_icache (ip, 8);
break;
default:
arm_patch_full (cfg, ip, (guint8*)target, ji->relocation);
break;
case MONO_PATCH_INFO_NONE:
break;
}
}
void
mono_arch_flush_register_windows (void)
{
}
MonoMethod*
mono_arch_find_imt_method (host_mgreg_t *regs, guint8 *code)
{
return (MonoMethod*)regs [MONO_ARCH_RGCTX_REG];
}
MonoVTable*
mono_arch_find_static_call_vtable (host_mgreg_t *regs, guint8 *code)
{
return (MonoVTable*)regs [MONO_ARCH_RGCTX_REG];
}
GSList*
mono_arch_get_cie_program (void)
{
GSList *l = NULL;
mono_add_unwind_op_def_cfa (l, (guint8*)NULL, (guint8*)NULL, ARMREG_SP, 0);
return l;
}
host_mgreg_t
mono_arch_context_get_int_reg (MonoContext *ctx, int reg)
{
return ctx->regs [reg];
}
host_mgreg_t*
mono_arch_context_get_int_reg_address (MonoContext *ctx, int reg)
{
return &ctx->regs [reg];
}
void
mono_arch_context_set_int_reg (MonoContext *ctx, int reg, host_mgreg_t val)
{
ctx->regs [reg] = val;
}
/*
* mono_arch_set_target:
*
* Set the target architecture the JIT backend should generate code for, in the form
* of a GNU target triplet. Only used in AOT mode.
*/
void
mono_arch_set_target (char *mtriple)
{
if (strstr (mtriple, "darwin") || strstr (mtriple, "ios")) {
ios_abi = TRUE;
}
}
static void
add_general (CallInfo *cinfo, ArgInfo *ainfo, int size, gboolean sign)
{
if (cinfo->gr >= PARAM_REGS) {
ainfo->storage = ArgOnStack;
/*
* FIXME: The vararg argument handling code in ves_icall_System_ArgIterator_IntGetNextArg
* assumes every argument is allocated to a separate full size stack slot.
*/
if (ios_abi && !cinfo->vararg) {
/* Assume size == align */
} else {
/* Put arguments into 8 byte aligned stack slots */
size = 8;
sign = FALSE;
}
cinfo->stack_usage = ALIGN_TO (cinfo->stack_usage, size);
ainfo->offset = cinfo->stack_usage;
ainfo->slot_size = size;
ainfo->sign = sign;
cinfo->stack_usage += size;
} else {
ainfo->storage = ArgInIReg;
ainfo->reg = cinfo->gr;
cinfo->gr ++;
}
}
static void
add_fp (CallInfo *cinfo, ArgInfo *ainfo, gboolean single)
{
int size = single ? 4 : 8;
if (cinfo->fr >= FP_PARAM_REGS) {
ainfo->storage = single ? ArgOnStackR4 : ArgOnStackR8;
if (ios_abi) {
cinfo->stack_usage = ALIGN_TO (cinfo->stack_usage, size);
ainfo->offset = cinfo->stack_usage;
ainfo->slot_size = size;
cinfo->stack_usage += size;
} else {
ainfo->offset = cinfo->stack_usage;
ainfo->slot_size = 8;
/* Put arguments into 8 byte aligned stack slots */
cinfo->stack_usage += 8;
}
} else {
if (single)
ainfo->storage = ArgInFRegR4;
else
ainfo->storage = ArgInFReg;
ainfo->reg = cinfo->fr;
cinfo->fr ++;
}
}
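/*
 * is_hfa:
 *
 * Return whether T is a homogeneous floating-point aggregate in the AAPCS64
 * sense: 1-4 fields, possibly nested, which are all float or all double.
 * Such values are passed and returned in consecutive fp registers.
 */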
static gboolean
is_hfa (MonoType *t, int *out_nfields, int *out_esize, int *field_offsets)
{
MonoClass *klass;
gpointer iter;
MonoClassField *field;
MonoType *ftype, *prev_ftype = NULL;
int i, nfields = 0;
klass = mono_class_from_mono_type_internal (t);
iter = NULL;
while ((field = mono_class_get_fields_internal (klass, &iter))) {
if (field->type->attrs & FIELD_ATTRIBUTE_STATIC)
continue;
ftype = mono_field_get_type_internal (field);
ftype = mini_get_underlying_type (ftype);
if (MONO_TYPE_ISSTRUCT (ftype)) {
int nested_nfields, nested_esize;
int nested_field_offsets [16];
if (!is_hfa (ftype, &nested_nfields, &nested_esize, nested_field_offsets))
return FALSE;
if (nested_esize == 4)
ftype = m_class_get_byval_arg (mono_defaults.single_class);
else
ftype = m_class_get_byval_arg (mono_defaults.double_class);
if (prev_ftype && prev_ftype->type != ftype->type)
return FALSE;
prev_ftype = ftype;
for (i = 0; i < nested_nfields; ++i) {
if (nfields + i < 4)
field_offsets [nfields + i] = field->offset - MONO_ABI_SIZEOF (MonoObject) + nested_field_offsets [i];
}
nfields += nested_nfields;
} else {
if (!(!m_type_is_byref (ftype) && (ftype->type == MONO_TYPE_R4 || ftype->type == MONO_TYPE_R8)))
return FALSE;
if (prev_ftype && prev_ftype->type != ftype->type)
return FALSE;
prev_ftype = ftype;
if (nfields < 4)
field_offsets [nfields] = field->offset - MONO_ABI_SIZEOF (MonoObject);
nfields ++;
}
}
if (nfields == 0 || nfields > 4)
return FALSE;
*out_nfields = nfields;
*out_esize = prev_ftype->type == MONO_TYPE_R4 ? 4 : 8;
return TRUE;
}
static void
add_valuetype (CallInfo *cinfo, ArgInfo *ainfo, MonoType *t)
{
int i, size, align_size, nregs, nfields, esize;
int field_offsets [16];
guint32 align;
size = mini_type_stack_size_full (t, &align, cinfo->pinvoke);
align_size = ALIGN_TO (size, 8);
nregs = align_size / 8;
if (is_hfa (t, &nfields, &esize, field_offsets)) {
/*
* The struct might include nested float structs aligned at 8,
* so need to keep track of the offsets of the individual fields.
*/
if (cinfo->fr + nfields <= FP_PARAM_REGS) {
ainfo->storage = ArgHFA;
ainfo->reg = cinfo->fr;
ainfo->nregs = nfields;
ainfo->size = size;
ainfo->esize = esize;
for (i = 0; i < nfields; ++i)
ainfo->foffsets [i] = field_offsets [i];
cinfo->fr += ainfo->nregs;
} else {
ainfo->nfregs_to_skip = FP_PARAM_REGS > cinfo->fr ? FP_PARAM_REGS - cinfo->fr : 0;
cinfo->fr = FP_PARAM_REGS;
size = ALIGN_TO (size, 8);
ainfo->storage = ArgVtypeOnStack;
cinfo->stack_usage = ALIGN_TO (cinfo->stack_usage, align);
ainfo->offset = cinfo->stack_usage;
ainfo->size = size;
ainfo->hfa = TRUE;
ainfo->nregs = nfields;
ainfo->esize = esize;
cinfo->stack_usage += size;
}
return;
}
if (align_size > 16) {
ainfo->storage = ArgVtypeByRef;
ainfo->size = size;
return;
}
if (cinfo->gr + nregs > PARAM_REGS) {
size = ALIGN_TO (size, 8);
ainfo->storage = ArgVtypeOnStack;
cinfo->stack_usage = ALIGN_TO (cinfo->stack_usage, align);
ainfo->offset = cinfo->stack_usage;
ainfo->size = size;
cinfo->stack_usage += size;
cinfo->gr = PARAM_REGS;
} else {
ainfo->storage = ArgVtypeInIRegs;
ainfo->reg = cinfo->gr;
ainfo->nregs = nregs;
ainfo->size = size;
cinfo->gr += nregs;
}
}
static void
add_param (CallInfo *cinfo, ArgInfo *ainfo, MonoType *t)
{
MonoType *ptype;
ptype = mini_get_underlying_type (t);
switch (ptype->type) {
case MONO_TYPE_I1:
add_general (cinfo, ainfo, 1, TRUE);
break;
case MONO_TYPE_U1:
add_general (cinfo, ainfo, 1, FALSE);
break;
case MONO_TYPE_I2:
add_general (cinfo, ainfo, 2, TRUE);
break;
case MONO_TYPE_U2:
add_general (cinfo, ainfo, 2, FALSE);
break;
#ifdef MONO_ARCH_ILP32
case MONO_TYPE_I:
#endif
case MONO_TYPE_I4:
add_general (cinfo, ainfo, 4, TRUE);
break;
#ifdef MONO_ARCH_ILP32
case MONO_TYPE_U:
case MONO_TYPE_PTR:
case MONO_TYPE_FNPTR:
case MONO_TYPE_OBJECT:
#endif
case MONO_TYPE_U4:
add_general (cinfo, ainfo, 4, FALSE);
break;
#ifndef MONO_ARCH_ILP32
case MONO_TYPE_I:
case MONO_TYPE_U:
case MONO_TYPE_PTR:
case MONO_TYPE_FNPTR:
case MONO_TYPE_OBJECT:
#endif
case MONO_TYPE_U8:
case MONO_TYPE_I8:
add_general (cinfo, ainfo, 8, FALSE);
break;
case MONO_TYPE_R8:
add_fp (cinfo, ainfo, FALSE);
break;
case MONO_TYPE_R4:
add_fp (cinfo, ainfo, TRUE);
break;
case MONO_TYPE_VALUETYPE:
case MONO_TYPE_TYPEDBYREF:
add_valuetype (cinfo, ainfo, ptype);
break;
case MONO_TYPE_VOID:
ainfo->storage = ArgNone;
break;
case MONO_TYPE_GENERICINST:
if (!mono_type_generic_inst_is_valuetype (ptype)) {
add_general (cinfo, ainfo, 8, FALSE);
} else if (mini_is_gsharedvt_variable_type (ptype)) {
/*
* Treat gsharedvt arguments as large vtypes
*/
ainfo->storage = ArgVtypeByRef;
ainfo->gsharedvt = TRUE;
} else {
add_valuetype (cinfo, ainfo, ptype);
}
break;
case MONO_TYPE_VAR:
case MONO_TYPE_MVAR:
g_assert (mini_is_gsharedvt_type (ptype));
ainfo->storage = ArgVtypeByRef;
ainfo->gsharedvt = TRUE;
break;
default:
g_assert_not_reached ();
break;
}
}
/*
* get_call_info:
*
* Obtain information about a call according to the calling convention.
*/
static CallInfo*
get_call_info (MonoMemPool *mp, MonoMethodSignature *sig)
{
CallInfo *cinfo;
ArgInfo *ainfo;
int n, pstart, pindex;
n = sig->hasthis + sig->param_count;
if (mp)
cinfo = mono_mempool_alloc0 (mp, sizeof (CallInfo) + (sizeof (ArgInfo) * n));
else
cinfo = g_malloc0 (sizeof (CallInfo) + (sizeof (ArgInfo) * n));
cinfo->nargs = n;
cinfo->pinvoke = sig->pinvoke;
// Constrain this to OSX only for now
#ifdef TARGET_OSX
cinfo->vararg = sig->call_convention == MONO_CALL_VARARG;
#endif
/* Return value */
add_param (cinfo, &cinfo->ret, sig->ret);
if (cinfo->ret.storage == ArgVtypeByRef)
cinfo->ret.reg = ARMREG_R8;
/* Reset state */
cinfo->gr = 0;
cinfo->fr = 0;
cinfo->stack_usage = 0;
/* Parameters */
if (sig->hasthis)
add_general (cinfo, cinfo->args + 0, 8, FALSE);
pstart = 0;
for (pindex = pstart; pindex < sig->param_count; ++pindex) {
ainfo = cinfo->args + sig->hasthis + pindex;
if ((sig->call_convention == MONO_CALL_VARARG) && (pindex == sig->sentinelpos)) {
/* Prevent implicit arguments and sig_cookie from
being passed in registers */
cinfo->gr = PARAM_REGS;
cinfo->fr = FP_PARAM_REGS;
/* Emit the signature cookie just before the implicit arguments */
add_param (cinfo, &cinfo->sig_cookie, mono_get_int_type ());
}
add_param (cinfo, ainfo, sig->params [pindex]);
if (ainfo->storage == ArgVtypeByRef) {
/* Pass the argument address in the next register */
if (cinfo->gr >= PARAM_REGS) {
ainfo->storage = ArgVtypeByRefOnStack;
cinfo->stack_usage = ALIGN_TO (cinfo->stack_usage, 8);
ainfo->offset = cinfo->stack_usage;
cinfo->stack_usage += 8;
} else {
ainfo->reg = cinfo->gr;
cinfo->gr ++;
}
}
}
/* Handle the case where there are no implicit arguments */
if ((sig->call_convention == MONO_CALL_VARARG) && (pindex == sig->sentinelpos)) {
/* Prevent implicit arguments and sig_cookie from
being passed in registers */
cinfo->gr = PARAM_REGS;
cinfo->fr = FP_PARAM_REGS;
/* Emit the signature cookie just before the implicit arguments */
add_param (cinfo, &cinfo->sig_cookie, mono_get_int_type ());
}
cinfo->stack_usage = ALIGN_TO (cinfo->stack_usage, MONO_ARCH_FRAME_ALIGNMENT);
return cinfo;
}
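/*
 * arg_need_temp:
 *
 * Return the size of the temporary buffer needed to marshal AINFO, or 0 if it
 * can be copied directly. Single precision HFA fields each occupy a full
 * 8 byte fpreg slot in CallContext, so they have to be packed/unpacked
 * through a temporary.
 */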
static int
arg_need_temp (ArgInfo *ainfo)
{
if (ainfo->storage == ArgHFA && ainfo->esize == 4)
return ainfo->size;
return 0;
}
static gpointer
arg_get_storage (CallContext *ccontext, ArgInfo *ainfo)
{
switch (ainfo->storage) {
case ArgVtypeInIRegs:
case ArgInIReg:
return &ccontext->gregs [ainfo->reg];
case ArgInFReg:
case ArgInFRegR4:
case ArgHFA:
return &ccontext->fregs [ainfo->reg];
case ArgOnStack:
case ArgOnStackR4:
case ArgOnStackR8:
case ArgVtypeOnStack:
return ccontext->stack + ainfo->offset;
case ArgVtypeByRef:
return (gpointer) ccontext->gregs [ainfo->reg];
default:
g_error ("Arg storage type not yet supported");
}
}
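/* Pack the r4 HFA fields, stored one per 8 byte fpreg slot, into a contiguous
 * float array at DEST (arg_set_val () performs the reverse). */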
static void
arg_get_val (CallContext *ccontext, ArgInfo *ainfo, gpointer dest)
{
g_assert (arg_need_temp (ainfo));
float *dest_float = (float*)dest;
for (int k = 0; k < ainfo->nregs; k++) {
*dest_float = *(float*)&ccontext->fregs [ainfo->reg + k];
dest_float++;
}
}
static void
arg_set_val (CallContext *ccontext, ArgInfo *ainfo, gpointer src)
{
g_assert (arg_need_temp (ainfo));
float *src_float = (float*)src;
for (int k = 0; k < ainfo->nregs; k++) {
*(float*)&ccontext->fregs [ainfo->reg + k] = *src_float;
src_float++;
}
}
/* Set arguments in the ccontext (for i2n entry) */
void
mono_arch_set_native_call_context_args (CallContext *ccontext, gpointer frame, MonoMethodSignature *sig)
{
const MonoEECallbacks *interp_cb = mini_get_interp_callbacks ();
CallInfo *cinfo = get_call_info (NULL, sig);
gpointer storage;
ArgInfo *ainfo;
memset (ccontext, 0, sizeof (CallContext));
ccontext->stack_size = ALIGN_TO (cinfo->stack_usage, MONO_ARCH_FRAME_ALIGNMENT);
if (ccontext->stack_size)
ccontext->stack = (guint8*)g_calloc (1, ccontext->stack_size);
if (sig->ret->type != MONO_TYPE_VOID) {
ainfo = &cinfo->ret;
if (ainfo->storage == ArgVtypeByRef) {
storage = interp_cb->frame_arg_to_storage ((MonoInterpFrameHandle)frame, sig, -1);
ccontext->gregs [cinfo->ret.reg] = (gsize)storage;
}
}
g_assert (!sig->hasthis);
for (int i = 0; i < sig->param_count; i++) {
ainfo = &cinfo->args [i];
if (ainfo->storage == ArgVtypeByRef) {
ccontext->gregs [ainfo->reg] = (host_mgreg_t)interp_cb->frame_arg_to_storage ((MonoInterpFrameHandle)frame, sig, i);
continue;
}
int temp_size = arg_need_temp (ainfo);
if (temp_size)
storage = alloca (temp_size); // FIXME? alloca in a loop
else
storage = arg_get_storage (ccontext, ainfo);
interp_cb->frame_arg_to_data ((MonoInterpFrameHandle)frame, sig, i, storage);
if (temp_size)
arg_set_val (ccontext, ainfo, storage);
}
g_free (cinfo);
}
/* Set return value in the ccontext (for n2i return) */
void
mono_arch_set_native_call_context_ret (CallContext *ccontext, gpointer frame, MonoMethodSignature *sig, gpointer retp)
{
const MonoEECallbacks *interp_cb;
CallInfo *cinfo;
gpointer storage;
ArgInfo *ainfo;
if (sig->ret->type == MONO_TYPE_VOID)
return;
interp_cb = mini_get_interp_callbacks ();
cinfo = get_call_info (NULL, sig);
ainfo = &cinfo->ret;
if (retp) {
g_assert (ainfo->storage == ArgVtypeByRef);
interp_cb->frame_arg_to_data ((MonoInterpFrameHandle)frame, sig, -1, retp);
} else {
g_assert (ainfo->storage != ArgVtypeByRef);
int temp_size = arg_need_temp (ainfo);
if (temp_size)
storage = alloca (temp_size);
else
storage = arg_get_storage (ccontext, ainfo);
memset (ccontext, 0, sizeof (CallContext)); // FIXME
interp_cb->frame_arg_to_data ((MonoInterpFrameHandle)frame, sig, -1, storage);
if (temp_size)
arg_set_val (ccontext, ainfo, storage);
}
g_free (cinfo);
}
/* Gets the arguments from ccontext (for n2i entry) */
gpointer
mono_arch_get_native_call_context_args (CallContext *ccontext, gpointer frame, MonoMethodSignature *sig)
{
const MonoEECallbacks *interp_cb = mini_get_interp_callbacks ();
CallInfo *cinfo = get_call_info (NULL, sig);
gpointer storage;
ArgInfo *ainfo;
for (int i = 0; i < sig->param_count + sig->hasthis; i++) {
ainfo = &cinfo->args [i];
int temp_size = arg_need_temp (ainfo);
if (temp_size) {
storage = alloca (temp_size); // FIXME? alloca in a loop
arg_get_val (ccontext, ainfo, storage);
} else {
storage = arg_get_storage (ccontext, ainfo);
}
interp_cb->data_to_frame_arg ((MonoInterpFrameHandle)frame, sig, i, storage);
}
storage = NULL;
if (sig->ret->type != MONO_TYPE_VOID) {
ainfo = &cinfo->ret;
if (ainfo->storage == ArgVtypeByRef)
storage = (gpointer) ccontext->gregs [cinfo->ret.reg];
}
g_free (cinfo);
return storage;
}
/* Gets the return value from ccontext (for i2n exit) */
void
mono_arch_get_native_call_context_ret (CallContext *ccontext, gpointer frame, MonoMethodSignature *sig)
{
const MonoEECallbacks *interp_cb;
CallInfo *cinfo;
ArgInfo *ainfo;
gpointer storage;
if (sig->ret->type == MONO_TYPE_VOID)
return;
interp_cb = mini_get_interp_callbacks ();
cinfo = get_call_info (NULL, sig);
ainfo = &cinfo->ret;
if (ainfo->storage != ArgVtypeByRef) {
int temp_size = arg_need_temp (ainfo);
if (temp_size) {
storage = alloca (temp_size);
arg_get_val (ccontext, ainfo, storage);
} else {
storage = arg_get_storage (ccontext, ainfo);
}
interp_cb->data_to_frame_arg ((MonoInterpFrameHandle)frame, sig, -1, storage);
}
g_free (cinfo);
}
typedef struct {
MonoMethodSignature *sig;
CallInfo *cinfo;
MonoType *rtype;
MonoType **param_types;
int n_fpargs, n_fpret, nullable_area;
} ArchDynCallInfo;
static gboolean
dyn_call_supported (CallInfo *cinfo, MonoMethodSignature *sig)
{
int i;
// FIXME: Add more cases
switch (cinfo->ret.storage) {
case ArgNone:
case ArgInIReg:
case ArgInFReg:
case ArgInFRegR4:
case ArgVtypeByRef:
break;
case ArgVtypeInIRegs:
if (cinfo->ret.nregs > 2)
return FALSE;
break;
case ArgHFA:
break;
default:
return FALSE;
}
for (i = 0; i < cinfo->nargs; ++i) {
ArgInfo *ainfo = &cinfo->args [i];
switch (ainfo->storage) {
case ArgInIReg:
case ArgVtypeInIRegs:
case ArgInFReg:
case ArgInFRegR4:
case ArgHFA:
case ArgVtypeByRef:
case ArgVtypeByRefOnStack:
case ArgOnStack:
case ArgVtypeOnStack:
break;
default:
return FALSE;
}
}
return TRUE;
}
MonoDynCallInfo*
mono_arch_dyn_call_prepare (MonoMethodSignature *sig)
{
ArchDynCallInfo *info;
CallInfo *cinfo;
int i, aindex;
cinfo = get_call_info (NULL, sig);
if (!dyn_call_supported (cinfo, sig)) {
g_free (cinfo);
return NULL;
}
info = g_new0 (ArchDynCallInfo, 1);
// FIXME: Preprocess the info to speed up start_dyn_call ()
info->sig = sig;
info->cinfo = cinfo;
info->rtype = mini_get_underlying_type (sig->ret);
info->param_types = g_new0 (MonoType*, sig->param_count);
for (i = 0; i < sig->param_count; ++i)
info->param_types [i] = mini_get_underlying_type (sig->params [i]);
switch (cinfo->ret.storage) {
case ArgInFReg:
case ArgInFRegR4:
info->n_fpret = 1;
break;
case ArgHFA:
info->n_fpret = cinfo->ret.nregs;
break;
default:
break;
}
for (aindex = 0; aindex < sig->param_count; aindex++) {
MonoType *t = info->param_types [aindex];
if (m_type_is_byref (t))
continue;
switch (t->type) {
case MONO_TYPE_GENERICINST:
if (t->type == MONO_TYPE_GENERICINST && mono_class_is_nullable (mono_class_from_mono_type_internal (t))) {
MonoClass *klass = mono_class_from_mono_type_internal (t);
int size;
/* Nullables need a temporary buffer, it's stored at the end of DynCallArgs.regs after the stack args */
size = mono_class_value_size (klass, NULL);
info->nullable_area += size;
}
break;
default:
break;
}
}
return (MonoDynCallInfo*)info;
}
void
mono_arch_dyn_call_free (MonoDynCallInfo *info)
{
ArchDynCallInfo *ainfo = (ArchDynCallInfo*)info;
g_free (ainfo->cinfo);
g_free (ainfo->param_types);
g_free (ainfo);
}
int
mono_arch_dyn_call_get_buf_size (MonoDynCallInfo *info)
{
ArchDynCallInfo *ainfo = (ArchDynCallInfo*)info;
g_assert (ainfo->cinfo->stack_usage % MONO_ARCH_FRAME_ALIGNMENT == 0);
return sizeof (DynCallArgs) + ainfo->cinfo->stack_usage + ainfo->nullable_area;
}
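/* R4 values travel through the 8 byte fpregs slots, so these helpers
 * reinterpret the raw register bits instead of doing a numeric float/double
 * conversion. */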
static double
bitcast_r4_to_r8 (float f)
{
float *p = &f;
return *(double*)p;
}
static float
bitcast_r8_to_r4 (double f)
{
double *p = &f;
return *(float*)p;
}
void
mono_arch_start_dyn_call (MonoDynCallInfo *info, gpointer **args, guint8 *ret, guint8 *buf)
{
ArchDynCallInfo *dinfo = (ArchDynCallInfo*)info;
DynCallArgs *p = (DynCallArgs*)buf;
int aindex, arg_index, greg, i, pindex;
MonoMethodSignature *sig = dinfo->sig;
CallInfo *cinfo = dinfo->cinfo;
int buffer_offset = 0;
guint8 *nullable_buffer;
p->res = 0;
p->ret = ret;
p->n_fpargs = dinfo->n_fpargs;
p->n_fpret = dinfo->n_fpret;
p->n_stackargs = cinfo->stack_usage / sizeof (host_mgreg_t);
arg_index = 0;
greg = 0;
pindex = 0;
/* The nullable area is stored in p->regs after the PARAM_REGS + 1 register slots and the stack argument slots */
nullable_buffer = (guint8*)&(p->regs [PARAM_REGS + 1 + (cinfo->stack_usage / sizeof (host_mgreg_t))]);
if (sig->hasthis)
p->regs [greg ++] = (host_mgreg_t)*(args [arg_index ++]);
if (cinfo->ret.storage == ArgVtypeByRef)
p->regs [ARMREG_R8] = (host_mgreg_t)ret;
for (aindex = pindex; aindex < sig->param_count; aindex++) {
MonoType *t = dinfo->param_types [aindex];
gpointer *arg = args [arg_index ++];
ArgInfo *ainfo = &cinfo->args [aindex + sig->hasthis];
int slot = -1;
if (ainfo->storage == ArgOnStack || ainfo->storage == ArgVtypeOnStack || ainfo->storage == ArgVtypeByRefOnStack) {
slot = PARAM_REGS + 1 + (ainfo->offset / sizeof (host_mgreg_t));
} else {
slot = ainfo->reg;
}
if (m_type_is_byref (t)) {
p->regs [slot] = (host_mgreg_t)*arg;
continue;
}
if (ios_abi && ainfo->storage == ArgOnStack) {
guint8 *stack_arg = (guint8*)&(p->regs [PARAM_REGS + 1]) + ainfo->offset;
gboolean handled = TRUE;
/* Special case arguments smaller than 1 machine word */
switch (t->type) {
case MONO_TYPE_U1:
*(guint8*)stack_arg = *(guint8*)arg;
break;
case MONO_TYPE_I1:
*(gint8*)stack_arg = *(gint8*)arg;
break;
case MONO_TYPE_U2:
*(guint16*)stack_arg = *(guint16*)arg;
break;
case MONO_TYPE_I2:
*(gint16*)stack_arg = *(gint16*)arg;
break;
case MONO_TYPE_I4:
*(gint32*)stack_arg = *(gint32*)arg;
break;
case MONO_TYPE_U4:
*(guint32*)stack_arg = *(guint32*)arg;
break;
default:
handled = FALSE;
break;
}
if (handled)
continue;
}
switch (t->type) {
case MONO_TYPE_OBJECT:
case MONO_TYPE_PTR:
case MONO_TYPE_I:
case MONO_TYPE_U:
case MONO_TYPE_I8:
case MONO_TYPE_U8:
p->regs [slot] = (host_mgreg_t)*arg;
break;
case MONO_TYPE_U1:
p->regs [slot] = *(guint8*)arg;
break;
case MONO_TYPE_I1:
p->regs [slot] = *(gint8*)arg;
break;
case MONO_TYPE_I2:
p->regs [slot] = *(gint16*)arg;
break;
case MONO_TYPE_U2:
p->regs [slot] = *(guint16*)arg;
break;
case MONO_TYPE_I4:
p->regs [slot] = *(gint32*)arg;
break;
case MONO_TYPE_U4:
p->regs [slot] = *(guint32*)arg;
break;
case MONO_TYPE_R4:
p->fpregs [ainfo->reg] = bitcast_r4_to_r8 (*(float*)arg);
p->n_fpargs ++;
break;
case MONO_TYPE_R8:
p->fpregs [ainfo->reg] = *(double*)arg;
p->n_fpargs ++;
break;
case MONO_TYPE_GENERICINST:
if (MONO_TYPE_IS_REFERENCE (t)) {
p->regs [slot] = (host_mgreg_t)*arg;
break;
} else {
if (t->type == MONO_TYPE_GENERICINST && mono_class_is_nullable (mono_class_from_mono_type_internal (t))) {
MonoClass *klass = mono_class_from_mono_type_internal (t);
guint8 *nullable_buf;
int size;
/*
 * Use the nullable area at the end of the DynCallArgs buffer as temporary
 * storage, since the data needs to be available after this call if the
 * nullable param is passed by ref.
 */
size = mono_class_value_size (klass, NULL);
nullable_buf = nullable_buffer + buffer_offset;
buffer_offset += size;
g_assert (buffer_offset <= dinfo->nullable_area);
/* The argument pointed to by arg is either a boxed vtype or null */
mono_nullable_init (nullable_buf, (MonoObject*)arg, klass);
arg = (gpointer*)nullable_buf;
/* Fall through */
} else {
/* Fall through */
}
}
case MONO_TYPE_VALUETYPE:
switch (ainfo->storage) {
case ArgVtypeInIRegs:
for (i = 0; i < ainfo->nregs; ++i)
p->regs [slot ++] = ((host_mgreg_t*)arg) [i];
break;
case ArgHFA:
if (ainfo->esize == 4) {
for (i = 0; i < ainfo->nregs; ++i)
p->fpregs [ainfo->reg + i] = bitcast_r4_to_r8 (((float*)arg) [ainfo->foffsets [i] / 4]);
} else {
for (i = 0; i < ainfo->nregs; ++i)
p->fpregs [ainfo->reg + i] = ((double*)arg) [ainfo->foffsets [i] / 8];
}
p->n_fpargs += ainfo->nregs;
break;
case ArgVtypeByRef:
case ArgVtypeByRefOnStack:
p->regs [slot] = (host_mgreg_t)arg;
break;
case ArgVtypeOnStack:
for (i = 0; i < ainfo->size / 8; ++i)
p->regs [slot ++] = ((host_mgreg_t*)arg) [i];
break;
default:
g_assert_not_reached ();
break;
}
break;
default:
g_assert_not_reached ();
}
}
}
void
mono_arch_finish_dyn_call (MonoDynCallInfo *info, guint8 *buf)
{
ArchDynCallInfo *ainfo = (ArchDynCallInfo*)info;
CallInfo *cinfo = ainfo->cinfo;
DynCallArgs *args = (DynCallArgs*)buf;
MonoType *ptype = ainfo->rtype;
guint8 *ret = args->ret;
host_mgreg_t res = args->res;
host_mgreg_t res2 = args->res2;
int i;
if (cinfo->ret.storage == ArgVtypeByRef)
return;
switch (ptype->type) {
case MONO_TYPE_VOID:
*(gpointer*)ret = NULL;
break;
case MONO_TYPE_OBJECT:
case MONO_TYPE_I:
case MONO_TYPE_U:
case MONO_TYPE_PTR:
*(gpointer*)ret = (gpointer)res;
break;
case MONO_TYPE_I1:
*(gint8*)ret = res;
break;
case MONO_TYPE_U1:
*(guint8*)ret = res;
break;
case MONO_TYPE_I2:
*(gint16*)ret = res;
break;
case MONO_TYPE_U2:
*(guint16*)ret = res;
break;
case MONO_TYPE_I4:
*(gint32*)ret = res;
break;
case MONO_TYPE_U4:
*(guint32*)ret = res;
break;
case MONO_TYPE_I8:
case MONO_TYPE_U8:
*(guint64*)ret = res;
break;
case MONO_TYPE_R4:
*(float*)ret = bitcast_r8_to_r4 (args->fpregs [0]);
break;
case MONO_TYPE_R8:
*(double*)ret = args->fpregs [0];
break;
case MONO_TYPE_GENERICINST:
if (MONO_TYPE_IS_REFERENCE (ptype)) {
*(gpointer*)ret = (gpointer)res;
break;
} else {
/* Fall through */
}
case MONO_TYPE_VALUETYPE:
switch (ainfo->cinfo->ret.storage) {
case ArgVtypeInIRegs:
*(host_mgreg_t*)ret = res;
if (ainfo->cinfo->ret.nregs > 1)
((host_mgreg_t*)ret) [1] = res2;
break;
case ArgHFA:
/* Use the same area for returning fp values */
if (cinfo->ret.esize == 4) {
for (i = 0; i < cinfo->ret.nregs; ++i)
((float*)ret) [cinfo->ret.foffsets [i] / 4] = bitcast_r8_to_r4 (args->fpregs [i]);
} else {
for (i = 0; i < cinfo->ret.nregs; ++i)
((double*)ret) [cinfo->ret.foffsets [i] / 8] = args->fpregs [i];
}
break;
default:
g_assert_not_reached ();
break;
}
break;
default:
g_assert_not_reached ();
}
}
#if __APPLE__
G_BEGIN_DECLS
void sys_icache_invalidate (void *start, size_t len);
G_END_DECLS
#endif
void
mono_arch_flush_icache (guint8 *code, gint size)
{
#ifndef MONO_CROSS_COMPILE
#if __APPLE__
sys_icache_invalidate (code, size);
#else
/* Don't rely on GCC's __clear_cache implementation, as it caches
* icache/dcache cache line sizes, that can vary between cores on
* big.LITTLE architectures. */
guint64 end = (guint64) (code + size);
guint64 addr;
/* always go with cacheline size of 4 bytes as this code isn't perf critical
* anyway. Reading the cache line size from a machine register can be racy
* on a big.LITTLE architecture if the cores don't have the same cache line
* sizes. */
const size_t icache_line_size = 4;
const size_t dcache_line_size = 4;
addr = (guint64) code & ~(guint64) (dcache_line_size - 1);
for (; addr < end; addr += dcache_line_size)
asm volatile("dc civac, %0" : : "r" (addr) : "memory");
asm volatile("dsb ish" : : : "memory");
addr = (guint64) code & ~(guint64) (icache_line_size - 1);
for (; addr < end; addr += icache_line_size)
asm volatile("ic ivau, %0" : : "r" (addr) : "memory");
asm volatile ("dsb ish" : : : "memory");
asm volatile ("isb" : : : "memory");
#endif
#endif
}
#ifndef DISABLE_JIT
gboolean
mono_arch_opcode_needs_emulation (MonoCompile *cfg, int opcode)
{
NOT_IMPLEMENTED;
return FALSE;
}
GList *
mono_arch_get_allocatable_int_vars (MonoCompile *cfg)
{
GList *vars = NULL;
int i;
for (i = 0; i < cfg->num_varinfo; i++) {
MonoInst *ins = cfg->varinfo [i];
MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
/* unused vars */
if (vmv->range.first_use.abs_pos >= vmv->range.last_use.abs_pos)
continue;
if ((ins->flags & (MONO_INST_IS_DEAD|MONO_INST_VOLATILE|MONO_INST_INDIRECT)) ||
(ins->opcode != OP_LOCAL && ins->opcode != OP_ARG))
continue;
if (mono_is_regsize_var (ins->inst_vtype)) {
g_assert (MONO_VARINFO (cfg, i)->reg == -1);
g_assert (i == vmv->idx);
vars = g_list_prepend (vars, vmv);
}
}
vars = mono_varlist_sort (cfg, vars, 0);
return vars;
}
GList *
mono_arch_get_global_int_regs (MonoCompile *cfg)
{
GList *regs = NULL;
int i;
/* r28 is reserved for cfg->arch.args_reg */
/* r27 is reserved for the imt argument */
for (i = ARMREG_R19; i <= ARMREG_R26; ++i)
regs = g_list_prepend (regs, GUINT_TO_POINTER (i));
return regs;
}
guint32
mono_arch_regalloc_cost (MonoCompile *cfg, MonoMethodVar *vmv)
{
MonoInst *ins = cfg->varinfo [vmv->idx];
if (ins->opcode == OP_ARG)
return 1;
else
return 2;
}
void
mono_arch_create_vars (MonoCompile *cfg)
{
MonoMethodSignature *sig;
CallInfo *cinfo;
sig = mono_method_signature_internal (cfg->method);
if (!cfg->arch.cinfo)
cfg->arch.cinfo = get_call_info (cfg->mempool, sig);
cinfo = cfg->arch.cinfo;
if (cinfo->ret.storage == ArgVtypeByRef) {
cfg->vret_addr = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL);
cfg->vret_addr->flags |= MONO_INST_VOLATILE;
}
if (cfg->gen_sdb_seq_points) {
MonoInst *ins;
if (cfg->compile_aot) {
ins = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL);
ins->flags |= MONO_INST_VOLATILE;
cfg->arch.seq_point_info_var = ins;
}
ins = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL);
ins->flags |= MONO_INST_VOLATILE;
cfg->arch.ss_tramp_var = ins;
ins = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL);
ins->flags |= MONO_INST_VOLATILE;
cfg->arch.bp_tramp_var = ins;
}
if (cfg->method->save_lmf) {
cfg->create_lmf_var = TRUE;
cfg->lmf_ir = TRUE;
}
}
void
mono_arch_allocate_vars (MonoCompile *cfg)
{
MonoMethodSignature *sig;
MonoInst *ins;
CallInfo *cinfo;
ArgInfo *ainfo;
int i, offset, size, align;
guint32 locals_stack_size, locals_stack_align;
gint32 *offsets;
/*
* Allocate arguments and locals to either register (OP_REGVAR) or to a stack slot (OP_REGOFFSET).
* Compute cfg->stack_offset and update cfg->used_int_regs.
*/
sig = mono_method_signature_internal (cfg->method);
if (!cfg->arch.cinfo)
cfg->arch.cinfo = get_call_info (cfg->mempool, sig);
cinfo = cfg->arch.cinfo;
/*
* The ARM64 ABI always uses a frame pointer.
* The instruction set prefers positive offsets, so fp points to the bottom of the
* frame, and stack slots are at positive offsets.
* If some arguments are received on the stack, their offsets relative to fp can
* not be computed right now because the stack frame might grow due to spilling
* done by the local register allocator. To solve this, we reserve a register
* which points to them.
* The stack frame looks like this:
* args_reg -> <bottom of parent frame>
* <locals etc>
* fp -> <saved fp+lr>
* sp -> <localloc/params area>
*/
cfg->frame_reg = ARMREG_FP;
cfg->flags |= MONO_CFG_HAS_SPILLUP;
offset = 0;
/* Saved fp+lr */
offset += 16;
if (cinfo->stack_usage) {
g_assert (!(cfg->used_int_regs & (1 << ARMREG_R28)));
cfg->arch.args_reg = ARMREG_R28;
cfg->used_int_regs |= 1 << ARMREG_R28;
}
if (cfg->method->save_lmf) {
/* The LMF var is allocated normally */
} else {
/* Callee saved regs */
cfg->arch.saved_gregs_offset = offset;
for (i = 0; i < 32; ++i)
if ((MONO_ARCH_CALLEE_SAVED_REGS & (1 << i)) && (cfg->used_int_regs & (1 << i)))
offset += 8;
}
/* Return value */
switch (cinfo->ret.storage) {
case ArgNone:
break;
case ArgInIReg:
case ArgInFReg:
case ArgInFRegR4:
cfg->ret->opcode = OP_REGVAR;
cfg->ret->dreg = cinfo->ret.reg;
break;
case ArgVtypeInIRegs:
case ArgHFA:
/* Allocate a local to hold the result, the epilog will copy it to the correct place */
cfg->ret->opcode = OP_REGOFFSET;
cfg->ret->inst_basereg = cfg->frame_reg;
cfg->ret->inst_offset = offset;
if (cinfo->ret.storage == ArgHFA)
// FIXME:
offset += 64;
else
offset += 16;
break;
case ArgVtypeByRef:
/* This variable will be initialized in the prolog from R8 */
cfg->vret_addr->opcode = OP_REGOFFSET;
cfg->vret_addr->inst_basereg = cfg->frame_reg;
cfg->vret_addr->inst_offset = offset;
offset += 8;
if (G_UNLIKELY (cfg->verbose_level > 1)) {
printf ("vret_addr =");
mono_print_ins (cfg->vret_addr);
}
break;
default:
g_assert_not_reached ();
break;
}
/* Arguments */
for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
ainfo = cinfo->args + i;
ins = cfg->args [i];
if (ins->opcode == OP_REGVAR)
continue;
ins->opcode = OP_REGOFFSET;
ins->inst_basereg = cfg->frame_reg;
switch (ainfo->storage) {
case ArgInIReg:
case ArgInFReg:
case ArgInFRegR4:
// FIXME: Use nregs/size
/* These will be copied to the stack in the prolog */
ins->inst_offset = offset;
offset += 8;
break;
case ArgOnStack:
case ArgOnStackR4:
case ArgOnStackR8:
case ArgVtypeOnStack:
/* These are in the parent frame */
g_assert (cfg->arch.args_reg);
ins->inst_basereg = cfg->arch.args_reg;
ins->inst_offset = ainfo->offset;
break;
case ArgVtypeInIRegs:
case ArgHFA:
ins->opcode = OP_REGOFFSET;
ins->inst_basereg = cfg->frame_reg;
/* These arguments are saved to the stack in the prolog */
ins->inst_offset = offset;
if (cfg->verbose_level >= 2)
printf ("arg %d allocated to %s+0x%0x.\n", i, mono_arch_regname (ins->inst_basereg), (int)ins->inst_offset);
if (ainfo->storage == ArgHFA)
// FIXME:
offset += 64;
else
offset += 16;
break;
case ArgVtypeByRefOnStack: {
MonoInst *vtaddr;
if (ainfo->gsharedvt) {
ins->opcode = OP_REGOFFSET;
ins->inst_basereg = cfg->arch.args_reg;
ins->inst_offset = ainfo->offset;
break;
}
/* The vtype address is in the parent frame */
g_assert (cfg->arch.args_reg);
MONO_INST_NEW (cfg, vtaddr, 0);
vtaddr->opcode = OP_REGOFFSET;
vtaddr->inst_basereg = cfg->arch.args_reg;
vtaddr->inst_offset = ainfo->offset;
/* Need an indirection */
ins->opcode = OP_VTARG_ADDR;
ins->inst_left = vtaddr;
break;
}
case ArgVtypeByRef: {
MonoInst *vtaddr;
if (ainfo->gsharedvt) {
ins->opcode = OP_REGOFFSET;
ins->inst_basereg = cfg->frame_reg;
ins->inst_offset = offset;
offset += 8;
break;
}
/* The vtype address is in a register, will be copied to the stack in the prolog */
MONO_INST_NEW (cfg, vtaddr, 0);
vtaddr->opcode = OP_REGOFFSET;
vtaddr->inst_basereg = cfg->frame_reg;
vtaddr->inst_offset = offset;
offset += 8;
/* Need an indirection */
ins->opcode = OP_VTARG_ADDR;
ins->inst_left = vtaddr;
break;
}
default:
g_assert_not_reached ();
break;
}
}
/* Allocate these first so they have a small offset, OP_SEQ_POINT depends on this */
// FIXME: Allocate these to registers
ins = cfg->arch.seq_point_info_var;
if (ins) {
size = 8;
align = 8;
offset += align - 1;
offset &= ~(align - 1);
ins->opcode = OP_REGOFFSET;
ins->inst_basereg = cfg->frame_reg;
ins->inst_offset = offset;
offset += size;
}
ins = cfg->arch.ss_tramp_var;
if (ins) {
size = 8;
align = 8;
offset += align - 1;
offset &= ~(align - 1);
ins->opcode = OP_REGOFFSET;
ins->inst_basereg = cfg->frame_reg;
ins->inst_offset = offset;
offset += size;
}
ins = cfg->arch.bp_tramp_var;
if (ins) {
size = 8;
align = 8;
offset += align - 1;
offset &= ~(align - 1);
ins->opcode = OP_REGOFFSET;
ins->inst_basereg = cfg->frame_reg;
ins->inst_offset = offset;
offset += size;
}
/* Locals */
offsets = mono_allocate_stack_slots (cfg, FALSE, &locals_stack_size, &locals_stack_align);
if (locals_stack_align)
offset = ALIGN_TO (offset, locals_stack_align);
for (i = cfg->locals_start; i < cfg->num_varinfo; i++) {
if (offsets [i] != -1) {
ins = cfg->varinfo [i];
ins->opcode = OP_REGOFFSET;
ins->inst_basereg = cfg->frame_reg;
ins->inst_offset = offset + offsets [i];
//printf ("allocated local %d to ", i); mono_print_tree_nl (ins);
}
}
offset += locals_stack_size;
offset = ALIGN_TO (offset, MONO_ARCH_FRAME_ALIGNMENT);
cfg->stack_offset = offset;
}
#ifdef ENABLE_LLVM
LLVMCallInfo*
mono_arch_get_llvm_call_info (MonoCompile *cfg, MonoMethodSignature *sig)
{
int i, n;
CallInfo *cinfo;
ArgInfo *ainfo;
LLVMCallInfo *linfo;
n = sig->param_count + sig->hasthis;
cinfo = get_call_info (cfg->mempool, sig);
linfo = mono_mempool_alloc0 (cfg->mempool, sizeof (LLVMCallInfo) + (sizeof (LLVMArgInfo) * n));
switch (cinfo->ret.storage) {
case ArgInIReg:
case ArgInFReg:
case ArgInFRegR4:
linfo->ret.storage = LLVMArgNormal;
break;
case ArgNone:
linfo->ret.storage = LLVMArgNone;
break;
case ArgVtypeByRef:
linfo->ret.storage = LLVMArgVtypeByRef;
break;
//
// FIXME: This doesn't work yet since the llvm backend represents these types as an i8
// array which is returned in int regs
//
case ArgHFA:
linfo->ret.storage = LLVMArgFpStruct;
linfo->ret.nslots = cinfo->ret.nregs;
linfo->ret.esize = cinfo->ret.esize;
break;
case ArgVtypeInIRegs:
/* LLVM models this by returning an int */
linfo->ret.storage = LLVMArgVtypeAsScalar;
linfo->ret.nslots = cinfo->ret.nregs;
linfo->ret.esize = cinfo->ret.esize;
break;
default:
g_assert_not_reached ();
break;
}
for (i = 0; i < n; ++i) {
LLVMArgInfo *lainfo = &linfo->args [i];
ainfo = cinfo->args + i;
lainfo->storage = LLVMArgNone;
switch (ainfo->storage) {
case ArgInIReg:
case ArgInFReg:
case ArgInFRegR4:
case ArgOnStack:
case ArgOnStackR4:
case ArgOnStackR8:
lainfo->storage = LLVMArgNormal;
break;
case ArgVtypeByRef:
case ArgVtypeByRefOnStack:
lainfo->storage = LLVMArgVtypeByRef;
break;
case ArgHFA: {
int j;
lainfo->storage = LLVMArgAsFpArgs;
lainfo->nslots = ainfo->nregs;
lainfo->esize = ainfo->esize;
for (j = 0; j < ainfo->nregs; ++j)
lainfo->pair_storage [j] = LLVMArgInFPReg;
break;
}
case ArgVtypeInIRegs:
lainfo->storage = LLVMArgAsIArgs;
lainfo->nslots = ainfo->nregs;
break;
case ArgVtypeOnStack:
if (ainfo->hfa) {
int j;
/* Same as above */
lainfo->storage = LLVMArgAsFpArgs;
lainfo->nslots = ainfo->nregs;
lainfo->esize = ainfo->esize;
lainfo->ndummy_fpargs = ainfo->nfregs_to_skip;
for (j = 0; j < ainfo->nregs; ++j)
lainfo->pair_storage [j] = LLVMArgInFPReg;
} else {
lainfo->storage = LLVMArgAsIArgs;
lainfo->nslots = ainfo->size / 8;
}
break;
default:
g_assert_not_reached ();
break;
}
}
return linfo;
}
#endif
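/*
 * add_outarg_reg:
 *
 *   Emit a move of ARG into the outgoing argument register REG and record it on CALL.
 */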
static void
add_outarg_reg (MonoCompile *cfg, MonoCallInst *call, ArgStorage storage, int reg, MonoInst *arg)
{
MonoInst *ins;
switch (storage) {
case ArgInIReg:
MONO_INST_NEW (cfg, ins, OP_MOVE);
ins->dreg = mono_alloc_ireg_copy (cfg, arg->dreg);
ins->sreg1 = arg->dreg;
MONO_ADD_INS (cfg->cbb, ins);
mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, reg, FALSE);
break;
case ArgInFReg:
MONO_INST_NEW (cfg, ins, OP_FMOVE);
ins->dreg = mono_alloc_freg (cfg);
ins->sreg1 = arg->dreg;
MONO_ADD_INS (cfg->cbb, ins);
mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, reg, TRUE);
break;
case ArgInFRegR4:
if (COMPILE_LLVM (cfg))
MONO_INST_NEW (cfg, ins, OP_FMOVE);
else
MONO_INST_NEW (cfg, ins, OP_RMOVE);
ins->dreg = mono_alloc_freg (cfg);
ins->sreg1 = arg->dreg;
MONO_ADD_INS (cfg->cbb, ins);
mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, reg, TRUE);
break;
default:
g_assert_not_reached ();
break;
}
}
static void
emit_sig_cookie (MonoCompile *cfg, MonoCallInst *call, CallInfo *cinfo)
{
MonoMethodSignature *tmp_sig;
int sig_reg;
if (MONO_IS_TAILCALL_OPCODE (call))
NOT_IMPLEMENTED;
g_assert (cinfo->sig_cookie.storage == ArgOnStack);
/*
* mono_ArgIterator_Setup assumes the signature cookie is
* passed first and all the arguments which were before it are
* passed on the stack after the signature. So compensate by
* passing a different signature.
*/
tmp_sig = mono_metadata_signature_dup (call->signature);
tmp_sig->param_count -= call->signature->sentinelpos;
tmp_sig->sentinelpos = 0;
memcpy (tmp_sig->params, call->signature->params + call->signature->sentinelpos, tmp_sig->param_count * sizeof (MonoType*));
sig_reg = mono_alloc_ireg (cfg);
MONO_EMIT_NEW_SIGNATURECONST (cfg, sig_reg, tmp_sig);
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, cinfo->sig_cookie.offset, sig_reg);
}
void
mono_arch_emit_call (MonoCompile *cfg, MonoCallInst *call)
{
MonoMethodSignature *sig;
MonoInst *arg, *vtarg;
CallInfo *cinfo;
ArgInfo *ainfo;
int i;
sig = call->signature;
cinfo = get_call_info (cfg->mempool, sig);
switch (cinfo->ret.storage) {
case ArgVtypeInIRegs:
case ArgHFA:
if (MONO_IS_TAILCALL_OPCODE (call))
break;
/*
* The vtype is returned in registers, so save the return area address in a local, and save the vtype into
* the location pointed to by it after the call in emit_move_return_value ().
*/
if (!cfg->arch.vret_addr_loc) {
cfg->arch.vret_addr_loc = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL);
/* Prevent it from being register allocated or optimized away */
cfg->arch.vret_addr_loc->flags |= MONO_INST_VOLATILE;
}
MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->arch.vret_addr_loc->dreg, call->vret_var->dreg);
break;
case ArgVtypeByRef:
/* Pass the vtype return address in R8 */
g_assert (!MONO_IS_TAILCALL_OPCODE (call) || call->vret_var == cfg->vret_addr);
MONO_INST_NEW (cfg, vtarg, OP_MOVE);
vtarg->sreg1 = call->vret_var->dreg;
vtarg->dreg = mono_alloc_preg (cfg);
MONO_ADD_INS (cfg->cbb, vtarg);
mono_call_inst_add_outarg_reg (cfg, call, vtarg->dreg, cinfo->ret.reg, FALSE);
break;
default:
break;
}
for (i = 0; i < cinfo->nargs; ++i) {
ainfo = cinfo->args + i;
arg = call->args [i];
if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
/* Emit the signature cookie just before the implicit arguments */
emit_sig_cookie (cfg, call, cinfo);
}
switch (ainfo->storage) {
case ArgInIReg:
case ArgInFReg:
case ArgInFRegR4:
add_outarg_reg (cfg, call, ainfo->storage, ainfo->reg, arg);
break;
case ArgOnStack:
switch (ainfo->slot_size) {
case 8:
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, arg->dreg);
break;
case 4:
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, ARMREG_SP, ainfo->offset, arg->dreg);
break;
case 2:
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, ARMREG_SP, ainfo->offset, arg->dreg);
break;
case 1:
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, ARMREG_SP, ainfo->offset, arg->dreg);
break;
default:
g_assert_not_reached ();
break;
}
break;
case ArgOnStackR8:
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ARMREG_SP, ainfo->offset, arg->dreg);
break;
case ArgOnStackR4:
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG, ARMREG_SP, ainfo->offset, arg->dreg);
break;
case ArgVtypeInIRegs:
case ArgVtypeByRef:
case ArgVtypeByRefOnStack:
case ArgVtypeOnStack:
case ArgHFA: {
MonoInst *ins;
guint32 align;
guint32 size;
size = mono_class_value_size (arg->klass, &align);
MONO_INST_NEW (cfg, ins, OP_OUTARG_VT);
ins->sreg1 = arg->dreg;
ins->klass = arg->klass;
ins->backend.size = size;
ins->inst_p0 = call;
ins->inst_p1 = mono_mempool_alloc (cfg->mempool, sizeof (ArgInfo));
memcpy (ins->inst_p1, ainfo, sizeof (ArgInfo));
MONO_ADD_INS (cfg->cbb, ins);
break;
}
default:
g_assert_not_reached ();
break;
}
}
/* Handle the case where there are no implicit arguments */
if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (cinfo->nargs == sig->sentinelpos))
emit_sig_cookie (cfg, call, cinfo);
call->call_info = cinfo;
call->stack_usage = cinfo->stack_usage;
}
void
mono_arch_emit_outarg_vt (MonoCompile *cfg, MonoInst *ins, MonoInst *src)
{
MonoCallInst *call = (MonoCallInst*)ins->inst_p0;
ArgInfo *ainfo = (ArgInfo*)ins->inst_p1;
MonoInst *load;
int i;
if (ins->backend.size == 0 && !ainfo->gsharedvt)
return;
switch (ainfo->storage) {
case ArgVtypeInIRegs:
for (i = 0; i < ainfo->nregs; ++i) {
// FIXME: Smaller sizes
MONO_INST_NEW (cfg, load, OP_LOADI8_MEMBASE);
load->dreg = mono_alloc_ireg (cfg);
load->inst_basereg = src->dreg;
load->inst_offset = i * sizeof (target_mgreg_t);
MONO_ADD_INS (cfg->cbb, load);
add_outarg_reg (cfg, call, ArgInIReg, ainfo->reg + i, load);
}
break;
case ArgHFA:
for (i = 0; i < ainfo->nregs; ++i) {
if (ainfo->esize == 4)
MONO_INST_NEW (cfg, load, OP_LOADR4_MEMBASE);
else
MONO_INST_NEW (cfg, load, OP_LOADR8_MEMBASE);
load->dreg = mono_alloc_freg (cfg);
load->inst_basereg = src->dreg;
load->inst_offset = ainfo->foffsets [i];
MONO_ADD_INS (cfg->cbb, load);
add_outarg_reg (cfg, call, ainfo->esize == 4 ? ArgInFRegR4 : ArgInFReg, ainfo->reg + i, load);
}
break;
case ArgVtypeByRef:
case ArgVtypeByRefOnStack: {
MonoInst *vtaddr, *load, *arg;
/* Pass the vtype address in a reg/on the stack */
if (ainfo->gsharedvt) {
load = src;
} else {
/* Make a copy of the argument */
vtaddr = mono_compile_create_var (cfg, m_class_get_byval_arg (ins->klass), OP_LOCAL);
MONO_INST_NEW (cfg, load, OP_LDADDR);
load->inst_p0 = vtaddr;
vtaddr->flags |= MONO_INST_INDIRECT;
load->type = STACK_MP;
load->klass = vtaddr->klass;
load->dreg = mono_alloc_ireg (cfg);
MONO_ADD_INS (cfg->cbb, load);
mini_emit_memcpy (cfg, load->dreg, 0, src->dreg, 0, ainfo->size, 8);
}
if (ainfo->storage == ArgVtypeByRef) {
MONO_INST_NEW (cfg, arg, OP_MOVE);
arg->dreg = mono_alloc_preg (cfg);
arg->sreg1 = load->dreg;
MONO_ADD_INS (cfg->cbb, arg);
add_outarg_reg (cfg, call, ArgInIReg, ainfo->reg, arg);
} else {
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, load->dreg);
}
break;
}
case ArgVtypeOnStack:
for (i = 0; i < ainfo->size / 8; ++i) {
MONO_INST_NEW (cfg, load, OP_LOADI8_MEMBASE);
load->dreg = mono_alloc_ireg (cfg);
load->inst_basereg = src->dreg;
load->inst_offset = i * 8;
MONO_ADD_INS (cfg->cbb, load);
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, ARMREG_SP, ainfo->offset + (i * 8), load->dreg);
}
break;
default:
g_assert_not_reached ();
break;
}
}
void
mono_arch_emit_setret (MonoCompile *cfg, MonoMethod *method, MonoInst *val)
{
MonoMethodSignature *sig;
CallInfo *cinfo;
sig = mono_method_signature_internal (cfg->method);
if (!cfg->arch.cinfo)
cfg->arch.cinfo = get_call_info (cfg->mempool, sig);
cinfo = cfg->arch.cinfo;
switch (cinfo->ret.storage) {
case ArgNone:
break;
case ArgInIReg:
MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
break;
case ArgInFReg:
MONO_EMIT_NEW_UNALU (cfg, OP_FMOVE, cfg->ret->dreg, val->dreg);
break;
case ArgInFRegR4:
if (COMPILE_LLVM (cfg))
MONO_EMIT_NEW_UNALU (cfg, OP_FMOVE, cfg->ret->dreg, val->dreg);
else
MONO_EMIT_NEW_UNALU (cfg, OP_RMOVE, cfg->ret->dreg, val->dreg);
break;
default:
g_assert_not_reached ();
break;
}
}
#ifndef DISABLE_JIT
gboolean
mono_arch_tailcall_supported (MonoCompile *cfg, MonoMethodSignature *caller_sig, MonoMethodSignature *callee_sig, gboolean virtual_)
{
g_assert (caller_sig);
g_assert (callee_sig);
CallInfo *caller_info = get_call_info (NULL, caller_sig);
CallInfo *callee_info = get_call_info (NULL, callee_sig);
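/* A tailcall reuses the caller's frame, so the callee cannot need more stack space, and both must return the same way */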
gboolean res = IS_SUPPORTED_TAILCALL (callee_info->stack_usage <= caller_info->stack_usage)
&& IS_SUPPORTED_TAILCALL (caller_info->ret.storage == callee_info->ret.storage);
// FIXME Limit stack_usage to 1G. emit_ldrx / strx has 32bit limits.
res &= IS_SUPPORTED_TAILCALL (callee_info->stack_usage < (1 << 30));
res &= IS_SUPPORTED_TAILCALL (caller_info->stack_usage < (1 << 30));
// valuetype parameters are the address of a local
const ArgInfo *ainfo;
ainfo = callee_info->args + callee_sig->hasthis;
for (int i = 0; res && i < callee_sig->param_count; ++i) {
res = IS_SUPPORTED_TAILCALL (ainfo [i].storage != ArgVtypeByRef)
&& IS_SUPPORTED_TAILCALL (ainfo [i].storage != ArgVtypeByRefOnStack);
}
g_free (caller_info);
g_free (callee_info);
return res;
}
#endif
gboolean
mono_arch_is_inst_imm (int opcode, int imm_opcode, gint64 imm)
{
return (imm >= -((gint64)1<<31) && imm <= (((gint64)1<<31)-1));
}
void
mono_arch_peephole_pass_1 (MonoCompile *cfg, MonoBasicBlock *bb)
{
//NOT_IMPLEMENTED;
}
void
mono_arch_peephole_pass_2 (MonoCompile *cfg, MonoBasicBlock *bb)
{
//NOT_IMPLEMENTED;
}
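/* Create a new instruction and insert it before the instruction being lowered */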
#define ADD_NEW_INS(cfg,dest,op) do { \
MONO_INST_NEW ((cfg), (dest), (op)); \
mono_bblock_insert_before_ins (bb, ins, (dest)); \
} while (0)
void
mono_arch_lowering_pass (MonoCompile *cfg, MonoBasicBlock *bb)
{
MonoInst *ins, *temp, *last_ins = NULL;
MONO_BB_FOR_EACH_INS (bb, ins) {
switch (ins->opcode) {
case OP_SBB:
case OP_ISBB:
case OP_SUBCC:
case OP_ISUBCC:
if (ins->next && (ins->next->opcode == OP_COND_EXC_C || ins->next->opcode == OP_COND_EXC_IC))
/* ARM sets the C flag to 1 if there was _no_ overflow */
ins->next->opcode = OP_COND_EXC_NC;
break;
case OP_IDIV_IMM:
case OP_IREM_IMM:
case OP_IDIV_UN_IMM:
case OP_IREM_UN_IMM:
case OP_LREM_IMM:
mono_decompose_op_imm (cfg, bb, ins);
break;
case OP_LOCALLOC_IMM:
if (ins->inst_imm > 32) {
ADD_NEW_INS (cfg, temp, OP_ICONST);
temp->inst_c0 = ins->inst_imm;
temp->dreg = mono_alloc_ireg (cfg);
ins->sreg1 = temp->dreg;
ins->opcode = mono_op_imm_to_op (ins->opcode);
}
break;
case OP_ICOMPARE_IMM:
if (ins->inst_imm == 0 && ins->next && ins->next->opcode == OP_IBEQ) {
ins->next->opcode = OP_ARM64_CBZW;
ins->next->sreg1 = ins->sreg1;
NULLIFY_INS (ins);
} else if (ins->inst_imm == 0 && ins->next && ins->next->opcode == OP_IBNE_UN) {
ins->next->opcode = OP_ARM64_CBNZW;
ins->next->sreg1 = ins->sreg1;
NULLIFY_INS (ins);
}
break;
case OP_LCOMPARE_IMM:
case OP_COMPARE_IMM:
if (ins->inst_imm == 0 && ins->next && ins->next->opcode == OP_LBEQ) {
ins->next->opcode = OP_ARM64_CBZX;
ins->next->sreg1 = ins->sreg1;
NULLIFY_INS (ins);
} else if (ins->inst_imm == 0 && ins->next && ins->next->opcode == OP_LBNE_UN) {
ins->next->opcode = OP_ARM64_CBNZX;
ins->next->sreg1 = ins->sreg1;
NULLIFY_INS (ins);
}
break;
case OP_FCOMPARE:
case OP_RCOMPARE: {
gboolean swap = FALSE;
int reg;
if (!ins->next) {
/* Optimized away */
NULLIFY_INS (ins);
break;
}
/*
* FP compares with unordered operands set the flags
* to NZCV=0011, which matches some non-unordered compares
* as well, like LE, so we have to swap the operands.
*/
switch (ins->next->opcode) {
case OP_FBLT:
ins->next->opcode = OP_FBGT;
swap = TRUE;
break;
case OP_FBLE:
ins->next->opcode = OP_FBGE;
swap = TRUE;
break;
case OP_RBLT:
ins->next->opcode = OP_RBGT;
swap = TRUE;
break;
case OP_RBLE:
ins->next->opcode = OP_RBGE;
swap = TRUE;
break;
default:
break;
}
if (swap) {
reg = ins->sreg1;
ins->sreg1 = ins->sreg2;
ins->sreg2 = reg;
}
break;
}
default:
break;
}
last_ins = ins;
}
bb->last_ins = last_ins;
bb->max_vreg = cfg->next_vreg;
}
void
mono_arch_decompose_long_opts (MonoCompile *cfg, MonoInst *long_ins)
{
}
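/*
 * opcode_to_armcond:
 *
 *   Map a compare/branch/set/cond-exc opcode to the ARM condition code it tests.
 */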
static int
opcode_to_armcond (int opcode)
{
switch (opcode) {
case OP_IBEQ:
case OP_LBEQ:
case OP_FBEQ:
case OP_CEQ:
case OP_ICEQ:
case OP_LCEQ:
case OP_FCEQ:
case OP_RCEQ:
case OP_COND_EXC_IEQ:
case OP_COND_EXC_EQ:
return ARMCOND_EQ;
case OP_IBGE:
case OP_LBGE:
case OP_FBGE:
case OP_ICGE:
case OP_FCGE:
case OP_RCGE:
return ARMCOND_GE;
case OP_IBGT:
case OP_LBGT:
case OP_FBGT:
case OP_CGT:
case OP_ICGT:
case OP_LCGT:
case OP_FCGT:
case OP_RCGT:
case OP_COND_EXC_IGT:
case OP_COND_EXC_GT:
return ARMCOND_GT;
case OP_IBLE:
case OP_LBLE:
case OP_FBLE:
case OP_ICLE:
case OP_FCLE:
case OP_RCLE:
return ARMCOND_LE;
case OP_IBLT:
case OP_LBLT:
case OP_FBLT:
case OP_CLT:
case OP_ICLT:
case OP_LCLT:
case OP_COND_EXC_ILT:
case OP_COND_EXC_LT:
return ARMCOND_LT;
case OP_IBNE_UN:
case OP_LBNE_UN:
case OP_FBNE_UN:
case OP_ICNEQ:
case OP_FCNEQ:
case OP_RCNEQ:
case OP_COND_EXC_INE_UN:
case OP_COND_EXC_NE_UN:
return ARMCOND_NE;
case OP_IBGE_UN:
case OP_LBGE_UN:
case OP_FBGE_UN:
case OP_ICGE_UN:
case OP_COND_EXC_IGE_UN:
case OP_COND_EXC_GE_UN:
return ARMCOND_HS;
case OP_IBGT_UN:
case OP_LBGT_UN:
case OP_FBGT_UN:
case OP_CGT_UN:
case OP_ICGT_UN:
case OP_LCGT_UN:
case OP_FCGT_UN:
case OP_RCGT_UN:
case OP_COND_EXC_IGT_UN:
case OP_COND_EXC_GT_UN:
return ARMCOND_HI;
case OP_IBLE_UN:
case OP_LBLE_UN:
case OP_FBLE_UN:
case OP_ICLE_UN:
case OP_COND_EXC_ILE_UN:
case OP_COND_EXC_LE_UN:
return ARMCOND_LS;
case OP_IBLT_UN:
case OP_LBLT_UN:
case OP_FBLT_UN:
case OP_CLT_UN:
case OP_ICLT_UN:
case OP_LCLT_UN:
case OP_COND_EXC_ILT_UN:
case OP_COND_EXC_LT_UN:
return ARMCOND_LO;
/*
* FCMP sets the NZCV condition bits as follows:
* eq = 0110
* < = 1000
* > = 0010
* unordered = 0011
* ARMCOND_LT is N!=V, so it matches unordered too, so
* fclt and fclt_un need to be special cased.
*/
case OP_FCLT:
case OP_RCLT:
/* N==1 */
return ARMCOND_MI;
case OP_FCLT_UN:
case OP_RCLT_UN:
return ARMCOND_LT;
case OP_COND_EXC_C:
case OP_COND_EXC_IC:
return ARMCOND_CS;
case OP_COND_EXC_OV:
case OP_COND_EXC_IOV:
return ARMCOND_VS;
case OP_COND_EXC_NC:
case OP_COND_EXC_INC:
return ARMCOND_CC;
case OP_COND_EXC_NO:
case OP_COND_EXC_INO:
return ARMCOND_VC;
default:
printf ("%s\n", mono_inst_name (opcode));
g_assert_not_reached ();
return -1;
}
}
/* This clobbers IP1 */
static WARN_UNUSED_RESULT guint8*
emit_cond_exc (MonoCompile *cfg, guint8 *code, int opcode, const char *exc_name)
{
int cond;
cond = opcode_to_armcond (opcode);
/* Capture PC */
arm_adrx (code, ARMREG_IP1, code);
mono_add_patch_info_rel (cfg, code - cfg->native_code, MONO_PATCH_INFO_EXC, exc_name, MONO_R_ARM64_BCC);
arm_bcc (code, cond, 0);
return code;
}
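/*
 * emit_move_return_value:
 *
 *   Move the return value of the call INS from its ABI location to ins->dreg.
 */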
static guint8*
emit_move_return_value (MonoCompile *cfg, guint8 * code, MonoInst *ins)
{
CallInfo *cinfo;
MonoCallInst *call;
call = (MonoCallInst*)ins;
cinfo = call->call_info;
g_assert (cinfo);
switch (cinfo->ret.storage) {
case ArgNone:
break;
case ArgInIReg:
/* LLVM compiled code might only set the bottom bits */
if (call->signature && mini_get_underlying_type (call->signature->ret)->type == MONO_TYPE_I4)
arm_sxtwx (code, call->inst.dreg, cinfo->ret.reg);
else if (call->inst.dreg != cinfo->ret.reg)
arm_movx (code, call->inst.dreg, cinfo->ret.reg);
break;
case ArgInFReg:
if (call->inst.dreg != cinfo->ret.reg)
arm_fmovd (code, call->inst.dreg, cinfo->ret.reg);
break;
case ArgInFRegR4:
arm_fmovs (code, call->inst.dreg, cinfo->ret.reg);
break;
case ArgVtypeInIRegs: {
MonoInst *loc = cfg->arch.vret_addr_loc;
int i;
/* Load the destination address */
g_assert (loc && loc->opcode == OP_REGOFFSET);
code = emit_ldrx (code, ARMREG_LR, loc->inst_basereg, loc->inst_offset);
for (i = 0; i < cinfo->ret.nregs; ++i)
arm_strx (code, cinfo->ret.reg + i, ARMREG_LR, i * 8);
break;
}
case ArgHFA: {
MonoInst *loc = cfg->arch.vret_addr_loc;
int i;
/* Load the destination address */
g_assert (loc && loc->opcode == OP_REGOFFSET);
code = emit_ldrx (code, ARMREG_LR, loc->inst_basereg, loc->inst_offset);
for (i = 0; i < cinfo->ret.nregs; ++i) {
if (cinfo->ret.esize == 4)
arm_strfpw (code, cinfo->ret.reg + i, ARMREG_LR, cinfo->ret.foffsets [i]);
else
arm_strfpx (code, cinfo->ret.reg + i, ARMREG_LR, cinfo->ret.foffsets [i]);
}
break;
}
case ArgVtypeByRef:
break;
default:
g_assert_not_reached ();
break;
}
return code;
}
/*
* emit_branch_island:
*
* Emit a branch island for the conditional branches from cfg->native_code + start_offset to code.
*/
static guint8*
emit_branch_island (MonoCompile *cfg, guint8 *code, int start_offset)
{
MonoJumpInfo *ji;
/* Iterate over the patch infos added so far by this bb */
int island_size = 0;
for (ji = cfg->patch_info; ji; ji = ji->next) {
if (ji->ip.i < start_offset)
/* The patch infos are in reverse order, so this means the end */
break;
if (ji->relocation == MONO_R_ARM64_BCC || ji->relocation == MONO_R_ARM64_CBZ)
island_size += 4;
}
if (island_size) {
code = realloc_code (cfg, island_size);
/* Branch over the island */
arm_b (code, code + 4 + island_size);
for (ji = cfg->patch_info; ji; ji = ji->next) {
if (ji->ip.i < start_offset)
break;
if (ji->relocation == MONO_R_ARM64_BCC || ji->relocation == MONO_R_ARM64_CBZ) {
/* Rewrite the cond branch so it branches to an unconditional branch in the branch island */
arm_patch_rel (cfg->native_code + ji->ip.i, code, ji->relocation);
/* Rewrite the patch so it points to the unconditional branch */
ji->ip.i = code - cfg->native_code;
ji->relocation = MONO_R_ARM64_B;
arm_b (code, code);
}
}
set_code_cursor (cfg, code);
}
return code;
}
void
mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
{
MonoInst *ins;
MonoCallInst *call;
guint8 *code = cfg->native_code + cfg->code_len;
int start_offset, max_len, dreg, sreg1, sreg2;
target_mgreg_t imm;
if (cfg->verbose_level > 2)
g_print ("Basic block %d starting at offset 0x%x\n", bb->block_num, bb->native_offset);
start_offset = code - cfg->native_code;
g_assert (start_offset <= cfg->code_size);
MONO_BB_FOR_EACH_INS (bb, ins) {
guint offset = code - cfg->native_code;
set_code_cursor (cfg, code);
max_len = ins_get_size (ins->opcode);
code = realloc_code (cfg, max_len);
if (G_UNLIKELY (cfg->arch.cond_branch_islands && offset - start_offset > 4 * 0x1ffff)) {
/* Emit a branch island for large basic blocks */
code = emit_branch_island (cfg, code, start_offset);
offset = code - cfg->native_code;
start_offset = offset;
}
mono_debug_record_line_number (cfg, ins, offset);
dreg = ins->dreg;
sreg1 = ins->sreg1;
sreg2 = ins->sreg2;
imm = ins->inst_imm;
switch (ins->opcode) {
case OP_ICONST:
code = emit_imm (code, dreg, ins->inst_c0);
break;
case OP_I8CONST:
code = emit_imm64 (code, dreg, ins->inst_c0);
break;
case OP_MOVE:
if (dreg != sreg1)
arm_movx (code, dreg, sreg1);
break;
case OP_NOP:
case OP_RELAXED_NOP:
break;
case OP_JUMP_TABLE:
mono_add_patch_info_rel (cfg, offset, (MonoJumpInfoType)(gsize)ins->inst_i1, ins->inst_p0, MONO_R_ARM64_IMM);
code = emit_imm64_template (code, dreg);
break;
case OP_BREAK:
/*
* gdb does not like encountering the hw breakpoint ins in the debugged code.
* So instead of emitting a trap, we emit a call to a C function and place a
* breakpoint there.
*/
code = emit_call (cfg, code, MONO_PATCH_INFO_JIT_ICALL_ID, GUINT_TO_POINTER (MONO_JIT_ICALL_mono_break));
break;
case OP_LOCALLOC: {
guint8 *buf [16];
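/* Round the requested size up to MONO_ARCH_FRAME_ALIGNMENT */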
arm_addx_imm (code, ARMREG_IP0, sreg1, (MONO_ARCH_FRAME_ALIGNMENT - 1));
// FIXME: andx_imm doesn't work yet
code = emit_imm (code, ARMREG_IP1, -MONO_ARCH_FRAME_ALIGNMENT);
arm_andx (code, ARMREG_IP0, ARMREG_IP0, ARMREG_IP1);
//arm_andx_imm (code, ARMREG_IP0, sreg1, - MONO_ARCH_FRAME_ALIGNMENT);
arm_movspx (code, ARMREG_IP1, ARMREG_SP);
arm_subx (code, ARMREG_IP1, ARMREG_IP1, ARMREG_IP0);
arm_movspx (code, ARMREG_SP, ARMREG_IP1);
/* Init */
/* ip1 = pointer, ip0 = end */
arm_addx (code, ARMREG_IP0, ARMREG_IP1, ARMREG_IP0);
buf [0] = code;
arm_cmpx (code, ARMREG_IP1, ARMREG_IP0);
buf [1] = code;
arm_bcc (code, ARMCOND_EQ, 0);
arm_stpx (code, ARMREG_RZR, ARMREG_RZR, ARMREG_IP1, 0);
arm_addx_imm (code, ARMREG_IP1, ARMREG_IP1, 16);
arm_b (code, buf [0]);
arm_patch_rel (buf [1], code, MONO_R_ARM64_BCC);
arm_movspx (code, dreg, ARMREG_SP);
if (cfg->param_area)
code = emit_subx_sp_imm (code, cfg->param_area);
break;
}
case OP_LOCALLOC_IMM: {
int imm, offset;
imm = ALIGN_TO (ins->inst_imm, MONO_ARCH_FRAME_ALIGNMENT);
g_assert (arm_is_arith_imm (imm));
arm_subx_imm (code, ARMREG_SP, ARMREG_SP, imm);
/* Init */
g_assert (MONO_ARCH_FRAME_ALIGNMENT == 16);
offset = 0;
while (offset < imm) {
arm_stpx (code, ARMREG_RZR, ARMREG_RZR, ARMREG_SP, offset);
offset += 16;
}
arm_movspx (code, dreg, ARMREG_SP);
if (cfg->param_area)
code = emit_subx_sp_imm (code, cfg->param_area);
break;
}
case OP_AOTCONST:
code = emit_aotconst (cfg, code, dreg, (MonoJumpInfoType)(gsize)ins->inst_i1, ins->inst_p0);
break;
case OP_OBJC_GET_SELECTOR:
mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_OBJC_SELECTOR_REF, ins->inst_p0);
/* See arch_emit_objc_selector_ref () in aot-compiler.c */
arm_ldrx_lit (code, ins->dreg, 0);
arm_nop (code);
arm_nop (code);
break;
case OP_SEQ_POINT: {
MonoInst *info_var = cfg->arch.seq_point_info_var;
/*
* For AOT, we use one got slot per method, which will point to a
* SeqPointInfo structure, containing all the information required
* by the code below.
*/
if (cfg->compile_aot) {
g_assert (info_var);
g_assert (info_var->opcode == OP_REGOFFSET);
}
if (ins->flags & MONO_INST_SINGLE_STEP_LOC) {
MonoInst *var = cfg->arch.ss_tramp_var;
g_assert (var);
g_assert (var->opcode == OP_REGOFFSET);
/* Load ss_tramp_var */
/* This is equal to &ss_trampoline */
arm_ldrx (code, ARMREG_IP1, var->inst_basereg, var->inst_offset);
/* Load the trampoline address */
arm_ldrx (code, ARMREG_IP1, ARMREG_IP1, 0);
/* Call it if it is non-null */
arm_cbzx (code, ARMREG_IP1, code + 8);
code = mono_arm_emit_blrx (code, ARMREG_IP1);
}
mono_add_seq_point (cfg, bb, ins, code - cfg->native_code);
if (cfg->compile_aot) {
const guint32 offset = code - cfg->native_code;
guint32 val;
arm_ldrx (code, ARMREG_IP1, info_var->inst_basereg, info_var->inst_offset);
/* Add the offset */
val = ((offset / 4) * sizeof (target_mgreg_t)) + MONO_STRUCT_OFFSET (SeqPointInfo, bp_addrs);
/* Load the info->bp_addrs [offset], which is either 0 or the address of the bp trampoline */
code = emit_ldrx (code, ARMREG_IP1, ARMREG_IP1, val);
/* Skip the call if it is 0 */
arm_cbzx (code, ARMREG_IP1, code + 8);
/* Call the breakpoint trampoline */
code = mono_arm_emit_blrx (code, ARMREG_IP1);
} else {
MonoInst *var = cfg->arch.bp_tramp_var;
g_assert (var);
g_assert (var->opcode == OP_REGOFFSET);
/* Load the address of the bp trampoline into IP0 */
arm_ldrx (code, ARMREG_IP0, var->inst_basereg, var->inst_offset);
/*
* A placeholder for a possible breakpoint inserted by
* mono_arch_set_breakpoint ().
*/
arm_nop (code);
}
break;
}
/* BRANCH */
case OP_BR:
mono_add_patch_info_rel (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_target_bb, MONO_R_ARM64_B);
arm_b (code, code);
break;
case OP_BR_REG:
arm_brx (code, sreg1);
break;
case OP_IBEQ:
case OP_IBGE:
case OP_IBGT:
case OP_IBLE:
case OP_IBLT:
case OP_IBNE_UN:
case OP_IBGE_UN:
case OP_IBGT_UN:
case OP_IBLE_UN:
case OP_IBLT_UN:
case OP_LBEQ:
case OP_LBGE:
case OP_LBGT:
case OP_LBLE:
case OP_LBLT:
case OP_LBNE_UN:
case OP_LBGE_UN:
case OP_LBGT_UN:
case OP_LBLE_UN:
case OP_LBLT_UN:
case OP_FBEQ:
case OP_FBNE_UN:
case OP_FBLT:
case OP_FBGT:
case OP_FBGT_UN:
case OP_FBLE:
case OP_FBGE:
case OP_FBGE_UN: {
int cond;
mono_add_patch_info_rel (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_true_bb, MONO_R_ARM64_BCC);
cond = opcode_to_armcond (ins->opcode);
arm_bcc (code, cond, 0);
break;
}
case OP_FBLT_UN:
mono_add_patch_info_rel (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_true_bb, MONO_R_ARM64_BCC);
/* For fp compares, ARMCOND_LT is lt or unordered */
arm_bcc (code, ARMCOND_LT, 0);
break;
case OP_FBLE_UN:
mono_add_patch_info_rel (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_true_bb, MONO_R_ARM64_BCC);
arm_bcc (code, ARMCOND_EQ, 0);
mono_add_patch_info_rel (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_true_bb, MONO_R_ARM64_BCC);
/* For fp compares, ARMCOND_LT is lt or unordered */
arm_bcc (code, ARMCOND_LT, 0);
break;
case OP_ARM64_CBZW:
mono_add_patch_info_rel (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_true_bb, MONO_R_ARM64_CBZ);
arm_cbzw (code, sreg1, 0);
break;
case OP_ARM64_CBZX:
mono_add_patch_info_rel (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_true_bb, MONO_R_ARM64_CBZ);
arm_cbzx (code, sreg1, 0);
break;
case OP_ARM64_CBNZW:
mono_add_patch_info_rel (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_true_bb, MONO_R_ARM64_CBZ);
arm_cbnzw (code, sreg1, 0);
break;
case OP_ARM64_CBNZX:
mono_add_patch_info_rel (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_true_bb, MONO_R_ARM64_CBZ);
arm_cbnzx (code, sreg1, 0);
break;
/* ALU */
case OP_IADD:
arm_addw (code, dreg, sreg1, sreg2);
break;
case OP_LADD:
arm_addx (code, dreg, sreg1, sreg2);
break;
case OP_ISUB:
arm_subw (code, dreg, sreg1, sreg2);
break;
case OP_LSUB:
arm_subx (code, dreg, sreg1, sreg2);
break;
case OP_IAND:
arm_andw (code, dreg, sreg1, sreg2);
break;
case OP_LAND:
arm_andx (code, dreg, sreg1, sreg2);
break;
case OP_IOR:
arm_orrw (code, dreg, sreg1, sreg2);
break;
case OP_LOR:
arm_orrx (code, dreg, sreg1, sreg2);
break;
case OP_IXOR:
arm_eorw (code, dreg, sreg1, sreg2);
break;
case OP_LXOR:
arm_eorx (code, dreg, sreg1, sreg2);
break;
case OP_INEG:
arm_negw (code, dreg, sreg1);
break;
case OP_LNEG:
arm_negx (code, dreg, sreg1);
break;
case OP_INOT:
arm_mvnw (code, dreg, sreg1);
break;
case OP_LNOT:
arm_mvnx (code, dreg, sreg1);
break;
case OP_IADDCC:
arm_addsw (code, dreg, sreg1, sreg2);
break;
case OP_ADDCC:
case OP_LADDCC:
arm_addsx (code, dreg, sreg1, sreg2);
break;
case OP_ISUBCC:
arm_subsw (code, dreg, sreg1, sreg2);
break;
case OP_LSUBCC:
case OP_SUBCC:
arm_subsx (code, dreg, sreg1, sreg2);
break;
case OP_ICOMPARE:
arm_cmpw (code, sreg1, sreg2);
break;
case OP_COMPARE:
case OP_LCOMPARE:
arm_cmpx (code, sreg1, sreg2);
break;
case OP_IADD_IMM:
code = emit_addw_imm (code, dreg, sreg1, imm);
break;
case OP_LADD_IMM:
case OP_ADD_IMM:
code = emit_addx_imm (code, dreg, sreg1, imm);
break;
case OP_ISUB_IMM:
code = emit_subw_imm (code, dreg, sreg1, imm);
break;
case OP_LSUB_IMM:
code = emit_subx_imm (code, dreg, sreg1, imm);
break;
case OP_IAND_IMM:
code = emit_andw_imm (code, dreg, sreg1, imm);
break;
case OP_LAND_IMM:
case OP_AND_IMM:
code = emit_andx_imm (code, dreg, sreg1, imm);
break;
case OP_IOR_IMM:
code = emit_orrw_imm (code, dreg, sreg1, imm);
break;
case OP_LOR_IMM:
code = emit_orrx_imm (code, dreg, sreg1, imm);
break;
case OP_IXOR_IMM:
code = emit_eorw_imm (code, dreg, sreg1, imm);
break;
case OP_LXOR_IMM:
code = emit_eorx_imm (code, dreg, sreg1, imm);
break;
case OP_ICOMPARE_IMM:
code = emit_cmpw_imm (code, sreg1, imm);
break;
case OP_LCOMPARE_IMM:
case OP_COMPARE_IMM:
if (imm == 0) {
arm_cmpx (code, sreg1, ARMREG_RZR);
} else {
// FIXME: 32 vs 64 bit issues for 0xffffffff
code = emit_imm64 (code, ARMREG_LR, imm);
arm_cmpx (code, sreg1, ARMREG_LR);
}
break;
case OP_ISHL:
arm_lslvw (code, dreg, sreg1, sreg2);
break;
case OP_LSHL:
arm_lslvx (code, dreg, sreg1, sreg2);
break;
case OP_ISHR:
arm_asrvw (code, dreg, sreg1, sreg2);
break;
case OP_LSHR:
arm_asrvx (code, dreg, sreg1, sreg2);
break;
case OP_ISHR_UN:
arm_lsrvw (code, dreg, sreg1, sreg2);
break;
case OP_LSHR_UN:
arm_lsrvx (code, dreg, sreg1, sreg2);
break;
case OP_ISHL_IMM:
if (imm == 0)
arm_movx (code, dreg, sreg1);
else
arm_lslw (code, dreg, sreg1, imm);
break;
case OP_SHL_IMM:
case OP_LSHL_IMM:
if (imm == 0)
arm_movx (code, dreg, sreg1);
else
arm_lslx (code, dreg, sreg1, imm);
break;
case OP_ISHR_IMM:
if (imm == 0)
arm_movx (code, dreg, sreg1);
else
arm_asrw (code, dreg, sreg1, imm);
break;
case OP_LSHR_IMM:
case OP_SHR_IMM:
if (imm == 0)
arm_movx (code, dreg, sreg1);
else
arm_asrx (code, dreg, sreg1, imm);
break;
case OP_ISHR_UN_IMM:
if (imm == 0)
arm_movx (code, dreg, sreg1);
else
arm_lsrw (code, dreg, sreg1, imm);
break;
case OP_SHR_UN_IMM:
case OP_LSHR_UN_IMM:
if (imm == 0)
arm_movx (code, dreg, sreg1);
else
arm_lsrx (code, dreg, sreg1, imm);
break;
/* 64BIT ALU */
case OP_SEXT_I4:
arm_sxtwx (code, dreg, sreg1);
break;
case OP_ZEXT_I4:
/* Clean out the upper word */
arm_movw (code, dreg, sreg1);
break;
/* MULTIPLY/DIVISION */
case OP_IDIV:
case OP_IREM:
// FIXME: Optimize this
/* Check for zero */
arm_cmpx_imm (code, sreg2, 0);
code = emit_cond_exc (cfg, code, OP_COND_EXC_IEQ, "DivideByZeroException");
/* Check for INT_MIN/-1 */
code = emit_imm (code, ARMREG_IP0, 0x80000000);
arm_cmpx (code, sreg1, ARMREG_IP0);
arm_cset (code, ARMCOND_EQ, ARMREG_IP1);
code = emit_imm (code, ARMREG_IP0, 0xffffffff);
arm_cmpx (code, sreg2, ARMREG_IP0);
arm_cset (code, ARMCOND_EQ, ARMREG_IP0);
arm_andx (code, ARMREG_IP0, ARMREG_IP0, ARMREG_IP1);
arm_cmpx_imm (code, ARMREG_IP0, 1);
code = emit_cond_exc (cfg, code, OP_COND_EXC_IEQ, "OverflowException");
if (ins->opcode == OP_IREM) {
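/* rem = dividend - (dividend / divisor) * divisor */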
arm_sdivw (code, ARMREG_LR, sreg1, sreg2);
arm_msubw (code, dreg, ARMREG_LR, sreg2, sreg1);
} else {
arm_sdivw (code, dreg, sreg1, sreg2);
}
break;
case OP_IDIV_UN:
arm_cmpx_imm (code, sreg2, 0);
code = emit_cond_exc (cfg, code, OP_COND_EXC_IEQ, "DivideByZeroException");
arm_udivw (code, dreg, sreg1, sreg2);
break;
case OP_IREM_UN:
arm_cmpx_imm (code, sreg2, 0);
code = emit_cond_exc (cfg, code, OP_COND_EXC_IEQ, "DivideByZeroException");
arm_udivw (code, ARMREG_LR, sreg1, sreg2);
arm_msubw (code, dreg, ARMREG_LR, sreg2, sreg1);
break;
case OP_LDIV:
case OP_LREM:
// FIXME: Optimize this
/* Check for zero */
arm_cmpx_imm (code, sreg2, 0);
code = emit_cond_exc (cfg, code, OP_COND_EXC_IEQ, "DivideByZeroException");
/* Check for INT64_MIN/-1 */
code = emit_imm64 (code, ARMREG_IP0, 0x8000000000000000);
arm_cmpx (code, sreg1, ARMREG_IP0);
arm_cset (code, ARMCOND_EQ, ARMREG_IP1);
code = emit_imm64 (code, ARMREG_IP0, 0xffffffffffffffff);
arm_cmpx (code, sreg2, ARMREG_IP0);
arm_cset (code, ARMCOND_EQ, ARMREG_IP0);
arm_andx (code, ARMREG_IP0, ARMREG_IP0, ARMREG_IP1);
arm_cmpx_imm (code, ARMREG_IP0, 1);
/* 64 bit uses OverflowException */
code = emit_cond_exc (cfg, code, OP_COND_EXC_IEQ, "OverflowException");
if (ins->opcode == OP_LREM) {
arm_sdivx (code, ARMREG_LR, sreg1, sreg2);
arm_msubx (code, dreg, ARMREG_LR, sreg2, sreg1);
} else {
arm_sdivx (code, dreg, sreg1, sreg2);
}
break;
case OP_LDIV_UN:
arm_cmpx_imm (code, sreg2, 0);
code = emit_cond_exc (cfg, code, OP_COND_EXC_IEQ, "DivideByZeroException");
arm_udivx (code, dreg, sreg1, sreg2);
break;
case OP_LREM_UN:
arm_cmpx_imm (code, sreg2, 0);
code = emit_cond_exc (cfg, code, OP_COND_EXC_IEQ, "DivideByZeroException");
arm_udivx (code, ARMREG_LR, sreg1, sreg2);
arm_msubx (code, dreg, ARMREG_LR, sreg2, sreg1);
break;
case OP_IMUL:
arm_mulw (code, dreg, sreg1, sreg2);
break;
case OP_LMUL:
arm_mulx (code, dreg, sreg1, sreg2);
break;
case OP_IMUL_IMM:
code = emit_imm (code, ARMREG_LR, imm);
arm_mulw (code, dreg, sreg1, ARMREG_LR);
break;
case OP_MUL_IMM:
case OP_LMUL_IMM:
code = emit_imm (code, ARMREG_LR, imm);
arm_mulx (code, dreg, sreg1, ARMREG_LR);
break;
/* CONVERSIONS */
case OP_ICONV_TO_I1:
case OP_LCONV_TO_I1:
arm_sxtbx (code, dreg, sreg1);
break;
case OP_ICONV_TO_I2:
case OP_LCONV_TO_I2:
arm_sxthx (code, dreg, sreg1);
break;
case OP_ICONV_TO_U1:
case OP_LCONV_TO_U1:
arm_uxtbw (code, dreg, sreg1);
break;
case OP_ICONV_TO_U2:
case OP_LCONV_TO_U2:
arm_uxthw (code, dreg, sreg1);
break;
/* CSET */
case OP_CEQ:
case OP_ICEQ:
case OP_LCEQ:
case OP_CLT:
case OP_ICLT:
case OP_LCLT:
case OP_CGT:
case OP_ICGT:
case OP_LCGT:
case OP_CLT_UN:
case OP_ICLT_UN:
case OP_LCLT_UN:
case OP_CGT_UN:
case OP_ICGT_UN:
case OP_LCGT_UN:
case OP_ICNEQ:
case OP_ICGE:
case OP_ICLE:
case OP_ICGE_UN:
case OP_ICLE_UN: {
int cond;
cond = opcode_to_armcond (ins->opcode);
arm_cset (code, cond, dreg);
break;
}
case OP_FCEQ:
case OP_FCLT:
case OP_FCLT_UN:
case OP_FCGT:
case OP_FCGT_UN:
case OP_FCNEQ:
case OP_FCLE:
case OP_FCGE: {
int cond;
cond = opcode_to_armcond (ins->opcode);
arm_fcmpd (code, sreg1, sreg2);
arm_cset (code, cond, dreg);
break;
}
/* MEMORY */
case OP_LOADI1_MEMBASE:
code = emit_ldrsbx (code, dreg, ins->inst_basereg, ins->inst_offset);
break;
case OP_LOADU1_MEMBASE:
code = emit_ldrb (code, dreg, ins->inst_basereg, ins->inst_offset);
break;
case OP_LOADI2_MEMBASE:
code = emit_ldrshx (code, dreg, ins->inst_basereg, ins->inst_offset);
break;
case OP_LOADU2_MEMBASE:
code = emit_ldrh (code, dreg, ins->inst_basereg, ins->inst_offset);
break;
case OP_LOADI4_MEMBASE:
code = emit_ldrswx (code, dreg, ins->inst_basereg, ins->inst_offset);
break;
case OP_LOADU4_MEMBASE:
code = emit_ldrw (code, dreg, ins->inst_basereg, ins->inst_offset);
break;
case OP_LOAD_MEMBASE:
case OP_LOADI8_MEMBASE:
code = emit_ldrx (code, dreg, ins->inst_basereg, ins->inst_offset);
break;
case OP_STOREI1_MEMBASE_IMM:
case OP_STOREI2_MEMBASE_IMM:
case OP_STOREI4_MEMBASE_IMM:
case OP_STORE_MEMBASE_IMM:
case OP_STOREI8_MEMBASE_IMM: {
int immreg;
if (imm != 0) {
code = emit_imm (code, ARMREG_LR, imm);
immreg = ARMREG_LR;
} else {
immreg = ARMREG_RZR;
}
switch (ins->opcode) {
case OP_STOREI1_MEMBASE_IMM:
code = emit_strb (code, immreg, ins->inst_destbasereg, ins->inst_offset);
break;
case OP_STOREI2_MEMBASE_IMM:
code = emit_strh (code, immreg, ins->inst_destbasereg, ins->inst_offset);
break;
case OP_STOREI4_MEMBASE_IMM:
code = emit_strw (code, immreg, ins->inst_destbasereg, ins->inst_offset);
break;
case OP_STORE_MEMBASE_IMM:
case OP_STOREI8_MEMBASE_IMM:
code = emit_strx (code, immreg, ins->inst_destbasereg, ins->inst_offset);
break;
default:
g_assert_not_reached ();
break;
}
break;
}
case OP_STOREI1_MEMBASE_REG:
code = emit_strb (code, sreg1, ins->inst_destbasereg, ins->inst_offset);
break;
case OP_STOREI2_MEMBASE_REG:
code = emit_strh (code, sreg1, ins->inst_destbasereg, ins->inst_offset);
break;
case OP_STOREI4_MEMBASE_REG:
code = emit_strw (code, sreg1, ins->inst_destbasereg, ins->inst_offset);
break;
case OP_STORE_MEMBASE_REG:
case OP_STOREI8_MEMBASE_REG:
code = emit_strx (code, sreg1, ins->inst_destbasereg, ins->inst_offset);
break;
case OP_TLS_GET:
code = emit_tls_get (code, dreg, ins->inst_offset);
break;
case OP_TLS_SET:
code = emit_tls_set (code, sreg1, ins->inst_offset);
break;
/* Atomic */
case OP_MEMORY_BARRIER:
arm_dmb (code, ARM_DMB_ISH);
break;
case OP_ATOMIC_ADD_I4: {
guint8 *buf [16];
buf [0] = code;
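/* ll/sc loop: reload and retry until the exclusive store succeeds */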
arm_ldxrw (code, ARMREG_IP0, sreg1);
arm_addx (code, ARMREG_IP0, ARMREG_IP0, sreg2);
arm_stlxrw (code, ARMREG_IP1, ARMREG_IP0, sreg1);
arm_cbnzw (code, ARMREG_IP1, buf [0]);
arm_dmb (code, ARM_DMB_ISH);
arm_movx (code, dreg, ARMREG_IP0);
break;
}
case OP_ATOMIC_ADD_I8: {
guint8 *buf [16];
buf [0] = code;
arm_ldxrx (code, ARMREG_IP0, sreg1);
arm_addx (code, ARMREG_IP0, ARMREG_IP0, sreg2);
arm_stlxrx (code, ARMREG_IP1, ARMREG_IP0, sreg1);
arm_cbnzx (code, ARMREG_IP1, buf [0]);
arm_dmb (code, ARM_DMB_ISH);
arm_movx (code, dreg, ARMREG_IP0);
break;
}
case OP_ATOMIC_EXCHANGE_I4: {
guint8 *buf [16];
buf [0] = code;
arm_ldxrw (code, ARMREG_IP0, sreg1);
arm_stlxrw (code, ARMREG_IP1, sreg2, sreg1);
arm_cbnzw (code, ARMREG_IP1, buf [0]);
arm_dmb (code, ARM_DMB_ISH);
arm_movx (code, dreg, ARMREG_IP0);
break;
}
case OP_ATOMIC_EXCHANGE_I8: {
guint8 *buf [16];
buf [0] = code;
arm_ldxrx (code, ARMREG_IP0, sreg1);
arm_stlxrx (code, ARMREG_IP1, sreg2, sreg1);
arm_cbnzw (code, ARMREG_IP1, buf [0]);
arm_dmb (code, ARM_DMB_ISH);
arm_movx (code, dreg, ARMREG_IP0);
break;
}
case OP_ATOMIC_CAS_I4: {
guint8 *buf [16];
/* sreg2 is the value, sreg3 is the comparand */
buf [0] = code;
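/* Load-exclusive the current value; only store the new value if it equals the comparand */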
arm_ldxrw (code, ARMREG_IP0, sreg1);
arm_cmpw (code, ARMREG_IP0, ins->sreg3);
buf [1] = code;
arm_bcc (code, ARMCOND_NE, 0);
arm_stlxrw (code, ARMREG_IP1, sreg2, sreg1);
arm_cbnzw (code, ARMREG_IP1, buf [0]);
arm_patch_rel (buf [1], code, MONO_R_ARM64_BCC);
arm_dmb (code, ARM_DMB_ISH);
arm_movx (code, dreg, ARMREG_IP0);
break;
}
case OP_ATOMIC_CAS_I8: {
guint8 *buf [16];
buf [0] = code;
arm_ldxrx (code, ARMREG_IP0, sreg1);
arm_cmpx (code, ARMREG_IP0, ins->sreg3);
buf [1] = code;
arm_bcc (code, ARMCOND_NE, 0);
arm_stlxrx (code, ARMREG_IP1, sreg2, sreg1);
arm_cbnzw (code, ARMREG_IP1, buf [0]);
arm_patch_rel (buf [1], code, MONO_R_ARM64_BCC);
arm_dmb (code, ARM_DMB_ISH);
arm_movx (code, dreg, ARMREG_IP0);
break;
}
case OP_ATOMIC_LOAD_I1: {
code = emit_addx_imm (code, ARMREG_LR, ins->inst_basereg, ins->inst_offset);
if (ins->backend.memory_barrier_kind == MONO_MEMORY_BARRIER_SEQ)
arm_dmb (code, ARM_DMB_ISH);
arm_ldarb (code, ins->dreg, ARMREG_LR);
arm_sxtbx (code, ins->dreg, ins->dreg);
break;
}
case OP_ATOMIC_LOAD_U1: {
code = emit_addx_imm (code, ARMREG_LR, ins->inst_basereg, ins->inst_offset);
if (ins->backend.memory_barrier_kind == MONO_MEMORY_BARRIER_SEQ)
arm_dmb (code, ARM_DMB_ISH);
arm_ldarb (code, ins->dreg, ARMREG_LR);
arm_uxtbx (code, ins->dreg, ins->dreg);
break;
}
case OP_ATOMIC_LOAD_I2: {
code = emit_addx_imm (code, ARMREG_LR, ins->inst_basereg, ins->inst_offset);
if (ins->backend.memory_barrier_kind == MONO_MEMORY_BARRIER_SEQ)
arm_dmb (code, ARM_DMB_ISH);
arm_ldarh (code, ins->dreg, ARMREG_LR);
arm_sxthx (code, ins->dreg, ins->dreg);
break;
}
case OP_ATOMIC_LOAD_U2: {
code = emit_addx_imm (code, ARMREG_LR, ins->inst_basereg, ins->inst_offset);
if (ins->backend.memory_barrier_kind == MONO_MEMORY_BARRIER_SEQ)
arm_dmb (code, ARM_DMB_ISH);
arm_ldarh (code, ins->dreg, ARMREG_LR);
arm_uxthx (code, ins->dreg, ins->dreg);
break;
}
case OP_ATOMIC_LOAD_I4: {
code = emit_addx_imm (code, ARMREG_LR, ins->inst_basereg, ins->inst_offset);
if (ins->backend.memory_barrier_kind == MONO_MEMORY_BARRIER_SEQ)
arm_dmb (code, ARM_DMB_ISH);
arm_ldarw (code, ins->dreg, ARMREG_LR);
arm_sxtwx (code, ins->dreg, ins->dreg);
break;
}
case OP_ATOMIC_LOAD_U4: {
code = emit_addx_imm (code, ARMREG_LR, ins->inst_basereg, ins->inst_offset);
if (ins->backend.memory_barrier_kind == MONO_MEMORY_BARRIER_SEQ)
arm_dmb (code, ARM_DMB_ISH);
arm_ldarw (code, ins->dreg, ARMREG_LR);
arm_movw (code, ins->dreg, ins->dreg); /* Clear upper half of the register. */
break;
}
case OP_ATOMIC_LOAD_I8:
case OP_ATOMIC_LOAD_U8: {
code = emit_addx_imm (code, ARMREG_LR, ins->inst_basereg, ins->inst_offset);
if (ins->backend.memory_barrier_kind == MONO_MEMORY_BARRIER_SEQ)
arm_dmb (code, ARM_DMB_ISH);
arm_ldarx (code, ins->dreg, ARMREG_LR);
break;
}
case OP_ATOMIC_LOAD_R4: {
code = emit_addx_imm (code, ARMREG_LR, ins->inst_basereg, ins->inst_offset);
if (ins->backend.memory_barrier_kind == MONO_MEMORY_BARRIER_SEQ)
arm_dmb (code, ARM_DMB_ISH);
arm_ldarw (code, ARMREG_LR, ARMREG_LR);
arm_fmov_rx_to_double (code, ins->dreg, ARMREG_LR);
break;
}
case OP_ATOMIC_LOAD_R8: {
code = emit_addx_imm (code, ARMREG_LR, ins->inst_basereg, ins->inst_offset);
if (ins->backend.memory_barrier_kind == MONO_MEMORY_BARRIER_SEQ)
arm_dmb (code, ARM_DMB_ISH);
arm_ldarx (code, ARMREG_LR, ARMREG_LR);
arm_fmov_rx_to_double (code, ins->dreg, ARMREG_LR);
break;
}
case OP_ATOMIC_STORE_I1:
case OP_ATOMIC_STORE_U1: {
code = emit_addx_imm (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
arm_stlrb (code, ARMREG_LR, ins->sreg1);
if (ins->backend.memory_barrier_kind == MONO_MEMORY_BARRIER_SEQ)
arm_dmb (code, ARM_DMB_ISH);
break;
}
case OP_ATOMIC_STORE_I2:
case OP_ATOMIC_STORE_U2: {
code = emit_addx_imm (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
arm_stlrh (code, ARMREG_LR, ins->sreg1);
if (ins->backend.memory_barrier_kind == MONO_MEMORY_BARRIER_SEQ)
arm_dmb (code, ARM_DMB_ISH);
break;
}
case OP_ATOMIC_STORE_I4:
case OP_ATOMIC_STORE_U4: {
code = emit_addx_imm (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
arm_stlrw (code, ARMREG_LR, ins->sreg1);
if (ins->backend.memory_barrier_kind == MONO_MEMORY_BARRIER_SEQ)
arm_dmb (code, ARM_DMB_ISH);
break;
}
case OP_ATOMIC_STORE_I8:
case OP_ATOMIC_STORE_U8: {
code = emit_addx_imm (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
arm_stlrx (code, ARMREG_LR, ins->sreg1);
if (ins->backend.memory_barrier_kind == MONO_MEMORY_BARRIER_SEQ)
arm_dmb (code, ARM_DMB_ISH);
break;
}
case OP_ATOMIC_STORE_R4: {
code = emit_addx_imm (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
arm_fmov_double_to_rx (code, ARMREG_IP0, ins->sreg1);
arm_stlrw (code, ARMREG_LR, ARMREG_IP0);
if (ins->backend.memory_barrier_kind == MONO_MEMORY_BARRIER_SEQ)
arm_dmb (code, ARM_DMB_ISH);
break;
}
case OP_ATOMIC_STORE_R8: {
code = emit_addx_imm (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
arm_fmov_double_to_rx (code, ARMREG_IP0, ins->sreg1);
arm_stlrx (code, ARMREG_LR, ARMREG_IP0);
if (ins->backend.memory_barrier_kind == MONO_MEMORY_BARRIER_SEQ)
arm_dmb (code, ARM_DMB_ISH);
break;
}
/* FP */
case OP_R8CONST: {
guint64 imm = *(guint64*)ins->inst_p0;
if (imm == 0) {
arm_fmov_rx_to_double (code, dreg, ARMREG_RZR);
} else {
code = emit_imm64 (code, ARMREG_LR, imm);
arm_fmov_rx_to_double (code, ins->dreg, ARMREG_LR);
}
break;
}
case OP_R4CONST: {
guint64 imm = *(guint32*)ins->inst_p0;
code = emit_imm64 (code, ARMREG_LR, imm);
arm_fmov_rx_to_double (code, dreg, ARMREG_LR);
break;
}
case OP_LOADR8_MEMBASE:
code = emit_ldrfpx (code, dreg, ins->inst_basereg, ins->inst_offset);
break;
case OP_LOADR4_MEMBASE:
code = emit_ldrfpw (code, dreg, ins->inst_basereg, ins->inst_offset);
break;
case OP_STORER8_MEMBASE_REG:
code = emit_strfpx (code, sreg1, ins->inst_destbasereg, ins->inst_offset);
break;
case OP_STORER4_MEMBASE_REG:
code = emit_strfpw (code, sreg1, ins->inst_destbasereg, ins->inst_offset);
break;
case OP_FMOVE:
if (dreg != sreg1)
arm_fmovd (code, dreg, sreg1);
break;
case OP_RMOVE:
if (dreg != sreg1)
arm_fmovs (code, dreg, sreg1);
break;
case OP_MOVE_F_TO_I4:
arm_fmov_double_to_rx (code, ins->dreg, ins->sreg1);
break;
case OP_MOVE_I4_TO_F:
arm_fmov_rx_to_double (code, ins->dreg, ins->sreg1);
break;
case OP_MOVE_F_TO_I8:
arm_fmov_double_to_rx (code, ins->dreg, ins->sreg1);
break;
case OP_MOVE_I8_TO_F:
arm_fmov_rx_to_double (code, ins->dreg, ins->sreg1);
break;
case OP_FCOMPARE:
arm_fcmpd (code, sreg1, sreg2);
break;
case OP_RCOMPARE:
arm_fcmps (code, sreg1, sreg2);
break;
case OP_FCONV_TO_I1:
arm_fcvtzs_dx (code, dreg, sreg1);
arm_sxtbx (code, dreg, dreg);
break;
case OP_FCONV_TO_U1:
arm_fcvtzu_dx (code, dreg, sreg1);
arm_uxtbw (code, dreg, dreg);
break;
case OP_FCONV_TO_I2:
arm_fcvtzs_dx (code, dreg, sreg1);
arm_sxthx (code, dreg, dreg);
break;
case OP_FCONV_TO_U2:
arm_fcvtzu_dx (code, dreg, sreg1);
arm_uxthw (code, dreg, dreg);
break;
case OP_FCONV_TO_I4:
arm_fcvtzs_dx (code, dreg, sreg1);
arm_sxtwx (code, dreg, dreg);
break;
case OP_FCONV_TO_U4:
arm_fcvtzu_dx (code, dreg, sreg1);
break;
case OP_FCONV_TO_I8:
case OP_FCONV_TO_I:
arm_fcvtzs_dx (code, dreg, sreg1);
break;
case OP_FCONV_TO_U8:
arm_fcvtzu_dx (code, dreg, sreg1);
break;
case OP_FCONV_TO_R4:
arm_fcvt_ds (code, dreg, sreg1);
break;
case OP_ICONV_TO_R4:
arm_scvtf_rw_to_s (code, dreg, sreg1);
break;
case OP_LCONV_TO_R4:
arm_scvtf_rx_to_s (code, dreg, sreg1);
break;
case OP_ICONV_TO_R8:
arm_scvtf_rw_to_d (code, dreg, sreg1);
break;
case OP_LCONV_TO_R8:
arm_scvtf_rx_to_d (code, dreg, sreg1);
break;
case OP_ICONV_TO_R_UN:
arm_ucvtf_rw_to_d (code, dreg, sreg1);
break;
case OP_LCONV_TO_R_UN:
arm_ucvtf_rx_to_d (code, dreg, sreg1);
break;
case OP_FADD:
arm_fadd_d (code, dreg, sreg1, sreg2);
break;
case OP_FSUB:
arm_fsub_d (code, dreg, sreg1, sreg2);
break;
case OP_FMUL:
arm_fmul_d (code, dreg, sreg1, sreg2);
break;
case OP_FDIV:
arm_fdiv_d (code, dreg, sreg1, sreg2);
break;
case OP_FREM:
/* Emulated */
g_assert_not_reached ();
break;
case OP_FNEG:
arm_fneg_d (code, dreg, sreg1);
break;
case OP_ARM_SETFREG_R4:
arm_fcvt_ds (code, dreg, sreg1);
break;
case OP_CKFINITE:
/* Check for infinity */
code = emit_imm64 (code, ARMREG_LR, 0x7fefffffffffffffLL);
arm_fmov_rx_to_double (code, FP_TEMP_REG, ARMREG_LR);
arm_fabs_d (code, FP_TEMP_REG2, sreg1);
arm_fcmpd (code, FP_TEMP_REG2, FP_TEMP_REG);
code = emit_cond_exc (cfg, code, OP_COND_EXC_GT, "ArithmeticException");
/* Check for nans */
arm_fcmpd (code, FP_TEMP_REG2, FP_TEMP_REG2);
code = emit_cond_exc (cfg, code, OP_COND_EXC_OV, "ArithmeticException");
arm_fmovd (code, dreg, sreg1);
break;
/* R4 */
case OP_RADD:
arm_fadd_s (code, dreg, sreg1, sreg2);
break;
case OP_RSUB:
arm_fsub_s (code, dreg, sreg1, sreg2);
break;
case OP_RMUL:
arm_fmul_s (code, dreg, sreg1, sreg2);
break;
case OP_RDIV:
arm_fdiv_s (code, dreg, sreg1, sreg2);
break;
case OP_RNEG:
arm_fneg_s (code, dreg, sreg1);
break;
case OP_RCONV_TO_I1:
arm_fcvtzs_sx (code, dreg, sreg1);
arm_sxtbx (code, dreg, dreg);
break;
case OP_RCONV_TO_U1:
arm_fcvtzu_sx (code, dreg, sreg1);
arm_uxtbw (code, dreg, dreg);
break;
case OP_RCONV_TO_I2:
arm_fcvtzs_sx (code, dreg, sreg1);
arm_sxthx (code, dreg, dreg);
break;
case OP_RCONV_TO_U2:
arm_fcvtzu_sx (code, dreg, sreg1);
arm_uxthw (code, dreg, dreg);
break;
case OP_RCONV_TO_I4:
arm_fcvtzs_sx (code, dreg, sreg1);
arm_sxtwx (code, dreg, dreg);
break;
case OP_RCONV_TO_U4:
arm_fcvtzu_sx (code, dreg, sreg1);
break;
case OP_RCONV_TO_I8:
case OP_RCONV_TO_I:
arm_fcvtzs_sx (code, dreg, sreg1);
break;
case OP_RCONV_TO_U8:
arm_fcvtzu_sx (code, dreg, sreg1);
break;
case OP_RCONV_TO_R8:
arm_fcvt_sd (code, dreg, sreg1);
break;
case OP_RCONV_TO_R4:
if (dreg != sreg1)
arm_fmovs (code, dreg, sreg1);
break;
case OP_RCEQ:
case OP_RCLT:
case OP_RCLT_UN:
case OP_RCGT:
case OP_RCGT_UN:
case OP_RCNEQ:
case OP_RCLE:
case OP_RCGE: {
int cond;
cond = opcode_to_armcond (ins->opcode);
arm_fcmps (code, sreg1, sreg2);
arm_cset (code, cond, dreg);
break;
}
/* CALLS */
case OP_VOIDCALL:
case OP_CALL:
case OP_LCALL:
case OP_FCALL:
case OP_RCALL:
case OP_VCALL2: {
call = (MonoCallInst*)ins;
const MonoJumpInfoTarget patch = mono_call_to_patch (call);
code = emit_call (cfg, code, patch.type, patch.target);
code = emit_move_return_value (cfg, code, ins);
break;
}
case OP_VOIDCALL_REG:
case OP_CALL_REG:
case OP_LCALL_REG:
case OP_FCALL_REG:
case OP_RCALL_REG:
case OP_VCALL2_REG:
code = mono_arm_emit_blrx (code, sreg1);
code = emit_move_return_value (cfg, code, ins);
break;
case OP_VOIDCALL_MEMBASE:
case OP_CALL_MEMBASE:
case OP_LCALL_MEMBASE:
case OP_FCALL_MEMBASE:
case OP_RCALL_MEMBASE:
case OP_VCALL2_MEMBASE:
code = emit_ldrx (code, ARMREG_IP0, ins->inst_basereg, ins->inst_offset);
code = mono_arm_emit_blrx (code, ARMREG_IP0);
code = emit_move_return_value (cfg, code, ins);
break;
case OP_TAILCALL_PARAMETER:
// This opcode only helps compute the size of the subsequent
// OP_TAILCALL, but contributes no code.
g_assert (ins->next);
break;
case OP_TAILCALL:
case OP_TAILCALL_MEMBASE:
case OP_TAILCALL_REG: {
int branch_reg = ARMREG_IP0;
guint64 free_reg = 1 << ARMREG_IP1;
call = (MonoCallInst*)ins;
g_assert (!cfg->method->save_lmf);
max_len += call->stack_usage / sizeof (target_mgreg_t) * ins_get_size (OP_TAILCALL_PARAMETER);
while (G_UNLIKELY (offset + max_len > cfg->code_size)) {
cfg->code_size *= 2;
cfg->native_code = (unsigned char *)mono_realloc_native_code (cfg);
code = cfg->native_code + offset;
cfg->stat_code_reallocs++;
}
switch (ins->opcode) {
case OP_TAILCALL:
free_reg = (1 << ARMREG_IP0) | (1 << ARMREG_IP1);
break;
case OP_TAILCALL_REG:
g_assert (sreg1 != -1);
g_assert (sreg1 != ARMREG_IP0);
g_assert (sreg1 != ARMREG_IP1);
g_assert (sreg1 != ARMREG_LR);
g_assert (sreg1 != ARMREG_SP);
g_assert (sreg1 != ARMREG_R28);
if ((sreg1 << 1) & MONO_ARCH_CALLEE_SAVED_REGS) {
arm_movx (code, branch_reg, sreg1);
} else {
free_reg = (1 << ARMREG_IP0) | (1 << ARMREG_IP1);
branch_reg = sreg1;
}
break;
case OP_TAILCALL_MEMBASE:
g_assert (ins->inst_basereg != -1);
g_assert (ins->inst_basereg != ARMREG_IP0);
g_assert (ins->inst_basereg != ARMREG_IP1);
g_assert (ins->inst_basereg != ARMREG_LR);
g_assert (ins->inst_basereg != ARMREG_SP);
g_assert (ins->inst_basereg != ARMREG_R28);
code = emit_ldrx (code, branch_reg, ins->inst_basereg, ins->inst_offset);
break;
default:
g_assert_not_reached ();
}
// Copy stack arguments.
// FIXME a fixed size memcpy is desirable here,
// at least for larger values of stack_usage.
for (int i = 0; i < call->stack_usage; i += sizeof (target_mgreg_t)) {
code = emit_ldrx (code, ARMREG_LR, ARMREG_SP, i);
code = emit_strx (code, ARMREG_LR, ARMREG_R28, i);
}
/* Restore registers */
code = emit_load_regset (code, MONO_ARCH_CALLEE_SAVED_REGS & cfg->used_int_regs, ARMREG_FP, cfg->arch.saved_gregs_offset);
/* Destroy frame */
code = mono_arm_emit_destroy_frame (code, cfg->stack_offset, free_reg);
if (enable_ptrauth)
/* There is no retab to authenticate lr */
arm_autibsp (code);
switch (ins->opcode) {
case OP_TAILCALL:
if (cfg->compile_aot) {
/* This is not a PLT patch */
code = emit_aotconst (cfg, code, branch_reg, MONO_PATCH_INFO_METHOD_JUMP, call->method);
} else {
mono_add_patch_info_rel (cfg, code - cfg->native_code, MONO_PATCH_INFO_METHOD_JUMP, call->method, MONO_R_ARM64_B);
arm_b (code, code);
cfg->thunk_area += THUNK_SIZE;
break;
}
// fallthrough
case OP_TAILCALL_MEMBASE:
case OP_TAILCALL_REG:
code = mono_arm_emit_brx (code, branch_reg);
break;
default:
g_assert_not_reached ();
}
ins->flags |= MONO_INST_GC_CALLSITE;
ins->backend.pc_offset = code - cfg->native_code;
break;
}
case OP_ARGLIST:
g_assert (cfg->arch.cinfo);
code = emit_addx_imm (code, ARMREG_IP0, cfg->arch.args_reg, cfg->arch.cinfo->sig_cookie.offset);
arm_strx (code, ARMREG_IP0, sreg1, 0);
break;
case OP_DYN_CALL: {
MonoInst *var = cfg->dyn_call_var;
guint8 *labels [16];
int i;
/*
* sreg1 points to a DynCallArgs structure initialized by mono_arch_start_dyn_call ().
* sreg2 is the function to call.
*/
g_assert (var->opcode == OP_REGOFFSET);
arm_movx (code, ARMREG_LR, sreg1);
arm_movx (code, ARMREG_IP1, sreg2);
/* Save args buffer */
code = emit_strx (code, ARMREG_LR, var->inst_basereg, var->inst_offset);
/* Set fp argument regs */
code = emit_ldrw (code, ARMREG_R0, ARMREG_LR, MONO_STRUCT_OFFSET (DynCallArgs, n_fpargs));
arm_cmpw (code, ARMREG_R0, ARMREG_RZR);
labels [0] = code;
arm_bcc (code, ARMCOND_EQ, 0);
for (i = 0; i < 8; ++i)
code = emit_ldrfpx (code, ARMREG_D0 + i, ARMREG_LR, MONO_STRUCT_OFFSET (DynCallArgs, fpregs) + (i * 8));
arm_patch_rel (labels [0], code, MONO_R_ARM64_BCC);
/* Allocate callee area */
code = emit_ldrx (code, ARMREG_R0, ARMREG_LR, MONO_STRUCT_OFFSET (DynCallArgs, n_stackargs));
arm_lslw (code, ARMREG_R0, ARMREG_R0, 3);
arm_movspx (code, ARMREG_R1, ARMREG_SP);
arm_subx (code, ARMREG_R1, ARMREG_R1, ARMREG_R0);
arm_movspx (code, ARMREG_SP, ARMREG_R1);
/* Set stack args */
/* R1 = limit */
code = emit_ldrx (code, ARMREG_R1, ARMREG_LR, MONO_STRUCT_OFFSET (DynCallArgs, n_stackargs));
/* R2 = pointer into 'regs' */
code = emit_imm (code, ARMREG_R2, MONO_STRUCT_OFFSET (DynCallArgs, regs) + ((PARAM_REGS + 1) * sizeof (target_mgreg_t)));
arm_addx (code, ARMREG_R2, ARMREG_LR, ARMREG_R2);
/* R3 = pointer to stack */
arm_movspx (code, ARMREG_R3, ARMREG_SP);
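/* Copy n_stackargs slots from the 'regs' array to the newly allocated callee area */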
labels [0] = code;
arm_b (code, code);
labels [1] = code;
code = emit_ldrx (code, ARMREG_R5, ARMREG_R2, 0);
code = emit_strx (code, ARMREG_R5, ARMREG_R3, 0);
code = emit_addx_imm (code, ARMREG_R2, ARMREG_R2, sizeof (target_mgreg_t));
code = emit_addx_imm (code, ARMREG_R3, ARMREG_R3, sizeof (target_mgreg_t));
code = emit_subx_imm (code, ARMREG_R1, ARMREG_R1, 1);
arm_patch_rel (labels [0], code, MONO_R_ARM64_B);
arm_cmpw (code, ARMREG_R1, ARMREG_RZR);
arm_bcc (code, ARMCOND_GT, labels [1]);
/* Set argument registers + r8 */
code = mono_arm_emit_load_regarray (code, 0x1ff, ARMREG_LR, MONO_STRUCT_OFFSET (DynCallArgs, regs));
/* Make the call */
code = mono_arm_emit_blrx (code, ARMREG_IP1);
/* Save result */
code = emit_ldrx (code, ARMREG_LR, var->inst_basereg, var->inst_offset);
arm_strx (code, ARMREG_R0, ARMREG_LR, MONO_STRUCT_OFFSET (DynCallArgs, res));
arm_strx (code, ARMREG_R1, ARMREG_LR, MONO_STRUCT_OFFSET (DynCallArgs, res2));
/* Save fp result */
code = emit_ldrw (code, ARMREG_R0, ARMREG_LR, MONO_STRUCT_OFFSET (DynCallArgs, n_fpret));
arm_cmpw (code, ARMREG_R0, ARMREG_RZR);
labels [1] = code;
arm_bcc (code, ARMCOND_EQ, 0);
for (i = 0; i < 8; ++i)
code = emit_strfpx (code, ARMREG_D0 + i, ARMREG_LR, MONO_STRUCT_OFFSET (DynCallArgs, fpregs) + (i * 8));
arm_patch_rel (labels [1], code, MONO_R_ARM64_BCC);
break;
}
case OP_GENERIC_CLASS_INIT: {
int byte_offset;
guint8 *jump;
byte_offset = MONO_STRUCT_OFFSET (MonoVTable, initialized);
/* Load vtable->initialized */
arm_ldrsbx (code, ARMREG_IP0, sreg1, byte_offset);
jump = code;
arm_cbnzx (code, ARMREG_IP0, 0);
/* Slowpath */
g_assert (sreg1 == ARMREG_R0);
code = emit_call (cfg, code, MONO_PATCH_INFO_JIT_ICALL_ID,
GUINT_TO_POINTER (MONO_JIT_ICALL_mono_generic_class_init));
mono_arm_patch (jump, code, MONO_R_ARM64_CBZ);
break;
}
case OP_CHECK_THIS:
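			/* Implicit null check: fault here if the 'this' argument is NULL */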
arm_ldrb (code, ARMREG_LR, sreg1, 0);
break;
case OP_NOT_NULL:
case OP_NOT_REACHED:
case OP_DUMMY_USE:
case OP_DUMMY_ICONST:
case OP_DUMMY_I8CONST:
case OP_DUMMY_R8CONST:
case OP_DUMMY_R4CONST:
break;
case OP_IL_SEQ_POINT:
mono_add_seq_point (cfg, bb, ins, code - cfg->native_code);
break;
/* EH */
case OP_COND_EXC_C:
case OP_COND_EXC_IC:
case OP_COND_EXC_OV:
case OP_COND_EXC_IOV:
case OP_COND_EXC_NC:
case OP_COND_EXC_INC:
case OP_COND_EXC_NO:
case OP_COND_EXC_INO:
case OP_COND_EXC_EQ:
case OP_COND_EXC_IEQ:
case OP_COND_EXC_NE_UN:
case OP_COND_EXC_INE_UN:
case OP_COND_EXC_ILT:
case OP_COND_EXC_LT:
case OP_COND_EXC_ILT_UN:
case OP_COND_EXC_LT_UN:
case OP_COND_EXC_IGT:
case OP_COND_EXC_GT:
case OP_COND_EXC_IGT_UN:
case OP_COND_EXC_GT_UN:
case OP_COND_EXC_IGE:
case OP_COND_EXC_GE:
case OP_COND_EXC_IGE_UN:
case OP_COND_EXC_GE_UN:
case OP_COND_EXC_ILE:
case OP_COND_EXC_LE:
case OP_COND_EXC_ILE_UN:
case OP_COND_EXC_LE_UN:
code = emit_cond_exc (cfg, code, ins->opcode, (const char*)ins->inst_p1);
break;
case OP_THROW:
if (sreg1 != ARMREG_R0)
arm_movx (code, ARMREG_R0, sreg1);
code = emit_call (cfg, code, MONO_PATCH_INFO_JIT_ICALL_ID,
GUINT_TO_POINTER (MONO_JIT_ICALL_mono_arch_throw_exception));
break;
case OP_RETHROW:
if (sreg1 != ARMREG_R0)
arm_movx (code, ARMREG_R0, sreg1);
code = emit_call (cfg, code, MONO_PATCH_INFO_JIT_ICALL_ID,
GUINT_TO_POINTER (MONO_JIT_ICALL_mono_arch_rethrow_exception));
break;
case OP_CALL_HANDLER:
mono_add_patch_info_rel (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_target_bb, MONO_R_ARM64_BL);
arm_bl (code, 0);
cfg->thunk_area += THUNK_SIZE;
for (GList *tmp = ins->inst_eh_blocks; tmp != bb->clause_holes; tmp = tmp->prev)
mono_cfg_add_try_hole (cfg, ((MonoLeaveClause *) tmp->data)->clause, code, bb);
break;
case OP_START_HANDLER: {
MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
/* Save caller address */
code = emit_strx (code, ARMREG_LR, spvar->inst_basereg, spvar->inst_offset);
/*
* Reserve a param area, see test_0_finally_param_area ().
* This is needed because the param area is not set up when
* we are called from EH code.
*/
if (cfg->param_area)
code = emit_subx_sp_imm (code, cfg->param_area);
break;
}
case OP_ENDFINALLY:
case OP_ENDFILTER: {
MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
if (cfg->param_area)
code = emit_addx_sp_imm (code, cfg->param_area);
if (ins->opcode == OP_ENDFILTER && sreg1 != ARMREG_R0)
arm_movx (code, ARMREG_R0, sreg1);
		/* Return either to the point after the branch in OP_CALL_HANDLER, or to the EH code */
code = emit_ldrx (code, ARMREG_LR, spvar->inst_basereg, spvar->inst_offset);
arm_brx (code, ARMREG_LR);
break;
}
case OP_GET_EX_OBJ:
if (ins->dreg != ARMREG_R0)
arm_movx (code, ins->dreg, ARMREG_R0);
break;
case OP_LIVERANGE_START: {
if (cfg->verbose_level > 1)
printf ("R%d START=0x%x\n", MONO_VARINFO (cfg, ins->inst_c0)->vreg, (int)(code - cfg->native_code));
MONO_VARINFO (cfg, ins->inst_c0)->live_range_start = code - cfg->native_code;
break;
}
case OP_LIVERANGE_END: {
if (cfg->verbose_level > 1)
printf ("R%d END=0x%x\n", MONO_VARINFO (cfg, ins->inst_c0)->vreg, (int)(code - cfg->native_code));
MONO_VARINFO (cfg, ins->inst_c0)->live_range_end = code - cfg->native_code;
break;
}
case OP_GC_SAFE_POINT: {
guint8 *buf [1];
arm_ldrx (code, ARMREG_IP1, ins->sreg1, 0);
/* Call it if it is non-null */
buf [0] = code;
arm_cbzx (code, ARMREG_IP1, 0);
code = emit_call (cfg, code, MONO_PATCH_INFO_JIT_ICALL_ID, GUINT_TO_POINTER (MONO_JIT_ICALL_mono_threads_state_poll));
mono_arm_patch (buf [0], code, MONO_R_ARM64_CBZ);
break;
}
case OP_FILL_PROF_CALL_CTX:
for (int i = 0; i < MONO_MAX_IREGS; i++)
if ((MONO_ARCH_CALLEE_SAVED_REGS & (1 << i)) || i == ARMREG_SP || i == ARMREG_FP)
arm_strx (code, i, ins->sreg1, MONO_STRUCT_OFFSET (MonoContext, regs) + i * sizeof (target_mgreg_t));
break;
default:
g_warning ("unknown opcode %s in %s()\n", mono_inst_name (ins->opcode), __FUNCTION__);
g_assert_not_reached ();
}
if ((cfg->opt & MONO_OPT_BRANCH) && ((code - cfg->native_code - offset) > max_len)) {
g_warning ("wrong maximal instruction length of instruction %s (expected %d, got %d)",
mono_inst_name (ins->opcode), max_len, code - cfg->native_code - offset);
g_assert_not_reached ();
}
}
set_code_cursor (cfg, code);
/*
* If the compiled code size is larger than the bcc displacement (19 bits signed),
* insert branch islands between/inside basic blocks.
*/
if (cfg->arch.cond_branch_islands)
code = emit_branch_island (cfg, code, start_offset);
}
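/*
 * emit_move_args:
 *
 *   Emit code in the prolog to move the incoming arguments from their
 * ABI-mandated locations to the registers/stack slots assigned by the
 * register allocator.
 */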
static guint8*
emit_move_args (MonoCompile *cfg, guint8 *code)
{
MonoInst *ins;
CallInfo *cinfo;
ArgInfo *ainfo;
int i, part;
MonoMethodSignature *sig = mono_method_signature_internal (cfg->method);
cinfo = cfg->arch.cinfo;
g_assert (cinfo);
for (i = 0; i < cinfo->nargs; ++i) {
ainfo = cinfo->args + i;
ins = cfg->args [i];
if (ins->opcode == OP_REGVAR) {
switch (ainfo->storage) {
case ArgInIReg:
arm_movx (code, ins->dreg, ainfo->reg);
if (i == 0 && sig->hasthis) {
mono_add_var_location (cfg, ins, TRUE, ainfo->reg, 0, 0, code - cfg->native_code);
mono_add_var_location (cfg, ins, TRUE, ins->dreg, 0, code - cfg->native_code, 0);
}
break;
case ArgOnStack:
switch (ainfo->slot_size) {
case 1:
if (ainfo->sign)
code = emit_ldrsbx (code, ins->dreg, cfg->arch.args_reg, ainfo->offset);
else
code = emit_ldrb (code, ins->dreg, cfg->arch.args_reg, ainfo->offset);
break;
case 2:
if (ainfo->sign)
code = emit_ldrshx (code, ins->dreg, cfg->arch.args_reg, ainfo->offset);
else
code = emit_ldrh (code, ins->dreg, cfg->arch.args_reg, ainfo->offset);
break;
case 4:
if (ainfo->sign)
code = emit_ldrswx (code, ins->dreg, cfg->arch.args_reg, ainfo->offset);
else
code = emit_ldrw (code, ins->dreg, cfg->arch.args_reg, ainfo->offset);
break;
default:
code = emit_ldrx (code, ins->dreg, cfg->arch.args_reg, ainfo->offset);
break;
}
break;
default:
g_assert_not_reached ();
break;
}
} else {
if (ainfo->storage != ArgVtypeByRef && ainfo->storage != ArgVtypeByRefOnStack)
g_assert (ins->opcode == OP_REGOFFSET);
switch (ainfo->storage) {
case ArgInIReg:
/* Stack slots for arguments have size 8 */
code = emit_strx (code, ainfo->reg, ins->inst_basereg, ins->inst_offset);
if (i == 0 && sig->hasthis) {
mono_add_var_location (cfg, ins, TRUE, ainfo->reg, 0, 0, code - cfg->native_code);
mono_add_var_location (cfg, ins, FALSE, ins->inst_basereg, ins->inst_offset, code - cfg->native_code, 0);
}
break;
case ArgInFReg:
code = emit_strfpx (code, ainfo->reg, ins->inst_basereg, ins->inst_offset);
break;
case ArgInFRegR4:
code = emit_strfpw (code, ainfo->reg, ins->inst_basereg, ins->inst_offset);
break;
case ArgOnStack:
case ArgOnStackR4:
case ArgOnStackR8:
case ArgVtypeByRefOnStack:
case ArgVtypeOnStack:
break;
case ArgVtypeByRef: {
MonoInst *addr_arg = ins->inst_left;
if (ainfo->gsharedvt) {
g_assert (ins->opcode == OP_GSHAREDVT_ARG_REGOFFSET);
arm_strx (code, ainfo->reg, ins->inst_basereg, ins->inst_offset);
} else {
g_assert (ins->opcode == OP_VTARG_ADDR);
g_assert (addr_arg->opcode == OP_REGOFFSET);
arm_strx (code, ainfo->reg, addr_arg->inst_basereg, addr_arg->inst_offset);
}
break;
}
case ArgVtypeInIRegs:
for (part = 0; part < ainfo->nregs; part ++) {
code = emit_strx (code, ainfo->reg + part, ins->inst_basereg, ins->inst_offset + (part * 8));
}
break;
case ArgHFA:
for (part = 0; part < ainfo->nregs; part ++) {
if (ainfo->esize == 4)
code = emit_strfpw (code, ainfo->reg + part, ins->inst_basereg, ins->inst_offset + ainfo->foffsets [part]);
else
code = emit_strfpx (code, ainfo->reg + part, ins->inst_basereg, ins->inst_offset + ainfo->foffsets [part]);
}
break;
default:
g_assert_not_reached ();
break;
}
}
}
return code;
}
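/*
 * Register save/restore helpers. The "regarray" variants address each register
 * at an offset derived from its register number (a MonoContext-style array),
 * while the "regset" variants pack the registers into consecutive slots.
 */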
/*
* emit_store_regarray:
*
* Emit code to store the registers in REGS into the appropriate elements of
* the register array at BASEREG+OFFSET.
*/
static WARN_UNUSED_RESULT guint8*
emit_store_regarray (guint8 *code, guint64 regs, int basereg, int offset)
{
int i;
for (i = 0; i < 32; ++i) {
if (regs & (1 << i)) {
if (i + 1 < 32 && (regs & (1 << (i + 1))) && (i + 1 != ARMREG_SP)) {
arm_stpx (code, i, i + 1, basereg, offset + (i * 8));
i++;
} else if (i == ARMREG_SP) {
arm_movspx (code, ARMREG_IP1, ARMREG_SP);
arm_strx (code, ARMREG_IP1, basereg, offset + (i * 8));
} else {
arm_strx (code, i, basereg, offset + (i * 8));
}
}
}
return code;
}
/*
* emit_load_regarray:
*
* Emit code to load the registers in REGS from the appropriate elements of
* the register array at BASEREG+OFFSET.
*/
static WARN_UNUSED_RESULT guint8*
emit_load_regarray (guint8 *code, guint64 regs, int basereg, int offset)
{
int i;
for (i = 0; i < 32; ++i) {
if (regs & (1 << i)) {
			if (i + 1 < 32 && (regs & (1 << (i + 1))) && (i + 1 != ARMREG_SP)) {
if (offset + (i * 8) < 500)
arm_ldpx (code, i, i + 1, basereg, offset + (i * 8));
else {
code = emit_ldrx (code, i, basereg, offset + (i * 8));
code = emit_ldrx (code, i + 1, basereg, offset + ((i + 1) * 8));
}
i++;
} else if (i == ARMREG_SP) {
g_assert_not_reached ();
} else {
code = emit_ldrx (code, i, basereg, offset + (i * 8));
}
}
}
return code;
}
/*
* emit_store_regset:
*
* Emit code to store the registers in REGS into consecutive memory locations starting
* at BASEREG+OFFSET.
*/
static WARN_UNUSED_RESULT guint8*
emit_store_regset (guint8 *code, guint64 regs, int basereg, int offset)
{
int i, pos;
pos = 0;
for (i = 0; i < 32; ++i) {
if (regs & (1 << i)) {
			if (i + 1 < 32 && (regs & (1 << (i + 1))) && (i + 1 != ARMREG_SP)) {
arm_stpx (code, i, i + 1, basereg, offset + (pos * 8));
i++;
pos++;
} else if (i == ARMREG_SP) {
arm_movspx (code, ARMREG_IP1, ARMREG_SP);
arm_strx (code, ARMREG_IP1, basereg, offset + (pos * 8));
} else {
arm_strx (code, i, basereg, offset + (pos * 8));
}
pos++;
}
}
return code;
}
/*
* emit_load_regset:
*
* Emit code to load the registers in REGS from consecutive memory locations starting
* at BASEREG+OFFSET.
*/
static WARN_UNUSED_RESULT guint8*
emit_load_regset (guint8 *code, guint64 regs, int basereg, int offset)
{
int i, pos;
pos = 0;
for (i = 0; i < 32; ++i) {
if (regs & (1 << i)) {
			if (i + 1 < 32 && (regs & (1 << (i + 1))) && (i + 1 != ARMREG_SP)) {
arm_ldpx (code, i, i + 1, basereg, offset + (pos * 8));
i++;
pos++;
} else if (i == ARMREG_SP) {
g_assert_not_reached ();
} else {
arm_ldrx (code, i, basereg, offset + (pos * 8));
}
pos++;
}
}
return code;
}
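/*
 * Illustrative example (not emitted verbatim anywhere): with
 * regs == (1 << ARMREG_R19) | (1 << ARMREG_R20), emit_store_regset emits a
 * single "stp x19, x20, [basereg, #offset]", while emit_store_regarray would
 * store the pair at offset + (19 * 8) instead.
 */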
WARN_UNUSED_RESULT guint8*
mono_arm_emit_load_regarray (guint8 *code, guint64 regs, int basereg, int offset)
{
return emit_load_regarray (code, regs, basereg, offset);
}
WARN_UNUSED_RESULT guint8*
mono_arm_emit_store_regarray (guint8 *code, guint64 regs, int basereg, int offset)
{
return emit_store_regarray (code, regs, basereg, offset);
}
WARN_UNUSED_RESULT guint8*
mono_arm_emit_store_regset (guint8 *code, guint64 regs, int basereg, int offset)
{
return emit_store_regset (code, regs, basereg, offset);
}
/* Same as emit_store_regset, but emit unwind info too */
/* CFA_OFFSET is the offset between the CFA and basereg */
static WARN_UNUSED_RESULT guint8*
emit_store_regset_cfa (MonoCompile *cfg, guint8 *code, guint64 regs, int basereg, int offset, int cfa_offset, guint64 no_cfa_regset)
{
int i, j, pos, nregs;
guint32 cfa_regset = regs & ~no_cfa_regset;
pos = 0;
for (i = 0; i < 32; ++i) {
nregs = 1;
if (regs & (1 << i)) {
			if (i + 1 < 32 && (regs & (1 << (i + 1))) && (i + 1 != ARMREG_SP)) {
if (offset < 256) {
arm_stpx (code, i, i + 1, basereg, offset + (pos * 8));
} else {
code = emit_strx (code, i, basereg, offset + (pos * 8));
code = emit_strx (code, i + 1, basereg, offset + (pos * 8) + 8);
}
nregs = 2;
} else if (i == ARMREG_SP) {
arm_movspx (code, ARMREG_IP1, ARMREG_SP);
code = emit_strx (code, ARMREG_IP1, basereg, offset + (pos * 8));
} else {
code = emit_strx (code, i, basereg, offset + (pos * 8));
}
for (j = 0; j < nregs; ++j) {
if (cfa_regset & (1 << (i + j)))
mono_emit_unwind_op_offset (cfg, code, i + j, (- cfa_offset) + offset + ((pos + j) * 8));
}
i += nregs - 1;
pos += nregs;
}
}
return code;
}
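/*
 * The unwind ops emitted above record each saved register at its CFA-relative
 * offset, so the unwinder can locate the saved values during EH.
 */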
/*
* emit_setup_lmf:
*
* Emit code to initialize an LMF structure at LMF_OFFSET.
* Clobbers ip0/ip1.
*/
static guint8*
emit_setup_lmf (MonoCompile *cfg, guint8 *code, gint32 lmf_offset, int cfa_offset)
{
/*
* The LMF should contain all the state required to be able to reconstruct the machine state
* at the current point of execution. Since the LMF is only read during EH, only callee
* saved etc. registers need to be saved.
* FIXME: Save callee saved fp regs, JITted code doesn't use them, but native code does, and they
* need to be restored during EH.
*/
/* pc */
arm_adrx (code, ARMREG_LR, code);
code = emit_strx (code, ARMREG_LR, ARMREG_FP, lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, pc));
/* gregs + fp + sp */
/* Don't emit unwind info for sp/fp, they are already handled in the prolog */
code = emit_store_regset_cfa (cfg, code, MONO_ARCH_LMF_REGS, ARMREG_FP, lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, gregs), cfa_offset, (1 << ARMREG_FP) | (1 << ARMREG_SP));
return code;
}
guint8 *
mono_arch_emit_prolog (MonoCompile *cfg)
{
MonoMethod *method = cfg->method;
MonoMethodSignature *sig;
MonoBasicBlock *bb;
guint8 *code;
int cfa_offset, max_offset;
sig = mono_method_signature_internal (method);
cfg->code_size = 256 + sig->param_count * 64;
code = cfg->native_code = g_malloc (cfg->code_size);
/* This can be unaligned */
cfg->stack_offset = ALIGN_TO (cfg->stack_offset, MONO_ARCH_FRAME_ALIGNMENT);
/*
* - Setup frame
*/
cfa_offset = 0;
mono_emit_unwind_op_def_cfa (cfg, code, ARMREG_SP, 0);
if (enable_ptrauth)
arm_pacibsp (code);
/* Setup frame */
if (arm_is_ldpx_imm (-cfg->stack_offset)) {
arm_stpx_pre (code, ARMREG_FP, ARMREG_LR, ARMREG_SP, -cfg->stack_offset);
} else {
/* sp -= cfg->stack_offset */
/* This clobbers ip0/ip1 */
code = emit_subx_sp_imm (code, cfg->stack_offset);
arm_stpx (code, ARMREG_FP, ARMREG_LR, ARMREG_SP, 0);
}
cfa_offset += cfg->stack_offset;
mono_emit_unwind_op_def_cfa_offset (cfg, code, cfa_offset);
mono_emit_unwind_op_offset (cfg, code, ARMREG_FP, (- cfa_offset) + 0);
mono_emit_unwind_op_offset (cfg, code, ARMREG_LR, (- cfa_offset) + 8);
arm_movspx (code, ARMREG_FP, ARMREG_SP);
mono_emit_unwind_op_def_cfa_reg (cfg, code, ARMREG_FP);
if (cfg->param_area) {
/* The param area is below the frame pointer */
code = emit_subx_sp_imm (code, cfg->param_area);
}
if (cfg->method->save_lmf) {
code = emit_setup_lmf (cfg, code, cfg->lmf_var->inst_offset, cfa_offset);
} else {
/* Save gregs */
code = emit_store_regset_cfa (cfg, code, MONO_ARCH_CALLEE_SAVED_REGS & cfg->used_int_regs, ARMREG_FP, cfg->arch.saved_gregs_offset, cfa_offset, 0);
}
/* Setup args reg */
if (cfg->arch.args_reg) {
/* The register was already saved above */
code = emit_addx_imm (code, cfg->arch.args_reg, ARMREG_FP, cfg->stack_offset);
}
/* Save return area addr received in R8 */
if (cfg->vret_addr) {
MonoInst *ins = cfg->vret_addr;
g_assert (ins->opcode == OP_REGOFFSET);
code = emit_strx (code, ARMREG_R8, ins->inst_basereg, ins->inst_offset);
}
/* Save mrgctx received in MONO_ARCH_RGCTX_REG */
if (cfg->rgctx_var) {
MonoInst *ins = cfg->rgctx_var;
g_assert (ins->opcode == OP_REGOFFSET);
code = emit_strx (code, MONO_ARCH_RGCTX_REG, ins->inst_basereg, ins->inst_offset);
mono_add_var_location (cfg, cfg->rgctx_var, TRUE, MONO_ARCH_RGCTX_REG, 0, 0, code - cfg->native_code);
mono_add_var_location (cfg, cfg->rgctx_var, FALSE, ins->inst_basereg, ins->inst_offset, code - cfg->native_code, 0);
}
/*
* Move arguments to their registers/stack locations.
*/
code = emit_move_args (cfg, code);
/* Initialize seq_point_info_var */
if (cfg->arch.seq_point_info_var) {
MonoInst *ins = cfg->arch.seq_point_info_var;
/* Initialize the variable from a GOT slot */
code = emit_aotconst (cfg, code, ARMREG_IP0, MONO_PATCH_INFO_SEQ_POINT_INFO, cfg->method);
g_assert (ins->opcode == OP_REGOFFSET);
code = emit_strx (code, ARMREG_IP0, ins->inst_basereg, ins->inst_offset);
/* Initialize ss_tramp_var */
ins = cfg->arch.ss_tramp_var;
g_assert (ins->opcode == OP_REGOFFSET);
code = emit_ldrx (code, ARMREG_IP1, ARMREG_IP0, MONO_STRUCT_OFFSET (SeqPointInfo, ss_tramp_addr));
code = emit_strx (code, ARMREG_IP1, ins->inst_basereg, ins->inst_offset);
} else {
MonoInst *ins;
if (cfg->arch.ss_tramp_var) {
/* Initialize ss_tramp_var */
ins = cfg->arch.ss_tramp_var;
g_assert (ins->opcode == OP_REGOFFSET);
code = emit_imm64 (code, ARMREG_IP0, (guint64)&ss_trampoline);
code = emit_strx (code, ARMREG_IP0, ins->inst_basereg, ins->inst_offset);
}
if (cfg->arch.bp_tramp_var) {
/* Initialize bp_tramp_var */
ins = cfg->arch.bp_tramp_var;
g_assert (ins->opcode == OP_REGOFFSET);
code = emit_imm64 (code, ARMREG_IP0, (guint64)bp_trampoline);
code = emit_strx (code, ARMREG_IP0, ins->inst_basereg, ins->inst_offset);
}
}
max_offset = 0;
if (cfg->opt & MONO_OPT_BRANCH) {
for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
MonoInst *ins;
bb->max_offset = max_offset;
MONO_BB_FOR_EACH_INS (bb, ins) {
max_offset += ins_get_size (ins->opcode);
}
}
}
if (max_offset > 0x3ffff * 4)
cfg->arch.cond_branch_islands = TRUE;
return code;
}
void
mono_arch_emit_epilog (MonoCompile *cfg)
{
CallInfo *cinfo;
int max_epilog_size;
guint8 *code;
int i;
max_epilog_size = 16 + 20*4;
code = realloc_code (cfg, max_epilog_size);
if (cfg->method->save_lmf) {
code = mono_arm_emit_load_regarray (code, MONO_ARCH_CALLEE_SAVED_REGS & cfg->used_int_regs, ARMREG_FP, cfg->lmf_var->inst_offset + MONO_STRUCT_OFFSET (MonoLMF, gregs) - (MONO_ARCH_FIRST_LMF_REG * 8));
} else {
/* Restore gregs */
code = emit_load_regset (code, MONO_ARCH_CALLEE_SAVED_REGS & cfg->used_int_regs, ARMREG_FP, cfg->arch.saved_gregs_offset);
}
/* Load returned vtypes into registers if needed */
cinfo = cfg->arch.cinfo;
switch (cinfo->ret.storage) {
case ArgVtypeInIRegs: {
MonoInst *ins = cfg->ret;
for (i = 0; i < cinfo->ret.nregs; ++i)
code = emit_ldrx (code, cinfo->ret.reg + i, ins->inst_basereg, ins->inst_offset + (i * 8));
break;
}
case ArgHFA: {
MonoInst *ins = cfg->ret;
for (i = 0; i < cinfo->ret.nregs; ++i) {
if (cinfo->ret.esize == 4)
code = emit_ldrfpw (code, cinfo->ret.reg + i, ins->inst_basereg, ins->inst_offset + cinfo->ret.foffsets [i]);
else
code = emit_ldrfpx (code, cinfo->ret.reg + i, ins->inst_basereg, ins->inst_offset + cinfo->ret.foffsets [i]);
}
break;
}
default:
break;
}
/* Destroy frame */
code = mono_arm_emit_destroy_frame (code, cfg->stack_offset, (1 << ARMREG_IP0) | (1 << ARMREG_IP1));
if (enable_ptrauth)
arm_retab (code);
else
arm_retx (code, ARMREG_LR);
g_assert (code - (cfg->native_code + cfg->code_len) < max_epilog_size);
set_code_cursor (cfg, code);
}
void
mono_arch_emit_exceptions (MonoCompile *cfg)
{
MonoJumpInfo *ji;
MonoClass *exc_class;
guint8 *code, *ip;
guint8* exc_throw_pos [MONO_EXC_INTRINS_NUM];
guint8 exc_throw_found [MONO_EXC_INTRINS_NUM];
int i, id, size = 0;
for (i = 0; i < MONO_EXC_INTRINS_NUM; i++) {
exc_throw_pos [i] = NULL;
exc_throw_found [i] = 0;
}
for (ji = cfg->patch_info; ji; ji = ji->next) {
if (ji->type == MONO_PATCH_INFO_EXC) {
i = mini_exception_id_by_name ((const char*)ji->data.target);
if (!exc_throw_found [i]) {
size += 32;
exc_throw_found [i] = TRUE;
}
}
}
code = realloc_code (cfg, size);
/* Emit code to raise corlib exceptions */
for (ji = cfg->patch_info; ji; ji = ji->next) {
if (ji->type != MONO_PATCH_INFO_EXC)
continue;
ip = cfg->native_code + ji->ip.i;
id = mini_exception_id_by_name ((const char*)ji->data.target);
if (exc_throw_pos [id]) {
/* ip points to the bcc () in OP_COND_EXC_... */
arm_patch_rel (ip, exc_throw_pos [id], ji->relocation);
ji->type = MONO_PATCH_INFO_NONE;
continue;
}
exc_throw_pos [id] = code;
arm_patch_rel (ip, code, ji->relocation);
/* We are being branched to from the code generated by emit_cond_exc (), the pc is in ip1 */
/* r0 = type token */
exc_class = mono_class_load_from_name (mono_defaults.corlib, "System", ji->data.name);
code = emit_imm (code, ARMREG_R0, m_class_get_type_token (exc_class) - MONO_TOKEN_TYPE_DEF);
/* r1 = throw ip */
arm_movx (code, ARMREG_R1, ARMREG_IP1);
/* Branch to the corlib exception throwing trampoline */
ji->ip.i = code - cfg->native_code;
ji->type = MONO_PATCH_INFO_JIT_ICALL_ID;
ji->data.jit_icall_id = MONO_JIT_ICALL_mono_arch_throw_corlib_exception;
ji->relocation = MONO_R_ARM64_BL;
arm_bl (code, 0);
cfg->thunk_area += THUNK_SIZE;
set_code_cursor (cfg, code);
}
set_code_cursor (cfg, code);
}
MonoInst*
mono_arch_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
{
return NULL;
}
guint32
mono_arch_get_patch_offset (guint8 *code)
{
return 0;
}
gpointer
mono_arch_build_imt_trampoline (MonoVTable *vtable, MonoIMTCheckItem **imt_entries, int count,
gpointer fail_tramp)
{
int i, buf_len, imt_reg;
guint8 *buf, *code;
#if DEBUG_IMT
	printf ("building IMT trampoline for class %s %s entries %d vtable %p\n", m_class_get_name_space (vtable->klass), m_class_get_name (vtable->klass), count, vtable);
	for (i = 0; i < count; ++i) {
		MonoIMTCheckItem *item = imt_entries [i];
		printf ("method %d (%p) %s vtable slot %p is_equals %d chunk size %d\n", i, item->key, ((MonoMethod*)item->key)->name, &vtable->vtable [item->value.vtable_slot], item->is_equals, item->chunk_size);
	}
#endif
buf_len = 0;
for (i = 0; i < count; ++i) {
MonoIMTCheckItem *item = imt_entries [i];
if (item->is_equals) {
gboolean fail_case = !item->check_target_idx && fail_tramp;
if (item->check_target_idx || fail_case) {
if (!item->compare_done || fail_case) {
buf_len += 4 * 4 + 4;
}
buf_len += 4;
if (item->has_target_code) {
buf_len += 5 * 4;
} else {
buf_len += 6 * 4;
}
if (fail_case) {
buf_len += 5 * 4;
}
} else {
buf_len += 6 * 4;
}
} else {
buf_len += 6 * 4;
}
}
if (fail_tramp) {
buf = (guint8 *)mini_alloc_generic_virtual_trampoline (vtable, buf_len);
} else {
MonoMemoryManager *mem_manager = m_class_get_mem_manager (vtable->klass);
buf = mono_mem_manager_code_reserve (mem_manager, buf_len);
}
code = buf;
MINI_BEGIN_CODEGEN ();
/*
* We are called by JITted code, which passes in the IMT argument in
* MONO_ARCH_RGCTX_REG (r27). We need to preserve all caller saved regs
* except ip0/ip1.
*/
imt_reg = MONO_ARCH_RGCTX_REG;
for (i = 0; i < count; ++i) {
MonoIMTCheckItem *item = imt_entries [i];
item->code_target = code;
if (item->is_equals) {
/*
* Check the imt argument against item->key, if equals, jump to either
* item->value.target_code or to vtable [item->value.vtable_slot].
* If fail_tramp is set, jump to it if not-equals.
*/
gboolean fail_case = !item->check_target_idx && fail_tramp;
if (item->check_target_idx || fail_case) {
/* Compare imt_reg with item->key */
if (!item->compare_done || fail_case) {
// FIXME: Optimize this
code = emit_imm64 (code, ARMREG_IP0, (guint64)item->key);
arm_cmpx (code, imt_reg, ARMREG_IP0);
}
item->jmp_code = code;
arm_bcc (code, ARMCOND_NE, 0);
/* Jump to target if equals */
if (item->has_target_code) {
code = emit_imm64 (code, ARMREG_IP0, (guint64)item->value.target_code);
code = mono_arm_emit_brx (code, ARMREG_IP0);
} else {
guint64 imm = (guint64)&(vtable->vtable [item->value.vtable_slot]);
code = emit_imm64 (code, ARMREG_IP0, imm);
arm_ldrx (code, ARMREG_IP0, ARMREG_IP0, 0);
code = mono_arm_emit_brx (code, ARMREG_IP0);
}
if (fail_case) {
arm_patch_rel (item->jmp_code, code, MONO_R_ARM64_BCC);
item->jmp_code = NULL;
code = emit_imm64 (code, ARMREG_IP0, (guint64)fail_tramp);
code = mono_arm_emit_brx (code, ARMREG_IP0);
}
} else {
guint64 imm = (guint64)&(vtable->vtable [item->value.vtable_slot]);
code = emit_imm64 (code, ARMREG_IP0, imm);
arm_ldrx (code, ARMREG_IP0, ARMREG_IP0, 0);
code = mono_arm_emit_brx (code, ARMREG_IP0);
}
} else {
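			/* Binary search node: branch to the node at check_target_idx when
			 * imt_reg >= item->key (unsigned); the target is patched below. */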
code = emit_imm64 (code, ARMREG_IP0, (guint64)item->key);
arm_cmpx (code, imt_reg, ARMREG_IP0);
item->jmp_code = code;
arm_bcc (code, ARMCOND_HS, 0);
}
}
/* Patch the branches */
for (i = 0; i < count; ++i) {
MonoIMTCheckItem *item = imt_entries [i];
if (item->jmp_code && item->check_target_idx)
arm_patch_rel (item->jmp_code, imt_entries [item->check_target_idx]->code_target, MONO_R_ARM64_BCC);
}
g_assert ((code - buf) <= buf_len);
MINI_END_CODEGEN (buf, code - buf, MONO_PROFILER_CODE_BUFFER_IMT_TRAMPOLINE, NULL);
return MINI_ADDR_TO_FTNPTR (buf);
}
GSList *
mono_arch_get_trampolines (gboolean aot)
{
return mono_arm_get_exception_trampolines (aot);
}
#else /* DISABLE_JIT */
gpointer
mono_arch_build_imt_trampoline (MonoVTable *vtable, MonoIMTCheckItem **imt_entries, int count,
gpointer fail_tramp)
{
g_assert_not_reached ();
return NULL;
}
#endif /* !DISABLE_JIT */
#ifdef MONO_ARCH_SOFT_DEBUG_SUPPORTED
void
mono_arch_set_breakpoint (MonoJitInfo *ji, guint8 *ip)
{
guint8 *code = MINI_FTNPTR_TO_ADDR (ip);
guint32 native_offset = ip - (guint8*)ji->code_start;
if (ji->from_aot) {
SeqPointInfo *info = mono_arch_get_seq_point_info ((guint8*)ji->code_start);
if (enable_ptrauth)
NOT_IMPLEMENTED;
g_assert (native_offset % 4 == 0);
g_assert (info->bp_addrs [native_offset / 4] == 0);
info->bp_addrs [native_offset / 4] = (guint8*)mini_get_breakpoint_trampoline ();
} else {
/* ip points to an ldrx */
code += 4;
mono_codeman_enable_write ();
code = mono_arm_emit_blrx (code, ARMREG_IP0);
mono_codeman_disable_write ();
mono_arch_flush_icache (ip, code - ip);
}
}
void
mono_arch_clear_breakpoint (MonoJitInfo *ji, guint8 *ip)
{
guint8 *code = MINI_FTNPTR_TO_ADDR (ip);
if (ji->from_aot) {
guint32 native_offset = ip - (guint8*)ji->code_start;
SeqPointInfo *info = mono_arch_get_seq_point_info ((guint8*)ji->code_start);
if (enable_ptrauth)
NOT_IMPLEMENTED;
g_assert (native_offset % 4 == 0);
info->bp_addrs [native_offset / 4] = NULL;
} else {
/* ip points to an ldrx */
code += 4;
mono_codeman_enable_write ();
arm_nop (code);
mono_codeman_disable_write ();
mono_arch_flush_icache (ip, code - ip);
}
}
void
mono_arch_start_single_stepping (void)
{
ss_trampoline = mini_get_single_step_trampoline ();
}
void
mono_arch_stop_single_stepping (void)
{
ss_trampoline = NULL;
}
gboolean
mono_arch_is_single_step_event (void *info, void *sigctx)
{
/* We use soft breakpoints on arm64 */
return FALSE;
}
gboolean
mono_arch_is_breakpoint_event (void *info, void *sigctx)
{
/* We use soft breakpoints on arm64 */
return FALSE;
}
void
mono_arch_skip_breakpoint (MonoContext *ctx, MonoJitInfo *ji)
{
g_assert_not_reached ();
}
void
mono_arch_skip_single_step (MonoContext *ctx)
{
g_assert_not_reached ();
}
SeqPointInfo*
mono_arch_get_seq_point_info (guint8 *code)
{
SeqPointInfo *info;
MonoJitInfo *ji;
MonoJitMemoryManager *jit_mm;
jit_mm = get_default_jit_mm ();
// FIXME: Add a free function
jit_mm_lock (jit_mm);
info = (SeqPointInfo *)g_hash_table_lookup (jit_mm->arch_seq_points, code);
jit_mm_unlock (jit_mm);
if (!info) {
ji = mini_jit_info_table_find (code);
g_assert (ji);
info = g_malloc0 (sizeof (SeqPointInfo) + (ji->code_size / 4) * sizeof(guint8*));
info->ss_tramp_addr = &ss_trampoline;
jit_mm_lock (jit_mm);
g_hash_table_insert (jit_mm->arch_seq_points, code, info);
jit_mm_unlock (jit_mm);
}
return info;
}
#endif /* MONO_ARCH_SOFT_DEBUG_SUPPORTED */
gboolean
mono_arch_opcode_supported (int opcode)
{
switch (opcode) {
case OP_ATOMIC_ADD_I4:
case OP_ATOMIC_ADD_I8:
case OP_ATOMIC_EXCHANGE_I4:
case OP_ATOMIC_EXCHANGE_I8:
case OP_ATOMIC_CAS_I4:
case OP_ATOMIC_CAS_I8:
case OP_ATOMIC_LOAD_I1:
case OP_ATOMIC_LOAD_I2:
case OP_ATOMIC_LOAD_I4:
case OP_ATOMIC_LOAD_I8:
case OP_ATOMIC_LOAD_U1:
case OP_ATOMIC_LOAD_U2:
case OP_ATOMIC_LOAD_U4:
case OP_ATOMIC_LOAD_U8:
case OP_ATOMIC_LOAD_R4:
case OP_ATOMIC_LOAD_R8:
case OP_ATOMIC_STORE_I1:
case OP_ATOMIC_STORE_I2:
case OP_ATOMIC_STORE_I4:
case OP_ATOMIC_STORE_I8:
case OP_ATOMIC_STORE_U1:
case OP_ATOMIC_STORE_U2:
case OP_ATOMIC_STORE_U4:
case OP_ATOMIC_STORE_U8:
case OP_ATOMIC_STORE_R4:
case OP_ATOMIC_STORE_R8:
return TRUE;
default:
return FALSE;
}
}
CallInfo*
mono_arch_get_call_info (MonoMemPool *mp, MonoMethodSignature *sig)
{
return get_call_info (mp, sig);
}
gpointer
mono_arch_load_function (MonoJitICallId jit_icall_id)
{
gpointer target = NULL;
switch (jit_icall_id) {
#undef MONO_AOT_ICALL
#define MONO_AOT_ICALL(x) case MONO_JIT_ICALL_ ## x: target = (gpointer)x; break;
MONO_AOT_ICALL (mono_arm_resume_unwind)
MONO_AOT_ICALL (mono_arm_start_gsharedvt_call)
MONO_AOT_ICALL (mono_arm_throw_exception)
	default:
		break;
	}
return target;
}
static guint8*
emit_blrx (guint8 *code, int reg)
{
if (enable_ptrauth)
arm_blraaz (code, reg);
else
arm_blrx (code, reg);
return code;
}
static guint8*
emit_brx (guint8 *code, int reg)
{
if (enable_ptrauth)
arm_braaz (code, reg);
else
arm_brx (code, reg);
return code;
}
guint8*
mono_arm_emit_blrx (guint8 *code, int reg)
{
return emit_blrx (code, reg);
}
guint8*
mono_arm_emit_brx (guint8 *code, int reg)
{
return emit_brx (code, reg);
}
/**
* \file
* ARM64 backend for the Mono code generator
*
* Copyright 2013 Xamarin, Inc (http://www.xamarin.com)
*
* Based on mini-arm.c:
*
* Authors:
* Paolo Molaro ([email protected])
* Dietmar Maurer ([email protected])
*
* (C) 2003 Ximian, Inc.
* Copyright 2003-2011 Novell, Inc (http://www.novell.com)
* Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
* Licensed under the MIT license. See LICENSE file in the project root for full license information.
*/
#include "mini.h"
#include "cpu-arm64.h"
#include "ir-emit.h"
#include "aot-runtime.h"
#include "mini-runtime.h"
#include <mono/arch/arm64/arm64-codegen.h>
#include <mono/utils/mono-mmap.h>
#include <mono/utils/mono-memory-model.h>
#include <mono/metadata/abi-details.h>
#include <mono/metadata/tokentype.h>
#include "interp/interp.h"
/*
* Documentation:
*
* - ARM(R) Architecture Reference Manual, ARMv8, for ARMv8-A architecture profile (DDI0487A_a_armv8_arm.pdf)
* - Procedure Call Standard for the ARM 64-bit Architecture (AArch64) (IHI0055B_aapcs64.pdf)
* - ELF for the ARM 64-bit Architecture (IHI0056B_aaelf64.pdf)
*
* Register usage:
* - ip0/ip1/lr are used as temporary registers
* - r27 is used as the rgctx/imt register
* - r28 is used to access arguments passed on the stack
 * - d16/d17 are used as fp temporary registers
*/
#define FP_TEMP_REG ARMREG_D16
#define FP_TEMP_REG2 ARMREG_D17
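/* Size in bytes of a branch thunk (see emit_thunk ()) */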
#define THUNK_SIZE (4 * 4)
/* The single step trampoline */
static gpointer ss_trampoline;
/* The breakpoint trampoline */
static gpointer bp_trampoline;
static gboolean ios_abi;
static gboolean enable_ptrauth;
#if defined(HOST_WIN32)
#define WARN_UNUSED_RESULT _Check_return_
#else
#define WARN_UNUSED_RESULT __attribute__ ((__warn_unused_result__))
#endif
static WARN_UNUSED_RESULT guint8* emit_load_regset (guint8 *code, guint64 regs, int basereg, int offset);
static guint8* emit_brx (guint8 *code, int reg);
static guint8* emit_blrx (guint8 *code, int reg);
const char*
mono_arch_regname (int reg)
{
static const char * rnames[] = {
"r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9",
"r10", "r11", "r12", "r13", "r14", "r15", "r16", "r17", "r18", "r19",
"r20", "r21", "r22", "r23", "r24", "r25", "r26", "r27", "r28", "fp",
"lr", "sp"
};
if (reg >= 0 && reg < 32)
return rnames [reg];
return "unknown";
}
const char*
mono_arch_fregname (int reg)
{
static const char * rnames[] = {
"d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7", "d8", "d9",
"d10", "d11", "d12", "d13", "d14", "d15", "d16", "d17", "d18", "d19",
"d20", "d21", "d22", "d23", "d24", "d25", "d26", "d27", "d28", "d29",
"d30", "d31"
};
if (reg >= 0 && reg < 32)
return rnames [reg];
return "unknown fp";
}
const char *
mono_arch_xregname (int reg)
{
static const char * rnames[] = {
"v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9",
"v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19",
"v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29",
"v30", "v31"
};
if (reg >= 0 && reg < 32)
return rnames [reg];
return "unknown";
}
int
mono_arch_get_argument_info (MonoMethodSignature *csig, int param_count, MonoJitArgumentInfo *arg_info)
{
NOT_IMPLEMENTED;
return 0;
}
#define MAX_ARCH_DELEGATE_PARAMS 7
static gpointer
get_delegate_invoke_impl (gboolean has_target, int param_count, guint32 *code_size)
{
guint8 *code, *start;
MINI_BEGIN_CODEGEN ();
if (has_target) {
start = code = mono_global_codeman_reserve (12);
/* Replace the this argument with the target */
arm_ldrx (code, ARMREG_IP0, ARMREG_R0, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr));
arm_ldrx (code, ARMREG_R0, ARMREG_R0, MONO_STRUCT_OFFSET (MonoDelegate, target));
code = mono_arm_emit_brx (code, ARMREG_IP0);
g_assert ((code - start) <= 12);
} else {
int size, i;
size = 8 + param_count * 4;
start = code = mono_global_codeman_reserve (size);
arm_ldrx (code, ARMREG_IP0, ARMREG_R0, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr));
/* slide down the arguments */
for (i = 0; i < param_count; ++i)
arm_movx (code, i, i + 1);
code = mono_arm_emit_brx (code, ARMREG_IP0);
g_assert ((code - start) <= size);
}
MINI_END_CODEGEN (start, code - start, MONO_PROFILER_CODE_BUFFER_DELEGATE_INVOKE, NULL);
if (code_size)
*code_size = code - start;
return MINI_ADDR_TO_FTNPTR (start);
}
/*
* mono_arch_get_delegate_invoke_impls:
*
* Return a list of MonoAotTrampInfo structures for the delegate invoke impl
* trampolines.
*/
GSList*
mono_arch_get_delegate_invoke_impls (void)
{
GSList *res = NULL;
guint8 *code;
guint32 code_len;
int i;
char *tramp_name;
code = (guint8*)get_delegate_invoke_impl (TRUE, 0, &code_len);
res = g_slist_prepend (res, mono_tramp_info_create ("delegate_invoke_impl_has_target", code, code_len, NULL, NULL));
for (i = 0; i <= MAX_ARCH_DELEGATE_PARAMS; ++i) {
code = (guint8*)get_delegate_invoke_impl (FALSE, i, &code_len);
tramp_name = g_strdup_printf ("delegate_invoke_impl_target_%d", i);
res = g_slist_prepend (res, mono_tramp_info_create (tramp_name, code, code_len, NULL, NULL));
g_free (tramp_name);
}
return res;
}
gpointer
mono_arch_get_delegate_invoke_impl (MonoMethodSignature *sig, gboolean has_target)
{
guint8 *code, *start;
/*
* vtypes are returned in registers, or using the dedicated r8 register, so
* they can be supported by delegate invokes.
*/
if (has_target) {
static guint8* cached = NULL;
if (cached)
return cached;
if (mono_ee_features.use_aot_trampolines)
start = (guint8*)mono_aot_get_trampoline ("delegate_invoke_impl_has_target");
else
start = (guint8*)get_delegate_invoke_impl (TRUE, 0, NULL);
mono_memory_barrier ();
cached = start;
return cached;
} else {
static guint8* cache [MAX_ARCH_DELEGATE_PARAMS + 1] = {NULL};
int i;
if (sig->param_count > MAX_ARCH_DELEGATE_PARAMS)
return NULL;
for (i = 0; i < sig->param_count; ++i)
if (!mono_is_regsize_var (sig->params [i]))
return NULL;
code = cache [sig->param_count];
if (code)
return code;
if (mono_ee_features.use_aot_trampolines) {
char *name = g_strdup_printf ("delegate_invoke_impl_target_%d", sig->param_count);
start = (guint8*)mono_aot_get_trampoline (name);
g_free (name);
} else {
start = (guint8*)get_delegate_invoke_impl (FALSE, sig->param_count, NULL);
}
mono_memory_barrier ();
cache [sig->param_count] = start;
return start;
}
return NULL;
}
gpointer
mono_arch_get_delegate_virtual_invoke_impl (MonoMethodSignature *sig, MonoMethod *method, int offset, gboolean load_imt_reg)
{
return NULL;
}
gpointer
mono_arch_get_this_arg_from_call (host_mgreg_t *regs, guint8 *code)
{
return (gpointer)regs [ARMREG_R0];
}
void
mono_arch_cpu_init (void)
{
}
void
mono_arch_init (void)
{
#if defined(TARGET_IOS) || defined(TARGET_WATCHOS) || defined(TARGET_OSX)
ios_abi = TRUE;
#endif
#ifdef MONO_ARCH_ENABLE_PTRAUTH
enable_ptrauth = TRUE;
#endif
if (!mono_aot_only)
bp_trampoline = mini_get_breakpoint_trampoline ();
mono_arm_gsharedvt_init ();
}
void
mono_arch_cleanup (void)
{
}
guint32
mono_arch_cpu_optimizations (guint32 *exclude_mask)
{
*exclude_mask = 0;
return 0;
}
void
mono_arch_register_lowlevel_calls (void)
{
}
void
mono_arch_finish_init (void)
{
}
/* The maximum length is 2 instructions */
static guint8*
emit_imm (guint8 *code, int dreg, int imm)
{
// FIXME: Optimize this
if (imm < 0) {
gint64 limm = imm;
arm_movnx (code, dreg, (~limm) & 0xffff, 0);
arm_movkx (code, dreg, (limm >> 16) & 0xffff, 16);
} else {
arm_movzx (code, dreg, imm & 0xffff, 0);
if (imm >> 16)
arm_movkx (code, dreg, (imm >> 16) & 0xffff, 16);
}
return code;
}
/* The maximum length is 4 instructions */
static guint8*
emit_imm64 (guint8 *code, int dreg, guint64 imm)
{
// FIXME: Optimize this
arm_movzx (code, dreg, imm & 0xffff, 0);
if ((imm >> 16) & 0xffff)
arm_movkx (code, dreg, (imm >> 16) & 0xffff, 16);
if ((imm >> 32) & 0xffff)
arm_movkx (code, dreg, (imm >> 32) & 0xffff, 32);
if ((imm >> 48) & 0xffff)
arm_movkx (code, dreg, (imm >> 48) & 0xffff, 48);
return code;
}
guint8*
mono_arm_emit_imm64 (guint8 *code, int dreg, gint64 imm)
{
return emit_imm64 (code, dreg, imm);
}
/*
 * emit_imm64_template:
 *
 * Emit a patchable code sequence for constructing a 64 bit immediate.
 */
static guint8*
emit_imm64_template (guint8 *code, int dreg)
{
arm_movzx (code, dreg, 0, 0);
arm_movkx (code, dreg, 0, 16);
arm_movkx (code, dreg, 0, 32);
arm_movkx (code, dreg, 0, 48);
return code;
}
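/* The four movz/movk slots emitted above are rewritten in place by arm_patch_full () for MONO_R_ARM64_IMM relocations */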
static WARN_UNUSED_RESULT guint8*
emit_addw_imm (guint8 *code, int dreg, int sreg, int imm)
{
if (!arm_is_arith_imm (imm)) {
code = emit_imm (code, ARMREG_LR, imm);
arm_addw (code, dreg, sreg, ARMREG_LR);
} else {
arm_addw_imm (code, dreg, sreg, imm);
}
return code;
}
static WARN_UNUSED_RESULT guint8*
emit_addx_imm (guint8 *code, int dreg, int sreg, int imm)
{
if (!arm_is_arith_imm (imm)) {
code = emit_imm (code, ARMREG_LR, imm);
arm_addx (code, dreg, sreg, ARMREG_LR);
} else {
arm_addx_imm (code, dreg, sreg, imm);
}
return code;
}
static WARN_UNUSED_RESULT guint8*
emit_subw_imm (guint8 *code, int dreg, int sreg, int imm)
{
if (!arm_is_arith_imm (imm)) {
code = emit_imm (code, ARMREG_LR, imm);
arm_subw (code, dreg, sreg, ARMREG_LR);
} else {
arm_subw_imm (code, dreg, sreg, imm);
}
return code;
}
static WARN_UNUSED_RESULT guint8*
emit_subx_imm (guint8 *code, int dreg, int sreg, int imm)
{
if (!arm_is_arith_imm (imm)) {
code = emit_imm (code, ARMREG_LR, imm);
arm_subx (code, dreg, sreg, ARMREG_LR);
} else {
arm_subx_imm (code, dreg, sreg, imm);
}
return code;
}
/* Emit sp+=imm. Clobbers ip0/ip1 */
static WARN_UNUSED_RESULT guint8*
emit_addx_sp_imm (guint8 *code, int imm)
{
code = emit_imm (code, ARMREG_IP0, imm);
arm_movspx (code, ARMREG_IP1, ARMREG_SP);
arm_addx (code, ARMREG_IP1, ARMREG_IP1, ARMREG_IP0);
arm_movspx (code, ARMREG_SP, ARMREG_IP1);
return code;
}
/* Emit sp-=imm. Clobbers ip0/ip1 */
static WARN_UNUSED_RESULT guint8*
emit_subx_sp_imm (guint8 *code, int imm)
{
code = emit_imm (code, ARMREG_IP0, imm);
arm_movspx (code, ARMREG_IP1, ARMREG_SP);
arm_subx (code, ARMREG_IP1, ARMREG_IP1, ARMREG_IP0);
arm_movspx (code, ARMREG_SP, ARMREG_IP1);
return code;
}
static WARN_UNUSED_RESULT guint8*
emit_andw_imm (guint8 *code, int dreg, int sreg, int imm)
{
// FIXME:
code = emit_imm (code, ARMREG_LR, imm);
arm_andw (code, dreg, sreg, ARMREG_LR);
return code;
}
static WARN_UNUSED_RESULT guint8*
emit_andx_imm (guint8 *code, int dreg, int sreg, int imm)
{
// FIXME:
code = emit_imm (code, ARMREG_LR, imm);
arm_andx (code, dreg, sreg, ARMREG_LR);
return code;
}
static WARN_UNUSED_RESULT guint8*
emit_orrw_imm (guint8 *code, int dreg, int sreg, int imm)
{
// FIXME:
code = emit_imm (code, ARMREG_LR, imm);
arm_orrw (code, dreg, sreg, ARMREG_LR);
return code;
}
static WARN_UNUSED_RESULT guint8*
emit_orrx_imm (guint8 *code, int dreg, int sreg, int imm)
{
// FIXME:
code = emit_imm (code, ARMREG_LR, imm);
arm_orrx (code, dreg, sreg, ARMREG_LR);
return code;
}
static WARN_UNUSED_RESULT guint8*
emit_eorw_imm (guint8 *code, int dreg, int sreg, int imm)
{
// FIXME:
code = emit_imm (code, ARMREG_LR, imm);
arm_eorw (code, dreg, sreg, ARMREG_LR);
return code;
}
static WARN_UNUSED_RESULT guint8*
emit_eorx_imm (guint8 *code, int dreg, int sreg, int imm)
{
// FIXME:
code = emit_imm (code, ARMREG_LR, imm);
arm_eorx (code, dreg, sreg, ARMREG_LR);
return code;
}
static WARN_UNUSED_RESULT guint8*
emit_cmpw_imm (guint8 *code, int sreg, int imm)
{
if (imm == 0) {
arm_cmpw (code, sreg, ARMREG_RZR);
} else {
// FIXME:
code = emit_imm (code, ARMREG_LR, imm);
arm_cmpw (code, sreg, ARMREG_LR);
}
return code;
}
static WARN_UNUSED_RESULT guint8*
emit_cmpx_imm (guint8 *code, int sreg, int imm)
{
if (imm == 0) {
arm_cmpx (code, sreg, ARMREG_RZR);
} else {
// FIXME:
code = emit_imm (code, ARMREG_LR, imm);
arm_cmpx (code, sreg, ARMREG_LR);
}
return code;
}
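/*
 * Load/store helpers: use the immediate addressing form when the offset fits
 * into the instruction, otherwise materialize the offset into ip0 and fall
 * back to the register-offset form.
 */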
static WARN_UNUSED_RESULT guint8*
emit_strb (guint8 *code, int rt, int rn, int imm)
{
if (arm_is_strb_imm (imm)) {
arm_strb (code, rt, rn, imm);
} else {
g_assert (rt != ARMREG_IP0);
g_assert (rn != ARMREG_IP0);
code = emit_imm (code, ARMREG_IP0, imm);
arm_strb_reg (code, rt, rn, ARMREG_IP0);
}
return code;
}
static WARN_UNUSED_RESULT guint8*
emit_strh (guint8 *code, int rt, int rn, int imm)
{
if (arm_is_strh_imm (imm)) {
arm_strh (code, rt, rn, imm);
} else {
g_assert (rt != ARMREG_IP0);
g_assert (rn != ARMREG_IP0);
code = emit_imm (code, ARMREG_IP0, imm);
arm_strh_reg (code, rt, rn, ARMREG_IP0);
}
return code;
}
static WARN_UNUSED_RESULT guint8*
emit_strw (guint8 *code, int rt, int rn, int imm)
{
if (arm_is_strw_imm (imm)) {
arm_strw (code, rt, rn, imm);
} else {
g_assert (rt != ARMREG_IP0);
g_assert (rn != ARMREG_IP0);
code = emit_imm (code, ARMREG_IP0, imm);
arm_strw_reg (code, rt, rn, ARMREG_IP0);
}
return code;
}
static WARN_UNUSED_RESULT guint8*
emit_strfpw (guint8 *code, int rt, int rn, int imm)
{
if (arm_is_strw_imm (imm)) {
arm_strfpw (code, rt, rn, imm);
} else {
g_assert (rn != ARMREG_IP0);
code = emit_imm (code, ARMREG_IP0, imm);
arm_addx (code, ARMREG_IP0, rn, ARMREG_IP0);
arm_strfpw (code, rt, ARMREG_IP0, 0);
}
return code;
}
static WARN_UNUSED_RESULT guint8*
emit_strfpx (guint8 *code, int rt, int rn, int imm)
{
if (arm_is_strx_imm (imm)) {
arm_strfpx (code, rt, rn, imm);
} else {
g_assert (rn != ARMREG_IP0);
code = emit_imm (code, ARMREG_IP0, imm);
arm_addx (code, ARMREG_IP0, rn, ARMREG_IP0);
arm_strfpx (code, rt, ARMREG_IP0, 0);
}
return code;
}
static WARN_UNUSED_RESULT guint8*
emit_strx (guint8 *code, int rt, int rn, int imm)
{
if (arm_is_strx_imm (imm)) {
arm_strx (code, rt, rn, imm);
} else {
g_assert (rt != ARMREG_IP0);
g_assert (rn != ARMREG_IP0);
code = emit_imm (code, ARMREG_IP0, imm);
arm_strx_reg (code, rt, rn, ARMREG_IP0);
}
return code;
}
static WARN_UNUSED_RESULT guint8*
emit_ldrb (guint8 *code, int rt, int rn, int imm)
{
if (arm_is_pimm12_scaled (imm, 1)) {
arm_ldrb (code, rt, rn, imm);
} else {
g_assert (rt != ARMREG_IP0);
g_assert (rn != ARMREG_IP0);
code = emit_imm (code, ARMREG_IP0, imm);
arm_ldrb_reg (code, rt, rn, ARMREG_IP0);
}
return code;
}
static WARN_UNUSED_RESULT guint8*
emit_ldrsbx (guint8 *code, int rt, int rn, int imm)
{
if (arm_is_pimm12_scaled (imm, 1)) {
arm_ldrsbx (code, rt, rn, imm);
} else {
g_assert (rt != ARMREG_IP0);
g_assert (rn != ARMREG_IP0);
code = emit_imm (code, ARMREG_IP0, imm);
arm_ldrsbx_reg (code, rt, rn, ARMREG_IP0);
}
return code;
}
static WARN_UNUSED_RESULT guint8*
emit_ldrh (guint8 *code, int rt, int rn, int imm)
{
if (arm_is_pimm12_scaled (imm, 2)) {
arm_ldrh (code, rt, rn, imm);
} else {
g_assert (rt != ARMREG_IP0);
g_assert (rn != ARMREG_IP0);
code = emit_imm (code, ARMREG_IP0, imm);
arm_ldrh_reg (code, rt, rn, ARMREG_IP0);
}
return code;
}
static WARN_UNUSED_RESULT guint8*
emit_ldrshx (guint8 *code, int rt, int rn, int imm)
{
if (arm_is_pimm12_scaled (imm, 2)) {
arm_ldrshx (code, rt, rn, imm);
} else {
g_assert (rt != ARMREG_IP0);
g_assert (rn != ARMREG_IP0);
code = emit_imm (code, ARMREG_IP0, imm);
arm_ldrshx_reg (code, rt, rn, ARMREG_IP0);
}
return code;
}
static WARN_UNUSED_RESULT guint8*
emit_ldrswx (guint8 *code, int rt, int rn, int imm)
{
if (arm_is_pimm12_scaled (imm, 4)) {
arm_ldrswx (code, rt, rn, imm);
} else {
g_assert (rt != ARMREG_IP0);
g_assert (rn != ARMREG_IP0);
code = emit_imm (code, ARMREG_IP0, imm);
arm_ldrswx_reg (code, rt, rn, ARMREG_IP0);
}
return code;
}
static WARN_UNUSED_RESULT guint8*
emit_ldrw (guint8 *code, int rt, int rn, int imm)
{
if (arm_is_pimm12_scaled (imm, 4)) {
arm_ldrw (code, rt, rn, imm);
} else {
g_assert (rn != ARMREG_IP0);
code = emit_imm (code, ARMREG_IP0, imm);
arm_ldrw_reg (code, rt, rn, ARMREG_IP0);
}
return code;
}
static WARN_UNUSED_RESULT guint8*
emit_ldrx (guint8 *code, int rt, int rn, int imm)
{
if (arm_is_pimm12_scaled (imm, 8)) {
arm_ldrx (code, rt, rn, imm);
} else {
g_assert (rn != ARMREG_IP0);
code = emit_imm (code, ARMREG_IP0, imm);
arm_ldrx_reg (code, rt, rn, ARMREG_IP0);
}
return code;
}
static WARN_UNUSED_RESULT guint8*
emit_ldrfpw (guint8 *code, int rt, int rn, int imm)
{
if (arm_is_pimm12_scaled (imm, 4)) {
arm_ldrfpw (code, rt, rn, imm);
} else {
g_assert (rn != ARMREG_IP0);
code = emit_imm (code, ARMREG_IP0, imm);
arm_addx (code, ARMREG_IP0, rn, ARMREG_IP0);
arm_ldrfpw (code, rt, ARMREG_IP0, 0);
}
return code;
}
static WARN_UNUSED_RESULT guint8*
emit_ldrfpx (guint8 *code, int rt, int rn, int imm)
{
if (arm_is_pimm12_scaled (imm, 8)) {
arm_ldrfpx (code, rt, rn, imm);
} else {
g_assert (rn != ARMREG_IP0);
code = emit_imm (code, ARMREG_IP0, imm);
arm_addx (code, ARMREG_IP0, rn, ARMREG_IP0);
arm_ldrfpx (code, rt, ARMREG_IP0, 0);
}
return code;
}
guint8*
mono_arm_emit_ldrx (guint8 *code, int rt, int rn, int imm)
{
return emit_ldrx (code, rt, rn, imm);
}
static guint8*
emit_call (MonoCompile *cfg, guint8* code, MonoJumpInfoType patch_type, gconstpointer data)
{
/*
mono_add_patch_info_rel (cfg, code - cfg->native_code, patch_type, data, MONO_R_ARM64_IMM);
code = emit_imm64_template (code, ARMREG_LR);
arm_blrx (code, ARMREG_LR);
*/
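	/* Emit a bl with a placeholder displacement; arm_patch_full () retargets it,
	 * going through a thunk if the final target is out of bl range. */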
mono_add_patch_info_rel (cfg, code - cfg->native_code, patch_type, data, MONO_R_ARM64_BL);
arm_bl (code, code);
cfg->thunk_area += THUNK_SIZE;
return code;
}
static guint8*
emit_aotconst_full (MonoCompile *cfg, MonoJumpInfo **ji, guint8 *code, guint8 *start, int dreg, guint32 patch_type, gconstpointer data)
{
if (cfg)
mono_add_patch_info (cfg, code - cfg->native_code, (MonoJumpInfoType)patch_type, data);
else
*ji = mono_patch_info_list_prepend (*ji, code - start, (MonoJumpInfoType)patch_type, data);
/* See arch_emit_got_access () in aot-compiler.c */
arm_ldrx_lit (code, dreg, 0);
arm_nop (code);
arm_nop (code);
return code;
}
static guint8*
emit_aotconst (MonoCompile *cfg, guint8 *code, int dreg, guint32 patch_type, gconstpointer data)
{
return emit_aotconst_full (cfg, NULL, code, NULL, dreg, patch_type, data);
}
/*
* mono_arm_emit_aotconst:
*
* Emit code to load an AOT constant into DREG. Usable from trampolines.
*/
guint8*
mono_arm_emit_aotconst (gpointer ji, guint8 *code, guint8 *code_start, int dreg, guint32 patch_type, gconstpointer data)
{
return emit_aotconst_full (NULL, (MonoJumpInfo**)ji, code, code_start, dreg, patch_type, data);
}
gboolean
mono_arch_have_fast_tls (void)
{
#ifdef TARGET_IOS
return FALSE;
#else
return TRUE;
#endif
}
static guint8*
emit_tls_get (guint8 *code, int dreg, int tls_offset)
{
arm_mrs (code, dreg, ARM_MRS_REG_TPIDR_EL0);
if (tls_offset < 256) {
arm_ldrx (code, dreg, dreg, tls_offset);
} else {
code = emit_addx_imm (code, dreg, dreg, tls_offset);
arm_ldrx (code, dreg, dreg, 0);
}
return code;
}
static guint8*
emit_tls_set (guint8 *code, int sreg, int tls_offset)
{
int tmpreg = ARMREG_IP0;
g_assert (sreg != tmpreg);
arm_mrs (code, tmpreg, ARM_MRS_REG_TPIDR_EL0);
if (tls_offset < 256) {
arm_strx (code, sreg, tmpreg, tls_offset);
} else {
code = emit_addx_imm (code, tmpreg, tmpreg, tls_offset);
arm_strx (code, sreg, tmpreg, 0);
}
return code;
}
/*
 * Emits
 * - mov sp, fp
 * - ldp fp, lr, [sp], #stack_offset
 * Clobbers the registers in TEMP_REGS (ip0 and/or ip1).
 */
WARN_UNUSED_RESULT guint8*
mono_arm_emit_destroy_frame (guint8 *code, int stack_offset, guint64 temp_regs)
{
// At least one of these registers must be available, or both.
gboolean const temp0 = (temp_regs & (1 << ARMREG_IP0)) != 0;
gboolean const temp1 = (temp_regs & (1 << ARMREG_IP1)) != 0;
g_assert (temp0 || temp1);
int const temp = temp0 ? ARMREG_IP0 : ARMREG_IP1;
arm_movspx (code, ARMREG_SP, ARMREG_FP);
if (arm_is_ldpx_imm (stack_offset)) {
arm_ldpx_post (code, ARMREG_FP, ARMREG_LR, ARMREG_SP, stack_offset);
} else {
arm_ldpx (code, ARMREG_FP, ARMREG_LR, ARMREG_SP, 0);
/* sp += stack_offset */
if (temp0 && temp1) {
code = emit_addx_sp_imm (code, stack_offset);
} else {
int imm = stack_offset;
/* Can't use addx_sp_imm () since we can't clobber both ip0/ip1 */
arm_addx_imm (code, temp, ARMREG_SP, 0);
while (imm > 256) {
arm_addx_imm (code, temp, temp, 256);
imm -= 256;
}
arm_addx_imm (code, ARMREG_SP, temp, imm);
}
}
return code;
}
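/* +-32 MB displacement check; conservative with respect to the +-128 MB range of b/bl on arm64 */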
#define is_call_imm(diff) ((gint)(diff) >= -33554432 && (gint)(diff) <= 33554431)
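/*
 * emit_thunk:
 *
 *   Emit a THUNK_SIZE (16 byte) branch thunk: "ldr ip0, <pc + 8>; br ip0"
 * followed by the 64-bit target address. Retargeting an existing thunk only
 * requires rewriting the trailing target word.
 */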
static guint8*
emit_thunk (guint8 *code, gconstpointer target)
{
guint8 *p = code;
arm_ldrx_lit (code, ARMREG_IP0, code + 8);
arm_brx (code, ARMREG_IP0);
*(guint64*)code = (guint64)target;
code += sizeof (guint64);
mono_arch_flush_icache (p, code - p);
return code;
}
static gpointer
create_thunk (MonoCompile *cfg, guchar *code, const guchar *target)
{
MonoJitInfo *ji;
MonoThunkJitInfo *info;
guint8 *thunks, *p;
int thunks_size;
guint8 *orig_target;
guint8 *target_thunk;
MonoJitMemoryManager* jit_mm;
if (cfg) {
		/*
		 * This can be called multiple times during JITting;
		 * save the current position in cfg->arch to avoid
		 * doing an O(n^2) search.
		 */
if (!cfg->arch.thunks) {
cfg->arch.thunks = cfg->thunks;
cfg->arch.thunks_size = cfg->thunk_area;
}
thunks = cfg->arch.thunks;
thunks_size = cfg->arch.thunks_size;
if (!thunks_size) {
g_print ("thunk failed %p->%p, thunk space=%d method %s", code, target, thunks_size, mono_method_full_name (cfg->method, TRUE));
g_assert_not_reached ();
}
g_assert (*(guint32*)thunks == 0);
emit_thunk (thunks, target);
cfg->arch.thunks += THUNK_SIZE;
cfg->arch.thunks_size -= THUNK_SIZE;
return thunks;
} else {
ji = mini_jit_info_table_find (code);
g_assert (ji);
info = mono_jit_info_get_thunk_info (ji);
g_assert (info);
thunks = (guint8*)ji->code_start + info->thunks_offset;
thunks_size = info->thunks_size;
orig_target = mono_arch_get_call_target (code + 4);
/* Arbitrary lock */
jit_mm = get_default_jit_mm ();
jit_mm_lock (jit_mm);
target_thunk = NULL;
if (orig_target >= thunks && orig_target < thunks + thunks_size) {
/* The call already points to a thunk, because of trampolines etc. */
target_thunk = orig_target;
} else {
for (p = thunks; p < thunks + thunks_size; p += THUNK_SIZE) {
if (((guint32*)p) [0] == 0) {
/* Free entry */
target_thunk = p;
break;
} else if (((guint64*)p) [1] == (guint64)target) {
/* Thunk already points to target */
target_thunk = p;
break;
}
}
}
//printf ("THUNK: %p %p %p\n", code, target, target_thunk);
if (!target_thunk) {
jit_mm_unlock (jit_mm);
g_print ("thunk failed %p->%p, thunk space=%d method %s", code, target, thunks_size, cfg ? mono_method_full_name (cfg->method, TRUE) : mono_method_full_name (jinfo_get_method (ji), TRUE));
g_assert_not_reached ();
}
emit_thunk (target_thunk, target);
jit_mm_unlock (jit_mm);
return target_thunk;
}
}
static void
arm_patch_full (MonoCompile *cfg, guint8 *code, guint8 *target, int relocation)
{
switch (relocation) {
case MONO_R_ARM64_B:
target = MINI_FTNPTR_TO_ADDR (target);
if (arm_is_bl_disp (code, target)) {
arm_b (code, target);
} else {
gpointer thunk;
thunk = create_thunk (cfg, code, target);
g_assert (arm_is_bl_disp (code, thunk));
arm_b (code, thunk);
}
break;
case MONO_R_ARM64_BCC: {
int cond;
cond = arm_get_bcc_cond (code);
arm_bcc (code, cond, target);
break;
}
case MONO_R_ARM64_CBZ:
arm_set_cbz_target (code, target);
break;
case MONO_R_ARM64_IMM: {
guint64 imm = (guint64)target;
int dreg;
/* emit_imm64_template () */
dreg = arm_get_movzx_rd (code);
arm_movzx (code, dreg, imm & 0xffff, 0);
arm_movkx (code, dreg, (imm >> 16) & 0xffff, 16);
arm_movkx (code, dreg, (imm >> 32) & 0xffff, 32);
arm_movkx (code, dreg, (imm >> 48) & 0xffff, 48);
break;
}
case MONO_R_ARM64_BL:
target = MINI_FTNPTR_TO_ADDR (target);
if (arm_is_bl_disp (code, target)) {
arm_bl (code, target);
} else {
gpointer thunk;
thunk = create_thunk (cfg, code, target);
g_assert (arm_is_bl_disp (code, thunk));
arm_bl (code, thunk);
}
break;
default:
g_assert_not_reached ();
}
}
static void
arm_patch_rel (guint8 *code, guint8 *target, int relocation)
{
arm_patch_full (NULL, code, target, relocation);
}
void
mono_arm_patch (guint8 *code, guint8 *target, int relocation)
{
arm_patch_rel (code, target, relocation);
}
void
mono_arch_patch_code_new (MonoCompile *cfg, guint8 *code, MonoJumpInfo *ji, gpointer target)
{
guint8 *ip;
ip = ji->ip.i + code;
switch (ji->type) {
case MONO_PATCH_INFO_METHOD_JUMP:
/* ji->relocation is not set by the caller */
arm_patch_full (cfg, ip, (guint8*)target, MONO_R_ARM64_B);
mono_arch_flush_icache (ip, 8);
break;
	case MONO_PATCH_INFO_NONE:
		break;
	default:
		arm_patch_full (cfg, ip, (guint8*)target, ji->relocation);
		break;
	}
}
void
mono_arch_flush_register_windows (void)
{
}
MonoMethod*
mono_arch_find_imt_method (host_mgreg_t *regs, guint8 *code)
{
return (MonoMethod*)regs [MONO_ARCH_RGCTX_REG];
}
MonoVTable*
mono_arch_find_static_call_vtable (host_mgreg_t *regs, guint8 *code)
{
return (MonoVTable*)regs [MONO_ARCH_RGCTX_REG];
}
GSList*
mono_arch_get_cie_program (void)
{
GSList *l = NULL;
mono_add_unwind_op_def_cfa (l, (guint8*)NULL, (guint8*)NULL, ARMREG_SP, 0);
return l;
}
host_mgreg_t
mono_arch_context_get_int_reg (MonoContext *ctx, int reg)
{
return ctx->regs [reg];
}
host_mgreg_t*
mono_arch_context_get_int_reg_address (MonoContext *ctx, int reg)
{
return &ctx->regs [reg];
}
void
mono_arch_context_set_int_reg (MonoContext *ctx, int reg, host_mgreg_t val)
{
ctx->regs [reg] = val;
}
/*
* mono_arch_set_target:
*
* Set the target architecture the JIT backend should generate code for, in the form
* of a GNU target triplet. Only used in AOT mode.
*/
void
mono_arch_set_target (char *mtriple)
{
if (strstr (mtriple, "darwin") || strstr (mtriple, "ios")) {
ios_abi = TRUE;
}
}
static void
add_general (CallInfo *cinfo, ArgInfo *ainfo, int size, gboolean sign)
{
if (cinfo->gr >= PARAM_REGS) {
ainfo->storage = ArgOnStack;
/*
* FIXME: The vararg argument handling code in ves_icall_System_ArgIterator_IntGetNextArg
* assumes every argument is allocated to a separate full size stack slot.
*/
if (ios_abi && !cinfo->vararg) {
/* Assume size == align */
} else {
/* Put arguments into 8 byte aligned stack slots */
size = 8;
sign = FALSE;
}
cinfo->stack_usage = ALIGN_TO (cinfo->stack_usage, size);
ainfo->offset = cinfo->stack_usage;
ainfo->slot_size = size;
ainfo->sign = sign;
cinfo->stack_usage += size;
} else {
ainfo->storage = ArgInIReg;
ainfo->reg = cinfo->gr;
cinfo->gr ++;
}
}
static void
add_fp (CallInfo *cinfo, ArgInfo *ainfo, gboolean single)
{
int size = single ? 4 : 8;
if (cinfo->fr >= FP_PARAM_REGS) {
ainfo->storage = single ? ArgOnStackR4 : ArgOnStackR8;
if (ios_abi) {
cinfo->stack_usage = ALIGN_TO (cinfo->stack_usage, size);
ainfo->offset = cinfo->stack_usage;
ainfo->slot_size = size;
cinfo->stack_usage += size;
} else {
ainfo->offset = cinfo->stack_usage;
ainfo->slot_size = 8;
/* Put arguments into 8 byte aligned stack slots */
cinfo->stack_usage += 8;
}
} else {
if (single)
ainfo->storage = ArgInFRegR4;
else
ainfo->storage = ArgInFReg;
ainfo->reg = cinfo->fr;
cinfo->fr ++;
}
}
static gboolean
is_hfa (MonoType *t, int *out_nfields, int *out_esize, int *field_offsets)
{
MonoClass *klass;
gpointer iter;
MonoClassField *field;
MonoType *ftype, *prev_ftype = NULL;
int i, nfields = 0;
klass = mono_class_from_mono_type_internal (t);
iter = NULL;
while ((field = mono_class_get_fields_internal (klass, &iter))) {
if (field->type->attrs & FIELD_ATTRIBUTE_STATIC)
continue;
ftype = mono_field_get_type_internal (field);
ftype = mini_get_underlying_type (ftype);
if (MONO_TYPE_ISSTRUCT (ftype)) {
int nested_nfields, nested_esize;
int nested_field_offsets [16];
if (!is_hfa (ftype, &nested_nfields, &nested_esize, nested_field_offsets))
return FALSE;
if (nested_esize == 4)
ftype = m_class_get_byval_arg (mono_defaults.single_class);
else
ftype = m_class_get_byval_arg (mono_defaults.double_class);
if (prev_ftype && prev_ftype->type != ftype->type)
return FALSE;
prev_ftype = ftype;
for (i = 0; i < nested_nfields; ++i) {
if (nfields + i < 4)
field_offsets [nfields + i] = field->offset - MONO_ABI_SIZEOF (MonoObject) + nested_field_offsets [i];
}
nfields += nested_nfields;
} else {
if (!(!m_type_is_byref (ftype) && (ftype->type == MONO_TYPE_R4 || ftype->type == MONO_TYPE_R8)))
return FALSE;
if (prev_ftype && prev_ftype->type != ftype->type)
return FALSE;
prev_ftype = ftype;
if (nfields < 4)
field_offsets [nfields] = field->offset - MONO_ABI_SIZEOF (MonoObject);
nfields ++;
}
}
if (nfields == 0 || nfields > 4)
return FALSE;
*out_nfields = nfields;
*out_esize = prev_ftype->type == MONO_TYPE_R4 ? 4 : 8;
return TRUE;
}
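/*
 * Example: struct { float x; float y; } is an HFA with nfields == 2 and
 * esize == 4, so add_valuetype () passes it in two consecutive fp registers.
 */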
static void
add_valuetype (CallInfo *cinfo, ArgInfo *ainfo, MonoType *t)
{
int i, size, align_size, nregs, nfields, esize;
int field_offsets [16];
guint32 align;
size = mini_type_stack_size_full (t, &align, cinfo->pinvoke);
align_size = ALIGN_TO (size, 8);
nregs = align_size / 8;
if (is_hfa (t, &nfields, &esize, field_offsets)) {
/*
* The struct might include nested float structs aligned at 8,
		 * so we need to keep track of the offsets of the individual fields.
*/
if (cinfo->fr + nfields <= FP_PARAM_REGS) {
ainfo->storage = ArgHFA;
ainfo->reg = cinfo->fr;
ainfo->nregs = nfields;
ainfo->size = size;
ainfo->esize = esize;
for (i = 0; i < nfields; ++i)
ainfo->foffsets [i] = field_offsets [i];
cinfo->fr += ainfo->nregs;
} else {
ainfo->nfregs_to_skip = FP_PARAM_REGS > cinfo->fr ? FP_PARAM_REGS - cinfo->fr : 0;
cinfo->fr = FP_PARAM_REGS;
size = ALIGN_TO (size, 8);
ainfo->storage = ArgVtypeOnStack;
cinfo->stack_usage = ALIGN_TO (cinfo->stack_usage, align);
ainfo->offset = cinfo->stack_usage;
ainfo->size = size;
ainfo->hfa = TRUE;
ainfo->nregs = nfields;
ainfo->esize = esize;
cinfo->stack_usage += size;
}
return;
}
if (align_size > 16) {
ainfo->storage = ArgVtypeByRef;
ainfo->size = size;
return;
}
if (cinfo->gr + nregs > PARAM_REGS) {
size = ALIGN_TO (size, 8);
ainfo->storage = ArgVtypeOnStack;
cinfo->stack_usage = ALIGN_TO (cinfo->stack_usage, align);
ainfo->offset = cinfo->stack_usage;
ainfo->size = size;
cinfo->stack_usage += size;
cinfo->gr = PARAM_REGS;
} else {
ainfo->storage = ArgVtypeInIRegs;
ainfo->reg = cinfo->gr;
ainfo->nregs = nregs;
ainfo->size = size;
cinfo->gr += nregs;
}
}
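/*
 * Illustrative example: a 24-byte struct of three gint64 fields has
 * align_size > 16, so it is passed ArgVtypeByRef (the caller passes the
 * address of a copy); a 16-byte struct of two gint64 fields fits into two
 * consecutive general registers (ArgVtypeInIRegs) when enough are free.
 */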
static void
add_param (CallInfo *cinfo, ArgInfo *ainfo, MonoType *t)
{
MonoType *ptype;
ptype = mini_get_underlying_type (t);
switch (ptype->type) {
case MONO_TYPE_I1:
add_general (cinfo, ainfo, 1, TRUE);
break;
case MONO_TYPE_U1:
add_general (cinfo, ainfo, 1, FALSE);
break;
case MONO_TYPE_I2:
add_general (cinfo, ainfo, 2, TRUE);
break;
case MONO_TYPE_U2:
add_general (cinfo, ainfo, 2, FALSE);
break;
#ifdef MONO_ARCH_ILP32
case MONO_TYPE_I:
#endif
case MONO_TYPE_I4:
add_general (cinfo, ainfo, 4, TRUE);
break;
#ifdef MONO_ARCH_ILP32
case MONO_TYPE_U:
case MONO_TYPE_PTR:
case MONO_TYPE_FNPTR:
case MONO_TYPE_OBJECT:
#endif
case MONO_TYPE_U4:
add_general (cinfo, ainfo, 4, FALSE);
break;
#ifndef MONO_ARCH_ILP32
case MONO_TYPE_I:
case MONO_TYPE_U:
case MONO_TYPE_PTR:
case MONO_TYPE_FNPTR:
case MONO_TYPE_OBJECT:
#endif
case MONO_TYPE_U8:
case MONO_TYPE_I8:
add_general (cinfo, ainfo, 8, FALSE);
break;
case MONO_TYPE_R8:
add_fp (cinfo, ainfo, FALSE);
break;
case MONO_TYPE_R4:
add_fp (cinfo, ainfo, TRUE);
break;
case MONO_TYPE_VALUETYPE:
case MONO_TYPE_TYPEDBYREF:
add_valuetype (cinfo, ainfo, ptype);
break;
case MONO_TYPE_VOID:
ainfo->storage = ArgNone;
break;
case MONO_TYPE_GENERICINST:
if (!mono_type_generic_inst_is_valuetype (ptype)) {
add_general (cinfo, ainfo, 8, FALSE);
} else if (mini_is_gsharedvt_variable_type (ptype)) {
/*
* Treat gsharedvt arguments as large vtypes
*/
ainfo->storage = ArgVtypeByRef;
ainfo->gsharedvt = TRUE;
} else {
add_valuetype (cinfo, ainfo, ptype);
}
break;
case MONO_TYPE_VAR:
case MONO_TYPE_MVAR:
g_assert (mini_is_gsharedvt_type (ptype));
ainfo->storage = ArgVtypeByRef;
ainfo->gsharedvt = TRUE;
break;
default:
g_assert_not_reached ();
break;
}
}
/*
* get_call_info:
*
* Obtain information about a call according to the calling convention.
*/
static CallInfo*
get_call_info (MonoMemPool *mp, MonoMethodSignature *sig)
{
CallInfo *cinfo;
ArgInfo *ainfo;
int n, pstart, pindex;
n = sig->hasthis + sig->param_count;
if (mp)
cinfo = mono_mempool_alloc0 (mp, sizeof (CallInfo) + (sizeof (ArgInfo) * n));
else
cinfo = g_malloc0 (sizeof (CallInfo) + (sizeof (ArgInfo) * n));
cinfo->nargs = n;
cinfo->pinvoke = sig->pinvoke;
// Constrain this to OSX only for now
#ifdef TARGET_OSX
cinfo->vararg = sig->call_convention == MONO_CALL_VARARG;
#endif
/* Return value */
add_param (cinfo, &cinfo->ret, sig->ret);
if (cinfo->ret.storage == ArgVtypeByRef)
cinfo->ret.reg = ARMREG_R8;
/* Reset state */
cinfo->gr = 0;
cinfo->fr = 0;
cinfo->stack_usage = 0;
/* Parameters */
if (sig->hasthis)
add_general (cinfo, cinfo->args + 0, 8, FALSE);
pstart = 0;
for (pindex = pstart; pindex < sig->param_count; ++pindex) {
ainfo = cinfo->args + sig->hasthis + pindex;
if ((sig->call_convention == MONO_CALL_VARARG) && (pindex == sig->sentinelpos)) {
/* Prevent implicit arguments and sig_cookie from
being passed in registers */
cinfo->gr = PARAM_REGS;
cinfo->fr = FP_PARAM_REGS;
/* Emit the signature cookie just before the implicit arguments */
add_param (cinfo, &cinfo->sig_cookie, mono_get_int_type ());
}
add_param (cinfo, ainfo, sig->params [pindex]);
if (ainfo->storage == ArgVtypeByRef) {
/* Pass the argument address in the next register */
if (cinfo->gr >= PARAM_REGS) {
ainfo->storage = ArgVtypeByRefOnStack;
cinfo->stack_usage = ALIGN_TO (cinfo->stack_usage, 8);
ainfo->offset = cinfo->stack_usage;
cinfo->stack_usage += 8;
} else {
ainfo->reg = cinfo->gr;
cinfo->gr ++;
}
}
}
/* Handle the case where there are no implicit arguments */
if ((sig->call_convention == MONO_CALL_VARARG) && (pindex == sig->sentinelpos)) {
/* Prevent implicit arguments and sig_cookie from
being passed in registers */
cinfo->gr = PARAM_REGS;
cinfo->fr = FP_PARAM_REGS;
/* Emit the signature cookie just before the implicit arguments */
add_param (cinfo, &cinfo->sig_cookie, mono_get_int_type ());
}
cinfo->stack_usage = ALIGN_TO (cinfo->stack_usage, MONO_ARCH_FRAME_ALIGNMENT);
return cinfo;
}
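/*
 * Illustrative example: for a signature (int, double, <24-byte struct>),
 * get_call_info () yields args [0] = ArgInIReg in x0, args [1] = ArgInFReg
 * in d0, and args [2] = ArgVtypeByRef with the copy's address assigned to
 * the next free integer register (x1 here) by the loop above.
 */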
static int
arg_need_temp (ArgInfo *ainfo)
{
if (ainfo->storage == ArgHFA && ainfo->esize == 4)
return ainfo->size;
return 0;
}
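/*
 * A temporary is only needed for single precision HFAs: each float occupies
 * the low 32 bits of its own 64-bit fregs slot, so arg_get_val ()/
 * arg_set_val () below compact the values into, or expand them from, the
 * packed in-memory struct layout.
 */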
static gpointer
arg_get_storage (CallContext *ccontext, ArgInfo *ainfo)
{
switch (ainfo->storage) {
case ArgVtypeInIRegs:
case ArgInIReg:
return &ccontext->gregs [ainfo->reg];
case ArgInFReg:
case ArgInFRegR4:
case ArgHFA:
return &ccontext->fregs [ainfo->reg];
case ArgOnStack:
case ArgOnStackR4:
case ArgOnStackR8:
case ArgVtypeOnStack:
return ccontext->stack + ainfo->offset;
case ArgVtypeByRef:
return (gpointer) ccontext->gregs [ainfo->reg];
default:
g_error ("Arg storage type not yet supported");
}
}
static void
arg_get_val (CallContext *ccontext, ArgInfo *ainfo, gpointer dest)
{
g_assert (arg_need_temp (ainfo));
float *dest_float = (float*)dest;
for (int k = 0; k < ainfo->nregs; k++) {
*dest_float = *(float*)&ccontext->fregs [ainfo->reg + k];
dest_float++;
}
}
static void
arg_set_val (CallContext *ccontext, ArgInfo *ainfo, gpointer src)
{
g_assert (arg_need_temp (ainfo));
float *src_float = (float*)src;
for (int k = 0; k < ainfo->nregs; k++) {
*(float*)&ccontext->fregs [ainfo->reg + k] = *src_float;
src_float++;
}
}
/* Set arguments in the ccontext (for i2n entry) */
void
mono_arch_set_native_call_context_args (CallContext *ccontext, gpointer frame, MonoMethodSignature *sig)
{
const MonoEECallbacks *interp_cb = mini_get_interp_callbacks ();
CallInfo *cinfo = get_call_info (NULL, sig);
gpointer storage;
ArgInfo *ainfo;
memset (ccontext, 0, sizeof (CallContext));
ccontext->stack_size = ALIGN_TO (cinfo->stack_usage, MONO_ARCH_FRAME_ALIGNMENT);
if (ccontext->stack_size)
ccontext->stack = (guint8*)g_calloc (1, ccontext->stack_size);
if (sig->ret->type != MONO_TYPE_VOID) {
ainfo = &cinfo->ret;
if (ainfo->storage == ArgVtypeByRef) {
storage = interp_cb->frame_arg_to_storage ((MonoInterpFrameHandle)frame, sig, -1);
ccontext->gregs [cinfo->ret.reg] = (gsize)storage;
}
}
g_assert (!sig->hasthis);
for (int i = 0; i < sig->param_count; i++) {
ainfo = &cinfo->args [i];
if (ainfo->storage == ArgVtypeByRef) {
ccontext->gregs [ainfo->reg] = (host_mgreg_t)interp_cb->frame_arg_to_storage ((MonoInterpFrameHandle)frame, sig, i);
continue;
}
int temp_size = arg_need_temp (ainfo);
if (temp_size)
storage = alloca (temp_size); // FIXME? alloca in a loop
else
storage = arg_get_storage (ccontext, ainfo);
interp_cb->frame_arg_to_data ((MonoInterpFrameHandle)frame, sig, i, storage);
if (temp_size)
arg_set_val (ccontext, ainfo, storage);
}
g_free (cinfo);
}
/* Set return value in the ccontext (for n2i return) */
void
mono_arch_set_native_call_context_ret (CallContext *ccontext, gpointer frame, MonoMethodSignature *sig, gpointer retp)
{
const MonoEECallbacks *interp_cb;
CallInfo *cinfo;
gpointer storage;
ArgInfo *ainfo;
if (sig->ret->type == MONO_TYPE_VOID)
return;
interp_cb = mini_get_interp_callbacks ();
cinfo = get_call_info (NULL, sig);
ainfo = &cinfo->ret;
if (retp) {
g_assert (ainfo->storage == ArgVtypeByRef);
interp_cb->frame_arg_to_data ((MonoInterpFrameHandle)frame, sig, -1, retp);
} else {
g_assert (ainfo->storage != ArgVtypeByRef);
int temp_size = arg_need_temp (ainfo);
if (temp_size)
storage = alloca (temp_size);
else
storage = arg_get_storage (ccontext, ainfo);
memset (ccontext, 0, sizeof (CallContext)); // FIXME
interp_cb->frame_arg_to_data ((MonoInterpFrameHandle)frame, sig, -1, storage);
if (temp_size)
arg_set_val (ccontext, ainfo, storage);
}
g_free (cinfo);
}
/* Gets the arguments from ccontext (for n2i entry) */
gpointer
mono_arch_get_native_call_context_args (CallContext *ccontext, gpointer frame, MonoMethodSignature *sig)
{
const MonoEECallbacks *interp_cb = mini_get_interp_callbacks ();
CallInfo *cinfo = get_call_info (NULL, sig);
gpointer storage;
ArgInfo *ainfo;
for (int i = 0; i < sig->param_count + sig->hasthis; i++) {
ainfo = &cinfo->args [i];
int temp_size = arg_need_temp (ainfo);
if (temp_size) {
storage = alloca (temp_size); // FIXME? alloca in a loop
arg_get_val (ccontext, ainfo, storage);
} else {
storage = arg_get_storage (ccontext, ainfo);
}
interp_cb->data_to_frame_arg ((MonoInterpFrameHandle)frame, sig, i, storage);
}
storage = NULL;
if (sig->ret->type != MONO_TYPE_VOID) {
ainfo = &cinfo->ret;
if (ainfo->storage == ArgVtypeByRef)
storage = (gpointer) ccontext->gregs [cinfo->ret.reg];
}
g_free (cinfo);
return storage;
}
/* Gets the return value from ccontext (for i2n exit) */
void
mono_arch_get_native_call_context_ret (CallContext *ccontext, gpointer frame, MonoMethodSignature *sig)
{
const MonoEECallbacks *interp_cb;
CallInfo *cinfo;
ArgInfo *ainfo;
gpointer storage;
if (sig->ret->type == MONO_TYPE_VOID)
return;
interp_cb = mini_get_interp_callbacks ();
cinfo = get_call_info (NULL, sig);
ainfo = &cinfo->ret;
if (ainfo->storage != ArgVtypeByRef) {
int temp_size = arg_need_temp (ainfo);
if (temp_size) {
storage = alloca (temp_size);
arg_get_val (ccontext, ainfo, storage);
} else {
storage = arg_get_storage (ccontext, ainfo);
}
interp_cb->data_to_frame_arg ((MonoInterpFrameHandle)frame, sig, -1, storage);
}
g_free (cinfo);
}
typedef struct {
MonoMethodSignature *sig;
CallInfo *cinfo;
MonoType *rtype;
MonoType **param_types;
int n_fpargs, n_fpret, nullable_area;
} ArchDynCallInfo;
static gboolean
dyn_call_supported (CallInfo *cinfo, MonoMethodSignature *sig)
{
int i;
// FIXME: Add more cases
switch (cinfo->ret.storage) {
case ArgNone:
case ArgInIReg:
case ArgInFReg:
case ArgInFRegR4:
case ArgVtypeByRef:
break;
case ArgVtypeInIRegs:
if (cinfo->ret.nregs > 2)
return FALSE;
break;
case ArgHFA:
break;
default:
return FALSE;
}
for (i = 0; i < cinfo->nargs; ++i) {
ArgInfo *ainfo = &cinfo->args [i];
switch (ainfo->storage) {
case ArgInIReg:
case ArgVtypeInIRegs:
case ArgInFReg:
case ArgInFRegR4:
case ArgHFA:
case ArgVtypeByRef:
case ArgVtypeByRefOnStack:
case ArgOnStack:
case ArgVtypeOnStack:
break;
default:
return FALSE;
}
}
return TRUE;
}
MonoDynCallInfo*
mono_arch_dyn_call_prepare (MonoMethodSignature *sig)
{
ArchDynCallInfo *info;
CallInfo *cinfo;
int i, aindex;
cinfo = get_call_info (NULL, sig);
if (!dyn_call_supported (cinfo, sig)) {
g_free (cinfo);
return NULL;
}
info = g_new0 (ArchDynCallInfo, 1);
// FIXME: Preprocess the info to speed up start_dyn_call ()
info->sig = sig;
info->cinfo = cinfo;
info->rtype = mini_get_underlying_type (sig->ret);
info->param_types = g_new0 (MonoType*, sig->param_count);
for (i = 0; i < sig->param_count; ++i)
info->param_types [i] = mini_get_underlying_type (sig->params [i]);
switch (cinfo->ret.storage) {
case ArgInFReg:
case ArgInFRegR4:
info->n_fpret = 1;
break;
case ArgHFA:
info->n_fpret = cinfo->ret.nregs;
break;
default:
break;
}
for (aindex = 0; aindex < sig->param_count; aindex++) {
MonoType *t = info->param_types [aindex];
if (m_type_is_byref (t))
continue;
switch (t->type) {
case MONO_TYPE_GENERICINST:
if (t->type == MONO_TYPE_GENERICINST && mono_class_is_nullable (mono_class_from_mono_type_internal (t))) {
MonoClass *klass = mono_class_from_mono_type_internal (t);
int size;
				/* Nullables need a temporary buffer; it's stored at the end of DynCallArgs.regs, after the stack args */
size = mono_class_value_size (klass, NULL);
info->nullable_area += size;
}
break;
default:
break;
}
}
return (MonoDynCallInfo*)info;
}
void
mono_arch_dyn_call_free (MonoDynCallInfo *info)
{
ArchDynCallInfo *ainfo = (ArchDynCallInfo*)info;
g_free (ainfo->cinfo);
g_free (ainfo->param_types);
g_free (ainfo);
}
int
mono_arch_dyn_call_get_buf_size (MonoDynCallInfo *info)
{
ArchDynCallInfo *ainfo = (ArchDynCallInfo*)info;
g_assert (ainfo->cinfo->stack_usage % MONO_ARCH_FRAME_ALIGNMENT == 0);
return sizeof (DynCallArgs) + ainfo->cinfo->stack_usage + ainfo->nullable_area;
}
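/*
 * Buffer layout (filled in by mono_arch_start_dyn_call ()): the DynCallArgs
 * header, then the stack arguments, then the scratch area used to unbox
 * Nullable<T> arguments.
 */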
static double
bitcast_r4_to_r8 (float f)
{
float *p = &f;
return *(double*)p;
}
static float
bitcast_r8_to_r4 (double f)
{
double *p = &f;
return *(float*)p;
}
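/*
 * The fpregs array stores every FP value in a 64-bit slot; the two helpers
 * above move a float's bit pattern into/out of the low 32 bits of such a
 * slot without numeric conversion (the upper 32 bits are don't-cares).
 */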
void
mono_arch_start_dyn_call (MonoDynCallInfo *info, gpointer **args, guint8 *ret, guint8 *buf)
{
ArchDynCallInfo *dinfo = (ArchDynCallInfo*)info;
DynCallArgs *p = (DynCallArgs*)buf;
int aindex, arg_index, greg, i, pindex;
MonoMethodSignature *sig = dinfo->sig;
CallInfo *cinfo = dinfo->cinfo;
int buffer_offset = 0;
guint8 *nullable_buffer;
p->res = 0;
p->ret = ret;
p->n_fpargs = dinfo->n_fpargs;
p->n_fpret = dinfo->n_fpret;
p->n_stackargs = cinfo->stack_usage / sizeof (host_mgreg_t);
arg_index = 0;
greg = 0;
pindex = 0;
/* Stored after the stack arguments */
nullable_buffer = (guint8*)&(p->regs [PARAM_REGS + 1 + (cinfo->stack_usage / sizeof (host_mgreg_t))]);
if (sig->hasthis)
p->regs [greg ++] = (host_mgreg_t)*(args [arg_index ++]);
if (cinfo->ret.storage == ArgVtypeByRef)
p->regs [ARMREG_R8] = (host_mgreg_t)ret;
for (aindex = pindex; aindex < sig->param_count; aindex++) {
MonoType *t = dinfo->param_types [aindex];
gpointer *arg = args [arg_index ++];
ArgInfo *ainfo = &cinfo->args [aindex + sig->hasthis];
int slot = -1;
if (ainfo->storage == ArgOnStack || ainfo->storage == ArgVtypeOnStack || ainfo->storage == ArgVtypeByRefOnStack) {
slot = PARAM_REGS + 1 + (ainfo->offset / sizeof (host_mgreg_t));
} else {
slot = ainfo->reg;
}
if (m_type_is_byref (t)) {
p->regs [slot] = (host_mgreg_t)*arg;
continue;
}
if (ios_abi && ainfo->storage == ArgOnStack) {
guint8 *stack_arg = (guint8*)&(p->regs [PARAM_REGS + 1]) + ainfo->offset;
gboolean handled = TRUE;
/* Special case arguments smaller than 1 machine word */
switch (t->type) {
case MONO_TYPE_U1:
*(guint8*)stack_arg = *(guint8*)arg;
break;
case MONO_TYPE_I1:
*(gint8*)stack_arg = *(gint8*)arg;
break;
case MONO_TYPE_U2:
*(guint16*)stack_arg = *(guint16*)arg;
break;
case MONO_TYPE_I2:
*(gint16*)stack_arg = *(gint16*)arg;
break;
case MONO_TYPE_I4:
*(gint32*)stack_arg = *(gint32*)arg;
break;
case MONO_TYPE_U4:
*(guint32*)stack_arg = *(guint32*)arg;
break;
default:
handled = FALSE;
break;
}
if (handled)
continue;
}
switch (t->type) {
case MONO_TYPE_OBJECT:
case MONO_TYPE_PTR:
case MONO_TYPE_I:
case MONO_TYPE_U:
case MONO_TYPE_I8:
case MONO_TYPE_U8:
p->regs [slot] = (host_mgreg_t)*arg;
break;
case MONO_TYPE_U1:
p->regs [slot] = *(guint8*)arg;
break;
case MONO_TYPE_I1:
p->regs [slot] = *(gint8*)arg;
break;
case MONO_TYPE_I2:
p->regs [slot] = *(gint16*)arg;
break;
case MONO_TYPE_U2:
p->regs [slot] = *(guint16*)arg;
break;
case MONO_TYPE_I4:
p->regs [slot] = *(gint32*)arg;
break;
case MONO_TYPE_U4:
p->regs [slot] = *(guint32*)arg;
break;
case MONO_TYPE_R4:
p->fpregs [ainfo->reg] = bitcast_r4_to_r8 (*(float*)arg);
p->n_fpargs ++;
break;
case MONO_TYPE_R8:
p->fpregs [ainfo->reg] = *(double*)arg;
p->n_fpargs ++;
break;
case MONO_TYPE_GENERICINST:
if (MONO_TYPE_IS_REFERENCE (t)) {
p->regs [slot] = (host_mgreg_t)*arg;
break;
} else {
if (t->type == MONO_TYPE_GENERICINST && mono_class_is_nullable (mono_class_from_mono_type_internal (t))) {
MonoClass *klass = mono_class_from_mono_type_internal (t);
guint8 *nullable_buf;
int size;
/*
					 * Use the nullable area at the end of DynCallArgs as a temporary buffer, since
					 * the data needs to stay available after this call if the nullable param is
					 * passed by ref.
*/
size = mono_class_value_size (klass, NULL);
nullable_buf = nullable_buffer + buffer_offset;
buffer_offset += size;
g_assert (buffer_offset <= dinfo->nullable_area);
/* The argument pointed to by arg is either a boxed vtype or null */
mono_nullable_init (nullable_buf, (MonoObject*)arg, klass);
arg = (gpointer*)nullable_buf;
					/* Fall through */
} else {
					/* Fall through */
}
}
case MONO_TYPE_VALUETYPE:
switch (ainfo->storage) {
case ArgVtypeInIRegs:
for (i = 0; i < ainfo->nregs; ++i)
p->regs [slot ++] = ((host_mgreg_t*)arg) [i];
break;
case ArgHFA:
if (ainfo->esize == 4) {
for (i = 0; i < ainfo->nregs; ++i)
p->fpregs [ainfo->reg + i] = bitcast_r4_to_r8 (((float*)arg) [ainfo->foffsets [i] / 4]);
} else {
for (i = 0; i < ainfo->nregs; ++i)
p->fpregs [ainfo->reg + i] = ((double*)arg) [ainfo->foffsets [i] / 8];
}
p->n_fpargs += ainfo->nregs;
break;
case ArgVtypeByRef:
case ArgVtypeByRefOnStack:
p->regs [slot] = (host_mgreg_t)arg;
break;
case ArgVtypeOnStack:
for (i = 0; i < ainfo->size / 8; ++i)
p->regs [slot ++] = ((host_mgreg_t*)arg) [i];
break;
default:
g_assert_not_reached ();
break;
}
break;
default:
g_assert_not_reached ();
}
}
}
void
mono_arch_finish_dyn_call (MonoDynCallInfo *info, guint8 *buf)
{
ArchDynCallInfo *ainfo = (ArchDynCallInfo*)info;
CallInfo *cinfo = ainfo->cinfo;
DynCallArgs *args = (DynCallArgs*)buf;
MonoType *ptype = ainfo->rtype;
guint8 *ret = args->ret;
host_mgreg_t res = args->res;
host_mgreg_t res2 = args->res2;
int i;
if (cinfo->ret.storage == ArgVtypeByRef)
return;
switch (ptype->type) {
case MONO_TYPE_VOID:
*(gpointer*)ret = NULL;
break;
case MONO_TYPE_OBJECT:
case MONO_TYPE_I:
case MONO_TYPE_U:
case MONO_TYPE_PTR:
*(gpointer*)ret = (gpointer)res;
break;
case MONO_TYPE_I1:
*(gint8*)ret = res;
break;
case MONO_TYPE_U1:
*(guint8*)ret = res;
break;
case MONO_TYPE_I2:
*(gint16*)ret = res;
break;
case MONO_TYPE_U2:
*(guint16*)ret = res;
break;
case MONO_TYPE_I4:
*(gint32*)ret = res;
break;
case MONO_TYPE_U4:
*(guint32*)ret = res;
break;
case MONO_TYPE_I8:
case MONO_TYPE_U8:
*(guint64*)ret = res;
break;
case MONO_TYPE_R4:
*(float*)ret = bitcast_r8_to_r4 (args->fpregs [0]);
break;
case MONO_TYPE_R8:
*(double*)ret = args->fpregs [0];
break;
case MONO_TYPE_GENERICINST:
if (MONO_TYPE_IS_REFERENCE (ptype)) {
*(gpointer*)ret = (gpointer)res;
break;
} else {
			/* Fall through */
}
case MONO_TYPE_VALUETYPE:
switch (ainfo->cinfo->ret.storage) {
case ArgVtypeInIRegs:
*(host_mgreg_t*)ret = res;
if (ainfo->cinfo->ret.nregs > 1)
((host_mgreg_t*)ret) [1] = res2;
break;
case ArgHFA:
/* Use the same area for returning fp values */
if (cinfo->ret.esize == 4) {
for (i = 0; i < cinfo->ret.nregs; ++i)
((float*)ret) [cinfo->ret.foffsets [i] / 4] = bitcast_r8_to_r4 (args->fpregs [i]);
} else {
for (i = 0; i < cinfo->ret.nregs; ++i)
((double*)ret) [cinfo->ret.foffsets [i] / 8] = args->fpregs [i];
}
break;
default:
g_assert_not_reached ();
break;
}
break;
default:
g_assert_not_reached ();
}
}
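/*
 * Dyn call flow, for reference: mono_arch_start_dyn_call () marshals the
 * managed arguments into DynCallArgs, the runtime performs the call using
 * that register/stack image, and mono_arch_finish_dyn_call () above copies
 * res/res2 and the FP registers back into the managed return buffer.
 */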
#if __APPLE__
G_BEGIN_DECLS
void sys_icache_invalidate (void *start, size_t len);
G_END_DECLS
#endif
void
mono_arch_flush_icache (guint8 *code, gint size)
{
#ifndef MONO_CROSS_COMPILE
#if __APPLE__
sys_icache_invalidate (code, size);
#else
/* Don't rely on GCC's __clear_cache implementation, as it caches
	 * icache/dcache cache line sizes, which can vary between cores on
* big.LITTLE architectures. */
guint64 end = (guint64) (code + size);
guint64 addr;
	/* Always go with a cache line size of 4 bytes, as this code isn't perf critical
* anyway. Reading the cache line size from a machine register can be racy
* on a big.LITTLE architecture if the cores don't have the same cache line
* sizes. */
const size_t icache_line_size = 4;
const size_t dcache_line_size = 4;
addr = (guint64) code & ~(guint64) (dcache_line_size - 1);
for (; addr < end; addr += dcache_line_size)
asm volatile("dc civac, %0" : : "r" (addr) : "memory");
asm volatile("dsb ish" : : : "memory");
addr = (guint64) code & ~(guint64) (icache_line_size - 1);
for (; addr < end; addr += icache_line_size)
asm volatile("ic ivau, %0" : : "r" (addr) : "memory");
asm volatile ("dsb ish" : : : "memory");
asm volatile ("isb" : : : "memory");
#endif
#endif
}
#ifndef DISABLE_JIT
gboolean
mono_arch_opcode_needs_emulation (MonoCompile *cfg, int opcode)
{
NOT_IMPLEMENTED;
return FALSE;
}
GList *
mono_arch_get_allocatable_int_vars (MonoCompile *cfg)
{
GList *vars = NULL;
int i;
for (i = 0; i < cfg->num_varinfo; i++) {
MonoInst *ins = cfg->varinfo [i];
MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
/* unused vars */
if (vmv->range.first_use.abs_pos >= vmv->range.last_use.abs_pos)
continue;
if ((ins->flags & (MONO_INST_IS_DEAD|MONO_INST_VOLATILE|MONO_INST_INDIRECT)) ||
(ins->opcode != OP_LOCAL && ins->opcode != OP_ARG))
continue;
if (mono_is_regsize_var (ins->inst_vtype)) {
g_assert (MONO_VARINFO (cfg, i)->reg == -1);
g_assert (i == vmv->idx);
vars = g_list_prepend (vars, vmv);
}
}
vars = mono_varlist_sort (cfg, vars, 0);
return vars;
}
GList *
mono_arch_get_global_int_regs (MonoCompile *cfg)
{
GList *regs = NULL;
int i;
/* r28 is reserved for cfg->arch.args_reg */
/* r27 is reserved for the imt argument */
for (i = ARMREG_R19; i <= ARMREG_R26; ++i)
regs = g_list_prepend (regs, GUINT_TO_POINTER (i));
return regs;
}
guint32
mono_arch_regalloc_cost (MonoCompile *cfg, MonoMethodVar *vmv)
{
MonoInst *ins = cfg->varinfo [vmv->idx];
if (ins->opcode == OP_ARG)
return 1;
else
return 2;
}
void
mono_arch_create_vars (MonoCompile *cfg)
{
MonoMethodSignature *sig;
CallInfo *cinfo;
sig = mono_method_signature_internal (cfg->method);
if (!cfg->arch.cinfo)
cfg->arch.cinfo = get_call_info (cfg->mempool, sig);
cinfo = cfg->arch.cinfo;
if (cinfo->ret.storage == ArgVtypeByRef) {
cfg->vret_addr = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL);
cfg->vret_addr->flags |= MONO_INST_VOLATILE;
}
if (cfg->gen_sdb_seq_points) {
MonoInst *ins;
if (cfg->compile_aot) {
ins = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL);
ins->flags |= MONO_INST_VOLATILE;
cfg->arch.seq_point_info_var = ins;
}
ins = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL);
ins->flags |= MONO_INST_VOLATILE;
cfg->arch.ss_tramp_var = ins;
ins = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL);
ins->flags |= MONO_INST_VOLATILE;
cfg->arch.bp_tramp_var = ins;
}
if (cfg->method->save_lmf) {
cfg->create_lmf_var = TRUE;
cfg->lmf_ir = TRUE;
}
}
void
mono_arch_allocate_vars (MonoCompile *cfg)
{
MonoMethodSignature *sig;
MonoInst *ins;
CallInfo *cinfo;
ArgInfo *ainfo;
int i, offset, size, align;
guint32 locals_stack_size, locals_stack_align;
gint32 *offsets;
/*
* Allocate arguments and locals to either register (OP_REGVAR) or to a stack slot (OP_REGOFFSET).
* Compute cfg->stack_offset and update cfg->used_int_regs.
*/
sig = mono_method_signature_internal (cfg->method);
if (!cfg->arch.cinfo)
cfg->arch.cinfo = get_call_info (cfg->mempool, sig);
cinfo = cfg->arch.cinfo;
/*
* The ARM64 ABI always uses a frame pointer.
* The instruction set prefers positive offsets, so fp points to the bottom of the
* frame, and stack slots are at positive offsets.
	 * If some arguments are received on the stack, their offsets relative to fp
	 * cannot be computed right now because the stack frame might grow due to spilling
* done by the local register allocator. To solve this, we reserve a register
* which points to them.
* The stack frame looks like this:
* args_reg -> <bottom of parent frame>
* <locals etc>
* fp -> <saved fp+lr>
* sp -> <localloc/params area>
*/
cfg->frame_reg = ARMREG_FP;
cfg->flags |= MONO_CFG_HAS_SPILLUP;
offset = 0;
/* Saved fp+lr */
offset += 16;
if (cinfo->stack_usage) {
g_assert (!(cfg->used_int_regs & (1 << ARMREG_R28)));
cfg->arch.args_reg = ARMREG_R28;
cfg->used_int_regs |= 1 << ARMREG_R28;
}
if (cfg->method->save_lmf) {
/* The LMF var is allocated normally */
} else {
/* Callee saved regs */
cfg->arch.saved_gregs_offset = offset;
for (i = 0; i < 32; ++i)
if ((MONO_ARCH_CALLEE_SAVED_REGS & (1 << i)) && (cfg->used_int_regs & (1 << i)))
offset += 8;
}
/* Return value */
switch (cinfo->ret.storage) {
case ArgNone:
break;
case ArgInIReg:
case ArgInFReg:
case ArgInFRegR4:
cfg->ret->opcode = OP_REGVAR;
cfg->ret->dreg = cinfo->ret.reg;
break;
case ArgVtypeInIRegs:
case ArgHFA:
/* Allocate a local to hold the result, the epilog will copy it to the correct place */
cfg->ret->opcode = OP_REGOFFSET;
cfg->ret->inst_basereg = cfg->frame_reg;
cfg->ret->inst_offset = offset;
if (cinfo->ret.storage == ArgHFA)
// FIXME:
offset += 64;
else
offset += 16;
break;
case ArgVtypeByRef:
		/* This variable will be initialized in the prolog from R8 */
cfg->vret_addr->opcode = OP_REGOFFSET;
cfg->vret_addr->inst_basereg = cfg->frame_reg;
cfg->vret_addr->inst_offset = offset;
offset += 8;
if (G_UNLIKELY (cfg->verbose_level > 1)) {
printf ("vret_addr =");
mono_print_ins (cfg->vret_addr);
}
break;
default:
g_assert_not_reached ();
break;
}
/* Arguments */
for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
ainfo = cinfo->args + i;
ins = cfg->args [i];
if (ins->opcode == OP_REGVAR)
continue;
ins->opcode = OP_REGOFFSET;
ins->inst_basereg = cfg->frame_reg;
switch (ainfo->storage) {
case ArgInIReg:
case ArgInFReg:
case ArgInFRegR4:
// FIXME: Use nregs/size
/* These will be copied to the stack in the prolog */
ins->inst_offset = offset;
offset += 8;
break;
case ArgOnStack:
case ArgOnStackR4:
case ArgOnStackR8:
case ArgVtypeOnStack:
/* These are in the parent frame */
g_assert (cfg->arch.args_reg);
ins->inst_basereg = cfg->arch.args_reg;
ins->inst_offset = ainfo->offset;
break;
case ArgVtypeInIRegs:
case ArgHFA:
ins->opcode = OP_REGOFFSET;
ins->inst_basereg = cfg->frame_reg;
/* These arguments are saved to the stack in the prolog */
ins->inst_offset = offset;
if (cfg->verbose_level >= 2)
printf ("arg %d allocated to %s+0x%0x.\n", i, mono_arch_regname (ins->inst_basereg), (int)ins->inst_offset);
if (ainfo->storage == ArgHFA)
// FIXME:
offset += 64;
else
offset += 16;
break;
case ArgVtypeByRefOnStack: {
MonoInst *vtaddr;
if (ainfo->gsharedvt) {
ins->opcode = OP_REGOFFSET;
ins->inst_basereg = cfg->arch.args_reg;
ins->inst_offset = ainfo->offset;
break;
}
/* The vtype address is in the parent frame */
g_assert (cfg->arch.args_reg);
MONO_INST_NEW (cfg, vtaddr, 0);
vtaddr->opcode = OP_REGOFFSET;
vtaddr->inst_basereg = cfg->arch.args_reg;
vtaddr->inst_offset = ainfo->offset;
/* Need an indirection */
ins->opcode = OP_VTARG_ADDR;
ins->inst_left = vtaddr;
break;
}
case ArgVtypeByRef: {
MonoInst *vtaddr;
if (ainfo->gsharedvt) {
ins->opcode = OP_REGOFFSET;
ins->inst_basereg = cfg->frame_reg;
ins->inst_offset = offset;
offset += 8;
break;
}
/* The vtype address is in a register, will be copied to the stack in the prolog */
MONO_INST_NEW (cfg, vtaddr, 0);
vtaddr->opcode = OP_REGOFFSET;
vtaddr->inst_basereg = cfg->frame_reg;
vtaddr->inst_offset = offset;
offset += 8;
/* Need an indirection */
ins->opcode = OP_VTARG_ADDR;
ins->inst_left = vtaddr;
break;
}
default:
g_assert_not_reached ();
break;
}
}
/* Allocate these first so they have a small offset, OP_SEQ_POINT depends on this */
// FIXME: Allocate these to registers
ins = cfg->arch.seq_point_info_var;
if (ins) {
size = 8;
align = 8;
offset += align - 1;
offset &= ~(align - 1);
ins->opcode = OP_REGOFFSET;
ins->inst_basereg = cfg->frame_reg;
ins->inst_offset = offset;
offset += size;
}
ins = cfg->arch.ss_tramp_var;
if (ins) {
size = 8;
align = 8;
offset += align - 1;
offset &= ~(align - 1);
ins->opcode = OP_REGOFFSET;
ins->inst_basereg = cfg->frame_reg;
ins->inst_offset = offset;
offset += size;
}
ins = cfg->arch.bp_tramp_var;
if (ins) {
size = 8;
align = 8;
offset += align - 1;
offset &= ~(align - 1);
ins->opcode = OP_REGOFFSET;
ins->inst_basereg = cfg->frame_reg;
ins->inst_offset = offset;
offset += size;
}
/* Locals */
offsets = mono_allocate_stack_slots (cfg, FALSE, &locals_stack_size, &locals_stack_align);
if (locals_stack_align)
offset = ALIGN_TO (offset, locals_stack_align);
for (i = cfg->locals_start; i < cfg->num_varinfo; i++) {
if (offsets [i] != -1) {
ins = cfg->varinfo [i];
ins->opcode = OP_REGOFFSET;
ins->inst_basereg = cfg->frame_reg;
ins->inst_offset = offset + offsets [i];
//printf ("allocated local %d to ", i); mono_print_tree_nl (ins);
}
}
offset += locals_stack_size;
offset = ALIGN_TO (offset, MONO_ARCH_FRAME_ALIGNMENT);
cfg->stack_offset = offset;
}
#ifdef ENABLE_LLVM
LLVMCallInfo*
mono_arch_get_llvm_call_info (MonoCompile *cfg, MonoMethodSignature *sig)
{
int i, n;
CallInfo *cinfo;
ArgInfo *ainfo;
LLVMCallInfo *linfo;
n = sig->param_count + sig->hasthis;
cinfo = get_call_info (cfg->mempool, sig);
linfo = mono_mempool_alloc0 (cfg->mempool, sizeof (LLVMCallInfo) + (sizeof (LLVMArgInfo) * n));
switch (cinfo->ret.storage) {
case ArgInIReg:
case ArgInFReg:
case ArgInFRegR4:
linfo->ret.storage = LLVMArgNormal;
break;
case ArgNone:
linfo->ret.storage = LLVMArgNone;
break;
case ArgVtypeByRef:
linfo->ret.storage = LLVMArgVtypeByRef;
break;
//
// FIXME: This doesn't work yet since the llvm backend represents these types as an i8
// array which is returned in int regs
//
case ArgHFA:
linfo->ret.storage = LLVMArgFpStruct;
linfo->ret.nslots = cinfo->ret.nregs;
linfo->ret.esize = cinfo->ret.esize;
break;
case ArgVtypeInIRegs:
/* LLVM models this by returning an int */
linfo->ret.storage = LLVMArgVtypeAsScalar;
linfo->ret.nslots = cinfo->ret.nregs;
linfo->ret.esize = cinfo->ret.esize;
break;
default:
g_assert_not_reached ();
break;
}
for (i = 0; i < n; ++i) {
LLVMArgInfo *lainfo = &linfo->args [i];
ainfo = cinfo->args + i;
lainfo->storage = LLVMArgNone;
switch (ainfo->storage) {
case ArgInIReg:
case ArgInFReg:
case ArgInFRegR4:
case ArgOnStack:
case ArgOnStackR4:
case ArgOnStackR8:
lainfo->storage = LLVMArgNormal;
break;
case ArgVtypeByRef:
case ArgVtypeByRefOnStack:
lainfo->storage = LLVMArgVtypeByRef;
break;
case ArgHFA: {
int j;
lainfo->storage = LLVMArgAsFpArgs;
lainfo->nslots = ainfo->nregs;
lainfo->esize = ainfo->esize;
for (j = 0; j < ainfo->nregs; ++j)
lainfo->pair_storage [j] = LLVMArgInFPReg;
break;
}
case ArgVtypeInIRegs:
lainfo->storage = LLVMArgAsIArgs;
lainfo->nslots = ainfo->nregs;
break;
case ArgVtypeOnStack:
if (ainfo->hfa) {
int j;
/* Same as above */
lainfo->storage = LLVMArgAsFpArgs;
lainfo->nslots = ainfo->nregs;
lainfo->esize = ainfo->esize;
lainfo->ndummy_fpargs = ainfo->nfregs_to_skip;
for (j = 0; j < ainfo->nregs; ++j)
lainfo->pair_storage [j] = LLVMArgInFPReg;
} else {
lainfo->storage = LLVMArgAsIArgs;
lainfo->nslots = ainfo->size / 8;
}
break;
default:
g_assert_not_reached ();
break;
}
}
return linfo;
}
#endif
static void
add_outarg_reg (MonoCompile *cfg, MonoCallInst *call, ArgStorage storage, int reg, MonoInst *arg)
{
MonoInst *ins;
switch (storage) {
case ArgInIReg:
MONO_INST_NEW (cfg, ins, OP_MOVE);
ins->dreg = mono_alloc_ireg_copy (cfg, arg->dreg);
ins->sreg1 = arg->dreg;
MONO_ADD_INS (cfg->cbb, ins);
mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, reg, FALSE);
break;
case ArgInFReg:
MONO_INST_NEW (cfg, ins, OP_FMOVE);
ins->dreg = mono_alloc_freg (cfg);
ins->sreg1 = arg->dreg;
MONO_ADD_INS (cfg->cbb, ins);
mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, reg, TRUE);
break;
case ArgInFRegR4:
if (COMPILE_LLVM (cfg))
MONO_INST_NEW (cfg, ins, OP_FMOVE);
else if (cfg->r4fp)
MONO_INST_NEW (cfg, ins, OP_RMOVE);
else
MONO_INST_NEW (cfg, ins, OP_ARM_SETFREG_R4);
ins->dreg = mono_alloc_freg (cfg);
ins->sreg1 = arg->dreg;
MONO_ADD_INS (cfg->cbb, ins);
mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, reg, TRUE);
break;
default:
g_assert_not_reached ();
break;
}
}
static void
emit_sig_cookie (MonoCompile *cfg, MonoCallInst *call, CallInfo *cinfo)
{
MonoMethodSignature *tmp_sig;
int sig_reg;
if (MONO_IS_TAILCALL_OPCODE (call))
NOT_IMPLEMENTED;
g_assert (cinfo->sig_cookie.storage == ArgOnStack);
/*
* mono_ArgIterator_Setup assumes the signature cookie is
* passed first and all the arguments which were before it are
* passed on the stack after the signature. So compensate by
* passing a different signature.
*/
tmp_sig = mono_metadata_signature_dup (call->signature);
tmp_sig->param_count -= call->signature->sentinelpos;
tmp_sig->sentinelpos = 0;
memcpy (tmp_sig->params, call->signature->params + call->signature->sentinelpos, tmp_sig->param_count * sizeof (MonoType*));
sig_reg = mono_alloc_ireg (cfg);
MONO_EMIT_NEW_SIGNATURECONST (cfg, sig_reg, tmp_sig);
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, cinfo->sig_cookie.offset, sig_reg);
}
void
mono_arch_emit_call (MonoCompile *cfg, MonoCallInst *call)
{
MonoMethodSignature *sig;
MonoInst *arg, *vtarg;
CallInfo *cinfo;
ArgInfo *ainfo;
int i;
sig = call->signature;
cinfo = get_call_info (cfg->mempool, sig);
switch (cinfo->ret.storage) {
case ArgVtypeInIRegs:
case ArgHFA:
if (MONO_IS_TAILCALL_OPCODE (call))
break;
/*
* The vtype is returned in registers, save the return area address in a local, and save the vtype into
* the location pointed to by it after call in emit_move_return_value ().
*/
if (!cfg->arch.vret_addr_loc) {
cfg->arch.vret_addr_loc = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL);
/* Prevent it from being register allocated or optimized away */
cfg->arch.vret_addr_loc->flags |= MONO_INST_VOLATILE;
}
MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->arch.vret_addr_loc->dreg, call->vret_var->dreg);
break;
case ArgVtypeByRef:
/* Pass the vtype return address in R8 */
g_assert (!MONO_IS_TAILCALL_OPCODE (call) || call->vret_var == cfg->vret_addr);
MONO_INST_NEW (cfg, vtarg, OP_MOVE);
vtarg->sreg1 = call->vret_var->dreg;
vtarg->dreg = mono_alloc_preg (cfg);
MONO_ADD_INS (cfg->cbb, vtarg);
mono_call_inst_add_outarg_reg (cfg, call, vtarg->dreg, cinfo->ret.reg, FALSE);
break;
default:
break;
}
for (i = 0; i < cinfo->nargs; ++i) {
ainfo = cinfo->args + i;
arg = call->args [i];
if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
/* Emit the signature cookie just before the implicit arguments */
emit_sig_cookie (cfg, call, cinfo);
}
switch (ainfo->storage) {
case ArgInIReg:
case ArgInFReg:
case ArgInFRegR4:
add_outarg_reg (cfg, call, ainfo->storage, ainfo->reg, arg);
break;
case ArgOnStack:
switch (ainfo->slot_size) {
case 8:
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, arg->dreg);
break;
case 4:
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, ARMREG_SP, ainfo->offset, arg->dreg);
break;
case 2:
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, ARMREG_SP, ainfo->offset, arg->dreg);
break;
case 1:
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, ARMREG_SP, ainfo->offset, arg->dreg);
break;
default:
g_assert_not_reached ();
break;
}
break;
case ArgOnStackR8:
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ARMREG_SP, ainfo->offset, arg->dreg);
break;
case ArgOnStackR4:
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG, ARMREG_SP, ainfo->offset, arg->dreg);
break;
case ArgVtypeInIRegs:
case ArgVtypeByRef:
case ArgVtypeByRefOnStack:
case ArgVtypeOnStack:
case ArgHFA: {
MonoInst *ins;
guint32 align;
guint32 size;
size = mono_class_value_size (arg->klass, &align);
MONO_INST_NEW (cfg, ins, OP_OUTARG_VT);
ins->sreg1 = arg->dreg;
ins->klass = arg->klass;
ins->backend.size = size;
ins->inst_p0 = call;
ins->inst_p1 = mono_mempool_alloc (cfg->mempool, sizeof (ArgInfo));
memcpy (ins->inst_p1, ainfo, sizeof (ArgInfo));
MONO_ADD_INS (cfg->cbb, ins);
break;
}
default:
g_assert_not_reached ();
break;
}
}
/* Handle the case where there are no implicit arguments */
if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (cinfo->nargs == sig->sentinelpos))
emit_sig_cookie (cfg, call, cinfo);
call->call_info = cinfo;
call->stack_usage = cinfo->stack_usage;
}
void
mono_arch_emit_outarg_vt (MonoCompile *cfg, MonoInst *ins, MonoInst *src)
{
MonoCallInst *call = (MonoCallInst*)ins->inst_p0;
ArgInfo *ainfo = (ArgInfo*)ins->inst_p1;
MonoInst *load;
int i;
if (ins->backend.size == 0 && !ainfo->gsharedvt)
return;
switch (ainfo->storage) {
case ArgVtypeInIRegs:
for (i = 0; i < ainfo->nregs; ++i) {
// FIXME: Smaller sizes
MONO_INST_NEW (cfg, load, OP_LOADI8_MEMBASE);
load->dreg = mono_alloc_ireg (cfg);
load->inst_basereg = src->dreg;
load->inst_offset = i * sizeof (target_mgreg_t);
MONO_ADD_INS (cfg->cbb, load);
add_outarg_reg (cfg, call, ArgInIReg, ainfo->reg + i, load);
}
break;
case ArgHFA:
for (i = 0; i < ainfo->nregs; ++i) {
if (ainfo->esize == 4)
MONO_INST_NEW (cfg, load, OP_LOADR4_MEMBASE);
else
MONO_INST_NEW (cfg, load, OP_LOADR8_MEMBASE);
load->dreg = mono_alloc_freg (cfg);
load->inst_basereg = src->dreg;
load->inst_offset = ainfo->foffsets [i];
MONO_ADD_INS (cfg->cbb, load);
add_outarg_reg (cfg, call, ainfo->esize == 4 ? ArgInFRegR4 : ArgInFReg, ainfo->reg + i, load);
}
break;
case ArgVtypeByRef:
case ArgVtypeByRefOnStack: {
MonoInst *vtaddr, *load, *arg;
/* Pass the vtype address in a reg/on the stack */
if (ainfo->gsharedvt) {
load = src;
} else {
/* Make a copy of the argument */
vtaddr = mono_compile_create_var (cfg, m_class_get_byval_arg (ins->klass), OP_LOCAL);
MONO_INST_NEW (cfg, load, OP_LDADDR);
load->inst_p0 = vtaddr;
vtaddr->flags |= MONO_INST_INDIRECT;
load->type = STACK_MP;
load->klass = vtaddr->klass;
load->dreg = mono_alloc_ireg (cfg);
MONO_ADD_INS (cfg->cbb, load);
mini_emit_memcpy (cfg, load->dreg, 0, src->dreg, 0, ainfo->size, 8);
}
if (ainfo->storage == ArgVtypeByRef) {
MONO_INST_NEW (cfg, arg, OP_MOVE);
arg->dreg = mono_alloc_preg (cfg);
arg->sreg1 = load->dreg;
MONO_ADD_INS (cfg->cbb, arg);
add_outarg_reg (cfg, call, ArgInIReg, ainfo->reg, arg);
} else {
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, load->dreg);
}
break;
}
case ArgVtypeOnStack:
for (i = 0; i < ainfo->size / 8; ++i) {
MONO_INST_NEW (cfg, load, OP_LOADI8_MEMBASE);
load->dreg = mono_alloc_ireg (cfg);
load->inst_basereg = src->dreg;
load->inst_offset = i * 8;
MONO_ADD_INS (cfg->cbb, load);
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, ARMREG_SP, ainfo->offset + (i * 8), load->dreg);
}
break;
default:
g_assert_not_reached ();
break;
}
}
void
mono_arch_emit_setret (MonoCompile *cfg, MonoMethod *method, MonoInst *val)
{
MonoMethodSignature *sig;
CallInfo *cinfo;
sig = mono_method_signature_internal (cfg->method);
if (!cfg->arch.cinfo)
cfg->arch.cinfo = get_call_info (cfg->mempool, sig);
cinfo = cfg->arch.cinfo;
switch (cinfo->ret.storage) {
case ArgNone:
break;
case ArgInIReg:
MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
break;
case ArgInFReg:
MONO_EMIT_NEW_UNALU (cfg, OP_FMOVE, cfg->ret->dreg, val->dreg);
break;
case ArgInFRegR4:
if (COMPILE_LLVM (cfg))
MONO_EMIT_NEW_UNALU (cfg, OP_FMOVE, cfg->ret->dreg, val->dreg);
else if (cfg->r4fp)
MONO_EMIT_NEW_UNALU (cfg, OP_RMOVE, cfg->ret->dreg, val->dreg);
else
MONO_EMIT_NEW_UNALU (cfg, OP_ARM_SETFREG_R4, cfg->ret->dreg, val->dreg);
break;
default:
g_assert_not_reached ();
break;
}
}
#ifndef DISABLE_JIT
gboolean
mono_arch_tailcall_supported (MonoCompile *cfg, MonoMethodSignature *caller_sig, MonoMethodSignature *callee_sig, gboolean virtual_)
{
g_assert (caller_sig);
g_assert (callee_sig);
CallInfo *caller_info = get_call_info (NULL, caller_sig);
CallInfo *callee_info = get_call_info (NULL, callee_sig);
gboolean res = IS_SUPPORTED_TAILCALL (callee_info->stack_usage <= caller_info->stack_usage)
&& IS_SUPPORTED_TAILCALL (caller_info->ret.storage == callee_info->ret.storage);
// FIXME Limit stack_usage to 1G. emit_ldrx / strx has 32bit limits.
res &= IS_SUPPORTED_TAILCALL (callee_info->stack_usage < (1 << 30));
res &= IS_SUPPORTED_TAILCALL (caller_info->stack_usage < (1 << 30));
// valuetype parameters are the address of a local
const ArgInfo *ainfo;
ainfo = callee_info->args + callee_sig->hasthis;
for (int i = 0; res && i < callee_sig->param_count; ++i) {
res = IS_SUPPORTED_TAILCALL (ainfo [i].storage != ArgVtypeByRef)
&& IS_SUPPORTED_TAILCALL (ainfo [i].storage != ArgVtypeByRefOnStack);
}
g_free (caller_info);
g_free (callee_info);
return res;
}
#endif
gboolean
mono_arch_is_inst_imm (int opcode, int imm_opcode, gint64 imm)
{
return (imm >= -((gint64)1<<31) && imm <= (((gint64)1<<31)-1));
}
void
mono_arch_peephole_pass_1 (MonoCompile *cfg, MonoBasicBlock *bb)
{
//NOT_IMPLEMENTED;
}
void
mono_arch_peephole_pass_2 (MonoCompile *cfg, MonoBasicBlock *bb)
{
//NOT_IMPLEMENTED;
}
#define ADD_NEW_INS(cfg,dest,op) do { \
MONO_INST_NEW ((cfg), (dest), (op)); \
mono_bblock_insert_before_ins (bb, ins, (dest)); \
} while (0)
void
mono_arch_lowering_pass (MonoCompile *cfg, MonoBasicBlock *bb)
{
MonoInst *ins, *temp, *last_ins = NULL;
MONO_BB_FOR_EACH_INS (bb, ins) {
switch (ins->opcode) {
case OP_SBB:
case OP_ISBB:
case OP_SUBCC:
case OP_ISUBCC:
if (ins->next && (ins->next->opcode == OP_COND_EXC_C || ins->next->opcode == OP_COND_EXC_IC))
				/* On ARM, subtraction sets the C flag to 1 if there was _no_ borrow */
ins->next->opcode = OP_COND_EXC_NC;
break;
case OP_IDIV_IMM:
case OP_IREM_IMM:
case OP_IDIV_UN_IMM:
case OP_IREM_UN_IMM:
case OP_LREM_IMM:
mono_decompose_op_imm (cfg, bb, ins);
break;
case OP_LOCALLOC_IMM:
if (ins->inst_imm > 32) {
ADD_NEW_INS (cfg, temp, OP_ICONST);
temp->inst_c0 = ins->inst_imm;
temp->dreg = mono_alloc_ireg (cfg);
ins->sreg1 = temp->dreg;
ins->opcode = mono_op_imm_to_op (ins->opcode);
}
break;
case OP_ICOMPARE_IMM:
if (ins->inst_imm == 0 && ins->next && ins->next->opcode == OP_IBEQ) {
ins->next->opcode = OP_ARM64_CBZW;
ins->next->sreg1 = ins->sreg1;
NULLIFY_INS (ins);
} else if (ins->inst_imm == 0 && ins->next && ins->next->opcode == OP_IBNE_UN) {
ins->next->opcode = OP_ARM64_CBNZW;
ins->next->sreg1 = ins->sreg1;
NULLIFY_INS (ins);
}
break;
case OP_LCOMPARE_IMM:
case OP_COMPARE_IMM:
if (ins->inst_imm == 0 && ins->next && ins->next->opcode == OP_LBEQ) {
ins->next->opcode = OP_ARM64_CBZX;
ins->next->sreg1 = ins->sreg1;
NULLIFY_INS (ins);
} else if (ins->inst_imm == 0 && ins->next && ins->next->opcode == OP_LBNE_UN) {
ins->next->opcode = OP_ARM64_CBNZX;
ins->next->sreg1 = ins->sreg1;
NULLIFY_INS (ins);
}
break;
case OP_FCOMPARE:
case OP_RCOMPARE: {
gboolean swap = FALSE;
int reg;
if (!ins->next) {
/* Optimized away */
NULLIFY_INS (ins);
break;
}
/*
* FP compares with unordered operands set the flags
* to NZCV=0011, which matches some non-unordered compares
			 * as well, like LE, so we have to swap the operands.
*/
switch (ins->next->opcode) {
case OP_FBLT:
ins->next->opcode = OP_FBGT;
swap = TRUE;
break;
case OP_FBLE:
ins->next->opcode = OP_FBGE;
swap = TRUE;
break;
case OP_RBLT:
ins->next->opcode = OP_RBGT;
swap = TRUE;
break;
case OP_RBLE:
ins->next->opcode = OP_RBGE;
swap = TRUE;
break;
default:
break;
}
if (swap) {
reg = ins->sreg1;
ins->sreg1 = ins->sreg2;
ins->sreg2 = reg;
}
break;
}
default:
break;
}
last_ins = ins;
}
bb->last_ins = last_ins;
bb->max_vreg = cfg->next_vreg;
}
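/*
 * Illustrative example of the compare lowering above: the pair
 *     icompare_imm R10 [0]; ibeq BB3
 * collapses into a single OP_ARM64_CBZW, i.e. one "cbz w10, <BB3>"
 * instruction instead of a cmp + b.eq sequence.
 */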
void
mono_arch_decompose_long_opts (MonoCompile *cfg, MonoInst *long_ins)
{
}
static int
opcode_to_armcond (int opcode)
{
switch (opcode) {
case OP_IBEQ:
case OP_LBEQ:
case OP_FBEQ:
case OP_CEQ:
case OP_ICEQ:
case OP_LCEQ:
case OP_FCEQ:
case OP_RCEQ:
case OP_COND_EXC_IEQ:
case OP_COND_EXC_EQ:
return ARMCOND_EQ;
case OP_IBGE:
case OP_LBGE:
case OP_FBGE:
case OP_ICGE:
case OP_FCGE:
case OP_RCGE:
return ARMCOND_GE;
case OP_IBGT:
case OP_LBGT:
case OP_FBGT:
case OP_CGT:
case OP_ICGT:
case OP_LCGT:
case OP_FCGT:
case OP_RCGT:
case OP_COND_EXC_IGT:
case OP_COND_EXC_GT:
return ARMCOND_GT;
case OP_IBLE:
case OP_LBLE:
case OP_FBLE:
case OP_ICLE:
case OP_FCLE:
case OP_RCLE:
return ARMCOND_LE;
case OP_IBLT:
case OP_LBLT:
case OP_FBLT:
case OP_CLT:
case OP_ICLT:
case OP_LCLT:
case OP_COND_EXC_ILT:
case OP_COND_EXC_LT:
return ARMCOND_LT;
case OP_IBNE_UN:
case OP_LBNE_UN:
case OP_FBNE_UN:
case OP_ICNEQ:
case OP_FCNEQ:
case OP_RCNEQ:
case OP_COND_EXC_INE_UN:
case OP_COND_EXC_NE_UN:
return ARMCOND_NE;
case OP_IBGE_UN:
case OP_LBGE_UN:
case OP_FBGE_UN:
case OP_ICGE_UN:
case OP_COND_EXC_IGE_UN:
case OP_COND_EXC_GE_UN:
return ARMCOND_HS;
case OP_IBGT_UN:
case OP_LBGT_UN:
case OP_FBGT_UN:
case OP_CGT_UN:
case OP_ICGT_UN:
case OP_LCGT_UN:
case OP_FCGT_UN:
case OP_RCGT_UN:
case OP_COND_EXC_IGT_UN:
case OP_COND_EXC_GT_UN:
return ARMCOND_HI;
case OP_IBLE_UN:
case OP_LBLE_UN:
case OP_FBLE_UN:
case OP_ICLE_UN:
case OP_COND_EXC_ILE_UN:
case OP_COND_EXC_LE_UN:
return ARMCOND_LS;
case OP_IBLT_UN:
case OP_LBLT_UN:
case OP_FBLT_UN:
case OP_CLT_UN:
case OP_ICLT_UN:
case OP_LCLT_UN:
case OP_COND_EXC_ILT_UN:
case OP_COND_EXC_LT_UN:
return ARMCOND_LO;
/*
* FCMP sets the NZCV condition bits as follows:
* eq = 0110
* < = 1000
* > = 0010
* unordered = 0011
* ARMCOND_LT is N!=V, so it matches unordered too, so
	 * fclt and fclt_un need to be special-cased.
*/
case OP_FCLT:
case OP_RCLT:
/* N==1 */
return ARMCOND_MI;
case OP_FCLT_UN:
case OP_RCLT_UN:
return ARMCOND_LT;
case OP_COND_EXC_C:
case OP_COND_EXC_IC:
return ARMCOND_CS;
case OP_COND_EXC_OV:
case OP_COND_EXC_IOV:
return ARMCOND_VS;
case OP_COND_EXC_NC:
case OP_COND_EXC_INC:
return ARMCOND_CC;
case OP_COND_EXC_NO:
case OP_COND_EXC_INO:
return ARMCOND_VC;
default:
printf ("%s\n", mono_inst_name (opcode));
g_assert_not_reached ();
return -1;
}
}
/* This clobbers LR */
static WARN_UNUSED_RESULT guint8*
emit_cond_exc (MonoCompile *cfg, guint8 *code, int opcode, const char *exc_name)
{
int cond;
cond = opcode_to_armcond (opcode);
/* Capture PC */
arm_adrx (code, ARMREG_IP1, code);
mono_add_patch_info_rel (cfg, code - cfg->native_code, MONO_PATCH_INFO_EXC, exc_name, MONO_R_ARM64_BCC);
arm_bcc (code, cond, 0);
return code;
}
static guint8*
emit_move_return_value (MonoCompile *cfg, guint8 * code, MonoInst *ins)
{
CallInfo *cinfo;
MonoCallInst *call;
call = (MonoCallInst*)ins;
cinfo = call->call_info;
g_assert (cinfo);
switch (cinfo->ret.storage) {
case ArgNone:
break;
case ArgInIReg:
/* LLVM compiled code might only set the bottom bits */
if (call->signature && mini_get_underlying_type (call->signature->ret)->type == MONO_TYPE_I4)
arm_sxtwx (code, call->inst.dreg, cinfo->ret.reg);
else if (call->inst.dreg != cinfo->ret.reg)
arm_movx (code, call->inst.dreg, cinfo->ret.reg);
break;
case ArgInFReg:
if (call->inst.dreg != cinfo->ret.reg)
arm_fmovd (code, call->inst.dreg, cinfo->ret.reg);
break;
case ArgInFRegR4:
if (cfg->r4fp)
arm_fmovs (code, call->inst.dreg, cinfo->ret.reg);
else
arm_fcvt_sd (code, call->inst.dreg, cinfo->ret.reg);
break;
case ArgVtypeInIRegs: {
MonoInst *loc = cfg->arch.vret_addr_loc;
int i;
/* Load the destination address */
g_assert (loc && loc->opcode == OP_REGOFFSET);
code = emit_ldrx (code, ARMREG_LR, loc->inst_basereg, loc->inst_offset);
for (i = 0; i < cinfo->ret.nregs; ++i)
arm_strx (code, cinfo->ret.reg + i, ARMREG_LR, i * 8);
break;
}
case ArgHFA: {
MonoInst *loc = cfg->arch.vret_addr_loc;
int i;
/* Load the destination address */
g_assert (loc && loc->opcode == OP_REGOFFSET);
code = emit_ldrx (code, ARMREG_LR, loc->inst_basereg, loc->inst_offset);
for (i = 0; i < cinfo->ret.nregs; ++i) {
if (cinfo->ret.esize == 4)
arm_strfpw (code, cinfo->ret.reg + i, ARMREG_LR, cinfo->ret.foffsets [i]);
else
arm_strfpx (code, cinfo->ret.reg + i, ARMREG_LR, cinfo->ret.foffsets [i]);
}
break;
}
case ArgVtypeByRef:
break;
default:
g_assert_not_reached ();
break;
}
return code;
}
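/*
 * For example, an HFA of two floats returned in s0/s1 is stored back
 * field by field into the caller-provided return area, whose address was
 * stashed in cfg->arch.vret_addr_loc by mono_arch_emit_call ().
 */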
/*
* emit_branch_island:
*
* Emit a branch island for the conditional branches from cfg->native_code + start_offset to code.
*/
static guint8*
emit_branch_island (MonoCompile *cfg, guint8 *code, int start_offset)
{
MonoJumpInfo *ji;
/* Iterate over the patch infos added so far by this bb */
int island_size = 0;
for (ji = cfg->patch_info; ji; ji = ji->next) {
if (ji->ip.i < start_offset)
/* The patch infos are in reverse order, so this means the end */
break;
if (ji->relocation == MONO_R_ARM64_BCC || ji->relocation == MONO_R_ARM64_CBZ)
island_size += 4;
}
if (island_size) {
code = realloc_code (cfg, island_size);
/* Branch over the island */
arm_b (code, code + 4 + island_size);
for (ji = cfg->patch_info; ji; ji = ji->next) {
if (ji->ip.i < start_offset)
break;
if (ji->relocation == MONO_R_ARM64_BCC || ji->relocation == MONO_R_ARM64_CBZ) {
/* Rewrite the cond branch so it branches to an unconditional branch in the branch island */
arm_patch_rel (cfg->native_code + ji->ip.i, code, ji->relocation);
/* Rewrite the patch so it points to the unconditional branch */
ji->ip.i = code - cfg->native_code;
ji->relocation = MONO_R_ARM64_B;
arm_b (code, code);
}
}
set_code_cursor (cfg, code);
}
return code;
}
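/*
 * Background: B.cond and CBZ/CBNZ encode a 19-bit signed word offset
 * (about +/-1 MiB), while an unconditional B reaches about +/-128 MiB, so in
 * very large methods the conditional branches are retargeted to nearby
 * unconditional branches in the island emitted above.
 */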
void
mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
{
MonoInst *ins;
MonoCallInst *call;
guint8 *code = cfg->native_code + cfg->code_len;
int start_offset, max_len, dreg, sreg1, sreg2;
target_mgreg_t imm;
if (cfg->verbose_level > 2)
g_print ("Basic block %d starting at offset 0x%x\n", bb->block_num, bb->native_offset);
start_offset = code - cfg->native_code;
g_assert (start_offset <= cfg->code_size);
MONO_BB_FOR_EACH_INS (bb, ins) {
guint offset = code - cfg->native_code;
set_code_cursor (cfg, code);
max_len = ins_get_size (ins->opcode);
code = realloc_code (cfg, max_len);
if (G_UNLIKELY (cfg->arch.cond_branch_islands && offset - start_offset > 4 * 0x1ffff)) {
/* Emit a branch island for large basic blocks */
code = emit_branch_island (cfg, code, start_offset);
offset = code - cfg->native_code;
start_offset = offset;
}
mono_debug_record_line_number (cfg, ins, offset);
dreg = ins->dreg;
sreg1 = ins->sreg1;
sreg2 = ins->sreg2;
imm = ins->inst_imm;
switch (ins->opcode) {
case OP_ICONST:
code = emit_imm (code, dreg, ins->inst_c0);
break;
case OP_I8CONST:
code = emit_imm64 (code, dreg, ins->inst_c0);
break;
case OP_MOVE:
if (dreg != sreg1)
arm_movx (code, dreg, sreg1);
break;
case OP_NOP:
case OP_RELAXED_NOP:
break;
case OP_JUMP_TABLE:
mono_add_patch_info_rel (cfg, offset, (MonoJumpInfoType)(gsize)ins->inst_i1, ins->inst_p0, MONO_R_ARM64_IMM);
code = emit_imm64_template (code, dreg);
break;
case OP_BREAK:
/*
* gdb does not like encountering the hw breakpoint ins in the debugged code.
			 * So instead of emitting a trap, we emit a call to a C function and place a
* breakpoint there.
*/
code = emit_call (cfg, code, MONO_PATCH_INFO_JIT_ICALL_ID, GUINT_TO_POINTER (MONO_JIT_ICALL_mono_break));
break;
case OP_LOCALLOC: {
guint8 *buf [16];
arm_addx_imm (code, ARMREG_IP0, sreg1, (MONO_ARCH_FRAME_ALIGNMENT - 1));
// FIXME: andx_imm doesn't work yet
code = emit_imm (code, ARMREG_IP1, -MONO_ARCH_FRAME_ALIGNMENT);
arm_andx (code, ARMREG_IP0, ARMREG_IP0, ARMREG_IP1);
//arm_andx_imm (code, ARMREG_IP0, sreg1, - MONO_ARCH_FRAME_ALIGNMENT);
arm_movspx (code, ARMREG_IP1, ARMREG_SP);
arm_subx (code, ARMREG_IP1, ARMREG_IP1, ARMREG_IP0);
arm_movspx (code, ARMREG_SP, ARMREG_IP1);
/* Init */
/* ip1 = pointer, ip0 = end */
arm_addx (code, ARMREG_IP0, ARMREG_IP1, ARMREG_IP0);
buf [0] = code;
arm_cmpx (code, ARMREG_IP1, ARMREG_IP0);
buf [1] = code;
arm_bcc (code, ARMCOND_EQ, 0);
arm_stpx (code, ARMREG_RZR, ARMREG_RZR, ARMREG_IP1, 0);
arm_addx_imm (code, ARMREG_IP1, ARMREG_IP1, 16);
arm_b (code, buf [0]);
arm_patch_rel (buf [1], code, MONO_R_ARM64_BCC);
arm_movspx (code, dreg, ARMREG_SP);
if (cfg->param_area)
code = emit_subx_sp_imm (code, cfg->param_area);
break;
}
case OP_LOCALLOC_IMM: {
int imm, offset;
imm = ALIGN_TO (ins->inst_imm, MONO_ARCH_FRAME_ALIGNMENT);
g_assert (arm_is_arith_imm (imm));
arm_subx_imm (code, ARMREG_SP, ARMREG_SP, imm);
/* Init */
g_assert (MONO_ARCH_FRAME_ALIGNMENT == 16);
offset = 0;
while (offset < imm) {
arm_stpx (code, ARMREG_RZR, ARMREG_RZR, ARMREG_SP, offset);
offset += 16;
}
arm_movspx (code, dreg, ARMREG_SP);
if (cfg->param_area)
code = emit_subx_sp_imm (code, cfg->param_area);
break;
}
case OP_AOTCONST:
code = emit_aotconst (cfg, code, dreg, (MonoJumpInfoType)(gsize)ins->inst_i1, ins->inst_p0);
break;
case OP_OBJC_GET_SELECTOR:
mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_OBJC_SELECTOR_REF, ins->inst_p0);
/* See arch_emit_objc_selector_ref () in aot-compiler.c */
arm_ldrx_lit (code, ins->dreg, 0);
arm_nop (code);
arm_nop (code);
break;
case OP_SEQ_POINT: {
MonoInst *info_var = cfg->arch.seq_point_info_var;
/*
* For AOT, we use one got slot per method, which will point to a
* SeqPointInfo structure, containing all the information required
* by the code below.
*/
if (cfg->compile_aot) {
g_assert (info_var);
g_assert (info_var->opcode == OP_REGOFFSET);
}
if (ins->flags & MONO_INST_SINGLE_STEP_LOC) {
MonoInst *var = cfg->arch.ss_tramp_var;
g_assert (var);
g_assert (var->opcode == OP_REGOFFSET);
/* Load ss_tramp_var */
/* This is equal to &ss_trampoline */
arm_ldrx (code, ARMREG_IP1, var->inst_basereg, var->inst_offset);
/* Load the trampoline address */
arm_ldrx (code, ARMREG_IP1, ARMREG_IP1, 0);
/* Call it if it is non-null */
arm_cbzx (code, ARMREG_IP1, code + 8);
code = mono_arm_emit_blrx (code, ARMREG_IP1);
}
mono_add_seq_point (cfg, bb, ins, code - cfg->native_code);
if (cfg->compile_aot) {
const guint32 offset = code - cfg->native_code;
guint32 val;
arm_ldrx (code, ARMREG_IP1, info_var->inst_basereg, info_var->inst_offset);
/* Add the offset */
val = ((offset / 4) * sizeof (target_mgreg_t)) + MONO_STRUCT_OFFSET (SeqPointInfo, bp_addrs);
/* Load the info->bp_addrs [offset], which is either 0 or the address of the bp trampoline */
code = emit_ldrx (code, ARMREG_IP1, ARMREG_IP1, val);
			/* Skip the load if it's 0 */
arm_cbzx (code, ARMREG_IP1, code + 8);
/* Call the breakpoint trampoline */
code = mono_arm_emit_blrx (code, ARMREG_IP1);
} else {
MonoInst *var = cfg->arch.bp_tramp_var;
g_assert (var);
g_assert (var->opcode == OP_REGOFFSET);
/* Load the address of the bp trampoline into IP0 */
arm_ldrx (code, ARMREG_IP0, var->inst_basereg, var->inst_offset);
/*
* A placeholder for a possible breakpoint inserted by
* mono_arch_set_breakpoint ().
*/
arm_nop (code);
}
break;
}
/* BRANCH */
case OP_BR:
mono_add_patch_info_rel (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_target_bb, MONO_R_ARM64_B);
arm_b (code, code);
break;
case OP_BR_REG:
arm_brx (code, sreg1);
break;
case OP_IBEQ:
case OP_IBGE:
case OP_IBGT:
case OP_IBLE:
case OP_IBLT:
case OP_IBNE_UN:
case OP_IBGE_UN:
case OP_IBGT_UN:
case OP_IBLE_UN:
case OP_IBLT_UN:
case OP_LBEQ:
case OP_LBGE:
case OP_LBGT:
case OP_LBLE:
case OP_LBLT:
case OP_LBNE_UN:
case OP_LBGE_UN:
case OP_LBGT_UN:
case OP_LBLE_UN:
case OP_LBLT_UN:
case OP_FBEQ:
case OP_FBNE_UN:
case OP_FBLT:
case OP_FBGT:
case OP_FBGT_UN:
case OP_FBLE:
case OP_FBGE:
case OP_FBGE_UN: {
int cond;
mono_add_patch_info_rel (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_true_bb, MONO_R_ARM64_BCC);
cond = opcode_to_armcond (ins->opcode);
arm_bcc (code, cond, 0);
break;
}
case OP_FBLT_UN:
mono_add_patch_info_rel (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_true_bb, MONO_R_ARM64_BCC);
/* For fp compares, ARMCOND_LT is lt or unordered */
arm_bcc (code, ARMCOND_LT, 0);
break;
case OP_FBLE_UN:
mono_add_patch_info_rel (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_true_bb, MONO_R_ARM64_BCC);
arm_bcc (code, ARMCOND_EQ, 0);
mono_add_patch_info_rel (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_true_bb, MONO_R_ARM64_BCC);
/* For fp compares, ARMCOND_LT is lt or unordered */
arm_bcc (code, ARMCOND_LT, 0);
break;
case OP_ARM64_CBZW:
mono_add_patch_info_rel (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_true_bb, MONO_R_ARM64_CBZ);
arm_cbzw (code, sreg1, 0);
break;
case OP_ARM64_CBZX:
mono_add_patch_info_rel (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_true_bb, MONO_R_ARM64_CBZ);
arm_cbzx (code, sreg1, 0);
break;
case OP_ARM64_CBNZW:
mono_add_patch_info_rel (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_true_bb, MONO_R_ARM64_CBZ);
arm_cbnzw (code, sreg1, 0);
break;
case OP_ARM64_CBNZX:
mono_add_patch_info_rel (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_true_bb, MONO_R_ARM64_CBZ);
arm_cbnzx (code, sreg1, 0);
break;
/* ALU */
case OP_IADD:
arm_addw (code, dreg, sreg1, sreg2);
break;
case OP_LADD:
arm_addx (code, dreg, sreg1, sreg2);
break;
case OP_ISUB:
arm_subw (code, dreg, sreg1, sreg2);
break;
case OP_LSUB:
arm_subx (code, dreg, sreg1, sreg2);
break;
case OP_IAND:
arm_andw (code, dreg, sreg1, sreg2);
break;
case OP_LAND:
arm_andx (code, dreg, sreg1, sreg2);
break;
case OP_IOR:
arm_orrw (code, dreg, sreg1, sreg2);
break;
case OP_LOR:
arm_orrx (code, dreg, sreg1, sreg2);
break;
case OP_IXOR:
arm_eorw (code, dreg, sreg1, sreg2);
break;
case OP_LXOR:
arm_eorx (code, dreg, sreg1, sreg2);
break;
case OP_INEG:
arm_negw (code, dreg, sreg1);
break;
case OP_LNEG:
arm_negx (code, dreg, sreg1);
break;
case OP_INOT:
arm_mvnw (code, dreg, sreg1);
break;
case OP_LNOT:
arm_mvnx (code, dreg, sreg1);
break;
case OP_IADDCC:
arm_addsw (code, dreg, sreg1, sreg2);
break;
case OP_ADDCC:
case OP_LADDCC:
arm_addsx (code, dreg, sreg1, sreg2);
break;
case OP_ISUBCC:
arm_subsw (code, dreg, sreg1, sreg2);
break;
case OP_LSUBCC:
case OP_SUBCC:
arm_subsx (code, dreg, sreg1, sreg2);
break;
case OP_ICOMPARE:
arm_cmpw (code, sreg1, sreg2);
break;
case OP_COMPARE:
case OP_LCOMPARE:
arm_cmpx (code, sreg1, sreg2);
break;
case OP_IADD_IMM:
code = emit_addw_imm (code, dreg, sreg1, imm);
break;
case OP_LADD_IMM:
case OP_ADD_IMM:
code = emit_addx_imm (code, dreg, sreg1, imm);
break;
case OP_ISUB_IMM:
code = emit_subw_imm (code, dreg, sreg1, imm);
break;
case OP_LSUB_IMM:
code = emit_subx_imm (code, dreg, sreg1, imm);
break;
case OP_IAND_IMM:
code = emit_andw_imm (code, dreg, sreg1, imm);
break;
case OP_LAND_IMM:
case OP_AND_IMM:
code = emit_andx_imm (code, dreg, sreg1, imm);
break;
case OP_IOR_IMM:
code = emit_orrw_imm (code, dreg, sreg1, imm);
break;
case OP_LOR_IMM:
code = emit_orrx_imm (code, dreg, sreg1, imm);
break;
case OP_IXOR_IMM:
code = emit_eorw_imm (code, dreg, sreg1, imm);
break;
case OP_LXOR_IMM:
code = emit_eorx_imm (code, dreg, sreg1, imm);
break;
case OP_ICOMPARE_IMM:
code = emit_cmpw_imm (code, sreg1, imm);
break;
case OP_LCOMPARE_IMM:
case OP_COMPARE_IMM:
if (imm == 0) {
arm_cmpx (code, sreg1, ARMREG_RZR);
} else {
// FIXME: 32 vs 64 bit issues for 0xffffffff
code = emit_imm64 (code, ARMREG_LR, imm);
arm_cmpx (code, sreg1, ARMREG_LR);
}
break;
case OP_ISHL:
arm_lslvw (code, dreg, sreg1, sreg2);
break;
case OP_LSHL:
arm_lslvx (code, dreg, sreg1, sreg2);
break;
case OP_ISHR:
arm_asrvw (code, dreg, sreg1, sreg2);
break;
case OP_LSHR:
arm_asrvx (code, dreg, sreg1, sreg2);
break;
case OP_ISHR_UN:
arm_lsrvw (code, dreg, sreg1, sreg2);
break;
case OP_LSHR_UN:
arm_lsrvx (code, dreg, sreg1, sreg2);
break;
case OP_ISHL_IMM:
if (imm == 0)
arm_movx (code, dreg, sreg1);
else
arm_lslw (code, dreg, sreg1, imm);
break;
case OP_SHL_IMM:
case OP_LSHL_IMM:
if (imm == 0)
arm_movx (code, dreg, sreg1);
else
arm_lslx (code, dreg, sreg1, imm);
break;
case OP_ISHR_IMM:
if (imm == 0)
arm_movx (code, dreg, sreg1);
else
arm_asrw (code, dreg, sreg1, imm);
break;
case OP_LSHR_IMM:
case OP_SHR_IMM:
if (imm == 0)
arm_movx (code, dreg, sreg1);
else
arm_asrx (code, dreg, sreg1, imm);
break;
case OP_ISHR_UN_IMM:
if (imm == 0)
arm_movx (code, dreg, sreg1);
else
arm_lsrw (code, dreg, sreg1, imm);
break;
case OP_SHR_UN_IMM:
case OP_LSHR_UN_IMM:
if (imm == 0)
arm_movx (code, dreg, sreg1);
else
arm_lsrx (code, dreg, sreg1, imm);
break;
/* 64BIT ALU */
case OP_SEXT_I4:
arm_sxtwx (code, dreg, sreg1);
break;
case OP_ZEXT_I4:
/* Clean out the upper word */
arm_movw (code, dreg, sreg1);
break;
/* MULTIPLY/DIVISION */
case OP_IDIV:
case OP_IREM:
// FIXME: Optimize this
/* Check for zero */
arm_cmpx_imm (code, sreg2, 0);
code = emit_cond_exc (cfg, code, OP_COND_EXC_IEQ, "DivideByZeroException");
/* Check for INT_MIN/-1 */
code = emit_imm (code, ARMREG_IP0, 0x80000000);
arm_cmpx (code, sreg1, ARMREG_IP0);
arm_cset (code, ARMCOND_EQ, ARMREG_IP1);
code = emit_imm (code, ARMREG_IP0, 0xffffffff);
arm_cmpx (code, sreg2, ARMREG_IP0);
arm_cset (code, ARMCOND_EQ, ARMREG_IP0);
arm_andx (code, ARMREG_IP0, ARMREG_IP0, ARMREG_IP1);
arm_cmpx_imm (code, ARMREG_IP0, 1);
code = emit_cond_exc (cfg, code, OP_COND_EXC_IEQ, "OverflowException");
if (ins->opcode == OP_IREM) {
arm_sdivw (code, ARMREG_LR, sreg1, sreg2);
arm_msubw (code, dreg, ARMREG_LR, sreg2, sreg1);
} else {
arm_sdivw (code, dreg, sreg1, sreg2);
}
break;
case OP_IDIV_UN:
arm_cmpx_imm (code, sreg2, 0);
code = emit_cond_exc (cfg, code, OP_COND_EXC_IEQ, "DivideByZeroException");
arm_udivw (code, dreg, sreg1, sreg2);
break;
case OP_IREM_UN:
arm_cmpx_imm (code, sreg2, 0);
code = emit_cond_exc (cfg, code, OP_COND_EXC_IEQ, "DivideByZeroException");
arm_udivw (code, ARMREG_LR, sreg1, sreg2);
arm_msubw (code, dreg, ARMREG_LR, sreg2, sreg1);
break;
case OP_LDIV:
case OP_LREM:
// FIXME: Optimize this
/* Check for zero */
arm_cmpx_imm (code, sreg2, 0);
code = emit_cond_exc (cfg, code, OP_COND_EXC_IEQ, "DivideByZeroException");
/* Check for INT64_MIN/-1 */
code = emit_imm64 (code, ARMREG_IP0, 0x8000000000000000);
arm_cmpx (code, sreg1, ARMREG_IP0);
arm_cset (code, ARMCOND_EQ, ARMREG_IP1);
code = emit_imm64 (code, ARMREG_IP0, 0xffffffffffffffff);
arm_cmpx (code, sreg2, ARMREG_IP0);
arm_cset (code, ARMCOND_EQ, ARMREG_IP0);
arm_andx (code, ARMREG_IP0, ARMREG_IP0, ARMREG_IP1);
arm_cmpx_imm (code, ARMREG_IP0, 1);
/* 64 bit uses OverflowException */
code = emit_cond_exc (cfg, code, OP_COND_EXC_IEQ, "OverflowException");
if (ins->opcode == OP_LREM) {
arm_sdivx (code, ARMREG_LR, sreg1, sreg2);
arm_msubx (code, dreg, ARMREG_LR, sreg2, sreg1);
} else {
arm_sdivx (code, dreg, sreg1, sreg2);
}
break;
case OP_LDIV_UN:
arm_cmpx_imm (code, sreg2, 0);
code = emit_cond_exc (cfg, code, OP_COND_EXC_IEQ, "DivideByZeroException");
arm_udivx (code, dreg, sreg1, sreg2);
break;
case OP_LREM_UN:
arm_cmpx_imm (code, sreg2, 0);
code = emit_cond_exc (cfg, code, OP_COND_EXC_IEQ, "DivideByZeroException");
arm_udivx (code, ARMREG_LR, sreg1, sreg2);
arm_msubx (code, dreg, ARMREG_LR, sreg2, sreg1);
break;
case OP_IMUL:
arm_mulw (code, dreg, sreg1, sreg2);
break;
case OP_LMUL:
arm_mulx (code, dreg, sreg1, sreg2);
break;
case OP_IMUL_IMM:
code = emit_imm (code, ARMREG_LR, imm);
arm_mulw (code, dreg, sreg1, ARMREG_LR);
break;
case OP_MUL_IMM:
case OP_LMUL_IMM:
code = emit_imm (code, ARMREG_LR, imm);
arm_mulx (code, dreg, sreg1, ARMREG_LR);
break;
/* CONVERSIONS */
case OP_ICONV_TO_I1:
case OP_LCONV_TO_I1:
arm_sxtbx (code, dreg, sreg1);
break;
case OP_ICONV_TO_I2:
case OP_LCONV_TO_I2:
arm_sxthx (code, dreg, sreg1);
break;
case OP_ICONV_TO_U1:
case OP_LCONV_TO_U1:
arm_uxtbw (code, dreg, sreg1);
break;
case OP_ICONV_TO_U2:
case OP_LCONV_TO_U2:
arm_uxthw (code, dreg, sreg1);
break;
/* CSET */
case OP_CEQ:
case OP_ICEQ:
case OP_LCEQ:
case OP_CLT:
case OP_ICLT:
case OP_LCLT:
case OP_CGT:
case OP_ICGT:
case OP_LCGT:
case OP_CLT_UN:
case OP_ICLT_UN:
case OP_LCLT_UN:
case OP_CGT_UN:
case OP_ICGT_UN:
case OP_LCGT_UN:
case OP_ICNEQ:
case OP_ICGE:
case OP_ICLE:
case OP_ICGE_UN:
case OP_ICLE_UN: {
int cond;
cond = opcode_to_armcond (ins->opcode);
arm_cset (code, cond, dreg);
break;
}
case OP_FCEQ:
case OP_FCLT:
case OP_FCLT_UN:
case OP_FCGT:
case OP_FCGT_UN:
case OP_FCNEQ:
case OP_FCLE:
case OP_FCGE: {
int cond;
cond = opcode_to_armcond (ins->opcode);
arm_fcmpd (code, sreg1, sreg2);
arm_cset (code, cond, dreg);
break;
}
/* MEMORY */
case OP_LOADI1_MEMBASE:
code = emit_ldrsbx (code, dreg, ins->inst_basereg, ins->inst_offset);
break;
case OP_LOADU1_MEMBASE:
code = emit_ldrb (code, dreg, ins->inst_basereg, ins->inst_offset);
break;
case OP_LOADI2_MEMBASE:
code = emit_ldrshx (code, dreg, ins->inst_basereg, ins->inst_offset);
break;
case OP_LOADU2_MEMBASE:
code = emit_ldrh (code, dreg, ins->inst_basereg, ins->inst_offset);
break;
case OP_LOADI4_MEMBASE:
code = emit_ldrswx (code, dreg, ins->inst_basereg, ins->inst_offset);
break;
case OP_LOADU4_MEMBASE:
code = emit_ldrw (code, dreg, ins->inst_basereg, ins->inst_offset);
break;
case OP_LOAD_MEMBASE:
case OP_LOADI8_MEMBASE:
code = emit_ldrx (code, dreg, ins->inst_basereg, ins->inst_offset);
break;
case OP_STOREI1_MEMBASE_IMM:
case OP_STOREI2_MEMBASE_IMM:
case OP_STOREI4_MEMBASE_IMM:
case OP_STORE_MEMBASE_IMM:
case OP_STOREI8_MEMBASE_IMM: {
int immreg;
if (imm != 0) {
code = emit_imm (code, ARMREG_LR, imm);
immreg = ARMREG_LR;
} else {
immreg = ARMREG_RZR;
}
switch (ins->opcode) {
case OP_STOREI1_MEMBASE_IMM:
code = emit_strb (code, immreg, ins->inst_destbasereg, ins->inst_offset);
break;
case OP_STOREI2_MEMBASE_IMM:
code = emit_strh (code, immreg, ins->inst_destbasereg, ins->inst_offset);
break;
case OP_STOREI4_MEMBASE_IMM:
code = emit_strw (code, immreg, ins->inst_destbasereg, ins->inst_offset);
break;
case OP_STORE_MEMBASE_IMM:
case OP_STOREI8_MEMBASE_IMM:
code = emit_strx (code, immreg, ins->inst_destbasereg, ins->inst_offset);
break;
default:
g_assert_not_reached ();
break;
}
break;
}
case OP_STOREI1_MEMBASE_REG:
code = emit_strb (code, sreg1, ins->inst_destbasereg, ins->inst_offset);
break;
case OP_STOREI2_MEMBASE_REG:
code = emit_strh (code, sreg1, ins->inst_destbasereg, ins->inst_offset);
break;
case OP_STOREI4_MEMBASE_REG:
code = emit_strw (code, sreg1, ins->inst_destbasereg, ins->inst_offset);
break;
case OP_STORE_MEMBASE_REG:
case OP_STOREI8_MEMBASE_REG:
code = emit_strx (code, sreg1, ins->inst_destbasereg, ins->inst_offset);
break;
case OP_TLS_GET:
code = emit_tls_get (code, dreg, ins->inst_offset);
break;
case OP_TLS_SET:
code = emit_tls_set (code, sreg1, ins->inst_offset);
break;
/* Atomic */
case OP_MEMORY_BARRIER:
arm_dmb (code, ARM_DMB_ISH);
break;
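		/*
		 * The atomic RMW ops below are implemented as load-exclusive/
		 * store-release-exclusive retry loops followed by a full barrier.
		 */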
case OP_ATOMIC_ADD_I4: {
guint8 *buf [16];
buf [0] = code;
arm_ldxrw (code, ARMREG_IP0, sreg1);
arm_addx (code, ARMREG_IP0, ARMREG_IP0, sreg2);
arm_stlxrw (code, ARMREG_IP1, ARMREG_IP0, sreg1);
arm_cbnzw (code, ARMREG_IP1, buf [0]);
arm_dmb (code, ARM_DMB_ISH);
arm_movx (code, dreg, ARMREG_IP0);
break;
}
case OP_ATOMIC_ADD_I8: {
guint8 *buf [16];
buf [0] = code;
arm_ldxrx (code, ARMREG_IP0, sreg1);
arm_addx (code, ARMREG_IP0, ARMREG_IP0, sreg2);
arm_stlxrx (code, ARMREG_IP1, ARMREG_IP0, sreg1);
arm_cbnzx (code, ARMREG_IP1, buf [0]);
arm_dmb (code, ARM_DMB_ISH);
arm_movx (code, dreg, ARMREG_IP0);
break;
}
case OP_ATOMIC_EXCHANGE_I4: {
guint8 *buf [16];
buf [0] = code;
arm_ldxrw (code, ARMREG_IP0, sreg1);
arm_stlxrw (code, ARMREG_IP1, sreg2, sreg1);
arm_cbnzw (code, ARMREG_IP1, buf [0]);
arm_dmb (code, ARM_DMB_ISH);
arm_movx (code, dreg, ARMREG_IP0);
break;
}
case OP_ATOMIC_EXCHANGE_I8: {
guint8 *buf [16];
buf [0] = code;
arm_ldxrx (code, ARMREG_IP0, sreg1);
arm_stlxrx (code, ARMREG_IP1, sreg2, sreg1);
arm_cbnzw (code, ARMREG_IP1, buf [0]);
arm_dmb (code, ARM_DMB_ISH);
arm_movx (code, dreg, ARMREG_IP0);
break;
}
case OP_ATOMIC_CAS_I4: {
guint8 *buf [16];
/* sreg2 is the value, sreg3 is the comparand */
buf [0] = code;
arm_ldxrw (code, ARMREG_IP0, sreg1);
arm_cmpw (code, ARMREG_IP0, ins->sreg3);
buf [1] = code;
arm_bcc (code, ARMCOND_NE, 0);
arm_stlxrw (code, ARMREG_IP1, sreg2, sreg1);
arm_cbnzw (code, ARMREG_IP1, buf [0]);
arm_patch_rel (buf [1], code, MONO_R_ARM64_BCC);
arm_dmb (code, ARM_DMB_ISH);
arm_movx (code, dreg, ARMREG_IP0);
break;
}
case OP_ATOMIC_CAS_I8: {
guint8 *buf [16];
buf [0] = code;
arm_ldxrx (code, ARMREG_IP0, sreg1);
arm_cmpx (code, ARMREG_IP0, ins->sreg3);
buf [1] = code;
arm_bcc (code, ARMCOND_NE, 0);
arm_stlxrx (code, ARMREG_IP1, sreg2, sreg1);
arm_cbnzw (code, ARMREG_IP1, buf [0]);
arm_patch_rel (buf [1], code, MONO_R_ARM64_BCC);
arm_dmb (code, ARM_DMB_ISH);
arm_movx (code, dreg, ARMREG_IP0);
break;
}
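		/*
		 * Atomic loads use the acquire forms (ldar*), with a leading full
		 * barrier when sequentially consistent semantics are requested.
		 */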
case OP_ATOMIC_LOAD_I1: {
code = emit_addx_imm (code, ARMREG_LR, ins->inst_basereg, ins->inst_offset);
if (ins->backend.memory_barrier_kind == MONO_MEMORY_BARRIER_SEQ)
arm_dmb (code, ARM_DMB_ISH);
arm_ldarb (code, ins->dreg, ARMREG_LR);
arm_sxtbx (code, ins->dreg, ins->dreg);
break;
}
case OP_ATOMIC_LOAD_U1: {
code = emit_addx_imm (code, ARMREG_LR, ins->inst_basereg, ins->inst_offset);
if (ins->backend.memory_barrier_kind == MONO_MEMORY_BARRIER_SEQ)
arm_dmb (code, ARM_DMB_ISH);
arm_ldarb (code, ins->dreg, ARMREG_LR);
arm_uxtbx (code, ins->dreg, ins->dreg);
break;
}
case OP_ATOMIC_LOAD_I2: {
code = emit_addx_imm (code, ARMREG_LR, ins->inst_basereg, ins->inst_offset);
if (ins->backend.memory_barrier_kind == MONO_MEMORY_BARRIER_SEQ)
arm_dmb (code, ARM_DMB_ISH);
arm_ldarh (code, ins->dreg, ARMREG_LR);
arm_sxthx (code, ins->dreg, ins->dreg);
break;
}
case OP_ATOMIC_LOAD_U2: {
code = emit_addx_imm (code, ARMREG_LR, ins->inst_basereg, ins->inst_offset);
if (ins->backend.memory_barrier_kind == MONO_MEMORY_BARRIER_SEQ)
arm_dmb (code, ARM_DMB_ISH);
arm_ldarh (code, ins->dreg, ARMREG_LR);
arm_uxthx (code, ins->dreg, ins->dreg);
break;
}
case OP_ATOMIC_LOAD_I4: {
code = emit_addx_imm (code, ARMREG_LR, ins->inst_basereg, ins->inst_offset);
if (ins->backend.memory_barrier_kind == MONO_MEMORY_BARRIER_SEQ)
arm_dmb (code, ARM_DMB_ISH);
arm_ldarw (code, ins->dreg, ARMREG_LR);
arm_sxtwx (code, ins->dreg, ins->dreg);
break;
}
case OP_ATOMIC_LOAD_U4: {
code = emit_addx_imm (code, ARMREG_LR, ins->inst_basereg, ins->inst_offset);
if (ins->backend.memory_barrier_kind == MONO_MEMORY_BARRIER_SEQ)
arm_dmb (code, ARM_DMB_ISH);
arm_ldarw (code, ins->dreg, ARMREG_LR);
arm_movw (code, ins->dreg, ins->dreg); /* Clear upper half of the register. */
break;
}
case OP_ATOMIC_LOAD_I8:
case OP_ATOMIC_LOAD_U8: {
code = emit_addx_imm (code, ARMREG_LR, ins->inst_basereg, ins->inst_offset);
if (ins->backend.memory_barrier_kind == MONO_MEMORY_BARRIER_SEQ)
arm_dmb (code, ARM_DMB_ISH);
arm_ldarx (code, ins->dreg, ARMREG_LR);
break;
}
case OP_ATOMIC_LOAD_R4: {
code = emit_addx_imm (code, ARMREG_LR, ins->inst_basereg, ins->inst_offset);
if (ins->backend.memory_barrier_kind == MONO_MEMORY_BARRIER_SEQ)
arm_dmb (code, ARM_DMB_ISH);
if (cfg->r4fp) {
arm_ldarw (code, ARMREG_LR, ARMREG_LR);
arm_fmov_rx_to_double (code, ins->dreg, ARMREG_LR);
} else {
arm_ldarw (code, ARMREG_LR, ARMREG_LR);
arm_fmov_rx_to_double (code, FP_TEMP_REG, ARMREG_LR);
arm_fcvt_sd (code, ins->dreg, FP_TEMP_REG);
}
break;
}
case OP_ATOMIC_LOAD_R8: {
code = emit_addx_imm (code, ARMREG_LR, ins->inst_basereg, ins->inst_offset);
if (ins->backend.memory_barrier_kind == MONO_MEMORY_BARRIER_SEQ)
arm_dmb (code, ARM_DMB_ISH);
arm_ldarx (code, ARMREG_LR, ARMREG_LR);
arm_fmov_rx_to_double (code, ins->dreg, ARMREG_LR);
break;
}
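		/*
		 * Atomic stores use the release forms (stlr*), with a trailing full
		 * barrier when sequentially consistent semantics are requested.
		 */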
case OP_ATOMIC_STORE_I1:
case OP_ATOMIC_STORE_U1: {
code = emit_addx_imm (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
arm_stlrb (code, ARMREG_LR, ins->sreg1);
if (ins->backend.memory_barrier_kind == MONO_MEMORY_BARRIER_SEQ)
arm_dmb (code, ARM_DMB_ISH);
break;
}
case OP_ATOMIC_STORE_I2:
case OP_ATOMIC_STORE_U2: {
code = emit_addx_imm (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
arm_stlrh (code, ARMREG_LR, ins->sreg1);
if (ins->backend.memory_barrier_kind == MONO_MEMORY_BARRIER_SEQ)
arm_dmb (code, ARM_DMB_ISH);
break;
}
case OP_ATOMIC_STORE_I4:
case OP_ATOMIC_STORE_U4: {
code = emit_addx_imm (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
arm_stlrw (code, ARMREG_LR, ins->sreg1);
if (ins->backend.memory_barrier_kind == MONO_MEMORY_BARRIER_SEQ)
arm_dmb (code, ARM_DMB_ISH);
break;
}
case OP_ATOMIC_STORE_I8:
case OP_ATOMIC_STORE_U8: {
code = emit_addx_imm (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
arm_stlrx (code, ARMREG_LR, ins->sreg1);
if (ins->backend.memory_barrier_kind == MONO_MEMORY_BARRIER_SEQ)
arm_dmb (code, ARM_DMB_ISH);
break;
}
case OP_ATOMIC_STORE_R4: {
code = emit_addx_imm (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
if (cfg->r4fp) {
arm_fmov_double_to_rx (code, ARMREG_IP0, ins->sreg1);
arm_stlrw (code, ARMREG_LR, ARMREG_IP0);
} else {
arm_fcvt_ds (code, FP_TEMP_REG, ins->sreg1);
arm_fmov_double_to_rx (code, ARMREG_IP0, FP_TEMP_REG);
arm_stlrw (code, ARMREG_LR, ARMREG_IP0);
}
if (ins->backend.memory_barrier_kind == MONO_MEMORY_BARRIER_SEQ)
arm_dmb (code, ARM_DMB_ISH);
break;
}
case OP_ATOMIC_STORE_R8: {
code = emit_addx_imm (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
arm_fmov_double_to_rx (code, ARMREG_IP0, ins->sreg1);
arm_stlrx (code, ARMREG_LR, ARMREG_IP0);
if (ins->backend.memory_barrier_kind == MONO_MEMORY_BARRIER_SEQ)
arm_dmb (code, ARM_DMB_ISH);
break;
}
/* FP */
case OP_R8CONST: {
guint64 imm = *(guint64*)ins->inst_p0;
if (imm == 0) {
arm_fmov_rx_to_double (code, dreg, ARMREG_RZR);
} else {
code = emit_imm64 (code, ARMREG_LR, imm);
arm_fmov_rx_to_double (code, ins->dreg, ARMREG_LR);
}
break;
}
case OP_R4CONST: {
guint64 imm = *(guint32*)ins->inst_p0;
code = emit_imm64 (code, ARMREG_LR, imm);
if (cfg->r4fp) {
arm_fmov_rx_to_double (code, dreg, ARMREG_LR);
} else {
arm_fmov_rx_to_double (code, FP_TEMP_REG, ARMREG_LR);
arm_fcvt_sd (code, dreg, FP_TEMP_REG);
}
break;
}
case OP_LOADR8_MEMBASE:
code = emit_ldrfpx (code, dreg, ins->inst_basereg, ins->inst_offset);
break;
case OP_LOADR4_MEMBASE:
if (cfg->r4fp) {
code = emit_ldrfpw (code, dreg, ins->inst_basereg, ins->inst_offset);
} else {
code = emit_ldrfpw (code, FP_TEMP_REG, ins->inst_basereg, ins->inst_offset);
arm_fcvt_sd (code, dreg, FP_TEMP_REG);
}
break;
case OP_STORER8_MEMBASE_REG:
code = emit_strfpx (code, sreg1, ins->inst_destbasereg, ins->inst_offset);
break;
case OP_STORER4_MEMBASE_REG:
if (cfg->r4fp) {
code = emit_strfpw (code, sreg1, ins->inst_destbasereg, ins->inst_offset);
} else {
arm_fcvt_ds (code, FP_TEMP_REG, sreg1);
code = emit_strfpw (code, FP_TEMP_REG, ins->inst_destbasereg, ins->inst_offset);
}
break;
case OP_FMOVE:
if (dreg != sreg1)
arm_fmovd (code, dreg, sreg1);
break;
case OP_RMOVE:
if (dreg != sreg1)
arm_fmovs (code, dreg, sreg1);
break;
case OP_MOVE_F_TO_I4:
if (cfg->r4fp) {
arm_fmov_double_to_rx (code, ins->dreg, ins->sreg1);
} else {
arm_fcvt_ds (code, ins->dreg, ins->sreg1);
arm_fmov_double_to_rx (code, ins->dreg, ins->dreg);
}
break;
case OP_MOVE_I4_TO_F:
if (cfg->r4fp) {
arm_fmov_rx_to_double (code, ins->dreg, ins->sreg1);
} else {
arm_fmov_rx_to_double (code, ins->dreg, ins->sreg1);
arm_fcvt_sd (code, ins->dreg, ins->dreg);
}
break;
case OP_MOVE_F_TO_I8:
arm_fmov_double_to_rx (code, ins->dreg, ins->sreg1);
break;
case OP_MOVE_I8_TO_F:
arm_fmov_rx_to_double (code, ins->dreg, ins->sreg1);
break;
case OP_FCOMPARE:
arm_fcmpd (code, sreg1, sreg2);
break;
case OP_RCOMPARE:
arm_fcmps (code, sreg1, sreg2);
break;
case OP_FCONV_TO_I1:
arm_fcvtzs_dx (code, dreg, sreg1);
arm_sxtbx (code, dreg, dreg);
break;
case OP_FCONV_TO_U1:
arm_fcvtzu_dx (code, dreg, sreg1);
arm_uxtbw (code, dreg, dreg);
break;
case OP_FCONV_TO_I2:
arm_fcvtzs_dx (code, dreg, sreg1);
arm_sxthx (code, dreg, dreg);
break;
case OP_FCONV_TO_U2:
arm_fcvtzu_dx (code, dreg, sreg1);
arm_uxthw (code, dreg, dreg);
break;
case OP_FCONV_TO_I4:
arm_fcvtzs_dx (code, dreg, sreg1);
arm_sxtwx (code, dreg, dreg);
break;
case OP_FCONV_TO_U4:
arm_fcvtzu_dx (code, dreg, sreg1);
break;
case OP_FCONV_TO_I8:
case OP_FCONV_TO_I:
arm_fcvtzs_dx (code, dreg, sreg1);
break;
case OP_FCONV_TO_U8:
arm_fcvtzu_dx (code, dreg, sreg1);
break;
case OP_FCONV_TO_R4:
if (cfg->r4fp) {
arm_fcvt_ds (code, dreg, sreg1);
} else {
arm_fcvt_ds (code, FP_TEMP_REG, sreg1);
arm_fcvt_sd (code, dreg, FP_TEMP_REG);
}
break;
case OP_ICONV_TO_R4:
if (cfg->r4fp) {
arm_scvtf_rw_to_s (code, dreg, sreg1);
} else {
arm_scvtf_rw_to_s (code, FP_TEMP_REG, sreg1);
arm_fcvt_sd (code, dreg, FP_TEMP_REG);
}
break;
case OP_LCONV_TO_R4:
if (cfg->r4fp) {
arm_scvtf_rx_to_s (code, dreg, sreg1);
} else {
arm_scvtf_rx_to_s (code, FP_TEMP_REG, sreg1);
arm_fcvt_sd (code, dreg, FP_TEMP_REG);
}
break;
case OP_ICONV_TO_R8:
arm_scvtf_rw_to_d (code, dreg, sreg1);
break;
case OP_LCONV_TO_R8:
arm_scvtf_rx_to_d (code, dreg, sreg1);
break;
case OP_ICONV_TO_R_UN:
arm_ucvtf_rw_to_d (code, dreg, sreg1);
break;
case OP_LCONV_TO_R_UN:
arm_ucvtf_rx_to_d (code, dreg, sreg1);
break;
case OP_FADD:
arm_fadd_d (code, dreg, sreg1, sreg2);
break;
case OP_FSUB:
arm_fsub_d (code, dreg, sreg1, sreg2);
break;
case OP_FMUL:
arm_fmul_d (code, dreg, sreg1, sreg2);
break;
case OP_FDIV:
arm_fdiv_d (code, dreg, sreg1, sreg2);
break;
case OP_FREM:
/* Emulated */
g_assert_not_reached ();
break;
case OP_FNEG:
arm_fneg_d (code, dreg, sreg1);
break;
case OP_ARM_SETFREG_R4:
arm_fcvt_ds (code, dreg, sreg1);
break;
case OP_CKFINITE:
/* Check for infinity */
code = emit_imm64 (code, ARMREG_LR, 0x7fefffffffffffffLL);
arm_fmov_rx_to_double (code, FP_TEMP_REG, ARMREG_LR);
arm_fabs_d (code, FP_TEMP_REG2, sreg1);
arm_fcmpd (code, FP_TEMP_REG2, FP_TEMP_REG);
code = emit_cond_exc (cfg, code, OP_COND_EXC_GT, "ArithmeticException");
		/* Check for NaNs */
arm_fcmpd (code, FP_TEMP_REG2, FP_TEMP_REG2);
code = emit_cond_exc (cfg, code, OP_COND_EXC_OV, "ArithmeticException");
arm_fmovd (code, dreg, sreg1);
break;
/* R4 */
case OP_RADD:
arm_fadd_s (code, dreg, sreg1, sreg2);
break;
case OP_RSUB:
arm_fsub_s (code, dreg, sreg1, sreg2);
break;
case OP_RMUL:
arm_fmul_s (code, dreg, sreg1, sreg2);
break;
case OP_RDIV:
arm_fdiv_s (code, dreg, sreg1, sreg2);
break;
case OP_RNEG:
arm_fneg_s (code, dreg, sreg1);
break;
case OP_RCONV_TO_I1:
arm_fcvtzs_sx (code, dreg, sreg1);
arm_sxtbx (code, dreg, dreg);
break;
case OP_RCONV_TO_U1:
arm_fcvtzu_sx (code, dreg, sreg1);
arm_uxtbw (code, dreg, dreg);
break;
case OP_RCONV_TO_I2:
arm_fcvtzs_sx (code, dreg, sreg1);
arm_sxthx (code, dreg, dreg);
break;
case OP_RCONV_TO_U2:
arm_fcvtzu_sx (code, dreg, sreg1);
arm_uxthw (code, dreg, dreg);
break;
case OP_RCONV_TO_I4:
arm_fcvtzs_sx (code, dreg, sreg1);
arm_sxtwx (code, dreg, dreg);
break;
case OP_RCONV_TO_U4:
arm_fcvtzu_sx (code, dreg, sreg1);
break;
case OP_RCONV_TO_I8:
case OP_RCONV_TO_I:
arm_fcvtzs_sx (code, dreg, sreg1);
break;
case OP_RCONV_TO_U8:
arm_fcvtzu_sx (code, dreg, sreg1);
break;
case OP_RCONV_TO_R8:
arm_fcvt_sd (code, dreg, sreg1);
break;
case OP_RCONV_TO_R4:
if (dreg != sreg1)
arm_fmovs (code, dreg, sreg1);
break;
case OP_RCEQ:
case OP_RCLT:
case OP_RCLT_UN:
case OP_RCGT:
case OP_RCGT_UN:
case OP_RCNEQ:
case OP_RCLE:
case OP_RCGE: {
int cond;
cond = opcode_to_armcond (ins->opcode);
arm_fcmps (code, sreg1, sreg2);
arm_cset (code, cond, dreg);
break;
}
/* CALLS */
case OP_VOIDCALL:
case OP_CALL:
case OP_LCALL:
case OP_FCALL:
case OP_RCALL:
case OP_VCALL2: {
call = (MonoCallInst*)ins;
const MonoJumpInfoTarget patch = mono_call_to_patch (call);
code = emit_call (cfg, code, patch.type, patch.target);
code = emit_move_return_value (cfg, code, ins);
break;
}
case OP_VOIDCALL_REG:
case OP_CALL_REG:
case OP_LCALL_REG:
case OP_FCALL_REG:
case OP_RCALL_REG:
case OP_VCALL2_REG:
code = mono_arm_emit_blrx (code, sreg1);
code = emit_move_return_value (cfg, code, ins);
break;
case OP_VOIDCALL_MEMBASE:
case OP_CALL_MEMBASE:
case OP_LCALL_MEMBASE:
case OP_FCALL_MEMBASE:
case OP_RCALL_MEMBASE:
case OP_VCALL2_MEMBASE:
code = emit_ldrx (code, ARMREG_IP0, ins->inst_basereg, ins->inst_offset);
code = mono_arm_emit_blrx (code, ARMREG_IP0);
code = emit_move_return_value (cfg, code, ins);
break;
case OP_TAILCALL_PARAMETER:
		// This opcode only helps compute the size of the subsequent
		// OP_TAILCALL; it contributes no code itself.
g_assert (ins->next);
break;
case OP_TAILCALL:
case OP_TAILCALL_MEMBASE:
case OP_TAILCALL_REG: {
int branch_reg = ARMREG_IP0;
guint64 free_reg = 1 << ARMREG_IP1;
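		/* free_reg is the scratch regset mono_arm_emit_destroy_frame () is allowed to clobber */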
call = (MonoCallInst*)ins;
g_assert (!cfg->method->save_lmf);
max_len += call->stack_usage / sizeof (target_mgreg_t) * ins_get_size (OP_TAILCALL_PARAMETER);
while (G_UNLIKELY (offset + max_len > cfg->code_size)) {
cfg->code_size *= 2;
cfg->native_code = (unsigned char *)mono_realloc_native_code (cfg);
code = cfg->native_code + offset;
cfg->stat_code_reallocs++;
}
switch (ins->opcode) {
case OP_TAILCALL:
free_reg = (1 << ARMREG_IP0) | (1 << ARMREG_IP1);
break;
case OP_TAILCALL_REG:
g_assert (sreg1 != -1);
g_assert (sreg1 != ARMREG_IP0);
g_assert (sreg1 != ARMREG_IP1);
g_assert (sreg1 != ARMREG_LR);
g_assert (sreg1 != ARMREG_SP);
g_assert (sreg1 != ARMREG_R28);
			if ((1 << sreg1) & MONO_ARCH_CALLEE_SAVED_REGS) {
arm_movx (code, branch_reg, sreg1);
} else {
free_reg = (1 << ARMREG_IP0) | (1 << ARMREG_IP1);
branch_reg = sreg1;
}
break;
case OP_TAILCALL_MEMBASE:
g_assert (ins->inst_basereg != -1);
g_assert (ins->inst_basereg != ARMREG_IP0);
g_assert (ins->inst_basereg != ARMREG_IP1);
g_assert (ins->inst_basereg != ARMREG_LR);
g_assert (ins->inst_basereg != ARMREG_SP);
g_assert (ins->inst_basereg != ARMREG_R28);
code = emit_ldrx (code, branch_reg, ins->inst_basereg, ins->inst_offset);
break;
default:
g_assert_not_reached ();
}
// Copy stack arguments.
// FIXME a fixed size memcpy is desirable here,
// at least for larger values of stack_usage.
for (int i = 0; i < call->stack_usage; i += sizeof (target_mgreg_t)) {
code = emit_ldrx (code, ARMREG_LR, ARMREG_SP, i);
code = emit_strx (code, ARMREG_LR, ARMREG_R28, i);
}
/* Restore registers */
code = emit_load_regset (code, MONO_ARCH_CALLEE_SAVED_REGS & cfg->used_int_regs, ARMREG_FP, cfg->arch.saved_gregs_offset);
/* Destroy frame */
code = mono_arm_emit_destroy_frame (code, cfg->stack_offset, free_reg);
if (enable_ptrauth)
			/* There is no retab on this path, so authenticate lr explicitly */
arm_autibsp (code);
switch (ins->opcode) {
case OP_TAILCALL:
if (cfg->compile_aot) {
/* This is not a PLT patch */
code = emit_aotconst (cfg, code, branch_reg, MONO_PATCH_INFO_METHOD_JUMP, call->method);
} else {
mono_add_patch_info_rel (cfg, code - cfg->native_code, MONO_PATCH_INFO_METHOD_JUMP, call->method, MONO_R_ARM64_B);
arm_b (code, code);
cfg->thunk_area += THUNK_SIZE;
break;
}
// fallthrough
case OP_TAILCALL_MEMBASE:
case OP_TAILCALL_REG:
code = mono_arm_emit_brx (code, branch_reg);
break;
default:
g_assert_not_reached ();
}
ins->flags |= MONO_INST_GC_CALLSITE;
ins->backend.pc_offset = code - cfg->native_code;
break;
}
case OP_ARGLIST:
g_assert (cfg->arch.cinfo);
code = emit_addx_imm (code, ARMREG_IP0, cfg->arch.args_reg, cfg->arch.cinfo->sig_cookie.offset);
arm_strx (code, ARMREG_IP0, sreg1, 0);
break;
case OP_DYN_CALL: {
MonoInst *var = cfg->dyn_call_var;
guint8 *labels [16];
int i;
/*
* sreg1 points to a DynCallArgs structure initialized by mono_arch_start_dyn_call ().
* sreg2 is the function to call.
*/
g_assert (var->opcode == OP_REGOFFSET);
arm_movx (code, ARMREG_LR, sreg1);
arm_movx (code, ARMREG_IP1, sreg2);
/* Save args buffer */
code = emit_strx (code, ARMREG_LR, var->inst_basereg, var->inst_offset);
/* Set fp argument regs */
code = emit_ldrw (code, ARMREG_R0, ARMREG_LR, MONO_STRUCT_OFFSET (DynCallArgs, n_fpargs));
arm_cmpw (code, ARMREG_R0, ARMREG_RZR);
labels [0] = code;
arm_bcc (code, ARMCOND_EQ, 0);
for (i = 0; i < 8; ++i)
code = emit_ldrfpx (code, ARMREG_D0 + i, ARMREG_LR, MONO_STRUCT_OFFSET (DynCallArgs, fpregs) + (i * 8));
arm_patch_rel (labels [0], code, MONO_R_ARM64_BCC);
/* Allocate callee area */
code = emit_ldrx (code, ARMREG_R0, ARMREG_LR, MONO_STRUCT_OFFSET (DynCallArgs, n_stackargs));
arm_lslw (code, ARMREG_R0, ARMREG_R0, 3);
arm_movspx (code, ARMREG_R1, ARMREG_SP);
arm_subx (code, ARMREG_R1, ARMREG_R1, ARMREG_R0);
arm_movspx (code, ARMREG_SP, ARMREG_R1);
/* Set stack args */
/* R1 = limit */
code = emit_ldrx (code, ARMREG_R1, ARMREG_LR, MONO_STRUCT_OFFSET (DynCallArgs, n_stackargs));
/* R2 = pointer into 'regs' */
code = emit_imm (code, ARMREG_R2, MONO_STRUCT_OFFSET (DynCallArgs, regs) + ((PARAM_REGS + 1) * sizeof (target_mgreg_t)));
arm_addx (code, ARMREG_R2, ARMREG_LR, ARMREG_R2);
/* R3 = pointer to stack */
arm_movspx (code, ARMREG_R3, ARMREG_SP);
labels [0] = code;
arm_b (code, code);
labels [1] = code;
code = emit_ldrx (code, ARMREG_R5, ARMREG_R2, 0);
code = emit_strx (code, ARMREG_R5, ARMREG_R3, 0);
code = emit_addx_imm (code, ARMREG_R2, ARMREG_R2, sizeof (target_mgreg_t));
code = emit_addx_imm (code, ARMREG_R3, ARMREG_R3, sizeof (target_mgreg_t));
code = emit_subx_imm (code, ARMREG_R1, ARMREG_R1, 1);
arm_patch_rel (labels [0], code, MONO_R_ARM64_B);
arm_cmpw (code, ARMREG_R1, ARMREG_RZR);
arm_bcc (code, ARMCOND_GT, labels [1]);
/* Set argument registers + r8 */
code = mono_arm_emit_load_regarray (code, 0x1ff, ARMREG_LR, MONO_STRUCT_OFFSET (DynCallArgs, regs));
/* Make the call */
code = mono_arm_emit_blrx (code, ARMREG_IP1);
/* Save result */
code = emit_ldrx (code, ARMREG_LR, var->inst_basereg, var->inst_offset);
arm_strx (code, ARMREG_R0, ARMREG_LR, MONO_STRUCT_OFFSET (DynCallArgs, res));
arm_strx (code, ARMREG_R1, ARMREG_LR, MONO_STRUCT_OFFSET (DynCallArgs, res2));
/* Save fp result */
code = emit_ldrw (code, ARMREG_R0, ARMREG_LR, MONO_STRUCT_OFFSET (DynCallArgs, n_fpret));
arm_cmpw (code, ARMREG_R0, ARMREG_RZR);
labels [1] = code;
arm_bcc (code, ARMCOND_EQ, 0);
for (i = 0; i < 8; ++i)
code = emit_strfpx (code, ARMREG_D0 + i, ARMREG_LR, MONO_STRUCT_OFFSET (DynCallArgs, fpregs) + (i * 8));
arm_patch_rel (labels [1], code, MONO_R_ARM64_BCC);
break;
}
case OP_GENERIC_CLASS_INIT: {
int byte_offset;
guint8 *jump;
byte_offset = MONO_STRUCT_OFFSET (MonoVTable, initialized);
/* Load vtable->initialized */
arm_ldrsbx (code, ARMREG_IP0, sreg1, byte_offset);
jump = code;
arm_cbnzx (code, ARMREG_IP0, 0);
/* Slowpath */
g_assert (sreg1 == ARMREG_R0);
code = emit_call (cfg, code, MONO_PATCH_INFO_JIT_ICALL_ID,
GUINT_TO_POINTER (MONO_JIT_ICALL_mono_generic_class_init));
mono_arm_patch (jump, code, MONO_R_ARM64_CBZ);
break;
}
case OP_CHECK_THIS:
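		/* A load from the object faults here if 'this' is null */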
arm_ldrb (code, ARMREG_LR, sreg1, 0);
break;
case OP_NOT_NULL:
case OP_NOT_REACHED:
case OP_DUMMY_USE:
case OP_DUMMY_ICONST:
case OP_DUMMY_I8CONST:
case OP_DUMMY_R8CONST:
case OP_DUMMY_R4CONST:
break;
case OP_IL_SEQ_POINT:
mono_add_seq_point (cfg, bb, ins, code - cfg->native_code);
break;
/* EH */
case OP_COND_EXC_C:
case OP_COND_EXC_IC:
case OP_COND_EXC_OV:
case OP_COND_EXC_IOV:
case OP_COND_EXC_NC:
case OP_COND_EXC_INC:
case OP_COND_EXC_NO:
case OP_COND_EXC_INO:
case OP_COND_EXC_EQ:
case OP_COND_EXC_IEQ:
case OP_COND_EXC_NE_UN:
case OP_COND_EXC_INE_UN:
case OP_COND_EXC_ILT:
case OP_COND_EXC_LT:
case OP_COND_EXC_ILT_UN:
case OP_COND_EXC_LT_UN:
case OP_COND_EXC_IGT:
case OP_COND_EXC_GT:
case OP_COND_EXC_IGT_UN:
case OP_COND_EXC_GT_UN:
case OP_COND_EXC_IGE:
case OP_COND_EXC_GE:
case OP_COND_EXC_IGE_UN:
case OP_COND_EXC_GE_UN:
case OP_COND_EXC_ILE:
case OP_COND_EXC_LE:
case OP_COND_EXC_ILE_UN:
case OP_COND_EXC_LE_UN:
code = emit_cond_exc (cfg, code, ins->opcode, (const char*)ins->inst_p1);
break;
case OP_THROW:
if (sreg1 != ARMREG_R0)
arm_movx (code, ARMREG_R0, sreg1);
code = emit_call (cfg, code, MONO_PATCH_INFO_JIT_ICALL_ID,
GUINT_TO_POINTER (MONO_JIT_ICALL_mono_arch_throw_exception));
break;
case OP_RETHROW:
if (sreg1 != ARMREG_R0)
arm_movx (code, ARMREG_R0, sreg1);
code = emit_call (cfg, code, MONO_PATCH_INFO_JIT_ICALL_ID,
GUINT_TO_POINTER (MONO_JIT_ICALL_mono_arch_rethrow_exception));
break;
case OP_CALL_HANDLER:
mono_add_patch_info_rel (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_target_bb, MONO_R_ARM64_BL);
arm_bl (code, 0);
cfg->thunk_area += THUNK_SIZE;
for (GList *tmp = ins->inst_eh_blocks; tmp != bb->clause_holes; tmp = tmp->prev)
mono_cfg_add_try_hole (cfg, ((MonoLeaveClause *) tmp->data)->clause, code, bb);
break;
case OP_START_HANDLER: {
MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
/* Save caller address */
code = emit_strx (code, ARMREG_LR, spvar->inst_basereg, spvar->inst_offset);
/*
* Reserve a param area, see test_0_finally_param_area ().
* This is needed because the param area is not set up when
* we are called from EH code.
*/
if (cfg->param_area)
code = emit_subx_sp_imm (code, cfg->param_area);
break;
}
case OP_ENDFINALLY:
case OP_ENDFILTER: {
MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
if (cfg->param_area)
code = emit_addx_sp_imm (code, cfg->param_area);
if (ins->opcode == OP_ENDFILTER && sreg1 != ARMREG_R0)
arm_movx (code, ARMREG_R0, sreg1);
/* Return to either after the branch in OP_CALL_HANDLER, or to the EH code */
code = emit_ldrx (code, ARMREG_LR, spvar->inst_basereg, spvar->inst_offset);
arm_brx (code, ARMREG_LR);
break;
}
case OP_GET_EX_OBJ:
if (ins->dreg != ARMREG_R0)
arm_movx (code, ins->dreg, ARMREG_R0);
break;
case OP_LIVERANGE_START: {
if (cfg->verbose_level > 1)
printf ("R%d START=0x%x\n", MONO_VARINFO (cfg, ins->inst_c0)->vreg, (int)(code - cfg->native_code));
MONO_VARINFO (cfg, ins->inst_c0)->live_range_start = code - cfg->native_code;
break;
}
case OP_LIVERANGE_END: {
if (cfg->verbose_level > 1)
printf ("R%d END=0x%x\n", MONO_VARINFO (cfg, ins->inst_c0)->vreg, (int)(code - cfg->native_code));
MONO_VARINFO (cfg, ins->inst_c0)->live_range_end = code - cfg->native_code;
break;
}
case OP_GC_SAFE_POINT: {
guint8 *buf [1];
arm_ldrx (code, ARMREG_IP1, ins->sreg1, 0);
/* Call it if it is non-null */
buf [0] = code;
arm_cbzx (code, ARMREG_IP1, 0);
code = emit_call (cfg, code, MONO_PATCH_INFO_JIT_ICALL_ID, GUINT_TO_POINTER (MONO_JIT_ICALL_mono_threads_state_poll));
mono_arm_patch (buf [0], code, MONO_R_ARM64_CBZ);
break;
}
case OP_FILL_PROF_CALL_CTX:
for (int i = 0; i < MONO_MAX_IREGS; i++)
if ((MONO_ARCH_CALLEE_SAVED_REGS & (1 << i)) || i == ARMREG_SP || i == ARMREG_FP)
arm_strx (code, i, ins->sreg1, MONO_STRUCT_OFFSET (MonoContext, regs) + i * sizeof (target_mgreg_t));
break;
default:
g_warning ("unknown opcode %s in %s()\n", mono_inst_name (ins->opcode), __FUNCTION__);
g_assert_not_reached ();
}
if ((cfg->opt & MONO_OPT_BRANCH) && ((code - cfg->native_code - offset) > max_len)) {
g_warning ("wrong maximal instruction length of instruction %s (expected %d, got %d)",
mono_inst_name (ins->opcode), max_len, code - cfg->native_code - offset);
g_assert_not_reached ();
}
}
set_code_cursor (cfg, code);
/*
* If the compiled code size is larger than the bcc displacement (19 bits signed),
* insert branch islands between/inside basic blocks.
*/
if (cfg->arch.cond_branch_islands)
code = emit_branch_island (cfg, code, start_offset);
}
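/*
 * emit_move_args:
 *
 *   Emit code to move the incoming arguments from the registers/stack slots
 * mandated by the calling convention to the locations allocated for them by
 * the register allocator.
 */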
static guint8*
emit_move_args (MonoCompile *cfg, guint8 *code)
{
MonoInst *ins;
CallInfo *cinfo;
ArgInfo *ainfo;
int i, part;
MonoMethodSignature *sig = mono_method_signature_internal (cfg->method);
cinfo = cfg->arch.cinfo;
g_assert (cinfo);
for (i = 0; i < cinfo->nargs; ++i) {
ainfo = cinfo->args + i;
ins = cfg->args [i];
if (ins->opcode == OP_REGVAR) {
switch (ainfo->storage) {
case ArgInIReg:
arm_movx (code, ins->dreg, ainfo->reg);
if (i == 0 && sig->hasthis) {
mono_add_var_location (cfg, ins, TRUE, ainfo->reg, 0, 0, code - cfg->native_code);
mono_add_var_location (cfg, ins, TRUE, ins->dreg, 0, code - cfg->native_code, 0);
}
break;
case ArgOnStack:
switch (ainfo->slot_size) {
case 1:
if (ainfo->sign)
code = emit_ldrsbx (code, ins->dreg, cfg->arch.args_reg, ainfo->offset);
else
code = emit_ldrb (code, ins->dreg, cfg->arch.args_reg, ainfo->offset);
break;
case 2:
if (ainfo->sign)
code = emit_ldrshx (code, ins->dreg, cfg->arch.args_reg, ainfo->offset);
else
code = emit_ldrh (code, ins->dreg, cfg->arch.args_reg, ainfo->offset);
break;
case 4:
if (ainfo->sign)
code = emit_ldrswx (code, ins->dreg, cfg->arch.args_reg, ainfo->offset);
else
code = emit_ldrw (code, ins->dreg, cfg->arch.args_reg, ainfo->offset);
break;
default:
code = emit_ldrx (code, ins->dreg, cfg->arch.args_reg, ainfo->offset);
break;
}
break;
default:
g_assert_not_reached ();
break;
}
} else {
if (ainfo->storage != ArgVtypeByRef && ainfo->storage != ArgVtypeByRefOnStack)
g_assert (ins->opcode == OP_REGOFFSET);
switch (ainfo->storage) {
case ArgInIReg:
/* Stack slots for arguments have size 8 */
code = emit_strx (code, ainfo->reg, ins->inst_basereg, ins->inst_offset);
if (i == 0 && sig->hasthis) {
mono_add_var_location (cfg, ins, TRUE, ainfo->reg, 0, 0, code - cfg->native_code);
mono_add_var_location (cfg, ins, FALSE, ins->inst_basereg, ins->inst_offset, code - cfg->native_code, 0);
}
break;
case ArgInFReg:
code = emit_strfpx (code, ainfo->reg, ins->inst_basereg, ins->inst_offset);
break;
case ArgInFRegR4:
code = emit_strfpw (code, ainfo->reg, ins->inst_basereg, ins->inst_offset);
break;
case ArgOnStack:
case ArgOnStackR4:
case ArgOnStackR8:
case ArgVtypeByRefOnStack:
case ArgVtypeOnStack:
break;
case ArgVtypeByRef: {
MonoInst *addr_arg = ins->inst_left;
if (ainfo->gsharedvt) {
g_assert (ins->opcode == OP_GSHAREDVT_ARG_REGOFFSET);
arm_strx (code, ainfo->reg, ins->inst_basereg, ins->inst_offset);
} else {
g_assert (ins->opcode == OP_VTARG_ADDR);
g_assert (addr_arg->opcode == OP_REGOFFSET);
arm_strx (code, ainfo->reg, addr_arg->inst_basereg, addr_arg->inst_offset);
}
break;
}
case ArgVtypeInIRegs:
for (part = 0; part < ainfo->nregs; part ++) {
code = emit_strx (code, ainfo->reg + part, ins->inst_basereg, ins->inst_offset + (part * 8));
}
break;
case ArgHFA:
for (part = 0; part < ainfo->nregs; part ++) {
if (ainfo->esize == 4)
code = emit_strfpw (code, ainfo->reg + part, ins->inst_basereg, ins->inst_offset + ainfo->foffsets [part]);
else
code = emit_strfpx (code, ainfo->reg + part, ins->inst_basereg, ins->inst_offset + ainfo->foffsets [part]);
}
break;
default:
g_assert_not_reached ();
break;
}
}
}
return code;
}
/*
* emit_store_regarray:
*
* Emit code to store the registers in REGS into the appropriate elements of
* the register array at BASEREG+OFFSET.
*/
static WARN_UNUSED_RESULT guint8*
emit_store_regarray (guint8 *code, guint64 regs, int basereg, int offset)
{
int i;
for (i = 0; i < 32; ++i) {
if (regs & (1 << i)) {
if (i + 1 < 32 && (regs & (1 << (i + 1))) && (i + 1 != ARMREG_SP)) {
arm_stpx (code, i, i + 1, basereg, offset + (i * 8));
i++;
} else if (i == ARMREG_SP) {
arm_movspx (code, ARMREG_IP1, ARMREG_SP);
arm_strx (code, ARMREG_IP1, basereg, offset + (i * 8));
} else {
arm_strx (code, i, basereg, offset + (i * 8));
}
}
}
return code;
}
/*
* emit_load_regarray:
*
* Emit code to load the registers in REGS from the appropriate elements of
* the register array at BASEREG+OFFSET.
*/
static WARN_UNUSED_RESULT guint8*
emit_load_regarray (guint8 *code, guint64 regs, int basereg, int offset)
{
int i;
for (i = 0; i < 32; ++i) {
if (regs & (1 << i)) {
			if (i + 1 < 32 && (regs & (1 << (i + 1))) && (i + 1 != ARMREG_SP)) {
if (offset + (i * 8) < 500)
arm_ldpx (code, i, i + 1, basereg, offset + (i * 8));
else {
code = emit_ldrx (code, i, basereg, offset + (i * 8));
code = emit_ldrx (code, i + 1, basereg, offset + ((i + 1) * 8));
}
i++;
} else if (i == ARMREG_SP) {
g_assert_not_reached ();
} else {
code = emit_ldrx (code, i, basereg, offset + (i * 8));
}
}
}
return code;
}
/*
* emit_store_regset:
*
* Emit code to store the registers in REGS into consecutive memory locations starting
* at BASEREG+OFFSET.
*/
static WARN_UNUSED_RESULT guint8*
emit_store_regset (guint8 *code, guint64 regs, int basereg, int offset)
{
int i, pos;
pos = 0;
for (i = 0; i < 32; ++i) {
if (regs & (1 << i)) {
			if (i + 1 < 32 && (regs & (1 << (i + 1))) && (i + 1 != ARMREG_SP)) {
arm_stpx (code, i, i + 1, basereg, offset + (pos * 8));
i++;
pos++;
} else if (i == ARMREG_SP) {
arm_movspx (code, ARMREG_IP1, ARMREG_SP);
arm_strx (code, ARMREG_IP1, basereg, offset + (pos * 8));
} else {
arm_strx (code, i, basereg, offset + (pos * 8));
}
pos++;
}
}
return code;
}
/*
* emit_load_regset:
*
* Emit code to load the registers in REGS from consecutive memory locations starting
* at BASEREG+OFFSET.
*/
static WARN_UNUSED_RESULT guint8*
emit_load_regset (guint8 *code, guint64 regs, int basereg, int offset)
{
int i, pos;
pos = 0;
for (i = 0; i < 32; ++i) {
if (regs & (1 << i)) {
			if (i + 1 < 32 && (regs & (1 << (i + 1))) && (i + 1 != ARMREG_SP)) {
arm_ldpx (code, i, i + 1, basereg, offset + (pos * 8));
i++;
pos++;
} else if (i == ARMREG_SP) {
g_assert_not_reached ();
} else {
arm_ldrx (code, i, basereg, offset + (pos * 8));
}
pos++;
}
}
return code;
}
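/* Non-static wrappers around the emitters above, for use by code outside this file (e.g. trampolines) */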
WARN_UNUSED_RESULT guint8*
mono_arm_emit_load_regarray (guint8 *code, guint64 regs, int basereg, int offset)
{
return emit_load_regarray (code, regs, basereg, offset);
}
WARN_UNUSED_RESULT guint8*
mono_arm_emit_store_regarray (guint8 *code, guint64 regs, int basereg, int offset)
{
return emit_store_regarray (code, regs, basereg, offset);
}
WARN_UNUSED_RESULT guint8*
mono_arm_emit_store_regset (guint8 *code, guint64 regs, int basereg, int offset)
{
return emit_store_regset (code, regs, basereg, offset);
}
/* Same as emit_store_regset, but emit unwind info too */
/* CFA_OFFSET is the offset between the CFA and basereg */
static WARN_UNUSED_RESULT guint8*
emit_store_regset_cfa (MonoCompile *cfg, guint8 *code, guint64 regs, int basereg, int offset, int cfa_offset, guint64 no_cfa_regset)
{
int i, j, pos, nregs;
guint32 cfa_regset = regs & ~no_cfa_regset;
pos = 0;
for (i = 0; i < 32; ++i) {
nregs = 1;
if (regs & (1 << i)) {
			if (i + 1 < 32 && (regs & (1 << (i + 1))) && (i + 1 != ARMREG_SP)) {
if (offset < 256) {
arm_stpx (code, i, i + 1, basereg, offset + (pos * 8));
} else {
code = emit_strx (code, i, basereg, offset + (pos * 8));
code = emit_strx (code, i + 1, basereg, offset + (pos * 8) + 8);
}
nregs = 2;
} else if (i == ARMREG_SP) {
arm_movspx (code, ARMREG_IP1, ARMREG_SP);
code = emit_strx (code, ARMREG_IP1, basereg, offset + (pos * 8));
} else {
code = emit_strx (code, i, basereg, offset + (pos * 8));
}
for (j = 0; j < nregs; ++j) {
if (cfa_regset & (1 << (i + j)))
mono_emit_unwind_op_offset (cfg, code, i + j, (- cfa_offset) + offset + ((pos + j) * 8));
}
i += nregs - 1;
pos += nregs;
}
}
return code;
}
/*
* emit_setup_lmf:
*
* Emit code to initialize an LMF structure at LMF_OFFSET.
* Clobbers ip0/ip1.
*/
static guint8*
emit_setup_lmf (MonoCompile *cfg, guint8 *code, gint32 lmf_offset, int cfa_offset)
{
	/*
	 * The LMF should contain all the state required to reconstruct the machine state
	 * at the current point of execution. Since the LMF is only read during EH, only the
	 * callee-saved registers etc. need to be saved.
	 * FIXME: Save the callee-saved fp regs: JITted code doesn't use them, but native code
	 * does, and they need to be restored during EH.
	 */
/* pc */
arm_adrx (code, ARMREG_LR, code);
code = emit_strx (code, ARMREG_LR, ARMREG_FP, lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, pc));
/* gregs + fp + sp */
/* Don't emit unwind info for sp/fp, they are already handled in the prolog */
code = emit_store_regset_cfa (cfg, code, MONO_ARCH_LMF_REGS, ARMREG_FP, lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, gregs), cfa_offset, (1 << ARMREG_FP) | (1 << ARMREG_SP));
return code;
}
guint8 *
mono_arch_emit_prolog (MonoCompile *cfg)
{
MonoMethod *method = cfg->method;
MonoMethodSignature *sig;
MonoBasicBlock *bb;
guint8 *code;
int cfa_offset, max_offset;
sig = mono_method_signature_internal (method);
cfg->code_size = 256 + sig->param_count * 64;
code = cfg->native_code = g_malloc (cfg->code_size);
/* This can be unaligned */
cfg->stack_offset = ALIGN_TO (cfg->stack_offset, MONO_ARCH_FRAME_ALIGNMENT);
/*
* - Setup frame
*/
cfa_offset = 0;
mono_emit_unwind_op_def_cfa (cfg, code, ARMREG_SP, 0);
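	/* Sign lr with the B key so it can later be authenticated by retab/autibsp */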
if (enable_ptrauth)
arm_pacibsp (code);
/* Setup frame */
if (arm_is_ldpx_imm (-cfg->stack_offset)) {
arm_stpx_pre (code, ARMREG_FP, ARMREG_LR, ARMREG_SP, -cfg->stack_offset);
} else {
/* sp -= cfg->stack_offset */
/* This clobbers ip0/ip1 */
code = emit_subx_sp_imm (code, cfg->stack_offset);
arm_stpx (code, ARMREG_FP, ARMREG_LR, ARMREG_SP, 0);
}
cfa_offset += cfg->stack_offset;
mono_emit_unwind_op_def_cfa_offset (cfg, code, cfa_offset);
mono_emit_unwind_op_offset (cfg, code, ARMREG_FP, (- cfa_offset) + 0);
mono_emit_unwind_op_offset (cfg, code, ARMREG_LR, (- cfa_offset) + 8);
arm_movspx (code, ARMREG_FP, ARMREG_SP);
mono_emit_unwind_op_def_cfa_reg (cfg, code, ARMREG_FP);
if (cfg->param_area) {
/* The param area is below the frame pointer */
code = emit_subx_sp_imm (code, cfg->param_area);
}
if (cfg->method->save_lmf) {
code = emit_setup_lmf (cfg, code, cfg->lmf_var->inst_offset, cfa_offset);
} else {
/* Save gregs */
code = emit_store_regset_cfa (cfg, code, MONO_ARCH_CALLEE_SAVED_REGS & cfg->used_int_regs, ARMREG_FP, cfg->arch.saved_gregs_offset, cfa_offset, 0);
}
/* Setup args reg */
if (cfg->arch.args_reg) {
/* The register was already saved above */
code = emit_addx_imm (code, cfg->arch.args_reg, ARMREG_FP, cfg->stack_offset);
}
/* Save return area addr received in R8 */
if (cfg->vret_addr) {
MonoInst *ins = cfg->vret_addr;
g_assert (ins->opcode == OP_REGOFFSET);
code = emit_strx (code, ARMREG_R8, ins->inst_basereg, ins->inst_offset);
}
/* Save mrgctx received in MONO_ARCH_RGCTX_REG */
if (cfg->rgctx_var) {
MonoInst *ins = cfg->rgctx_var;
g_assert (ins->opcode == OP_REGOFFSET);
code = emit_strx (code, MONO_ARCH_RGCTX_REG, ins->inst_basereg, ins->inst_offset);
mono_add_var_location (cfg, cfg->rgctx_var, TRUE, MONO_ARCH_RGCTX_REG, 0, 0, code - cfg->native_code);
mono_add_var_location (cfg, cfg->rgctx_var, FALSE, ins->inst_basereg, ins->inst_offset, code - cfg->native_code, 0);
}
/*
* Move arguments to their registers/stack locations.
*/
code = emit_move_args (cfg, code);
/* Initialize seq_point_info_var */
if (cfg->arch.seq_point_info_var) {
MonoInst *ins = cfg->arch.seq_point_info_var;
/* Initialize the variable from a GOT slot */
code = emit_aotconst (cfg, code, ARMREG_IP0, MONO_PATCH_INFO_SEQ_POINT_INFO, cfg->method);
g_assert (ins->opcode == OP_REGOFFSET);
code = emit_strx (code, ARMREG_IP0, ins->inst_basereg, ins->inst_offset);
/* Initialize ss_tramp_var */
ins = cfg->arch.ss_tramp_var;
g_assert (ins->opcode == OP_REGOFFSET);
code = emit_ldrx (code, ARMREG_IP1, ARMREG_IP0, MONO_STRUCT_OFFSET (SeqPointInfo, ss_tramp_addr));
code = emit_strx (code, ARMREG_IP1, ins->inst_basereg, ins->inst_offset);
} else {
MonoInst *ins;
if (cfg->arch.ss_tramp_var) {
/* Initialize ss_tramp_var */
ins = cfg->arch.ss_tramp_var;
g_assert (ins->opcode == OP_REGOFFSET);
code = emit_imm64 (code, ARMREG_IP0, (guint64)&ss_trampoline);
code = emit_strx (code, ARMREG_IP0, ins->inst_basereg, ins->inst_offset);
}
if (cfg->arch.bp_tramp_var) {
/* Initialize bp_tramp_var */
ins = cfg->arch.bp_tramp_var;
g_assert (ins->opcode == OP_REGOFFSET);
code = emit_imm64 (code, ARMREG_IP0, (guint64)bp_trampoline);
code = emit_strx (code, ARMREG_IP0, ins->inst_basereg, ins->inst_offset);
}
}
max_offset = 0;
if (cfg->opt & MONO_OPT_BRANCH) {
for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
MonoInst *ins;
bb->max_offset = max_offset;
MONO_BB_FOR_EACH_INS (bb, ins) {
max_offset += ins_get_size (ins->opcode);
}
}
}
if (max_offset > 0x3ffff * 4)
cfg->arch.cond_branch_islands = TRUE;
return code;
}
void
mono_arch_emit_epilog (MonoCompile *cfg)
{
CallInfo *cinfo;
int max_epilog_size;
guint8 *code;
int i;
max_epilog_size = 16 + 20*4;
code = realloc_code (cfg, max_epilog_size);
if (cfg->method->save_lmf) {
code = mono_arm_emit_load_regarray (code, MONO_ARCH_CALLEE_SAVED_REGS & cfg->used_int_regs, ARMREG_FP, cfg->lmf_var->inst_offset + MONO_STRUCT_OFFSET (MonoLMF, gregs) - (MONO_ARCH_FIRST_LMF_REG * 8));
} else {
/* Restore gregs */
code = emit_load_regset (code, MONO_ARCH_CALLEE_SAVED_REGS & cfg->used_int_regs, ARMREG_FP, cfg->arch.saved_gregs_offset);
}
/* Load returned vtypes into registers if needed */
cinfo = cfg->arch.cinfo;
switch (cinfo->ret.storage) {
case ArgVtypeInIRegs: {
MonoInst *ins = cfg->ret;
for (i = 0; i < cinfo->ret.nregs; ++i)
code = emit_ldrx (code, cinfo->ret.reg + i, ins->inst_basereg, ins->inst_offset + (i * 8));
break;
}
case ArgHFA: {
MonoInst *ins = cfg->ret;
for (i = 0; i < cinfo->ret.nregs; ++i) {
if (cinfo->ret.esize == 4)
code = emit_ldrfpw (code, cinfo->ret.reg + i, ins->inst_basereg, ins->inst_offset + cinfo->ret.foffsets [i]);
else
code = emit_ldrfpx (code, cinfo->ret.reg + i, ins->inst_basereg, ins->inst_offset + cinfo->ret.foffsets [i]);
}
break;
}
default:
break;
}
/* Destroy frame */
code = mono_arm_emit_destroy_frame (code, cfg->stack_offset, (1 << ARMREG_IP0) | (1 << ARMREG_IP1));
if (enable_ptrauth)
arm_retab (code);
else
arm_retx (code, ARMREG_LR);
g_assert (code - (cfg->native_code + cfg->code_len) < max_epilog_size);
set_code_cursor (cfg, code);
}
void
mono_arch_emit_exceptions (MonoCompile *cfg)
{
MonoJumpInfo *ji;
MonoClass *exc_class;
guint8 *code, *ip;
guint8* exc_throw_pos [MONO_EXC_INTRINS_NUM];
guint8 exc_throw_found [MONO_EXC_INTRINS_NUM];
int i, id, size = 0;
for (i = 0; i < MONO_EXC_INTRINS_NUM; i++) {
exc_throw_pos [i] = NULL;
exc_throw_found [i] = 0;
}
for (ji = cfg->patch_info; ji; ji = ji->next) {
if (ji->type == MONO_PATCH_INFO_EXC) {
i = mini_exception_id_by_name ((const char*)ji->data.target);
if (!exc_throw_found [i]) {
size += 32;
exc_throw_found [i] = TRUE;
}
}
}
code = realloc_code (cfg, size);
/* Emit code to raise corlib exceptions */
for (ji = cfg->patch_info; ji; ji = ji->next) {
if (ji->type != MONO_PATCH_INFO_EXC)
continue;
ip = cfg->native_code + ji->ip.i;
id = mini_exception_id_by_name ((const char*)ji->data.target);
if (exc_throw_pos [id]) {
/* ip points to the bcc () in OP_COND_EXC_... */
arm_patch_rel (ip, exc_throw_pos [id], ji->relocation);
ji->type = MONO_PATCH_INFO_NONE;
continue;
}
exc_throw_pos [id] = code;
arm_patch_rel (ip, code, ji->relocation);
/* We are being branched to from the code generated by emit_cond_exc (), the pc is in ip1 */
/* r0 = type token */
exc_class = mono_class_load_from_name (mono_defaults.corlib, "System", ji->data.name);
code = emit_imm (code, ARMREG_R0, m_class_get_type_token (exc_class) - MONO_TOKEN_TYPE_DEF);
/* r1 = throw ip */
arm_movx (code, ARMREG_R1, ARMREG_IP1);
/* Branch to the corlib exception throwing trampoline */
ji->ip.i = code - cfg->native_code;
ji->type = MONO_PATCH_INFO_JIT_ICALL_ID;
ji->data.jit_icall_id = MONO_JIT_ICALL_mono_arch_throw_corlib_exception;
ji->relocation = MONO_R_ARM64_BL;
arm_bl (code, 0);
cfg->thunk_area += THUNK_SIZE;
set_code_cursor (cfg, code);
}
set_code_cursor (cfg, code);
}
MonoInst*
mono_arch_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
{
return NULL;
}
guint32
mono_arch_get_patch_offset (guint8 *code)
{
return 0;
}
gpointer
mono_arch_build_imt_trampoline (MonoVTable *vtable, MonoIMTCheckItem **imt_entries, int count,
gpointer fail_tramp)
{
int i, buf_len, imt_reg;
guint8 *buf, *code;
#if DEBUG_IMT
printf ("building IMT trampoline for class %s %s entries %d code size %d code at %p end %p vtable %p\n", m_class_get_name_space (vtable->klass), m_class_get_name (vtable->klass), count, size, start, ((guint8*)start) + size, vtable);
for (i = 0; i < count; ++i) {
MonoIMTCheckItem *item = imt_entries [i];
printf ("method %d (%p) %s vtable slot %p is_equals %d chunk size %d\n", i, item->key, item->key->name, &vtable->vtable [item->value.vtable_slot], item->is_equals, item->chunk_size);
}
#endif
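	/* First pass: compute an upper bound on the trampoline size in bytes */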
buf_len = 0;
for (i = 0; i < count; ++i) {
MonoIMTCheckItem *item = imt_entries [i];
if (item->is_equals) {
gboolean fail_case = !item->check_target_idx && fail_tramp;
if (item->check_target_idx || fail_case) {
if (!item->compare_done || fail_case) {
buf_len += 4 * 4 + 4;
}
buf_len += 4;
if (item->has_target_code) {
buf_len += 5 * 4;
} else {
buf_len += 6 * 4;
}
if (fail_case) {
buf_len += 5 * 4;
}
} else {
buf_len += 6 * 4;
}
} else {
buf_len += 6 * 4;
}
}
if (fail_tramp) {
buf = (guint8 *)mini_alloc_generic_virtual_trampoline (vtable, buf_len);
} else {
MonoMemoryManager *mem_manager = m_class_get_mem_manager (vtable->klass);
buf = mono_mem_manager_code_reserve (mem_manager, buf_len);
}
code = buf;
MINI_BEGIN_CODEGEN ();
/*
* We are called by JITted code, which passes in the IMT argument in
 * MONO_ARCH_RGCTX_REG (r15). We need to preserve all caller saved regs
* except ip0/ip1.
*/
imt_reg = MONO_ARCH_RGCTX_REG;
for (i = 0; i < count; ++i) {
MonoIMTCheckItem *item = imt_entries [i];
item->code_target = code;
if (item->is_equals) {
/*
* Check the imt argument against item->key, if equals, jump to either
* item->value.target_code or to vtable [item->value.vtable_slot].
* If fail_tramp is set, jump to it if not-equals.
*/
gboolean fail_case = !item->check_target_idx && fail_tramp;
if (item->check_target_idx || fail_case) {
/* Compare imt_reg with item->key */
if (!item->compare_done || fail_case) {
// FIXME: Optimize this
code = emit_imm64 (code, ARMREG_IP0, (guint64)item->key);
arm_cmpx (code, imt_reg, ARMREG_IP0);
}
item->jmp_code = code;
arm_bcc (code, ARMCOND_NE, 0);
/* Jump to target if equals */
if (item->has_target_code) {
code = emit_imm64 (code, ARMREG_IP0, (guint64)item->value.target_code);
code = mono_arm_emit_brx (code, ARMREG_IP0);
} else {
guint64 imm = (guint64)&(vtable->vtable [item->value.vtable_slot]);
code = emit_imm64 (code, ARMREG_IP0, imm);
arm_ldrx (code, ARMREG_IP0, ARMREG_IP0, 0);
code = mono_arm_emit_brx (code, ARMREG_IP0);
}
if (fail_case) {
arm_patch_rel (item->jmp_code, code, MONO_R_ARM64_BCC);
item->jmp_code = NULL;
code = emit_imm64 (code, ARMREG_IP0, (guint64)fail_tramp);
code = mono_arm_emit_brx (code, ARMREG_IP0);
}
} else {
guint64 imm = (guint64)&(vtable->vtable [item->value.vtable_slot]);
code = emit_imm64 (code, ARMREG_IP0, imm);
arm_ldrx (code, ARMREG_IP0, ARMREG_IP0, 0);
code = mono_arm_emit_brx (code, ARMREG_IP0);
}
} else {
code = emit_imm64 (code, ARMREG_IP0, (guint64)item->key);
arm_cmpx (code, imt_reg, ARMREG_IP0);
item->jmp_code = code;
arm_bcc (code, ARMCOND_HS, 0);
}
}
/* Patch the branches */
for (i = 0; i < count; ++i) {
MonoIMTCheckItem *item = imt_entries [i];
if (item->jmp_code && item->check_target_idx)
arm_patch_rel (item->jmp_code, imt_entries [item->check_target_idx]->code_target, MONO_R_ARM64_BCC);
}
g_assert ((code - buf) <= buf_len);
MINI_END_CODEGEN (buf, code - buf, MONO_PROFILER_CODE_BUFFER_IMT_TRAMPOLINE, NULL);
return MINI_ADDR_TO_FTNPTR (buf);
}
GSList *
mono_arch_get_trampolines (gboolean aot)
{
return mono_arm_get_exception_trampolines (aot);
}
#else /* DISABLE_JIT */
gpointer
mono_arch_build_imt_trampoline (MonoVTable *vtable, MonoIMTCheckItem **imt_entries, int count,
gpointer fail_tramp)
{
g_assert_not_reached ();
return NULL;
}
#endif /* !DISABLE_JIT */
#ifdef MONO_ARCH_SOFT_DEBUG_SUPPORTED
void
mono_arch_set_breakpoint (MonoJitInfo *ji, guint8 *ip)
{
guint8 *code = MINI_FTNPTR_TO_ADDR (ip);
guint32 native_offset = ip - (guint8*)ji->code_start;
if (ji->from_aot) {
SeqPointInfo *info = mono_arch_get_seq_point_info ((guint8*)ji->code_start);
if (enable_ptrauth)
NOT_IMPLEMENTED;
g_assert (native_offset % 4 == 0);
g_assert (info->bp_addrs [native_offset / 4] == 0);
info->bp_addrs [native_offset / 4] = (guint8*)mini_get_breakpoint_trampoline ();
} else {
/* ip points to an ldrx */
code += 4;
mono_codeman_enable_write ();
code = mono_arm_emit_blrx (code, ARMREG_IP0);
mono_codeman_disable_write ();
mono_arch_flush_icache (ip, code - ip);
}
}
void
mono_arch_clear_breakpoint (MonoJitInfo *ji, guint8 *ip)
{
guint8 *code = MINI_FTNPTR_TO_ADDR (ip);
if (ji->from_aot) {
guint32 native_offset = ip - (guint8*)ji->code_start;
SeqPointInfo *info = mono_arch_get_seq_point_info ((guint8*)ji->code_start);
if (enable_ptrauth)
NOT_IMPLEMENTED;
g_assert (native_offset % 4 == 0);
info->bp_addrs [native_offset / 4] = NULL;
} else {
/* ip points to an ldrx */
code += 4;
mono_codeman_enable_write ();
arm_nop (code);
mono_codeman_disable_write ();
mono_arch_flush_icache (ip, code - ip);
}
}
void
mono_arch_start_single_stepping (void)
{
ss_trampoline = mini_get_single_step_trampoline ();
}
void
mono_arch_stop_single_stepping (void)
{
ss_trampoline = NULL;
}
gboolean
mono_arch_is_single_step_event (void *info, void *sigctx)
{
/* We use soft breakpoints on arm64 */
return FALSE;
}
gboolean
mono_arch_is_breakpoint_event (void *info, void *sigctx)
{
/* We use soft breakpoints on arm64 */
return FALSE;
}
void
mono_arch_skip_breakpoint (MonoContext *ctx, MonoJitInfo *ji)
{
g_assert_not_reached ();
}
void
mono_arch_skip_single_step (MonoContext *ctx)
{
g_assert_not_reached ();
}
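/*
 * mono_arch_get_seq_point_info:
 *
 *   Return the per-method SeqPointInfo used by AOTed code, creating and
 * caching it on first use, keyed by the native code address.
 */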
SeqPointInfo*
mono_arch_get_seq_point_info (guint8 *code)
{
SeqPointInfo *info;
MonoJitInfo *ji;
MonoJitMemoryManager *jit_mm;
jit_mm = get_default_jit_mm ();
// FIXME: Add a free function
jit_mm_lock (jit_mm);
info = (SeqPointInfo *)g_hash_table_lookup (jit_mm->arch_seq_points, code);
jit_mm_unlock (jit_mm);
if (!info) {
ji = mini_jit_info_table_find (code);
g_assert (ji);
		info = g_malloc0 (sizeof (SeqPointInfo) + (ji->code_size / 4) * sizeof (guint8*));
info->ss_tramp_addr = &ss_trampoline;
jit_mm_lock (jit_mm);
g_hash_table_insert (jit_mm->arch_seq_points, code, info);
jit_mm_unlock (jit_mm);
}
return info;
}
#endif /* MONO_ARCH_SOFT_DEBUG_SUPPORTED */
gboolean
mono_arch_opcode_supported (int opcode)
{
switch (opcode) {
case OP_ATOMIC_ADD_I4:
case OP_ATOMIC_ADD_I8:
case OP_ATOMIC_EXCHANGE_I4:
case OP_ATOMIC_EXCHANGE_I8:
case OP_ATOMIC_CAS_I4:
case OP_ATOMIC_CAS_I8:
case OP_ATOMIC_LOAD_I1:
case OP_ATOMIC_LOAD_I2:
case OP_ATOMIC_LOAD_I4:
case OP_ATOMIC_LOAD_I8:
case OP_ATOMIC_LOAD_U1:
case OP_ATOMIC_LOAD_U2:
case OP_ATOMIC_LOAD_U4:
case OP_ATOMIC_LOAD_U8:
case OP_ATOMIC_LOAD_R4:
case OP_ATOMIC_LOAD_R8:
case OP_ATOMIC_STORE_I1:
case OP_ATOMIC_STORE_I2:
case OP_ATOMIC_STORE_I4:
case OP_ATOMIC_STORE_I8:
case OP_ATOMIC_STORE_U1:
case OP_ATOMIC_STORE_U2:
case OP_ATOMIC_STORE_U4:
case OP_ATOMIC_STORE_U8:
case OP_ATOMIC_STORE_R4:
case OP_ATOMIC_STORE_R8:
return TRUE;
default:
return FALSE;
}
}
CallInfo*
mono_arch_get_call_info (MonoMemPool *mp, MonoMethodSignature *sig)
{
return get_call_info (mp, sig);
}
gpointer
mono_arch_load_function (MonoJitICallId jit_icall_id)
{
gpointer target = NULL;
switch (jit_icall_id) {
#undef MONO_AOT_ICALL
#define MONO_AOT_ICALL(x) case MONO_JIT_ICALL_ ## x: target = (gpointer)x; break;
MONO_AOT_ICALL (mono_arm_resume_unwind)
MONO_AOT_ICALL (mono_arm_start_gsharedvt_call)
MONO_AOT_ICALL (mono_arm_throw_exception)
}
return target;
}
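/* Indirect branch helpers: use the pointer-authenticating forms (blraaz/braaz) when ptrauth is enabled */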
static guint8*
emit_blrx (guint8 *code, int reg)
{
if (enable_ptrauth)
arm_blraaz (code, reg);
else
arm_blrx (code, reg);
return code;
}
static guint8*
emit_brx (guint8 *code, int reg)
{
if (enable_ptrauth)
arm_braaz (code, reg);
else
arm_brx (code, reg);
return code;
}
guint8*
mono_arm_emit_blrx (guint8 *code, int reg)
{
return emit_blrx (code, reg);
}
guint8*
mono_arm_emit_brx (guint8 *code, int reg)
{
return emit_brx (code, reg);
}
| 1 |
/**
* \file
*
* Copyright 2013 Xamarin Inc
*
* Based on mini-arm.h:
*
* Copyright 2011 Xamarin Inc
* Licensed under the MIT license. See LICENSE file in the project root for full license information.
*/
#ifndef __MONO_MINI_ARM64_H__
#define __MONO_MINI_ARM64_H__
#include <mono/arch/arm64/arm64-codegen.h>
#include <mono/mini/mini-arm64-gsharedvt.h>
#define MONO_ARCH_CPU_SPEC mono_arm64_cpu_desc
#define MONO_MAX_IREGS 32
#define MONO_MAX_FREGS 32
#define MONO_MAX_XREGS 32
#if !defined(DISABLE_SIMD)
#define MONO_ARCH_SIMD_INTRINSICS 1
#endif
#define MONO_CONTEXT_SET_LLVM_EXC_REG(ctx, exc) do { (ctx)->regs [0] = (gsize)exc; } while (0)
#if defined(HOST_WIN32)
#define __builtin_extract_return_addr(x) x
#define __builtin_return_address(x) _ReturnAddress()
#define __builtin_frame_address(x) _AddressOfReturnAddress()
#endif
#define MONO_INIT_CONTEXT_FROM_FUNC(ctx,func) do { \
MONO_CONTEXT_SET_BP ((ctx), __builtin_frame_address (0)); \
MONO_CONTEXT_SET_SP ((ctx), __builtin_frame_address (0)); \
MONO_CONTEXT_SET_IP ((ctx), (func)); \
} while (0)
#define MONO_ARCH_INIT_TOP_LMF_ENTRY(lmf)
/* Parameters used by the register allocator */
/* r0..r7, r9..r14 (r15 is the imt/rgctx reg) */
#define MONO_ARCH_CALLEE_REGS 0xfeff
/* r19..r28 */
#define MONO_ARCH_CALLEE_SAVED_REGS (0x3ff << 19)
/* v16/v17 is reserved for a scratch reg */
#define MONO_ARCH_CALLEE_FREGS 0xfffc00ff
/* v8..v15 */
#define MONO_ARCH_CALLEE_SAVED_FREGS 0xff00
#define MONO_ARCH_CALLEE_SAVED_XREGS 0
#define MONO_ARCH_CALLEE_XREGS MONO_ARCH_CALLEE_FREGS
#define MONO_ARCH_USE_FPSTACK FALSE
#define MONO_ARCH_INST_SREG2_MASK(ins) (0)
#define MONO_ARCH_INST_FIXED_REG(desc) ((desc) == 'a' ? ARMREG_R0 : -1)
#define MONO_ARCH_INST_IS_REGPAIR(desc) (0)
#define MONO_ARCH_INST_IS_FLOAT(desc) ((desc) == 'f')
#define MONO_ARCH_INST_REGPAIR_REG2(desc,hreg1) (-1)
#define MONO_ARCH_FRAME_ALIGNMENT 16
#define MONO_ARCH_CODE_ALIGNMENT 32
/* callee saved regs + fp + sp */
#define MONO_ARCH_LMF_REGS ((0x3ff << 19) | (1 << ARMREG_FP) | (1 << ARMREG_SP))
#define MONO_ARCH_NUM_LMF_REGS (10 + 2)
#define MONO_ARCH_FIRST_LMF_REG ARMREG_R19
#define MONO_ARCH_LMF_REG_FP 10
#define MONO_ARCH_LMF_REG_SP 11
struct MonoLMF {
/*
* If the second lowest bit is set to 1, then this is a MonoLMFExt structure, and
* the other fields are not valid.
*/
gpointer previous_lmf;
gpointer lmf_addr;
host_mgreg_t pc;
host_mgreg_t gregs [MONO_ARCH_NUM_LMF_REGS];
};
/* Structure used by the sequence points in AOTed code */
struct SeqPointInfo {
gpointer ss_tramp_addr;
guint8* bp_addrs [MONO_ZERO_LEN_ARRAY];
};
#define PARAM_REGS 8
#define FP_PARAM_REGS 8
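/* Argument buffer used by the dyn call support to marshal the general,
 * fp and stack arguments of a call. */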
typedef struct {
host_mgreg_t res, res2;
guint8 *ret;
double fpregs [FP_PARAM_REGS];
int n_fpargs, n_fpret, n_stackargs;
/* This should come last as the structure is dynamically extended */
/* The +1 is for r8 */
host_mgreg_t regs [PARAM_REGS + 1];
} DynCallArgs;
typedef struct {
CallInfo *cinfo;
int saved_gregs_offset;
/* Points to arguments received on the stack */
int args_reg;
gboolean cond_branch_islands;
MonoInst *vret_addr_loc;
MonoInst *seq_point_info_var;
MonoInst *ss_tramp_var;
MonoInst *bp_tramp_var;
guint8 *thunks;
int thunks_size;
} MonoCompileArch;
#define MONO_ARCH_EMULATE_FCONV_TO_U4 1
#define MONO_ARCH_EMULATE_FCONV_TO_U8 1
#ifdef MONO_ARCH_ILP32
/* For the watch (starting with series 4), a new ABI is introduced: arm64_32.
* We can still use the older AOT compiler to produce bitcode, because it's
* "offset compatible". However, since it is targeting arm7k, it makes certain
* assumptions that we need to align here. */
#define MONO_ARCH_EMULATE_FCONV_TO_I8 1
#define MONO_ARCH_EMULATE_LCONV_TO_R8 1
#define MONO_ARCH_EMULATE_LCONV_TO_R4 1
#define MONO_ARCH_EMULATE_LCONV_TO_R8_UN 1
#define MONO_ARCH_EMULATE_DIV 1
#define MONO_ARCH_EMULATE_CONV_R8_UN 1
#else
#define MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS 1
#define MONO_ARCH_NO_EMULATE_LONG_MUL_OPTS 1
#endif
#define MONO_ARCH_EMULATE_FREM 1
#define MONO_ARCH_EMULATE_LONG_MUL_OVF_OPTS 1
#define MONO_ARCH_NEED_DIV_CHECK 1
#define MONO_ARCH_EMULATE_MUL_OVF 1
#define MONO_ARCH_HAVE_OP_TAILCALL_MEMBASE 1
#define MONO_ARCH_HAVE_OP_TAILCALL_REG 1
#define MONO_ARCH_RGCTX_REG ARMREG_R15
#define MONO_ARCH_IMT_REG MONO_ARCH_RGCTX_REG
#define MONO_ARCH_VTABLE_REG ARMREG_R0
#define MONO_ARCH_HAVE_GENERALIZED_IMT_TRAMPOLINE 1
#define MONO_ARCH_USE_SIGACTION 1
#ifdef HOST_TVOS
#define MONO_ARCH_HAS_NO_PROPER_MONOCTX 1
#endif
#define MONO_ARCH_HAVE_CONTEXT_SET_INT_REG 1
#define MONO_ARCH_GSHARED_SUPPORTED 1
#define MONO_ARCH_INTERPRETER_SUPPORTED 1
#define MONO_ARCH_HAVE_INTERP_ENTRY_TRAMPOLINE 1
#define MONO_ARCH_HAVE_INTERP_NATIVE_TO_MANAGED 1
#define MONO_ARCH_AOT_SUPPORTED 1
#define MONO_ARCH_LLVM_SUPPORTED 1
#define MONO_ARCH_HAVE_FULL_AOT_TRAMPOLINES 1
#define MONO_ARCH_HAVE_EXCEPTIONS_INIT 1
#define MONO_ARCH_HAVE_GET_TRAMPOLINES 1
#define MONO_ARCH_DYN_CALL_SUPPORTED 1
#define MONO_ARCH_DYN_CALL_PARAM_AREA 0
#define MONO_ARCH_SOFT_DEBUG_SUPPORTED 1
#define MONO_ARCH_GSHAREDVT_SUPPORTED 1
#define MONO_ARCH_HAVE_SETUP_RESUME_FROM_SIGNAL_HANDLER_CTX 1
#define MONO_ARCH_HAVE_SETUP_ASYNC_CALLBACK 1
#define MONO_ARCH_HAVE_GENERAL_RGCTX_LAZY_FETCH_TRAMPOLINE 1
#define MONO_ARCH_HAVE_OBJC_GET_SELECTOR 1
#define MONO_ARCH_HAVE_SDB_TRAMPOLINES 1
#define MONO_ARCH_HAVE_OP_GENERIC_CLASS_INIT 1
#define MONO_ARCH_HAVE_OPCODE_NEEDS_EMULATION 1
#define MONO_ARCH_HAVE_DECOMPOSE_LONG_OPTS 1
#define MONO_ARCH_HAVE_INTERP_PINVOKE_TRAMP 1
#define MONO_ARCH_LLVM_TARGET_LAYOUT "e-i64:64-i128:128-n32:64-S128"
// Does the ABI have a volatile non-parameter register, so tailcall
// can pass context to generics or interfaces?
#define MONO_ARCH_HAVE_VOLATILE_NON_PARAM_REGISTER 1
#ifdef TARGET_IOS
#define MONO_ARCH_REDZONE_SIZE 128
#else
#define MONO_ARCH_REDZONE_SIZE 0
#endif
#if defined(TARGET_IOS) || defined(TARGET_WATCHOS)
#define MONO_ARCH_HAVE_UNWIND_BACKTRACE 1
#endif
#if defined(TARGET_TVOS) || defined(TARGET_WATCHOS)
#define MONO_ARCH_EXPLICIT_NULL_CHECKS 1
#endif
/* Relocations */
#define MONO_R_ARM64_B 1
#define MONO_R_ARM64_BCC 2
#define MONO_R_ARM64_IMM 3
#define MONO_R_ARM64_BL 4
#define MONO_R_ARM64_BL_SHORT 5
#define MONO_R_ARM64_CBZ 6
typedef enum {
ArgInIReg,
ArgInFReg,
ArgInFRegR4,
ArgOnStack,
ArgOnStackR8,
ArgOnStackR4,
/*
* Vtype passed in consecutive int registers.
 * ainfo->reg is the first register,
* ainfo->nregs is the number of registers,
* ainfo->size is the size of the structure.
*/
ArgVtypeInIRegs,
ArgVtypeByRef,
ArgVtypeByRefOnStack,
ArgVtypeOnStack,
ArgHFA,
ArgNone
} ArgStorage;
typedef struct {
ArgStorage storage;
int reg;
/* ArgOnStack */
int offset;
/* ArgVtypeInIRegs/ArgHFA */
int nregs, size;
/* ArgHFA */
int esize;
/* ArgHFA */
/* The offsets of the float values inside the arg */
guint16 foffsets [4];
/* ArgOnStack */
int slot_size;
/* hfa */
int nfregs_to_skip;
gboolean sign;
gboolean gsharedvt;
gboolean hfa;
} ArgInfo;
struct CallInfo {
int nargs;
int gr, fr, stack_usage;
gboolean pinvoke, vararg;
ArgInfo ret;
ArgInfo sig_cookie;
ArgInfo args [1];
};
typedef struct {
/* General registers + ARMREG_R8 for indirect returns */
host_mgreg_t gregs [PARAM_REGS + 1];
/* Floating registers */
double fregs [FP_PARAM_REGS];
/* Stack usage, used for passing params on stack */
guint32 stack_size;
guint8* stack;
} CallContext;
guint8* mono_arm_emit_imm64 (guint8 *code, int dreg, gint64 imm);
guint8* mono_arm_emit_ldrx (guint8 *code, int rt, int rn, int imm);
guint8* mono_arm_emit_destroy_frame (guint8 *code, int stack_offset, guint64 temp_regs);
guint8* mono_arm_emit_store_regset (guint8 *code, guint64 regs, int basereg, int offset);
guint8* mono_arm_emit_store_regarray (guint8 *code, guint64 regs, int basereg, int offset);
guint8* mono_arm_emit_load_regarray (guint8 *code, guint64 regs, int basereg, int offset);
/* MonoJumpInfo **ji */
guint8* mono_arm_emit_aotconst (gpointer ji, guint8 *code, guint8 *code_start, int dreg, guint32 patch_type, gconstpointer data);
guint8* mono_arm_emit_brx (guint8 *code, int reg);
guint8* mono_arm_emit_blrx (guint8 *code, int reg);
void mono_arm_patch (guint8 *code, guint8 *target, int relocation);
void mono_arm_throw_exception (gpointer arg, host_mgreg_t pc, host_mgreg_t *int_regs, gdouble *fp_regs, gboolean corlib, gboolean rethrow, gboolean preserve_ips);
void mono_arm_gsharedvt_init (void);
GSList* mono_arm_get_exception_trampolines (gboolean aot);
void mono_arm_resume_unwind (gpointer arg, host_mgreg_t pc, host_mgreg_t *int_regs, gdouble *fp_regs, gboolean corlib, gboolean rethrow);
CallInfo* mono_arch_get_call_info (MonoMemPool *mp, MonoMethodSignature *sig);
#endif /* __MONO_MINI_ARM64_H__ */
| /**
* \file
*
* Copyright 2013 Xamarin Inc
*
* Based on mini-arm.h:
*
* Copyright 2011 Xamarin Inc
* Licensed under the MIT license. See LICENSE file in the project root for full license information.
*/
#ifndef __MONO_MINI_ARM64_H__
#define __MONO_MINI_ARM64_H__
#include <mono/arch/arm64/arm64-codegen.h>
#include <mono/mini/mini-arm64-gsharedvt.h>
#define MONO_ARCH_CPU_SPEC mono_arm64_cpu_desc
#define MONO_MAX_IREGS 32
#define MONO_MAX_FREGS 32
#define MONO_MAX_XREGS 32
#if !defined(DISABLE_SIMD)
#define MONO_ARCH_SIMD_INTRINSICS 1
#endif
#define MONO_CONTEXT_SET_LLVM_EXC_REG(ctx, exc) do { (ctx)->regs [0] = (gsize)exc; } while (0)
#if defined(HOST_WIN32)
#define __builtin_extract_return_addr(x) x
#define __builtin_return_address(x) _ReturnAddress()
#define __builtin_frame_address(x) _AddressOfReturnAddress()
#endif
#define MONO_INIT_CONTEXT_FROM_FUNC(ctx,func) do { \
MONO_CONTEXT_SET_BP ((ctx), __builtin_frame_address (0)); \
MONO_CONTEXT_SET_SP ((ctx), __builtin_frame_address (0)); \
MONO_CONTEXT_SET_IP ((ctx), (func)); \
} while (0)
#define MONO_ARCH_INIT_TOP_LMF_ENTRY(lmf)
/* Parameters used by the register allocator */
/* r0..r7, r9..r14 (r15 is the imt/rgctx reg) */
#define MONO_ARCH_CALLEE_REGS 0xfeff
/* r19..r28 */
#define MONO_ARCH_CALLEE_SAVED_REGS (0x3ff << 19)
/* v16/v17 is reserved for a scratch reg */
#define MONO_ARCH_CALLEE_FREGS 0xfffc00ff
/* v8..v15 */
#define MONO_ARCH_CALLEE_SAVED_FREGS 0xff00
#define MONO_ARCH_CALLEE_SAVED_XREGS 0
#define MONO_ARCH_CALLEE_XREGS MONO_ARCH_CALLEE_FREGS
#define MONO_ARCH_USE_FPSTACK FALSE
#define MONO_ARCH_INST_SREG2_MASK(ins) (0)
#define MONO_ARCH_INST_FIXED_REG(desc) ((desc) == 'a' ? ARMREG_R0 : -1)
#define MONO_ARCH_INST_IS_REGPAIR(desc) (0)
#define MONO_ARCH_INST_IS_FLOAT(desc) ((desc) == 'f')
#define MONO_ARCH_INST_REGPAIR_REG2(desc,hreg1) (-1)
#define MONO_ARCH_FRAME_ALIGNMENT 16
#define MONO_ARCH_CODE_ALIGNMENT 32
/* callee saved regs + fp + sp */
#define MONO_ARCH_LMF_REGS ((0x3ff << 19) | (1 << ARMREG_FP) | (1 << ARMREG_SP))
#define MONO_ARCH_NUM_LMF_REGS (10 + 2)
#define MONO_ARCH_FIRST_LMF_REG ARMREG_R19
#define MONO_ARCH_LMF_REG_FP 10
#define MONO_ARCH_LMF_REG_SP 11
struct MonoLMF {
/*
* If the second lowest bit is set to 1, then this is a MonoLMFExt structure, and
* the other fields are not valid.
*/
gpointer previous_lmf;
gpointer lmf_addr;
host_mgreg_t pc;
host_mgreg_t gregs [MONO_ARCH_NUM_LMF_REGS];
};
/* Structure used by the sequence points in AOTed code */
struct SeqPointInfo {
gpointer ss_tramp_addr;
guint8* bp_addrs [MONO_ZERO_LEN_ARRAY];
};
#define PARAM_REGS 8
#define FP_PARAM_REGS 8
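/* Argument buffer used by the dyn call support to marshal the general,
 * fp and stack arguments of a call. */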
typedef struct {
host_mgreg_t res, res2;
guint8 *ret;
double fpregs [FP_PARAM_REGS];
int n_fpargs, n_fpret, n_stackargs;
/* This should come last as the structure is dynamically extended */
/* The +1 is for r8 */
host_mgreg_t regs [PARAM_REGS + 1];
} DynCallArgs;
typedef struct {
CallInfo *cinfo;
int saved_gregs_offset;
/* Points to arguments received on the stack */
int args_reg;
gboolean cond_branch_islands;
MonoInst *vret_addr_loc;
MonoInst *seq_point_info_var;
MonoInst *ss_tramp_var;
MonoInst *bp_tramp_var;
guint8 *thunks;
int thunks_size;
} MonoCompileArch;
#define MONO_ARCH_EMULATE_FCONV_TO_U4 1
#define MONO_ARCH_EMULATE_FCONV_TO_U8 1
#ifdef MONO_ARCH_ILP32
/* For the watch (starting with series 4), a new ABI is introduced: arm64_32.
* We can still use the older AOT compiler to produce bitcode, because it's
* "offset compatible". However, since it is targeting arm7k, it makes certain
* assumptions that we need to align here. */
#define MONO_ARCH_EMULATE_FCONV_TO_I8 1
#define MONO_ARCH_EMULATE_LCONV_TO_R8 1
#define MONO_ARCH_EMULATE_LCONV_TO_R4 1
#define MONO_ARCH_EMULATE_LCONV_TO_R8_UN 1
#define MONO_ARCH_EMULATE_DIV 1
#define MONO_ARCH_EMULATE_CONV_R8_UN 1
#else
#define MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS 1
#define MONO_ARCH_NO_EMULATE_LONG_MUL_OPTS 1
#endif
#define MONO_ARCH_EMULATE_FREM 1
#define MONO_ARCH_EMULATE_LONG_MUL_OVF_OPTS 1
#define MONO_ARCH_NEED_DIV_CHECK 1
#define MONO_ARCH_EMULATE_MUL_OVF 1
#define MONO_ARCH_HAVE_OP_TAILCALL_MEMBASE 1
#define MONO_ARCH_HAVE_OP_TAILCALL_REG 1
#define MONO_ARCH_RGCTX_REG ARMREG_R15
#define MONO_ARCH_IMT_REG MONO_ARCH_RGCTX_REG
#define MONO_ARCH_VTABLE_REG ARMREG_R0
#define MONO_ARCH_HAVE_GENERALIZED_IMT_TRAMPOLINE 1
#define MONO_ARCH_USE_SIGACTION 1
#ifdef HOST_TVOS
#define MONO_ARCH_HAS_NO_PROPER_MONOCTX 1
#endif
#define MONO_ARCH_HAVE_CONTEXT_SET_INT_REG 1
#define MONO_ARCH_GSHARED_SUPPORTED 1
#define MONO_ARCH_INTERPRETER_SUPPORTED 1
#define MONO_ARCH_HAVE_INTERP_ENTRY_TRAMPOLINE 1
#define MONO_ARCH_HAVE_INTERP_NATIVE_TO_MANAGED 1
#define MONO_ARCH_AOT_SUPPORTED 1
#define MONO_ARCH_LLVM_SUPPORTED 1
#define MONO_ARCH_HAVE_FULL_AOT_TRAMPOLINES 1
#define MONO_ARCH_HAVE_EXCEPTIONS_INIT 1
#define MONO_ARCH_HAVE_GET_TRAMPOLINES 1
#define MONO_ARCH_DYN_CALL_SUPPORTED 1
#define MONO_ARCH_DYN_CALL_PARAM_AREA 0
#define MONO_ARCH_SOFT_DEBUG_SUPPORTED 1
#define MONO_ARCH_GSHAREDVT_SUPPORTED 1
#define MONO_ARCH_HAVE_SETUP_RESUME_FROM_SIGNAL_HANDLER_CTX 1
#define MONO_ARCH_HAVE_SETUP_ASYNC_CALLBACK 1
#define MONO_ARCH_HAVE_GENERAL_RGCTX_LAZY_FETCH_TRAMPOLINE 1
#define MONO_ARCH_HAVE_OBJC_GET_SELECTOR 1
#define MONO_ARCH_HAVE_SDB_TRAMPOLINES 1
#define MONO_ARCH_HAVE_OP_GENERIC_CLASS_INIT 1
#define MONO_ARCH_HAVE_OPCODE_NEEDS_EMULATION 1
#define MONO_ARCH_HAVE_DECOMPOSE_LONG_OPTS 1
#define MONO_ARCH_FLOAT32_SUPPORTED 1
#define MONO_ARCH_HAVE_INTERP_PINVOKE_TRAMP 1
#define MONO_ARCH_LLVM_TARGET_LAYOUT "e-i64:64-i128:128-n32:64-S128"
#ifdef TARGET_OSX
#define MONO_ARCH_FORCE_FLOAT32 1
#endif
// Does the ABI have a volatile non-parameter register, so tailcall
// can pass context to generics or interfaces?
#define MONO_ARCH_HAVE_VOLATILE_NON_PARAM_REGISTER 1
#ifdef TARGET_IOS
#define MONO_ARCH_REDZONE_SIZE 128
#else
#define MONO_ARCH_REDZONE_SIZE 0
#endif
#if defined(TARGET_IOS) || defined(TARGET_WATCHOS)
#define MONO_ARCH_HAVE_UNWIND_BACKTRACE 1
#endif
#if defined(TARGET_TVOS) || defined(TARGET_WATCHOS)
#define MONO_ARCH_EXPLICIT_NULL_CHECKS 1
#endif
/* Relocations */
#define MONO_R_ARM64_B 1
#define MONO_R_ARM64_BCC 2
#define MONO_R_ARM64_IMM 3
#define MONO_R_ARM64_BL 4
#define MONO_R_ARM64_BL_SHORT 5
#define MONO_R_ARM64_CBZ 6
typedef enum {
ArgInIReg,
ArgInFReg,
ArgInFRegR4,
ArgOnStack,
ArgOnStackR8,
ArgOnStackR4,
/*
* Vtype passed in consecutive int registers.
 * ainfo->reg is the first register,
* ainfo->nregs is the number of registers,
* ainfo->size is the size of the structure.
*/
ArgVtypeInIRegs,
ArgVtypeByRef,
ArgVtypeByRefOnStack,
ArgVtypeOnStack,
ArgHFA,
ArgNone
} ArgStorage;
typedef struct {
ArgStorage storage;
int reg;
/* ArgOnStack */
int offset;
/* ArgVtypeInIRegs/ArgHFA */
int nregs, size;
/* ArgHFA */
int esize;
/* ArgHFA */
/* The offsets of the float values inside the arg */
guint16 foffsets [4];
/* ArgOnStack */
int slot_size;
/* hfa */
int nfregs_to_skip;
gboolean sign;
gboolean gsharedvt;
gboolean hfa;
} ArgInfo;
struct CallInfo {
int nargs;
int gr, fr, stack_usage;
gboolean pinvoke, vararg;
ArgInfo ret;
ArgInfo sig_cookie;
ArgInfo args [1];
};
typedef struct {
/* General registers + ARMREG_R8 for indirect returns */
host_mgreg_t gregs [PARAM_REGS + 1];
/* Floating registers */
double fregs [FP_PARAM_REGS];
/* Stack usage, used for passing params on stack */
guint32 stack_size;
guint8* stack;
} CallContext;
guint8* mono_arm_emit_imm64 (guint8 *code, int dreg, gint64 imm);
guint8* mono_arm_emit_ldrx (guint8 *code, int rt, int rn, int imm);
guint8* mono_arm_emit_destroy_frame (guint8 *code, int stack_offset, guint64 temp_regs);
guint8* mono_arm_emit_store_regset (guint8 *code, guint64 regs, int basereg, int offset);
guint8* mono_arm_emit_store_regarray (guint8 *code, guint64 regs, int basereg, int offset);
guint8* mono_arm_emit_load_regarray (guint8 *code, guint64 regs, int basereg, int offset);
/* MonoJumpInfo **ji */
guint8* mono_arm_emit_aotconst (gpointer ji, guint8 *code, guint8 *code_start, int dreg, guint32 patch_type, gconstpointer data);
guint8* mono_arm_emit_brx (guint8 *code, int reg);
guint8* mono_arm_emit_blrx (guint8 *code, int reg);
void mono_arm_patch (guint8 *code, guint8 *target, int relocation);
void mono_arm_throw_exception (gpointer arg, host_mgreg_t pc, host_mgreg_t *int_regs, gdouble *fp_regs, gboolean corlib, gboolean rethrow, gboolean preserve_ips);
void mono_arm_gsharedvt_init (void);
GSList* mono_arm_get_exception_trampolines (gboolean aot);
void mono_arm_resume_unwind (gpointer arg, host_mgreg_t pc, host_mgreg_t *int_regs, gdouble *fp_regs, gboolean corlib, gboolean rethrow);
CallInfo* mono_arch_get_call_info (MonoMemPool *mp, MonoMethodSignature *sig);
#endif /* __MONO_MINI_ARM64_H__ */
| 1 |
dotnet/runtime | 66,006 | Revert "[mono][jit] Remove support for -O=-float32, i.e. treating r4 values a…" | Reverts this as x86 still uses it.
| vargaz | 2022-03-01T15:15:53Z | 2022-03-01T20:09:47Z | af71404d9a3b38e63408af47cc847ac1a1fcf4e1 | 2dd232a53c38ac874b15fe504df275b660988294 | Revert "[mono][jit] Remove support for -O=-float32, i.e. treating r4 values a…". Reverts this as x86 still uses it.
| ./src/mono/mono/mini/mini-llvm.c | /**
* \file
* llvm "Backend" for the mono JIT
*
* Copyright 2009-2011 Novell Inc (http://www.novell.com)
* Copyright 2011 Xamarin Inc (http://www.xamarin.com)
* Licensed under the MIT license. See LICENSE file in the project root for full license information.
*/
#include "config.h"
#include <mono/metadata/debug-helpers.h>
#include <mono/metadata/debug-internals.h>
#include <mono/metadata/mempool-internals.h>
#include <mono/metadata/environment.h>
#include <mono/metadata/object-internals.h>
#include <mono/metadata/abi-details.h>
#include <mono/metadata/tokentype.h>
#include <mono/utils/mono-tls.h>
#include <mono/utils/mono-dl.h>
#include <mono/utils/mono-time.h>
#include <mono/utils/freebsd-dwarf.h>
#ifndef __STDC_LIMIT_MACROS
#define __STDC_LIMIT_MACROS
#endif
#ifndef __STDC_CONSTANT_MACROS
#define __STDC_CONSTANT_MACROS
#endif
#include "llvm-c/BitWriter.h"
#include "llvm-c/Analysis.h"
#include "mini-llvm-cpp.h"
#include "llvm-jit.h"
#include "aot-compiler.h"
#include "mini-llvm.h"
#include "mini-runtime.h"
#include <mono/utils/mono-math.h>
#ifndef DISABLE_JIT
#if defined(TARGET_AMD64) && defined(TARGET_WIN32) && defined(HOST_WIN32) && defined(_MSC_VER)
#define TARGET_X86_64_WIN32_MSVC
#endif
#if defined(TARGET_X86_64_WIN32_MSVC)
#define TARGET_WIN32_MSVC
#endif
#if LLVM_API_VERSION < 900
#error "The version of the mono llvm repository is too old."
#endif
/*
* Information associated by mono with LLVM modules.
*/
typedef struct {
LLVMModuleRef lmodule;
LLVMValueRef throw_icall, rethrow, throw_corlib_exception;
GHashTable *llvm_types;
LLVMValueRef dummy_got_var;
const char *get_method_symbol;
const char *get_unbox_tramp_symbol;
const char *init_aotconst_symbol;
GHashTable *plt_entries;
GHashTable *plt_entries_ji;
GHashTable *method_to_lmethod;
GHashTable *method_to_call_info;
GHashTable *lvalue_to_lcalls;
GHashTable *direct_callables;
/* Maps got slot index -> LLVMValueRef */
GHashTable *aotconst_vars;
char **bb_names;
int bb_names_len;
GPtrArray *used;
LLVMTypeRef ptr_type;
GPtrArray *subprogram_mds;
MonoEERef *mono_ee;
LLVMExecutionEngineRef ee;
gboolean external_symbols;
gboolean emit_dwarf;
int max_got_offset;
LLVMValueRef personality;
gpointer gc_poll_cold_wrapper_compiled;
/* For AOT */
MonoAssembly *assembly;
char *global_prefix;
MonoAotFileInfo aot_info;
const char *eh_frame_symbol;
LLVMValueRef get_method, get_unbox_tramp, init_aotconst_func;
LLVMValueRef init_methods [AOT_INIT_METHOD_NUM];
LLVMValueRef code_start, code_end;
LLVMValueRef inited_var;
LLVMValueRef unbox_tramp_indexes;
LLVMValueRef unbox_trampolines;
LLVMValueRef gc_poll_cold_wrapper;
LLVMValueRef info_var;
LLVMTypeRef *info_var_eltypes;
int max_inited_idx, max_method_idx;
gboolean has_jitted_code;
gboolean static_link;
gboolean llvm_only;
gboolean interp;
GHashTable *idx_to_lmethod;
GHashTable *idx_to_unbox_tramp;
GPtrArray *callsite_list;
LLVMContextRef context;
LLVMValueRef sentinel_exception;
LLVMValueRef gc_safe_point_flag_var;
LLVMValueRef interrupt_flag_var;
void *di_builder, *cu;
GHashTable *objc_selector_to_var;
GPtrArray *cfgs;
int unbox_tramp_num, unbox_tramp_elemsize;
GHashTable *got_idx_to_type;
GHashTable *no_method_table_lmethods;
} MonoLLVMModule;
/*
* Information associated by the backend with mono basic blocks.
*/
typedef struct {
LLVMBasicBlockRef bblock, end_bblock;
LLVMValueRef finally_ind;
gboolean added, invoke_target;
/*
* If this bblock is the start of a finally clause, this is a list of bblocks it
* needs to branch to in ENDFINALLY.
*/
GSList *call_handler_return_bbs;
/*
* If this bblock is the start of a finally clause, this is the bblock that
* CALL_HANDLER needs to branch to.
*/
LLVMBasicBlockRef call_handler_target_bb;
/* The list of switch statements generated by ENDFINALLY instructions */
GSList *endfinally_switch_ins_list;
GSList *phi_nodes;
} BBInfo;
/*
* Structure containing emit state
*/
typedef struct {
MonoMemPool *mempool;
/* Maps method names to the corresponding LLVMValueRef */
GHashTable *emitted_method_decls;
MonoCompile *cfg;
LLVMValueRef lmethod;
MonoLLVMModule *module;
LLVMModuleRef lmodule;
BBInfo *bblocks;
int sindex, default_index, ex_index;
LLVMBuilderRef builder;
LLVMValueRef *values, *addresses;
MonoType **vreg_cli_types;
LLVMCallInfo *linfo;
MonoMethodSignature *sig;
GSList *builders;
GHashTable *region_to_handler;
GHashTable *clause_to_handler;
LLVMBuilderRef alloca_builder;
LLVMValueRef last_alloca;
LLVMValueRef rgctx_arg;
LLVMValueRef this_arg;
LLVMTypeRef *vreg_types;
gboolean *is_vphi;
LLVMTypeRef method_type;
LLVMBasicBlockRef init_bb, inited_bb;
gboolean *is_dead;
gboolean *unreachable;
gboolean llvm_only;
gboolean has_got_access;
gboolean is_linkonce;
gboolean emit_dummy_arg;
gboolean has_safepoints;
gboolean has_catch;
int this_arg_pindex, rgctx_arg_pindex;
LLVMValueRef imt_rgctx_loc;
GHashTable *llvm_types;
LLVMValueRef dbg_md;
MonoDebugMethodInfo *minfo;
/* For every clause, the clauses it is nested in */
GSList **nested_in;
LLVMValueRef ex_var;
GHashTable *exc_meta;
GPtrArray *callsite_list;
GPtrArray *phi_values;
GPtrArray *bblock_list;
char *method_name;
GHashTable *jit_callees;
LLVMValueRef long_bb_break_var;
int *gc_var_indexes;
LLVMValueRef gc_pin_area;
LLVMValueRef il_state;
LLVMValueRef il_state_ret;
} EmitContext;
typedef struct {
MonoBasicBlock *bb;
MonoInst *phi;
MonoBasicBlock *in_bb;
int sreg;
} PhiNode;
/*
* Instruction metadata
* This is the same as ins_info, but LREG != IREG.
*/
#ifdef MINI_OP
#undef MINI_OP
#endif
#ifdef MINI_OP3
#undef MINI_OP3
#endif
#define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
#define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
#define NONE ' '
#define IREG 'i'
#define FREG 'f'
#define VREG 'v'
#define XREG 'x'
#define LREG 'l'
/* keep in sync with the enum in mini.h */
const char
mini_llvm_ins_info[] = {
#include "mini-ops.h"
};
#undef MINI_OP
#undef MINI_OP3
#if TARGET_SIZEOF_VOID_P == 4
#define GET_LONG_IMM(ins) ((ins)->inst_l)
#else
#define GET_LONG_IMM(ins) ((ins)->inst_imm)
#endif
#define LLVM_INS_INFO(opcode) (&mini_llvm_ins_info [((opcode) - OP_START - 1) * 4])
#if 0
#define TRACE_FAILURE(msg) do { printf ("%s\n", msg); } while (0)
#else
#define TRACE_FAILURE(msg)
#endif
#ifdef TARGET_X86
#define IS_TARGET_X86 1
#else
#define IS_TARGET_X86 0
#endif
#ifdef TARGET_AMD64
#define IS_TARGET_AMD64 1
#else
#define IS_TARGET_AMD64 0
#endif
#define ctx_ok(ctx) (!(ctx)->cfg->disable_llvm)
enum {
MAX_VECTOR_ELEMS = 32, // 2 vectors * 128 bits per vector / 8 bits per element
ARM64_MAX_VECTOR_ELEMS = 16,
};
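/* Identity shuffle mask (0, 1, 2, ...), used wherever sequential lane indices
 * are needed, e.g. when building shufflevector masks. */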
const int mask_0_incr_1 [] = {
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
};
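/* Map mono condition codes (CMP_EQ, CMP_NE, ...) to LLVM integer/fp
 * predicates; the order has to match the mono CompRelation values used to
 * index these tables. */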
static LLVMIntPredicate cond_to_llvm_cond [] = {
LLVMIntEQ,
LLVMIntNE,
LLVMIntSLE,
LLVMIntSGE,
LLVMIntSLT,
LLVMIntSGT,
LLVMIntULE,
LLVMIntUGE,
LLVMIntULT,
LLVMIntUGT,
};
static LLVMRealPredicate fpcond_to_llvm_cond [] = {
LLVMRealOEQ,
LLVMRealUNE,
LLVMRealOLE,
LLVMRealOGE,
LLVMRealOLT,
LLVMRealOGT,
LLVMRealULE,
LLVMRealUGE,
LLVMRealULT,
LLVMRealUGT,
LLVMRealORD,
LLVMRealUNO
};
/* See Table 3-1 ("Comparison Predicate for CMPPD and CMPPS Instructions") in
* Vol. 2A of the Intel SDM.
*/
enum {
SSE_eq_ord_nosignal = 0,
SSE_lt_ord_signal = 1,
SSE_le_ord_signal = 2,
SSE_unord_nosignal = 3,
SSE_neq_unord_nosignal = 4,
SSE_nlt_unord_signal = 5,
SSE_nle_unord_signal = 6,
SSE_ord_nosignal = 7,
};
static MonoLLVMModule aot_module;
static GHashTable *intrins_id_to_intrins;
static LLVMTypeRef i1_t, i2_t, i4_t, i8_t, r4_t, r8_t;
static LLVMTypeRef sse_i1_t, sse_i2_t, sse_i4_t, sse_i8_t, sse_r4_t, sse_r8_t;
static LLVMTypeRef v64_i1_t, v64_i2_t, v64_i4_t, v64_i8_t, v64_r4_t, v64_r8_t;
static LLVMTypeRef v128_i1_t, v128_i2_t, v128_i4_t, v128_i8_t, v128_r4_t, v128_r8_t;
static LLVMTypeRef void_func_t;
static MonoLLVMModule *init_jit_module (void);
static void emit_dbg_loc (EmitContext *ctx, LLVMBuilderRef builder, const unsigned char *cil_code);
static void emit_default_dbg_loc (EmitContext *ctx, LLVMBuilderRef builder);
static LLVMValueRef emit_dbg_subprogram (EmitContext *ctx, MonoCompile *cfg, LLVMValueRef method, const char *name);
static void emit_dbg_info (MonoLLVMModule *module, const char *filename, const char *cu_name);
static void emit_cond_system_exception (EmitContext *ctx, MonoBasicBlock *bb, const char *exc_type, LLVMValueRef cmp, gboolean force_explicit);
static LLVMValueRef get_intrins (EmitContext *ctx, int id);
static LLVMValueRef get_intrins_from_module (LLVMModuleRef lmodule, int id);
static void llvm_jit_finalize_method (EmitContext *ctx);
static void mono_llvm_nonnull_state_update (EmitContext *ctx, LLVMValueRef lcall, MonoMethod *call_method, LLVMValueRef *args, int num_params);
static void mono_llvm_propagate_nonnull_final (GHashTable *all_specializable, MonoLLVMModule *module);
static void create_aot_info_var (MonoLLVMModule *module);
static void set_invariant_load_flag (LLVMValueRef v);
static void set_nonnull_load_flag (LLVMValueRef v);
enum {
INTRIN_scalar = 1 << 0,
INTRIN_vector64 = 1 << 1,
INTRIN_vector128 = 1 << 2,
INTRIN_vectorwidths = 3,
INTRIN_vectormask = 0x7,
INTRIN_int8 = 1 << 3,
INTRIN_int16 = 1 << 4,
INTRIN_int32 = 1 << 5,
INTRIN_int64 = 1 << 6,
INTRIN_float32 = 1 << 7,
INTRIN_float64 = 1 << 8,
INTRIN_elementwidths = 6,
};
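/* An overload tag packs one vector width flag (scalar/64/128 bit) with one
 * element type flag (int8..float64); the helpers below derive related
 * overloads by shifting within these bit groups. */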
typedef uint16_t llvm_ovr_tag_t;
static LLVMTypeRef intrin_types [INTRIN_vectorwidths][INTRIN_elementwidths];
static const llvm_ovr_tag_t intrin_arm64_ovr [] = {
#define INTRINS(sym, ...) 0,
#define INTRINS_OVR(sym, ...) 0,
#define INTRINS_OVR_2_ARG(sym, ...) 0,
#define INTRINS_OVR_3_ARG(sym, ...) 0,
#define INTRINS_OVR_TAG(sym, _, arch, spec) spec,
#define INTRINS_OVR_TAG_KIND(sym, _, kind, arch, spec) spec,
#include "llvm-intrinsics.h"
};
enum {
INTRIN_kind_ftoi = 1,
INTRIN_kind_widen,
INTRIN_kind_widen_across,
INTRIN_kind_across,
INTRIN_kind_arm64_dot_prod,
};
static const uint8_t intrin_kind [] = {
#define INTRINS(sym, ...) 0,
#define INTRINS_OVR(sym, ...) 0,
#define INTRINS_OVR_2_ARG(sym, ...) 0,
#define INTRINS_OVR_3_ARG(sym, ...) 0,
#define INTRINS_OVR_TAG(sym, _, arch, spec) 0,
#define INTRINS_OVR_TAG_KIND(sym, _, arch, kind, spec) kind,
#include "llvm-intrinsics.h"
};
static inline llvm_ovr_tag_t
ovr_tag_force_scalar (llvm_ovr_tag_t tag)
{
return (tag & ~INTRIN_vectormask) | INTRIN_scalar;
}
static inline llvm_ovr_tag_t
ovr_tag_smaller_vector (llvm_ovr_tag_t tag)
{
return (tag & ~INTRIN_vectormask) | ((tag & INTRIN_vectormask) >> 1);
}
static inline llvm_ovr_tag_t
ovr_tag_smaller_elements (llvm_ovr_tag_t tag)
{
return ((tag & ~INTRIN_vectormask) >> 1) | (tag & INTRIN_vectormask);
}
static inline llvm_ovr_tag_t
ovr_tag_corresponding_integer (llvm_ovr_tag_t tag)
{
return ((tag & ~INTRIN_vectormask) >> 2) | (tag & INTRIN_vectormask);
}
static LLVMTypeRef
ovr_tag_to_llvm_type (llvm_ovr_tag_t tag)
{
int vw = 0;
int ew = 0;
if (tag & INTRIN_vector64) vw = 1;
else if (tag & INTRIN_vector128) vw = 2;
if (tag & INTRIN_int16) ew = 1;
else if (tag & INTRIN_int32) ew = 2;
else if (tag & INTRIN_int64) ew = 3;
else if (tag & INTRIN_float32) ew = 4;
else if (tag & INTRIN_float64) ew = 5;
return intrin_types [vw][ew];
}
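/* Combine an intrinsic id with an overload tag into a single hash key;
 * intrinsic ids fit below bit 23. */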
static int
key_from_id_and_tag (int id, llvm_ovr_tag_t ovr_tag)
{
return (((int) ovr_tag) << 23) | id;
}
static llvm_ovr_tag_t
ovr_tag_from_mono_vector_class (MonoClass *klass)
{
int size = mono_class_value_size (klass, NULL);
llvm_ovr_tag_t ret = 0;
switch (size) {
case 8: ret |= INTRIN_vector64; break;
case 16: ret |= INTRIN_vector128; break;
}
MonoType *etype = mono_class_get_context (klass)->class_inst->type_argv [0];
switch (etype->type) {
case MONO_TYPE_I1: case MONO_TYPE_U1: ret |= INTRIN_int8; break;
case MONO_TYPE_I2: case MONO_TYPE_U2: ret |= INTRIN_int16; break;
case MONO_TYPE_I4: case MONO_TYPE_U4: ret |= INTRIN_int32; break;
case MONO_TYPE_I8: case MONO_TYPE_U8: ret |= INTRIN_int64; break;
case MONO_TYPE_R4: ret |= INTRIN_float32; break;
case MONO_TYPE_R8: ret |= INTRIN_float64; break;
}
return ret;
}
static llvm_ovr_tag_t
ovr_tag_from_llvm_type (LLVMTypeRef type)
{
llvm_ovr_tag_t ret = 0;
LLVMTypeKind kind = LLVMGetTypeKind (type);
LLVMTypeRef elem_t = NULL;
switch (kind) {
case LLVMVectorTypeKind: {
elem_t = LLVMGetElementType (type);
unsigned int bits = mono_llvm_get_prim_size_bits (type);
switch (bits) {
case 64: ret |= INTRIN_vector64; break;
case 128: ret |= INTRIN_vector128; break;
default: g_assert_not_reached ();
}
break;
}
default:
g_assert_not_reached ();
}
if (elem_t == i1_t) ret |= INTRIN_int8;
if (elem_t == i2_t) ret |= INTRIN_int16;
if (elem_t == i4_t) ret |= INTRIN_int32;
if (elem_t == i8_t) ret |= INTRIN_int64;
if (elem_t == r4_t) ret |= INTRIN_float32;
if (elem_t == r8_t) ret |= INTRIN_float64;
return ret;
}
static inline void
set_failure (EmitContext *ctx, const char *message)
{
TRACE_FAILURE (message);
ctx->cfg->exception_message = g_strdup (message);
ctx->cfg->disable_llvm = TRUE;
}
static LLVMValueRef
const_int1 (int v)
{
return LLVMConstInt (LLVMInt1Type (), v ? 1 : 0, FALSE);
}
static LLVMValueRef
const_int8 (int v)
{
return LLVMConstInt (LLVMInt8Type (), v, FALSE);
}
static LLVMValueRef
const_int32 (int v)
{
return LLVMConstInt (LLVMInt32Type (), v, FALSE);
}
static LLVMValueRef
const_int64 (int64_t v)
{
return LLVMConstInt (LLVMInt64Type (), v, FALSE);
}
/*
* IntPtrType:
*
* The LLVM type with width == TARGET_SIZEOF_VOID_P
*/
static LLVMTypeRef
IntPtrType (void)
{
return TARGET_SIZEOF_VOID_P == 8 ? LLVMInt64Type () : LLVMInt32Type ();
}
static LLVMTypeRef
ObjRefType (void)
{
return TARGET_SIZEOF_VOID_P == 8 ? LLVMPointerType (LLVMInt64Type (), 0) : LLVMPointerType (LLVMInt32Type (), 0);
}
static LLVMTypeRef
ThisType (void)
{
return TARGET_SIZEOF_VOID_P == 8 ? LLVMPointerType (LLVMInt64Type (), 0) : LLVMPointerType (LLVMInt32Type (), 0);
}
typedef struct {
int32_t size;
uint32_t align;
} MonoSizeAlign;
/*
 * get_vtype_size_align:
 *
 * Return the size and alignment of the LLVM representation of the vtype T.
*/
static MonoSizeAlign
get_vtype_size_align (MonoType *t)
{
uint32_t align = 0;
int32_t size = mono_class_value_size (mono_class_from_mono_type_internal (t), &align);
/* LLVMArgAsIArgs depends on this since it stores whole words */
while (size < 2 * TARGET_SIZEOF_VOID_P && mono_is_power_of_two (size) == -1)
size ++;
MonoSizeAlign ret = { size, align };
return ret;
}
/*
* simd_class_to_llvm_type:
*
 * Return the LLVM type corresponding to the SIMD class KLASS (Mono.Simd,
 * System.Numerics or System.Runtime.Intrinsics vector types).
*/
static LLVMTypeRef
simd_class_to_llvm_type (EmitContext *ctx, MonoClass *klass)
{
const char *klass_name = m_class_get_name (klass);
if (!strcmp (klass_name, "Vector2d")) {
return LLVMVectorType (LLVMDoubleType (), 2);
} else if (!strcmp (klass_name, "Vector2l")) {
return LLVMVectorType (LLVMInt64Type (), 2);
} else if (!strcmp (klass_name, "Vector2ul")) {
return LLVMVectorType (LLVMInt64Type (), 2);
} else if (!strcmp (klass_name, "Vector4i")) {
return LLVMVectorType (LLVMInt32Type (), 4);
} else if (!strcmp (klass_name, "Vector4ui")) {
return LLVMVectorType (LLVMInt32Type (), 4);
} else if (!strcmp (klass_name, "Vector4f")) {
return LLVMVectorType (LLVMFloatType (), 4);
} else if (!strcmp (klass_name, "Vector8s")) {
return LLVMVectorType (LLVMInt16Type (), 8);
} else if (!strcmp (klass_name, "Vector8us")) {
return LLVMVectorType (LLVMInt16Type (), 8);
} else if (!strcmp (klass_name, "Vector16sb")) {
return LLVMVectorType (LLVMInt8Type (), 16);
} else if (!strcmp (klass_name, "Vector16b")) {
return LLVMVectorType (LLVMInt8Type (), 16);
} else if (!strcmp (klass_name, "Vector2")) {
/* System.Numerics */
return LLVMVectorType (LLVMFloatType (), 4);
} else if (!strcmp (klass_name, "Vector3")) {
return LLVMVectorType (LLVMFloatType (), 4);
} else if (!strcmp (klass_name, "Vector4")) {
return LLVMVectorType (LLVMFloatType (), 4);
} else if (!strcmp (klass_name, "Vector`1") || !strcmp (klass_name, "Vector64`1") || !strcmp (klass_name, "Vector128`1") || !strcmp (klass_name, "Vector256`1")) {
MonoType *etype = mono_class_get_generic_class (klass)->context.class_inst->type_argv [0];
int size = mono_class_value_size (klass, NULL);
switch (etype->type) {
case MONO_TYPE_I1:
case MONO_TYPE_U1:
return LLVMVectorType (LLVMInt8Type (), size);
case MONO_TYPE_I2:
case MONO_TYPE_U2:
return LLVMVectorType (LLVMInt16Type (), size / 2);
case MONO_TYPE_I4:
case MONO_TYPE_U4:
return LLVMVectorType (LLVMInt32Type (), size / 4);
case MONO_TYPE_I8:
case MONO_TYPE_U8:
return LLVMVectorType (LLVMInt64Type (), size / 8);
case MONO_TYPE_I:
case MONO_TYPE_U:
#if TARGET_SIZEOF_VOID_P == 8
return LLVMVectorType (LLVMInt64Type (), size / 8);
#else
return LLVMVectorType (LLVMInt32Type (), size / 4);
#endif
case MONO_TYPE_R4:
return LLVMVectorType (LLVMFloatType (), size / 4);
case MONO_TYPE_R8:
return LLVMVectorType (LLVMDoubleType (), size / 8);
default:
g_assert_not_reached ();
return NULL;
}
} else {
printf ("%s\n", klass_name);
NOT_IMPLEMENTED;
return NULL;
}
}
static LLVMTypeRef
simd_valuetuple_to_llvm_type (EmitContext *ctx, MonoClass *klass)
{
const char *klass_name = m_class_get_name (klass);
if (!strcmp (klass_name, "ValueTuple`2")) {
MonoType *etype = mono_class_get_generic_class (klass)->context.class_inst->type_argv [0];
if (etype->type != MONO_TYPE_GENERICINST)
g_assert_not_reached ();
MonoClass *eklass = etype->data.generic_class->cached_class;
LLVMTypeRef ltype = simd_class_to_llvm_type (ctx, eklass);
return LLVMArrayType (ltype, 2);
}
g_assert_not_reached ();
}
/* Return the 128 bit SIMD type corresponding to the mono type TYPE */
static inline G_GNUC_UNUSED LLVMTypeRef
type_to_sse_type (int type)
{
switch (type) {
case MONO_TYPE_I1:
case MONO_TYPE_U1:
return LLVMVectorType (LLVMInt8Type (), 16);
case MONO_TYPE_U2:
case MONO_TYPE_I2:
return LLVMVectorType (LLVMInt16Type (), 8);
case MONO_TYPE_U4:
case MONO_TYPE_I4:
return LLVMVectorType (LLVMInt32Type (), 4);
case MONO_TYPE_U8:
case MONO_TYPE_I8:
return LLVMVectorType (LLVMInt64Type (), 2);
case MONO_TYPE_I:
case MONO_TYPE_U:
#if TARGET_SIZEOF_VOID_P == 8
return LLVMVectorType (LLVMInt64Type (), 2);
#else
return LLVMVectorType (LLVMInt32Type (), 4);
#endif
case MONO_TYPE_R8:
return LLVMVectorType (LLVMDoubleType (), 2);
case MONO_TYPE_R4:
return LLVMVectorType (LLVMFloatType (), 4);
default:
g_assert_not_reached ();
return NULL;
}
}
static LLVMTypeRef
create_llvm_type_for_type (MonoLLVMModule *module, MonoClass *klass)
{
int i, size, nfields, esize;
LLVMTypeRef *eltypes;
char *name;
MonoType *t;
LLVMTypeRef ltype;
t = m_class_get_byval_arg (klass);
if (mini_type_is_hfa (t, &nfields, &esize)) {
/*
* This is needed on arm64 where HFAs are returned in
* registers.
*/
/* SIMD types have size 16 in mono_class_value_size () */
if (m_class_is_simd_type (klass))
nfields = 16 / esize;
size = nfields;
eltypes = g_new (LLVMTypeRef, size);
for (i = 0; i < size; ++i)
eltypes [i] = esize == 4 ? LLVMFloatType () : LLVMDoubleType ();
} else {
MonoSizeAlign size_align = get_vtype_size_align (t);
eltypes = g_new (LLVMTypeRef, size_align.size);
size = 0;
uint32_t bytes = 0;
uint32_t chunk = size_align.align < TARGET_SIZEOF_VOID_P ? size_align.align : TARGET_SIZEOF_VOID_P;
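/* Decompose the vtype into the largest naturally aligned integer chunks
 * (at most pointer sized), e.g. a 12 byte struct with 4 byte alignment
 * becomes { i32, i32, i32 }. */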
for (; chunk > 0; chunk = chunk >> 1) {
for (; (bytes + chunk) <= size_align.size; bytes += chunk) {
eltypes [size] = LLVMIntType (chunk * 8);
++size;
}
}
}
name = mono_type_full_name (m_class_get_byval_arg (klass));
ltype = LLVMStructCreateNamed (module->context, name);
LLVMStructSetBody (ltype, eltypes, size, FALSE);
g_free (eltypes);
g_free (name);
return ltype;
}
static LLVMTypeRef
primitive_type_to_llvm_type (MonoTypeEnum type)
{
switch (type) {
case MONO_TYPE_I1:
case MONO_TYPE_U1:
return LLVMInt8Type ();
case MONO_TYPE_I2:
case MONO_TYPE_U2:
return LLVMInt16Type ();
case MONO_TYPE_I4:
case MONO_TYPE_U4:
return LLVMInt32Type ();
case MONO_TYPE_I8:
case MONO_TYPE_U8:
return LLVMInt64Type ();
case MONO_TYPE_R4:
return LLVMFloatType ();
case MONO_TYPE_R8:
return LLVMDoubleType ();
case MONO_TYPE_I:
case MONO_TYPE_U:
return IntPtrType ();
default:
return NULL;
}
}
static MonoTypeEnum
inst_c1_type (const MonoInst *ins)
{
return (MonoTypeEnum)ins->inst_c1;
}
/*
* type_to_llvm_type:
*
* Return the LLVM type corresponding to T.
*/
static LLVMTypeRef
type_to_llvm_type (EmitContext *ctx, MonoType *t)
{
if (m_type_is_byref (t))
return ThisType ();
t = mini_get_underlying_type (t);
LLVMTypeRef prim_llvm_type = primitive_type_to_llvm_type (t->type);
if (prim_llvm_type != NULL)
return prim_llvm_type;
switch (t->type) {
case MONO_TYPE_VOID:
return LLVMVoidType ();
case MONO_TYPE_OBJECT:
return ObjRefType ();
case MONO_TYPE_PTR:
case MONO_TYPE_FNPTR: {
MonoClass *klass = mono_class_from_mono_type_internal (t);
MonoClass *ptr_klass = m_class_get_element_class (klass);
MonoType *ptr_type = m_class_get_byval_arg (ptr_klass);
/* Handle primitive pointers */
switch (ptr_type->type) {
case MONO_TYPE_I1:
case MONO_TYPE_I2:
case MONO_TYPE_I4:
case MONO_TYPE_U1:
case MONO_TYPE_U2:
case MONO_TYPE_U4:
return LLVMPointerType (type_to_llvm_type (ctx, ptr_type), 0);
}
return ObjRefType ();
}
case MONO_TYPE_VAR:
case MONO_TYPE_MVAR:
/* Because of generic sharing */
return ObjRefType ();
case MONO_TYPE_GENERICINST:
if (!mono_type_generic_inst_is_valuetype (t))
return ObjRefType ();
/* Fall through */
case MONO_TYPE_VALUETYPE:
case MONO_TYPE_TYPEDBYREF: {
MonoClass *klass;
LLVMTypeRef ltype;
klass = mono_class_from_mono_type_internal (t);
if (MONO_CLASS_IS_SIMD (ctx->cfg, klass))
return simd_class_to_llvm_type (ctx, klass);
if (m_class_is_enumtype (klass))
return type_to_llvm_type (ctx, mono_class_enum_basetype_internal (klass));
ltype = (LLVMTypeRef)g_hash_table_lookup (ctx->module->llvm_types, klass);
if (!ltype) {
ltype = create_llvm_type_for_type (ctx->module, klass);
g_hash_table_insert (ctx->module->llvm_types, klass, ltype);
}
return ltype;
}
default:
printf ("X: %d\n", t->type);
ctx->cfg->exception_message = g_strdup_printf ("type %s", mono_type_full_name (t));
ctx->cfg->disable_llvm = TRUE;
return NULL;
}
}
static gboolean
primitive_type_is_unsigned (MonoTypeEnum t)
{
switch (t) {
case MONO_TYPE_U1:
case MONO_TYPE_U2:
case MONO_TYPE_CHAR:
case MONO_TYPE_U4:
case MONO_TYPE_U8:
case MONO_TYPE_U:
return TRUE;
default:
return FALSE;
}
}
/*
* type_is_unsigned:
*
 * Return whether T is an unsigned int type.
*/
static gboolean
type_is_unsigned (EmitContext *ctx, MonoType *t)
{
t = mini_get_underlying_type (t);
if (m_type_is_byref (t))
return FALSE;
return primitive_type_is_unsigned (t->type);
}
/*
* type_to_llvm_arg_type:
*
* Same as type_to_llvm_type, but treat i8/i16 as i32.
*/
static LLVMTypeRef
type_to_llvm_arg_type (EmitContext *ctx, MonoType *t)
{
LLVMTypeRef ptype = type_to_llvm_type (ctx, t);
if (ctx->cfg->llvm_only)
return ptype;
/*
* This works on all abis except arm64/ios which passes multiple
* arguments in one stack slot.
*/
#ifndef TARGET_ARM64
if (ptype == LLVMInt8Type () || ptype == LLVMInt16Type ()) {
/*
* LLVM generates code which only sets the lower bits, while JITted
* code expects all the bits to be set.
*/
ptype = LLVMInt32Type ();
}
#endif
return ptype;
}
/*
* llvm_type_to_stack_type:
*
* Return the LLVM type which needs to be used when a value of type TYPE is pushed
* on the IL stack.
*/
static G_GNUC_UNUSED LLVMTypeRef
llvm_type_to_stack_type (MonoCompile *cfg, LLVMTypeRef type)
{
if (type == NULL)
return NULL;
if (type == LLVMInt8Type ())
return LLVMInt32Type ();
else if (type == LLVMInt16Type ())
return LLVMInt32Type ();
else
return type;
}
/*
* regtype_to_llvm_type:
*
* Return the LLVM type corresponding to the regtype C used in instruction
* descriptions.
*/
static LLVMTypeRef
regtype_to_llvm_type (char c)
{
switch (c) {
case 'i':
return LLVMInt32Type ();
case 'l':
return LLVMInt64Type ();
case 'f':
return LLVMDoubleType ();
default:
return NULL;
}
}
/*
* op_to_llvm_type:
*
* Return the LLVM type corresponding to the unary/binary opcode OPCODE.
*/
static LLVMTypeRef
op_to_llvm_type (int opcode)
{
switch (opcode) {
case OP_ICONV_TO_I1:
case OP_LCONV_TO_I1:
return LLVMInt8Type ();
case OP_ICONV_TO_U1:
case OP_LCONV_TO_U1:
return LLVMInt8Type ();
case OP_ICONV_TO_I2:
case OP_LCONV_TO_I2:
return LLVMInt16Type ();
case OP_ICONV_TO_U2:
case OP_LCONV_TO_U2:
return LLVMInt16Type ();
case OP_ICONV_TO_I4:
case OP_LCONV_TO_I4:
return LLVMInt32Type ();
case OP_ICONV_TO_U4:
case OP_LCONV_TO_U4:
return LLVMInt32Type ();
case OP_ICONV_TO_I8:
return LLVMInt64Type ();
case OP_ICONV_TO_R4:
return LLVMFloatType ();
case OP_ICONV_TO_R8:
return LLVMDoubleType ();
case OP_ICONV_TO_U8:
return LLVMInt64Type ();
case OP_FCONV_TO_I4:
return LLVMInt32Type ();
case OP_FCONV_TO_I8:
return LLVMInt64Type ();
case OP_FCONV_TO_I1:
case OP_FCONV_TO_U1:
case OP_RCONV_TO_I1:
case OP_RCONV_TO_U1:
return LLVMInt8Type ();
case OP_FCONV_TO_I2:
case OP_FCONV_TO_U2:
case OP_RCONV_TO_I2:
case OP_RCONV_TO_U2:
return LLVMInt16Type ();
case OP_FCONV_TO_U4:
case OP_RCONV_TO_U4:
return LLVMInt32Type ();
case OP_FCONV_TO_U8:
case OP_RCONV_TO_U8:
return LLVMInt64Type ();
case OP_FCONV_TO_I:
case OP_RCONV_TO_I:
return TARGET_SIZEOF_VOID_P == 8 ? LLVMInt64Type () : LLVMInt32Type ();
case OP_IADD_OVF:
case OP_IADD_OVF_UN:
case OP_ISUB_OVF:
case OP_ISUB_OVF_UN:
case OP_IMUL_OVF:
case OP_IMUL_OVF_UN:
return LLVMInt32Type ();
case OP_LADD_OVF:
case OP_LADD_OVF_UN:
case OP_LSUB_OVF:
case OP_LSUB_OVF_UN:
case OP_LMUL_OVF:
case OP_LMUL_OVF_UN:
return LLVMInt64Type ();
default:
printf ("%s\n", mono_inst_name (opcode));
g_assert_not_reached ();
return NULL;
}
}
#define CLAUSE_START(clause) ((clause)->try_offset)
#define CLAUSE_END(clause) (((clause))->try_offset + ((clause))->try_len)
/*
* load_store_to_llvm_type:
*
* Return the size/sign/zero extension corresponding to the load/store opcode
* OPCODE.
*/
static LLVMTypeRef
load_store_to_llvm_type (int opcode, int *size, gboolean *sext, gboolean *zext)
{
*sext = FALSE;
*zext = FALSE;
switch (opcode) {
case OP_LOADI1_MEMBASE:
case OP_STOREI1_MEMBASE_REG:
case OP_STOREI1_MEMBASE_IMM:
case OP_ATOMIC_LOAD_I1:
case OP_ATOMIC_STORE_I1:
*size = 1;
*sext = TRUE;
return LLVMInt8Type ();
case OP_LOADU1_MEMBASE:
case OP_LOADU1_MEM:
case OP_ATOMIC_LOAD_U1:
case OP_ATOMIC_STORE_U1:
*size = 1;
*zext = TRUE;
return LLVMInt8Type ();
case OP_LOADI2_MEMBASE:
case OP_STOREI2_MEMBASE_REG:
case OP_STOREI2_MEMBASE_IMM:
case OP_ATOMIC_LOAD_I2:
case OP_ATOMIC_STORE_I2:
*size = 2;
*sext = TRUE;
return LLVMInt16Type ();
case OP_LOADU2_MEMBASE:
case OP_LOADU2_MEM:
case OP_ATOMIC_LOAD_U2:
case OP_ATOMIC_STORE_U2:
*size = 2;
*zext = TRUE;
return LLVMInt16Type ();
case OP_LOADI4_MEMBASE:
case OP_LOADU4_MEMBASE:
case OP_LOADI4_MEM:
case OP_LOADU4_MEM:
case OP_STOREI4_MEMBASE_REG:
case OP_STOREI4_MEMBASE_IMM:
case OP_ATOMIC_LOAD_I4:
case OP_ATOMIC_STORE_I4:
case OP_ATOMIC_LOAD_U4:
case OP_ATOMIC_STORE_U4:
*size = 4;
return LLVMInt32Type ();
case OP_LOADI8_MEMBASE:
case OP_LOADI8_MEM:
case OP_STOREI8_MEMBASE_REG:
case OP_STOREI8_MEMBASE_IMM:
case OP_ATOMIC_LOAD_I8:
case OP_ATOMIC_STORE_I8:
case OP_ATOMIC_LOAD_U8:
case OP_ATOMIC_STORE_U8:
*size = 8;
return LLVMInt64Type ();
case OP_LOADR4_MEMBASE:
case OP_STORER4_MEMBASE_REG:
case OP_ATOMIC_LOAD_R4:
case OP_ATOMIC_STORE_R4:
*size = 4;
return LLVMFloatType ();
case OP_LOADR8_MEMBASE:
case OP_STORER8_MEMBASE_REG:
case OP_ATOMIC_LOAD_R8:
case OP_ATOMIC_STORE_R8:
*size = 8;
return LLVMDoubleType ();
case OP_LOAD_MEMBASE:
case OP_LOAD_MEM:
case OP_STORE_MEMBASE_REG:
case OP_STORE_MEMBASE_IMM:
*size = TARGET_SIZEOF_VOID_P;
return IntPtrType ();
default:
g_assert_not_reached ();
return NULL;
}
}
/*
* ovf_op_to_intrins:
*
* Return the LLVM intrinsics corresponding to the overflow opcode OPCODE.
*/
static IntrinsicId
ovf_op_to_intrins (int opcode)
{
switch (opcode) {
case OP_IADD_OVF:
return INTRINS_SADD_OVF_I32;
case OP_IADD_OVF_UN:
return INTRINS_UADD_OVF_I32;
case OP_ISUB_OVF:
return INTRINS_SSUB_OVF_I32;
case OP_ISUB_OVF_UN:
return INTRINS_USUB_OVF_I32;
case OP_IMUL_OVF:
return INTRINS_SMUL_OVF_I32;
case OP_IMUL_OVF_UN:
return INTRINS_UMUL_OVF_I32;
case OP_LADD_OVF:
return INTRINS_SADD_OVF_I64;
case OP_LADD_OVF_UN:
return INTRINS_UADD_OVF_I64;
case OP_LSUB_OVF:
return INTRINS_SSUB_OVF_I64;
case OP_LSUB_OVF_UN:
return INTRINS_USUB_OVF_I64;
case OP_LMUL_OVF:
return INTRINS_SMUL_OVF_I64;
case OP_LMUL_OVF_UN:
return INTRINS_UMUL_OVF_I64;
default:
g_assert_not_reached ();
return (IntrinsicId)0;
}
}
static IntrinsicId
simd_ins_to_intrins (int opcode)
{
switch (opcode) {
#if defined(TARGET_X86) || defined(TARGET_AMD64)
case OP_CVTPD2DQ:
return INTRINS_SSE_CVTPD2DQ;
case OP_CVTPS2DQ:
return INTRINS_SSE_CVTPS2DQ;
case OP_CVTPD2PS:
return INTRINS_SSE_CVTPD2PS;
case OP_CVTTPD2DQ:
return INTRINS_SSE_CVTTPD2DQ;
case OP_CVTTPS2DQ:
return INTRINS_SSE_CVTTPS2DQ;
case OP_SSE_SQRTSS:
return INTRINS_SSE_SQRT_SS;
case OP_SSE2_SQRTSD:
return INTRINS_SSE_SQRT_SD;
#endif
default:
g_assert_not_reached ();
return (IntrinsicId)0;
}
}
static LLVMTypeRef
simd_op_to_llvm_type (int opcode)
{
#if defined(TARGET_X86) || defined(TARGET_AMD64)
switch (opcode) {
case OP_EXTRACT_R8:
case OP_EXPAND_R8:
return sse_r8_t;
case OP_EXTRACT_I8:
case OP_EXPAND_I8:
return sse_i8_t;
case OP_EXTRACT_I4:
case OP_EXPAND_I4:
return sse_i4_t;
case OP_EXTRACT_I2:
case OP_EXTRACTX_U2:
case OP_EXPAND_I2:
return sse_i2_t;
case OP_EXTRACT_I1:
case OP_EXPAND_I1:
return sse_i1_t;
case OP_EXTRACT_R4:
case OP_EXPAND_R4:
return sse_r4_t;
case OP_CVTPD2DQ:
case OP_CVTPD2PS:
case OP_CVTTPD2DQ:
return sse_r8_t;
case OP_CVTPS2DQ:
case OP_CVTTPS2DQ:
return sse_r4_t;
case OP_SQRTPS:
case OP_RSQRTPS:
case OP_DUPPS_LOW:
case OP_DUPPS_HIGH:
return sse_r4_t;
case OP_SQRTPD:
case OP_DUPPD:
return sse_r8_t;
default:
g_assert_not_reached ();
return NULL;
}
#else
return NULL;
#endif
}
static void
set_cold_cconv (LLVMValueRef func)
{
/*
* xcode10 (watchOS) and ARM/ARM64 doesn't seem to support preserveall, it fails with:
* fatal error: error in backend: Unsupported calling convention
*/
#if !defined(TARGET_WATCHOS) && !defined(TARGET_ARM) && !defined(TARGET_ARM64)
LLVMSetFunctionCallConv (func, LLVMColdCallConv);
#endif
}
static void
set_call_cold_cconv (LLVMValueRef func)
{
#if !defined(TARGET_WATCHOS) && !defined(TARGET_ARM) && !defined(TARGET_ARM64)
LLVMSetInstructionCallConv (func, LLVMColdCallConv);
#endif
}
/*
* get_bb:
*
* Return the LLVM basic block corresponding to BB.
*/
static LLVMBasicBlockRef
get_bb (EmitContext *ctx, MonoBasicBlock *bb)
{
char bb_name_buf [128];
char *bb_name;
if (ctx->bblocks [bb->block_num].bblock == NULL) {
if (bb->flags & BB_EXCEPTION_HANDLER) {
int clause_index = (mono_get_block_region_notry (ctx->cfg, bb->region) >> 8) - 1;
sprintf (bb_name_buf, "EH_CLAUSE%d_BB%d", clause_index, bb->block_num);
bb_name = bb_name_buf;
} else if (bb->block_num < 256) {
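/* Cache the names of low numbered bblocks module-wide so they are not
 * re-allocated for every compiled method; the memory barrier publishes the
 * string before its pointer becomes visible to other threads. */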
if (!ctx->module->bb_names) {
ctx->module->bb_names_len = 256;
ctx->module->bb_names = g_new0 (char*, ctx->module->bb_names_len);
}
if (!ctx->module->bb_names [bb->block_num]) {
char *n;
n = g_strdup_printf ("BB%d", bb->block_num);
mono_memory_barrier ();
ctx->module->bb_names [bb->block_num] = n;
}
bb_name = ctx->module->bb_names [bb->block_num];
} else {
sprintf (bb_name_buf, "BB%d", bb->block_num);
bb_name = bb_name_buf;
}
ctx->bblocks [bb->block_num].bblock = LLVMAppendBasicBlock (ctx->lmethod, bb_name);
ctx->bblocks [bb->block_num].end_bblock = ctx->bblocks [bb->block_num].bblock;
}
return ctx->bblocks [bb->block_num].bblock;
}
/*
* get_end_bb:
*
* Return the last LLVM bblock corresponding to BB.
* This might not be equal to the bb returned by get_bb () since we need to generate
* multiple LLVM bblocks for a mono bblock to handle throwing exceptions.
*/
static LLVMBasicBlockRef
get_end_bb (EmitContext *ctx, MonoBasicBlock *bb)
{
get_bb (ctx, bb);
return ctx->bblocks [bb->block_num].end_bblock;
}
static LLVMBasicBlockRef
gen_bb (EmitContext *ctx, const char *prefix)
{
char bb_name [128];
sprintf (bb_name, "%s%d", prefix, ++ ctx->ex_index);
return LLVMAppendBasicBlock (ctx->lmethod, bb_name);
}
/*
* resolve_patch:
*
* Return the target of the patch identified by TYPE and TARGET.
*/
static gpointer
resolve_patch (MonoCompile *cfg, MonoJumpInfoType type, gconstpointer target)
{
MonoJumpInfo ji;
ERROR_DECL (error);
gpointer res;
memset (&ji, 0, sizeof (ji));
ji.type = type;
ji.data.target = target;
res = mono_resolve_patch_target (cfg->method, NULL, &ji, FALSE, error);
mono_error_assert_ok (error);
return res;
}
/*
* convert_full:
*
* Emit code to convert the LLVM value V to DTYPE.
*/
static LLVMValueRef
convert_full (EmitContext *ctx, LLVMValueRef v, LLVMTypeRef dtype, gboolean is_unsigned)
{
LLVMTypeRef stype = LLVMTypeOf (v);
if (stype != dtype) {
gboolean ext = FALSE;
/* Extend */
if (dtype == LLVMInt64Type () && (stype == LLVMInt32Type () || stype == LLVMInt16Type () || stype == LLVMInt8Type ()))
ext = TRUE;
else if (dtype == LLVMInt32Type () && (stype == LLVMInt16Type () || stype == LLVMInt8Type ()))
ext = TRUE;
else if (dtype == LLVMInt16Type () && (stype == LLVMInt8Type ()))
ext = TRUE;
if (ext)
return is_unsigned ? LLVMBuildZExt (ctx->builder, v, dtype, "") : LLVMBuildSExt (ctx->builder, v, dtype, "");
if (dtype == LLVMDoubleType () && stype == LLVMFloatType ())
return LLVMBuildFPExt (ctx->builder, v, dtype, "");
/* Trunc */
if (stype == LLVMInt64Type () && (dtype == LLVMInt32Type () || dtype == LLVMInt16Type () || dtype == LLVMInt8Type ()))
return LLVMBuildTrunc (ctx->builder, v, dtype, "");
if (stype == LLVMInt32Type () && (dtype == LLVMInt16Type () || dtype == LLVMInt8Type ()))
return LLVMBuildTrunc (ctx->builder, v, dtype, "");
if (stype == LLVMInt16Type () && dtype == LLVMInt8Type ())
return LLVMBuildTrunc (ctx->builder, v, dtype, "");
if (stype == LLVMDoubleType () && dtype == LLVMFloatType ())
return LLVMBuildFPTrunc (ctx->builder, v, dtype, "");
if (LLVMGetTypeKind (stype) == LLVMPointerTypeKind && LLVMGetTypeKind (dtype) == LLVMPointerTypeKind)
return LLVMBuildBitCast (ctx->builder, v, dtype, "");
if (LLVMGetTypeKind (dtype) == LLVMPointerTypeKind)
return LLVMBuildIntToPtr (ctx->builder, v, dtype, "");
if (LLVMGetTypeKind (stype) == LLVMPointerTypeKind)
return LLVMBuildPtrToInt (ctx->builder, v, dtype, "");
if (mono_arch_is_soft_float ()) {
if (stype == LLVMInt32Type () && dtype == LLVMFloatType ())
return LLVMBuildBitCast (ctx->builder, v, dtype, "");
if (stype == LLVMInt32Type () && dtype == LLVMDoubleType ())
return LLVMBuildBitCast (ctx->builder, LLVMBuildZExt (ctx->builder, v, LLVMInt64Type (), ""), dtype, "");
}
if (LLVMGetTypeKind (stype) == LLVMVectorTypeKind && LLVMGetTypeKind (dtype) == LLVMVectorTypeKind) {
if (mono_llvm_get_prim_size_bits (stype) == mono_llvm_get_prim_size_bits (dtype))
return LLVMBuildBitCast (ctx->builder, v, dtype, "");
}
mono_llvm_dump_value (v);
mono_llvm_dump_type (dtype);
printf ("\n");
g_assert_not_reached ();
return NULL;
} else {
return v;
}
}
static LLVMValueRef
convert (EmitContext *ctx, LLVMValueRef v, LLVMTypeRef dtype)
{
return convert_full (ctx, v, dtype, FALSE);
}
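/*
 * emit_memset:
 *
 *   Zero SIZE bytes at V using the llvm.memset intrinsic; the trailing i1
 * argument marks the store as non-volatile. The 'alignment' parameter is
 * currently unused.
 */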
static void
emit_memset (EmitContext *ctx, LLVMBuilderRef builder, LLVMValueRef v, LLVMValueRef size, int alignment)
{
LLVMValueRef args [5];
int aindex = 0;
args [aindex ++] = v;
args [aindex ++] = LLVMConstInt (LLVMInt8Type (), 0, FALSE);
args [aindex ++] = size;
args [aindex ++] = LLVMConstInt (LLVMInt1Type (), 0, FALSE);
LLVMBuildCall (builder, get_intrins (ctx, INTRINS_MEMSET), args, aindex, "");
}
/*
* emit_volatile_load:
*
* If vreg is volatile, emit a load from its address.
*/
static LLVMValueRef
emit_volatile_load (EmitContext *ctx, int vreg)
{
MonoType *t;
LLVMValueRef v;
// On arm64, we pass the rgctx in a callee saved register (x15), and
// llvm might keep the value in that register even though the register
// is marked as 'reserved' inside llvm.
v = mono_llvm_build_load (ctx->builder, ctx->addresses [vreg], "", TRUE);
t = ctx->vreg_cli_types [vreg];
if (t && !m_type_is_byref (t)) {
/*
* Might have to zero extend since llvm doesn't have
* unsigned types.
*/
if (t->type == MONO_TYPE_U1 || t->type == MONO_TYPE_U2 || t->type == MONO_TYPE_CHAR || t->type == MONO_TYPE_BOOLEAN)
v = LLVMBuildZExt (ctx->builder, v, LLVMInt32Type (), "");
else if (t->type == MONO_TYPE_I1 || t->type == MONO_TYPE_I2)
v = LLVMBuildSExt (ctx->builder, v, LLVMInt32Type (), "");
else if (t->type == MONO_TYPE_U8)
v = LLVMBuildZExt (ctx->builder, v, LLVMInt64Type (), "");
}
return v;
}
/*
* emit_volatile_store:
*
* If VREG is volatile, emit a store from its value to its address.
*/
static void
emit_volatile_store (EmitContext *ctx, int vreg)
{
MonoInst *var = get_vreg_to_inst (ctx->cfg, vreg);
if (var && var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) {
g_assert (ctx->addresses [vreg]);
#ifdef TARGET_WASM
/* Need volatile stores otherwise the compiler might move them */
mono_llvm_build_store (ctx->builder, convert (ctx, ctx->values [vreg], type_to_llvm_type (ctx, var->inst_vtype)), ctx->addresses [vreg], TRUE, LLVM_BARRIER_NONE);
#else
LLVMBuildStore (ctx->builder, convert (ctx, ctx->values [vreg], type_to_llvm_type (ctx, var->inst_vtype)), ctx->addresses [vreg]);
#endif
}
}
static LLVMTypeRef
sig_to_llvm_sig_no_cinfo (EmitContext *ctx, MonoMethodSignature *sig)
{
LLVMTypeRef ret_type;
LLVMTypeRef *param_types = NULL;
LLVMTypeRef res;
int i, pindex;
ret_type = type_to_llvm_type (ctx, sig->ret);
if (!ctx_ok (ctx))
return NULL;
param_types = g_new0 (LLVMTypeRef, (sig->param_count * 8) + 3);
pindex = 0;
if (sig->hasthis)
param_types [pindex ++] = ThisType ();
for (i = 0; i < sig->param_count; ++i)
param_types [pindex ++] = type_to_llvm_arg_type (ctx, sig->params [i]);
if (!ctx_ok (ctx)) {
g_free (param_types);
return NULL;
}
res = LLVMFunctionType (ret_type, param_types, pindex, FALSE);
g_free (param_types);
return res;
}
/*
* sig_to_llvm_sig_full:
*
* Return the LLVM signature corresponding to the mono signature SIG using the
* calling convention information in CINFO. Fill out the parameter mapping information in CINFO.
*/
static LLVMTypeRef
sig_to_llvm_sig_full (EmitContext *ctx, MonoMethodSignature *sig, LLVMCallInfo *cinfo)
{
LLVMTypeRef ret_type;
LLVMTypeRef *param_types = NULL;
LLVMTypeRef res;
int i, j, pindex, vret_arg_pindex = 0;
gboolean vretaddr = FALSE;
MonoType *rtype;
if (!cinfo)
return sig_to_llvm_sig_no_cinfo (ctx, sig);
ret_type = type_to_llvm_type (ctx, sig->ret);
if (!ctx_ok (ctx))
return NULL;
rtype = mini_get_underlying_type (sig->ret);
switch (cinfo->ret.storage) {
case LLVMArgVtypeInReg:
/* LLVM models this by returning an aggregate value */
if (cinfo->ret.pair_storage [0] == LLVMArgInIReg && cinfo->ret.pair_storage [1] == LLVMArgNone) {
LLVMTypeRef members [2];
members [0] = IntPtrType ();
ret_type = LLVMStructType (members, 1, FALSE);
} else if (cinfo->ret.pair_storage [0] == LLVMArgNone && cinfo->ret.pair_storage [1] == LLVMArgNone) {
/* Empty struct */
ret_type = LLVMVoidType ();
} else if (cinfo->ret.pair_storage [0] == LLVMArgInIReg && cinfo->ret.pair_storage [1] == LLVMArgInIReg) {
LLVMTypeRef members [2];
members [0] = IntPtrType ();
members [1] = IntPtrType ();
ret_type = LLVMStructType (members, 2, FALSE);
} else {
g_assert_not_reached ();
}
break;
case LLVMArgVtypeByVal:
/* Vtype returned normally by val */
break;
case LLVMArgVtypeAsScalar: {
int size = mono_class_value_size (mono_class_from_mono_type_internal (rtype), NULL);
/* LLVM models this by returning an int */
if (size < TARGET_SIZEOF_VOID_P) {
g_assert (cinfo->ret.nslots == 1);
ret_type = LLVMIntType (size * 8);
} else {
g_assert (cinfo->ret.nslots == 1 || cinfo->ret.nslots == 2);
ret_type = LLVMIntType (cinfo->ret.nslots * sizeof (target_mgreg_t) * 8);
}
break;
}
case LLVMArgAsIArgs:
ret_type = LLVMArrayType (IntPtrType (), cinfo->ret.nslots);
break;
case LLVMArgFpStruct: {
/* Vtype returned as a fp struct */
LLVMTypeRef members [16];
/* Have to create our own structure since we don't map fp structures to LLVM fp structures yet */
for (i = 0; i < cinfo->ret.nslots; ++i)
members [i] = cinfo->ret.esize == 8 ? LLVMDoubleType () : LLVMFloatType ();
ret_type = LLVMStructType (members, cinfo->ret.nslots, FALSE);
break;
}
case LLVMArgVtypeByRef:
/* Vtype returned using a hidden argument */
ret_type = LLVMVoidType ();
break;
case LLVMArgVtypeRetAddr:
case LLVMArgGsharedvtFixed:
case LLVMArgGsharedvtFixedVtype:
case LLVMArgGsharedvtVariable:
vretaddr = TRUE;
ret_type = LLVMVoidType ();
break;
case LLVMArgWasmVtypeAsScalar:
g_assert (cinfo->ret.esize);
ret_type = LLVMIntType (cinfo->ret.esize * 8);
break;
default:
break;
}
param_types = g_new0 (LLVMTypeRef, (sig->param_count * 8) + 3);
pindex = 0;
if (cinfo->ret.storage == LLVMArgVtypeByRef) {
/*
* Has to be the first argument because of the sret argument attribute
* FIXME: This might conflict with passing 'this' as the first argument, but
* this is only used on arm64 which has a dedicated struct return register.
*/
cinfo->vret_arg_pindex = pindex;
param_types [pindex] = type_to_llvm_arg_type (ctx, sig->ret);
if (!ctx_ok (ctx)) {
g_free (param_types);
return NULL;
}
param_types [pindex] = LLVMPointerType (param_types [pindex], 0);
pindex ++;
}
if (!ctx->llvm_only && cinfo->rgctx_arg) {
cinfo->rgctx_arg_pindex = pindex;
param_types [pindex] = ctx->module->ptr_type;
pindex ++;
}
if (cinfo->imt_arg) {
cinfo->imt_arg_pindex = pindex;
param_types [pindex] = ctx->module->ptr_type;
pindex ++;
}
if (vretaddr) {
/* Compute the index in the LLVM signature where the vret arg needs to be passed */
vret_arg_pindex = pindex;
if (cinfo->vret_arg_index == 1) {
/* Add the slots consumed by the first argument */
LLVMArgInfo *ainfo = &cinfo->args [0];
switch (ainfo->storage) {
case LLVMArgVtypeInReg:
for (j = 0; j < 2; ++j) {
if (ainfo->pair_storage [j] == LLVMArgInIReg)
vret_arg_pindex ++;
}
break;
default:
vret_arg_pindex ++;
}
}
cinfo->vret_arg_pindex = vret_arg_pindex;
}
if (vretaddr && vret_arg_pindex == pindex)
param_types [pindex ++] = IntPtrType ();
if (sig->hasthis) {
cinfo->this_arg_pindex = pindex;
param_types [pindex ++] = ThisType ();
cinfo->args [0].pindex = cinfo->this_arg_pindex;
}
if (vretaddr && vret_arg_pindex == pindex)
param_types [pindex ++] = IntPtrType ();
for (i = 0; i < sig->param_count; ++i) {
LLVMArgInfo *ainfo = &cinfo->args [i + sig->hasthis];
if (vretaddr && vret_arg_pindex == pindex)
param_types [pindex ++] = IntPtrType ();
ainfo->pindex = pindex;
switch (ainfo->storage) {
case LLVMArgVtypeInReg:
for (j = 0; j < 2; ++j) {
switch (ainfo->pair_storage [j]) {
case LLVMArgInIReg:
param_types [pindex ++] = LLVMIntType (TARGET_SIZEOF_VOID_P * 8);
break;
case LLVMArgNone:
break;
default:
g_assert_not_reached ();
}
}
break;
case LLVMArgVtypeByVal:
param_types [pindex] = type_to_llvm_arg_type (ctx, ainfo->type);
if (!ctx_ok (ctx))
break;
param_types [pindex] = LLVMPointerType (param_types [pindex], 0);
pindex ++;
break;
case LLVMArgAsIArgs:
if (ainfo->esize == 8)
param_types [pindex] = LLVMArrayType (LLVMInt64Type (), ainfo->nslots);
else
param_types [pindex] = LLVMArrayType (IntPtrType (), ainfo->nslots);
pindex ++;
break;
case LLVMArgVtypeAddr:
case LLVMArgVtypeByRef:
param_types [pindex] = type_to_llvm_arg_type (ctx, ainfo->type);
if (!ctx_ok (ctx))
break;
param_types [pindex] = LLVMPointerType (param_types [pindex], 0);
pindex ++;
break;
case LLVMArgAsFpArgs: {
int j;
/* Emit dummy fp arguments if needed so the rest is passed on the stack */
for (j = 0; j < ainfo->ndummy_fpargs; ++j)
param_types [pindex ++] = LLVMDoubleType ();
for (j = 0; j < ainfo->nslots; ++j)
param_types [pindex ++] = ainfo->esize == 8 ? LLVMDoubleType () : LLVMFloatType ();
break;
}
case LLVMArgVtypeAsScalar:
g_assert_not_reached ();
break;
case LLVMArgWasmVtypeAsScalar:
g_assert (ainfo->esize);
param_types [pindex ++] = LLVMIntType (ainfo->esize * 8);
break;
case LLVMArgGsharedvtFixed:
case LLVMArgGsharedvtFixedVtype:
param_types [pindex ++] = LLVMPointerType (type_to_llvm_arg_type (ctx, ainfo->type), 0);
break;
case LLVMArgGsharedvtVariable:
param_types [pindex ++] = LLVMPointerType (IntPtrType (), 0);
break;
default:
param_types [pindex ++] = type_to_llvm_arg_type (ctx, ainfo->type);
break;
}
}
if (!ctx_ok (ctx)) {
g_free (param_types);
return NULL;
}
if (vretaddr && vret_arg_pindex == pindex)
param_types [pindex ++] = IntPtrType ();
if (ctx->llvm_only && cinfo->rgctx_arg) {
/* Pass the rgctx as the last argument */
cinfo->rgctx_arg_pindex = pindex;
param_types [pindex] = ctx->module->ptr_type;
pindex ++;
} else if (ctx->llvm_only && cinfo->dummy_arg) {
/* Pass a dummy arg last */
cinfo->dummy_arg_pindex = pindex;
param_types [pindex] = ctx->module->ptr_type;
pindex ++;
}
res = LLVMFunctionType (ret_type, param_types, pindex, FALSE);
g_free (param_types);
return res;
}
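/*
 * Sketch of the parameter order computed above (assumed layout, for
 * exposition only): for a call with an rgctx arg, an imt arg, a 'this' arg
 * and a vtype returned through a vret address with vret_arg_index == 0, the
 * LLVM-level arguments end up roughly as
 *   [rgctx] [imt] [vret addr] [this] [param 0] ... [param N-1]
 * with the vret slot shifted after the first argument when
 * cinfo->vret_arg_index == 1, and the rgctx passed last in llvm-only mode.
 */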
static LLVMTypeRef
sig_to_llvm_sig (EmitContext *ctx, MonoMethodSignature *sig)
{
return sig_to_llvm_sig_full (ctx, sig, NULL);
}
/*
* LLVMFunctionType0:
*
* Create an LLVM function type from the arguments.
*/
static G_GNUC_UNUSED LLVMTypeRef
LLVMFunctionType0 (LLVMTypeRef ReturnType,
int IsVarArg)
{
return LLVMFunctionType (ReturnType, NULL, 0, IsVarArg);
}
/*
* LLVMFunctionType1:
*
* Create an LLVM function type from the arguments.
*/
static G_GNUC_UNUSED LLVMTypeRef
LLVMFunctionType1 (LLVMTypeRef ReturnType,
LLVMTypeRef ParamType1,
int IsVarArg)
{
LLVMTypeRef param_types [1];
param_types [0] = ParamType1;
return LLVMFunctionType (ReturnType, param_types, 1, IsVarArg);
}
/*
* LLVMFunctionType2:
*
* Create an LLVM function type from the arguments.
*/
static G_GNUC_UNUSED LLVMTypeRef
LLVMFunctionType2 (LLVMTypeRef ReturnType,
LLVMTypeRef ParamType1,
LLVMTypeRef ParamType2,
int IsVarArg)
{
LLVMTypeRef param_types [2];
param_types [0] = ParamType1;
param_types [1] = ParamType2;
return LLVMFunctionType (ReturnType, param_types, 2, IsVarArg);
}
/*
* LLVMFunctionType3:
*
* Create an LLVM function type from the arguments.
*/
static G_GNUC_UNUSED LLVMTypeRef
LLVMFunctionType3 (LLVMTypeRef ReturnType,
LLVMTypeRef ParamType1,
LLVMTypeRef ParamType2,
LLVMTypeRef ParamType3,
int IsVarArg)
{
LLVMTypeRef param_types [3];
param_types [0] = ParamType1;
param_types [1] = ParamType2;
param_types [2] = ParamType3;
return LLVMFunctionType (ReturnType, param_types, 3, IsVarArg);
}
static G_GNUC_UNUSED LLVMTypeRef
LLVMFunctionType4 (LLVMTypeRef ReturnType,
LLVMTypeRef ParamType1,
LLVMTypeRef ParamType2,
LLVMTypeRef ParamType3,
LLVMTypeRef ParamType4,
int IsVarArg)
{
LLVMTypeRef param_types [4];
param_types [0] = ParamType1;
param_types [1] = ParamType2;
param_types [2] = ParamType3;
param_types [3] = ParamType4;
return LLVMFunctionType (ReturnType, param_types, 4, IsVarArg);
}
static G_GNUC_UNUSED LLVMTypeRef
LLVMFunctionType5 (LLVMTypeRef ReturnType,
LLVMTypeRef ParamType1,
LLVMTypeRef ParamType2,
LLVMTypeRef ParamType3,
LLVMTypeRef ParamType4,
LLVMTypeRef ParamType5,
int IsVarArg)
{
LLVMTypeRef param_types [5];
param_types [0] = ParamType1;
param_types [1] = ParamType2;
param_types [2] = ParamType3;
param_types [3] = ParamType4;
param_types [4] = ParamType5;
return LLVMFunctionType (ReturnType, param_types, 5, IsVarArg);
}
/*
* create_builder:
*
* Create an LLVM builder and remember it so it can be freed later.
*/
static LLVMBuilderRef
create_builder (EmitContext *ctx)
{
LLVMBuilderRef builder = LLVMCreateBuilder ();
if (mono_use_fast_math)
mono_llvm_set_fast_math (builder);
ctx->builders = g_slist_prepend_mempool (ctx->cfg->mempool, ctx->builders, builder);
emit_default_dbg_loc (ctx, builder);
return builder;
}
static char*
get_aotconst_name (MonoJumpInfoType type, gconstpointer data, int got_offset)
{
char *name;
int len;
switch (type) {
case MONO_PATCH_INFO_JIT_ICALL_ID:
name = g_strdup_printf ("jit_icall_%s", mono_find_jit_icall_info ((MonoJitICallId)(gsize)data)->name);
break;
case MONO_PATCH_INFO_JIT_ICALL_ADDR_NOCALL:
name = g_strdup_printf ("jit_icall_addr_nocall_%s", mono_find_jit_icall_info ((MonoJitICallId)(gsize)data)->name);
break;
case MONO_PATCH_INFO_RGCTX_SLOT_INDEX: {
MonoJumpInfoRgctxEntry *entry = (MonoJumpInfoRgctxEntry*)data;
name = g_strdup_printf ("rgctx_slot_index_%s", mono_rgctx_info_type_to_str (entry->info_type));
break;
}
case MONO_PATCH_INFO_AOT_MODULE:
case MONO_PATCH_INFO_GC_SAFE_POINT_FLAG:
case MONO_PATCH_INFO_GC_CARD_TABLE_ADDR:
case MONO_PATCH_INFO_GC_NURSERY_START:
case MONO_PATCH_INFO_GC_NURSERY_BITS:
case MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG:
name = g_strdup_printf ("%s", mono_ji_type_to_string (type));
len = strlen (name);
for (int i = 0; i < len; ++i)
name [i] = tolower (name [i]);
break;
default:
name = g_strdup_printf ("%s_%d", mono_ji_type_to_string (type), got_offset);
len = strlen (name);
for (int i = 0; i < len; ++i)
name [i] = tolower (name [i]);
break;
}
return name;
}
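/*
 * Example outputs (illustrative, assuming typical patch data): a
 * MONO_PATCH_INFO_JIT_ICALL_ID entry for mono_threads_state_poll yields
 * "jit_icall_mono_threads_state_poll", while a generic patch at got offset
 * 42 yields something like "method_42", lowercased by the loop above.
 */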
static int
compute_aot_got_offset (MonoLLVMModule *module, MonoJumpInfo *ji, LLVMTypeRef llvm_type)
{
guint32 got_offset = mono_aot_get_got_offset (ji);
LLVMTypeRef lookup_type = (LLVMTypeRef) g_hash_table_lookup (module->got_idx_to_type, GINT_TO_POINTER (got_offset));
if (!lookup_type) {
lookup_type = llvm_type;
} else if (llvm_type != lookup_type) {
lookup_type = module->ptr_type;
} else {
return got_offset;
}
g_hash_table_insert (module->got_idx_to_type, GINT_TO_POINTER (got_offset), lookup_type);
return got_offset;
}
/* Allocate a GOT slot for TYPE/DATA, and emit IR to load it */
static LLVMValueRef
get_aotconst_module (MonoLLVMModule *module, LLVMBuilderRef builder, MonoJumpInfoType type, gconstpointer data, LLVMTypeRef llvm_type,
guint32 *out_got_offset, MonoJumpInfo **out_ji)
{
guint32 got_offset;
LLVMValueRef load;
MonoJumpInfo tmp_ji;
tmp_ji.type = type;
tmp_ji.data.target = data;
MonoJumpInfo *ji = mono_aot_patch_info_dup (&tmp_ji);
if (out_ji)
*out_ji = ji;
got_offset = compute_aot_got_offset (module, ji, llvm_type);
module->max_got_offset = MAX (module->max_got_offset, got_offset);
if (out_got_offset)
*out_got_offset = got_offset;
if (module->static_link && type == MONO_PATCH_INFO_GC_SAFE_POINT_FLAG) {
if (!module->gc_safe_point_flag_var) {
const char *symbol = "mono_polling_required";
module->gc_safe_point_flag_var = LLVMAddGlobal (module->lmodule, llvm_type, symbol);
LLVMSetLinkage (module->gc_safe_point_flag_var, LLVMExternalLinkage);
}
return module->gc_safe_point_flag_var;
}
if (module->static_link && type == MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG) {
if (!module->interrupt_flag_var) {
const char *symbol = "mono_thread_interruption_request_flag";
module->interrupt_flag_var = LLVMAddGlobal (module->lmodule, llvm_type, symbol);
LLVMSetLinkage (module->interrupt_flag_var, LLVMExternalLinkage);
}
return module->interrupt_flag_var;
}
LLVMValueRef const_var = g_hash_table_lookup (module->aotconst_vars, GINT_TO_POINTER (got_offset));
if (!const_var) {
LLVMTypeRef type = llvm_type;
// FIXME:
char *name = get_aotconst_name (ji->type, ji->data.target, got_offset);
char *symbol = g_strdup_printf ("aotconst_%s", name);
g_free (name);
LLVMValueRef v = LLVMAddGlobal (module->lmodule, type, symbol);
LLVMSetVisibility (v, LLVMHiddenVisibility);
LLVMSetLinkage (v, LLVMInternalLinkage);
LLVMSetInitializer (v, LLVMConstNull (type));
// FIXME:
LLVMSetAlignment (v, 8);
g_hash_table_insert (module->aotconst_vars, GINT_TO_POINTER (got_offset), v);
const_var = v;
}
load = LLVMBuildLoad (builder, const_var, "");
if (mono_aot_is_shared_got_offset (got_offset))
set_invariant_load_flag (load);
if (type == MONO_PATCH_INFO_LDSTR)
set_nonnull_load_flag (load);
load = LLVMBuildBitCast (builder, load, llvm_type, "");
return load;
}
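/*
 * Shape of the IR this emits (a sketch; exact syntax depends on the LLVM
 * version): for a slot named "class_3", a single module-level global
 *   @aotconst_class_3 = internal hidden global i8* null, align 8
 * is created on first use, and every use site loads through it:
 *   %v = load i8*, i8** @aotconst_class_3
 * The runtime later stores the resolved constant into the global via the
 * init_aotconst function emitted below.
 */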
static LLVMValueRef
get_aotconst (EmitContext *ctx, MonoJumpInfoType type, gconstpointer data, LLVMTypeRef llvm_type)
{
MonoCompile *cfg;
guint32 got_offset;
MonoJumpInfo *ji;
LLVMValueRef load;
cfg = ctx->cfg;
load = get_aotconst_module (ctx->module, ctx->builder, type, data, llvm_type, &got_offset, &ji);
ji->next = cfg->patch_info;
cfg->patch_info = ji;
/*
* If the got slot is shared, it means its initialized when the aot image is loaded, so we don't need to
* explicitly initialize it.
*/
if (!mono_aot_is_shared_got_offset (got_offset)) {
//mono_print_ji (ji);
//printf ("\n");
ctx->cfg->got_access_count ++;
}
return load;
}
static LLVMValueRef
get_dummy_aotconst (EmitContext *ctx, LLVMTypeRef llvm_type)
{
LLVMValueRef indexes [2];
LLVMValueRef got_entry_addr, load;
LLVMBuilderRef builder = ctx->builder;
indexes [0] = LLVMConstInt (LLVMInt32Type (), 0, FALSE);
indexes [1] = LLVMConstInt (LLVMInt32Type (), 0, FALSE);
got_entry_addr = LLVMBuildGEP (builder, ctx->module->dummy_got_var, indexes, 2, "");
load = LLVMBuildLoad (builder, got_entry_addr, "");
load = convert (ctx, load, llvm_type);
return load;
}
typedef struct {
MonoJumpInfo *ji;
MonoMethod *method;
LLVMValueRef load;
LLVMTypeRef type;
} CallSite;
static LLVMValueRef
get_callee_llvmonly (EmitContext *ctx, LLVMTypeRef llvm_sig, MonoJumpInfoType type, gconstpointer data)
{
LLVMValueRef callee;
char *callee_name = NULL;
if (ctx->module->static_link && ctx->module->assembly->image != mono_get_corlib ()) {
if (type == MONO_PATCH_INFO_JIT_ICALL_ID) {
MonoJitICallInfo * const info = mono_find_jit_icall_info ((MonoJitICallId)(gsize)data);
g_assert (info);
if (info->func != info->wrapper) {
type = MONO_PATCH_INFO_METHOD;
data = mono_icall_get_wrapper_method (info);
callee_name = mono_aot_get_mangled_method_name ((MonoMethod*)data);
}
} else if (type == MONO_PATCH_INFO_METHOD) {
MonoMethod *method = (MonoMethod*)data;
if (m_class_get_image (method->klass) != ctx->module->assembly->image && mono_aot_is_externally_callable (method))
callee_name = mono_aot_get_mangled_method_name (method);
}
}
if (!callee_name)
callee_name = mono_aot_get_direct_call_symbol (type, data);
if (callee_name) {
/* Directly callable */
// FIXME: Locking
callee = (LLVMValueRef)g_hash_table_lookup (ctx->module->direct_callables, callee_name);
if (!callee) {
callee = LLVMAddFunction (ctx->lmodule, callee_name, llvm_sig);
LLVMSetVisibility (callee, LLVMHiddenVisibility);
g_hash_table_insert (ctx->module->direct_callables, (char*)callee_name, callee);
} else {
/* LLVMTypeRefs are uniqued */
if (LLVMGetElementType (LLVMTypeOf (callee)) != llvm_sig)
return LLVMConstBitCast (callee, LLVMPointerType (llvm_sig, 0));
g_free (callee_name);
}
return callee;
}
/*
* Change references to icalls/pinvokes/jit icalls to their wrappers when in corlib, so
* they can be called directly.
*/
if (ctx->module->assembly->image == mono_get_corlib () && type == MONO_PATCH_INFO_JIT_ICALL_ID) {
MonoJitICallInfo * const info = mono_find_jit_icall_info ((MonoJitICallId)(gsize)data);
if (info->func != info->wrapper) {
type = MONO_PATCH_INFO_METHOD;
data = mono_icall_get_wrapper_method (info);
}
}
if (ctx->module->assembly->image == mono_get_corlib () && type == MONO_PATCH_INFO_METHOD) {
MonoMethod *method = (MonoMethod*)data;
if (m_method_is_icall (method) || m_method_is_pinvoke (method))
data = mono_marshal_get_native_wrapper (method, TRUE, TRUE);
}
/*
* Instead of emitting an indirect call through a got slot, emit a placeholder, and
* replace it with a direct call or an indirect call in mono_llvm_fixup_aot_module ()
* after all methods have been emitted.
*/
if (type == MONO_PATCH_INFO_METHOD) {
MonoMethod *method = (MonoMethod*)data;
if (m_class_get_image (method->klass)->assembly == ctx->module->assembly) {
MonoJumpInfo tmp_ji;
tmp_ji.type = type;
tmp_ji.data.target = method;
MonoJumpInfo *ji = mono_aot_patch_info_dup (&tmp_ji);
ji->next = ctx->cfg->patch_info;
ctx->cfg->patch_info = ji;
LLVMTypeRef llvm_type = LLVMPointerType (llvm_sig, 0);
ctx->cfg->got_access_count ++;
CallSite *info = g_new0 (CallSite, 1);
info->method = method;
info->ji = ji;
info->type = llvm_type;
/*
* Emit a dummy load to represent the callee, and either replace it with
* a reference to the llvm method for the callee, or from a load from the
* GOT.
*/
LLVMValueRef load = get_dummy_aotconst (ctx, llvm_type);
info->load = load;
g_ptr_array_add (ctx->callsite_list, info);
return load;
}
}
/*
* All other calls are made through the GOT.
*/
callee = get_aotconst (ctx, type, data, LLVMPointerType (llvm_sig, 0));
return callee;
}
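/*
 * Illustrative flow for the placeholder mechanism above: an intra-assembly
 * call initially loads its callee from dummy_got_var; once every method has
 * been emitted, mono_llvm_fixup_aot_module () walks callsite_list and
 * rewrites each recorded load either into a direct reference to the callee's
 * llvm function or into a load from the real GOT slot.
 */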
/*
* get_callee:
*
* Return an llvm value representing the callee given by the arguments.
*/
static LLVMValueRef
get_callee (EmitContext *ctx, LLVMTypeRef llvm_sig, MonoJumpInfoType type, gconstpointer data)
{
LLVMValueRef callee;
char *callee_name;
MonoJumpInfo *ji = NULL;
if (ctx->llvm_only)
return get_callee_llvmonly (ctx, llvm_sig, type, data);
callee_name = NULL;
/* Cross-assembly direct calls */
if (type == MONO_PATCH_INFO_METHOD) {
MonoMethod *cmethod = (MonoMethod*)data;
if (m_class_get_image (cmethod->klass) != ctx->module->assembly->image) {
MonoJumpInfo tmp_ji;
memset (&tmp_ji, 0, sizeof (MonoJumpInfo));
tmp_ji.type = type;
tmp_ji.data.target = data;
if (mono_aot_is_direct_callable (&tmp_ji)) {
/*
* This will add a reference to cmethod's image so it will
* be loaded when the current AOT image is loaded, so
* the GOT slots used by the init method code are initialized.
*/
tmp_ji.type = MONO_PATCH_INFO_IMAGE;
tmp_ji.data.image = m_class_get_image (cmethod->klass);
ji = mono_aot_patch_info_dup (&tmp_ji);
mono_aot_get_got_offset (ji);
callee_name = mono_aot_get_mangled_method_name (cmethod);
callee = (LLVMValueRef)g_hash_table_lookup (ctx->module->direct_callables, callee_name);
if (!callee) {
callee = LLVMAddFunction (ctx->lmodule, callee_name, llvm_sig);
LLVMSetLinkage (callee, LLVMExternalLinkage);
g_hash_table_insert (ctx->module->direct_callables, callee_name, callee);
} else {
/* LLVMTypeRefs are uniqued */
if (LLVMGetElementType (LLVMTypeOf (callee)) != llvm_sig)
callee = LLVMConstBitCast (callee, LLVMPointerType (llvm_sig, 0));
g_free (callee_name);
}
return callee;
}
}
}
callee_name = mono_aot_get_plt_symbol (type, data);
if (!callee_name)
return NULL;
if (ctx->cfg->compile_aot)
/* Add a patch so referenced wrappers can be compiled in full aot mode */
mono_add_patch_info (ctx->cfg, 0, type, data);
// FIXME: Locking
callee = (LLVMValueRef)g_hash_table_lookup (ctx->module->plt_entries, callee_name);
if (!callee) {
callee = LLVMAddFunction (ctx->lmodule, callee_name, llvm_sig);
LLVMSetVisibility (callee, LLVMHiddenVisibility);
g_hash_table_insert (ctx->module->plt_entries, (char*)callee_name, callee);
}
if (ctx->cfg->compile_aot) {
ji = g_new0 (MonoJumpInfo, 1);
ji->type = type;
ji->data.target = data;
g_hash_table_insert (ctx->module->plt_entries_ji, ji, callee);
}
return callee;
}
static LLVMValueRef
get_jit_callee (EmitContext *ctx, const char *name, LLVMTypeRef llvm_sig, MonoJumpInfoType type, gconstpointer data)
{
gpointer target;
// This won't be patched so compile the wrapper immediately
if (type == MONO_PATCH_INFO_JIT_ICALL_ID) {
MonoJitICallInfo * const info = mono_find_jit_icall_info ((MonoJitICallId)(gsize)data);
target = (gpointer)mono_icall_get_wrapper_full (info, TRUE);
} else {
target = resolve_patch (ctx->cfg, type, data);
}
LLVMValueRef tramp_var = LLVMAddGlobal (ctx->lmodule, LLVMPointerType (llvm_sig, 0), name);
LLVMSetInitializer (tramp_var, LLVMConstIntToPtr (LLVMConstInt (LLVMInt64Type (), (guint64)(size_t)target, FALSE), LLVMPointerType (llvm_sig, 0)));
LLVMSetLinkage (tramp_var, LLVMExternalLinkage);
LLVMValueRef callee = LLVMBuildLoad (ctx->builder, tramp_var, "");
return callee;
}
static int
get_handler_clause (MonoCompile *cfg, MonoBasicBlock *bb)
{
MonoMethodHeader *header = cfg->header;
MonoExceptionClause *clause;
int i;
/* Directly */
if (bb->region != -1 && MONO_BBLOCK_IS_IN_REGION (bb, MONO_REGION_TRY))
return (bb->region >> 8) - 1;
/* Indirectly */
for (i = 0; i < header->num_clauses; ++i) {
clause = &header->clauses [i];
if (MONO_OFFSET_IN_CLAUSE (clause, bb->real_offset) && clause->flags == MONO_EXCEPTION_CLAUSE_NONE)
return i;
}
return -1;
}
static MonoExceptionClause *
get_most_deep_clause (MonoCompile *cfg, EmitContext *ctx, MonoBasicBlock *bb)
{
if (bb == cfg->bb_init)
return NULL;
// Since they're sorted by nesting we just need
// the first one that the bb is a member of
for (int i = 0; i < cfg->header->num_clauses; i++) {
MonoExceptionClause *curr = &cfg->header->clauses [i];
if (MONO_OFFSET_IN_CLAUSE (curr, bb->real_offset))
return curr;
}
return NULL;
}
static void
set_metadata_flag (LLVMValueRef v, const char *flag_name)
{
LLVMValueRef md_arg;
int md_kind;
md_kind = LLVMGetMDKindID (flag_name, strlen (flag_name));
md_arg = LLVMMDString ("mono", 4);
LLVMSetMetadata (v, md_kind, LLVMMDNode (&md_arg, 1));
}
static void
set_nonnull_load_flag (LLVMValueRef v)
{
LLVMValueRef md_arg;
int md_kind;
const char *flag_name;
flag_name = "nonnull";
md_kind = LLVMGetMDKindID (flag_name, strlen (flag_name));
md_arg = LLVMMDString ("<index>", strlen ("<index>"));
LLVMSetMetadata (v, md_kind, LLVMMDNode (&md_arg, 1));
}
static void
set_nontemporal_flag (LLVMValueRef v)
{
LLVMValueRef md_arg;
int md_kind;
const char *flag_name;
// FIXME: Cache this
flag_name = "nontemporal";
md_kind = LLVMGetMDKindID (flag_name, strlen (flag_name));
md_arg = const_int32 (1);
LLVMSetMetadata (v, md_kind, LLVMMDNode (&md_arg, 1));
}
static void
set_invariant_load_flag (LLVMValueRef v)
{
LLVMValueRef md_arg;
int md_kind;
const char *flag_name;
// FIXME: Cache this
flag_name = "invariant.load";
md_kind = LLVMGetMDKindID (flag_name, strlen (flag_name));
md_arg = LLVMMDString ("<index>", strlen ("<index>"));
LLVMSetMetadata (v, md_kind, LLVMMDNode (&md_arg, 1));
}
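/*
 * In textual IR the flags set by these helpers look roughly like
 * (illustrative):
 *   %v = load i8*, i8** %addr, !invariant.load !0, !nonnull !1
 * letting LLVM hoist or CSE the load and assume a non-null result.
 */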
/*
* emit_call:
*
* Emit an LLVM call or invoke instruction depending on whether the call is inside
* a try region.
*/
static LLVMValueRef
emit_call (EmitContext *ctx, MonoBasicBlock *bb, LLVMBuilderRef *builder_ref, LLVMValueRef callee, LLVMValueRef *args, int pindex)
{
MonoCompile *cfg = ctx->cfg;
LLVMValueRef lcall = NULL;
LLVMBuilderRef builder = *builder_ref;
MonoExceptionClause *clause;
if (ctx->llvm_only) {
clause = bb ? get_most_deep_clause (cfg, ctx, bb) : NULL;
// FIXME: Use an invoke only for calls inside try-catch blocks
if (clause && (!cfg->deopt || ctx->has_catch)) {
/*
* Have to use an invoke instead of a call, branching to the
* handler bblock of the clause containing this bblock.
*/
intptr_t key = CLAUSE_END (clause);
LLVMBasicBlockRef lpad_bb = (LLVMBasicBlockRef)g_hash_table_lookup (ctx->exc_meta, (gconstpointer)key);
// FIXME: Find the one that has the lowest end bound for the right start address
// FIXME: Finally + nesting
if (lpad_bb) {
LLVMBasicBlockRef noex_bb = gen_bb (ctx, "CALL_NOEX_BB");
/* Use an invoke */
lcall = LLVMBuildInvoke (builder, callee, args, pindex, noex_bb, lpad_bb, "");
builder = ctx->builder = create_builder (ctx);
LLVMPositionBuilderAtEnd (ctx->builder, noex_bb);
ctx->bblocks [bb->block_num].end_bblock = noex_bb;
}
}
} else {
int clause_index = get_handler_clause (cfg, bb);
if (clause_index != -1) {
MonoMethodHeader *header = cfg->header;
MonoExceptionClause *ec = &header->clauses [clause_index];
MonoBasicBlock *tblock;
LLVMBasicBlockRef ex_bb, noex_bb;
/*
* Have to use an invoke instead of a call, branching to the
* handler bblock of the clause containing this bblock.
*/
g_assert (ec->flags == MONO_EXCEPTION_CLAUSE_NONE || ec->flags == MONO_EXCEPTION_CLAUSE_FINALLY || ec->flags == MONO_EXCEPTION_CLAUSE_FAULT);
tblock = cfg->cil_offset_to_bb [ec->handler_offset];
g_assert (tblock);
ctx->bblocks [tblock->block_num].invoke_target = TRUE;
ex_bb = get_bb (ctx, tblock);
noex_bb = gen_bb (ctx, "NOEX_BB");
/* Use an invoke */
lcall = LLVMBuildInvoke (builder, callee, args, pindex, noex_bb, ex_bb, "");
builder = ctx->builder = create_builder (ctx);
LLVMPositionBuilderAtEnd (ctx->builder, noex_bb);
ctx->bblocks [bb->block_num].end_bblock = noex_bb;
}
}
if (!lcall) {
lcall = LLVMBuildCall (builder, callee, args, pindex, "");
ctx->builder = builder;
}
if (builder_ref)
*builder_ref = ctx->builder;
return lcall;
}
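/*
 * Illustrative difference between the two forms emitted here:
 *   %r = call i32 %fn (i32 %a)                    ; no enclosing clause
 *   %r = invoke i32 %fn (i32 %a)
 *          to label %NOEX_BB unwind label %EX_BB  ; inside a clause
 * The invoke form adds a control-flow edge from the call to the handler
 * bblock, which is what lets exceptions unwind into the right clause.
 */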
static LLVMValueRef
emit_load (EmitContext *ctx, MonoBasicBlock *bb, LLVMBuilderRef *builder_ref, int size, LLVMValueRef addr, LLVMValueRef base, const char *name, gboolean is_faulting, gboolean is_volatile, BarrierKind barrier)
{
LLVMValueRef res;
/*
* We emit volatile loads for loads which can fault, because otherwise
* LLVM will generate invalid code when encountering a load from a
* NULL address.
*/
if (barrier != LLVM_BARRIER_NONE)
res = mono_llvm_build_atomic_load (*builder_ref, addr, name, is_volatile, size, barrier);
else
res = mono_llvm_build_load (*builder_ref, addr, name, is_volatile);
return res;
}
static void
emit_store_general (EmitContext *ctx, MonoBasicBlock *bb, LLVMBuilderRef *builder_ref, int size, LLVMValueRef value, LLVMValueRef addr, LLVMValueRef base, gboolean is_faulting, gboolean is_volatile, BarrierKind barrier)
{
if (barrier != LLVM_BARRIER_NONE)
mono_llvm_build_aligned_store (*builder_ref, value, addr, barrier, size);
else
mono_llvm_build_store (*builder_ref, value, addr, is_volatile, barrier);
}
static void
emit_store (EmitContext *ctx, MonoBasicBlock *bb, LLVMBuilderRef *builder_ref, int size, LLVMValueRef value, LLVMValueRef addr, LLVMValueRef base, gboolean is_faulting, gboolean is_volatile)
{
emit_store_general (ctx, bb, builder_ref, size, value, addr, base, is_faulting, is_volatile, LLVM_BARRIER_NONE);
}
/*
* emit_cond_system_exception:
*
* Emit code to throw the exception EXC_TYPE if the condition CMP is true.
* Might set the ctx exception.
*/
static void
emit_cond_system_exception (EmitContext *ctx, MonoBasicBlock *bb, const char *exc_type, LLVMValueRef cmp, gboolean force_explicit)
{
LLVMBasicBlockRef ex_bb, ex2_bb = NULL, noex_bb;
LLVMBuilderRef builder;
MonoClass *exc_class;
LLVMValueRef args [2];
LLVMValueRef callee;
gboolean no_pc = FALSE;
static MonoClass *exc_classes [MONO_EXC_INTRINS_NUM];
if (IS_TARGET_AMD64)
/* Some platforms don't require the pc argument */
no_pc = TRUE;
int exc_id = mini_exception_id_by_name (exc_type);
if (!exc_classes [exc_id])
exc_classes [exc_id] = mono_class_load_from_name (mono_get_corlib (), "System", exc_type);
exc_class = exc_classes [exc_id];
ex_bb = gen_bb (ctx, "EX_BB");
if (ctx->llvm_only)
ex2_bb = gen_bb (ctx, "EX2_BB");
noex_bb = gen_bb (ctx, "NOEX_BB");
LLVMValueRef branch = LLVMBuildCondBr (ctx->builder, cmp, ex_bb, noex_bb);
if (exc_id == MONO_EXC_NULL_REF && !ctx->cfg->disable_llvm_implicit_null_checks && !force_explicit) {
mono_llvm_set_implicit_branch (ctx->builder, branch);
}
/* Emit exception throwing code */
ctx->builder = builder = create_builder (ctx);
LLVMPositionBuilderAtEnd (builder, ex_bb);
if (ctx->cfg->llvm_only) {
LLVMBuildBr (builder, ex2_bb);
ctx->builder = builder = create_builder (ctx);
LLVMPositionBuilderAtEnd (ctx->builder, ex2_bb);
if (exc_id == MONO_EXC_NULL_REF) {
static LLVMTypeRef sig;
if (!sig)
sig = LLVMFunctionType0 (LLVMVoidType (), FALSE);
/* Can't cache this */
callee = get_callee (ctx, sig, MONO_PATCH_INFO_JIT_ICALL_ADDR, GUINT_TO_POINTER (MONO_JIT_ICALL_mini_llvmonly_throw_nullref_exception));
emit_call (ctx, bb, &builder, callee, NULL, 0);
} else {
static LLVMTypeRef sig;
if (!sig)
sig = LLVMFunctionType1 (LLVMVoidType (), LLVMInt32Type (), FALSE);
callee = get_callee (ctx, sig, MONO_PATCH_INFO_JIT_ICALL_ADDR, GUINT_TO_POINTER (MONO_JIT_ICALL_mini_llvmonly_throw_corlib_exception));
args [0] = LLVMConstInt (LLVMInt32Type (), m_class_get_type_token (exc_class) - MONO_TOKEN_TYPE_DEF, FALSE);
emit_call (ctx, bb, &builder, callee, args, 1);
}
LLVMBuildUnreachable (builder);
ctx->builder = builder = create_builder (ctx);
LLVMPositionBuilderAtEnd (ctx->builder, noex_bb);
ctx->bblocks [bb->block_num].end_bblock = noex_bb;
ctx->ex_index ++;
return;
}
callee = ctx->module->throw_corlib_exception;
if (!callee) {
LLVMTypeRef sig;
if (no_pc)
sig = LLVMFunctionType1 (LLVMVoidType (), LLVMInt32Type (), FALSE);
else
sig = LLVMFunctionType2 (LLVMVoidType (), LLVMInt32Type (), LLVMPointerType (LLVMInt8Type (), 0), FALSE);
const MonoJitICallId icall_id = MONO_JIT_ICALL_mono_llvm_throw_corlib_exception_abs_trampoline;
if (ctx->cfg->compile_aot) {
callee = get_callee (ctx, sig, MONO_PATCH_INFO_JIT_ICALL_ID, GUINT_TO_POINTER (icall_id));
} else {
/*
* Differences between the LLVM/non-LLVM throw corlib exception trampoline:
* - On x86, LLVM generated code doesn't push the arguments
* - The trampoline takes the throw address as an argument, not a pc offset.
*/
callee = get_jit_callee (ctx, "llvm_throw_corlib_exception_trampoline", sig, MONO_PATCH_INFO_JIT_ICALL_ID, GUINT_TO_POINTER (icall_id));
/*
* Make sure that ex_bb starts with the invoke, so the block address points to it, and not to the load
* added by get_jit_callee ().
*/
ex2_bb = gen_bb (ctx, "EX2_BB");
LLVMBuildBr (builder, ex2_bb);
ex_bb = ex2_bb;
ctx->builder = builder = create_builder (ctx);
LLVMPositionBuilderAtEnd (ctx->builder, ex2_bb);
}
}
args [0] = LLVMConstInt (LLVMInt32Type (), m_class_get_type_token (exc_class) - MONO_TOKEN_TYPE_DEF, FALSE);
/*
* The LLVM mono branch contains changes so a block address can be passed as an
* argument to a call.
*/
if (no_pc) {
emit_call (ctx, bb, &builder, callee, args, 1);
} else {
args [1] = LLVMBlockAddress (ctx->lmethod, ex_bb);
emit_call (ctx, bb, &builder, callee, args, 2);
}
LLVMBuildUnreachable (builder);
ctx->builder = builder = create_builder (ctx);
LLVMPositionBuilderAtEnd (ctx->builder, noex_bb);
ctx->bblocks [bb->block_num].end_bblock = noex_bb;
ctx->ex_index ++;
return;
}
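/*
 * Control flow generated by this helper (sketch):
 *     br i1 %cmp, label %EX_BB, label %NOEX_BB
 *   EX_BB:
 *     call void <throw_corlib_exception> (token [, pc])
 *     unreachable
 *   NOEX_BB:
 *     ; normal execution continues here
 */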
/*
* emit_args_to_vtype:
*
* Emit code to store the vtype passed in the scalar arguments ARGS to the address ADDRESS.
*/
static void
emit_args_to_vtype (EmitContext *ctx, LLVMBuilderRef builder, MonoType *t, LLVMValueRef address, LLVMArgInfo *ainfo, LLVMValueRef *args)
{
int j, size, nslots;
MonoClass *klass;
t = mini_get_underlying_type (t);
klass = mono_class_from_mono_type_internal (t);
size = mono_class_value_size (klass, NULL);
if (MONO_CLASS_IS_SIMD (ctx->cfg, klass))
address = LLVMBuildBitCast (ctx->builder, address, LLVMPointerType (LLVMInt8Type (), 0), "");
if (ainfo->storage == LLVMArgAsFpArgs)
nslots = ainfo->nslots;
else
nslots = 2;
for (j = 0; j < nslots; ++j) {
LLVMValueRef index [2], addr, daddr;
int part_size = size > TARGET_SIZEOF_VOID_P ? TARGET_SIZEOF_VOID_P : size;
LLVMTypeRef part_type;
while (part_size != 1 && part_size != 2 && part_size != 4 && part_size < 8)
part_size ++;
if (ainfo->pair_storage [j] == LLVMArgNone)
continue;
switch (ainfo->pair_storage [j]) {
case LLVMArgInIReg: {
part_type = LLVMIntType (part_size * 8);
if (MONO_CLASS_IS_SIMD (ctx->cfg, klass)) {
index [0] = LLVMConstInt (LLVMInt32Type (), j * TARGET_SIZEOF_VOID_P, FALSE);
addr = LLVMBuildGEP (builder, address, index, 1, "");
} else {
daddr = LLVMBuildBitCast (ctx->builder, address, LLVMPointerType (IntPtrType (), 0), "");
index [0] = LLVMConstInt (LLVMInt32Type (), j, FALSE);
addr = LLVMBuildGEP (builder, daddr, index, 1, "");
}
LLVMBuildStore (builder, convert (ctx, args [j], part_type), LLVMBuildBitCast (ctx->builder, addr, LLVMPointerType (part_type, 0), ""));
break;
}
case LLVMArgInFPReg: {
LLVMTypeRef arg_type;
if (ainfo->esize == 8)
arg_type = LLVMDoubleType ();
else
arg_type = LLVMFloatType ();
index [0] = LLVMConstInt (LLVMInt32Type (), j, FALSE);
daddr = LLVMBuildBitCast (ctx->builder, address, LLVMPointerType (arg_type, 0), "");
addr = LLVMBuildGEP (builder, daddr, index, 1, "");
LLVMBuildStore (builder, args [j], addr);
break;
}
case LLVMArgNone:
break;
default:
g_assert_not_reached ();
}
size -= TARGET_SIZEOF_VOID_P;
}
}
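/*
 * Worked example (assumes a 64-bit target, for exposition only): a 16-byte
 * vtype passed in two integer registers arrives as args [0] and args [1];
 * the loop above stores args [0] at ADDRESS+0 and args [1] at ADDRESS+8,
 * shrinking 'size' by TARGET_SIZEOF_VOID_P per slot, so a trailing 4-byte
 * remainder would be stored through an i32 pointer instead.
 */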
/*
* emit_vtype_to_args:
*
* Emit code to load a vtype at address ADDRESS into scalar arguments. Store the arguments
* into ARGS, and the number of arguments into NARGS.
*/
static void
emit_vtype_to_args (EmitContext *ctx, LLVMBuilderRef builder, MonoType *t, LLVMValueRef address, LLVMArgInfo *ainfo, LLVMValueRef *args, guint32 *nargs)
{
int pindex = 0;
int j, nslots;
LLVMTypeRef arg_type;
t = mini_get_underlying_type (t);
int32_t size = get_vtype_size_align (t).size;
if (MONO_CLASS_IS_SIMD (ctx->cfg, mono_class_from_mono_type_internal (t)))
address = LLVMBuildBitCast (ctx->builder, address, LLVMPointerType (LLVMInt8Type (), 0), "");
if (ainfo->storage == LLVMArgAsFpArgs)
nslots = ainfo->nslots;
else
nslots = 2;
for (j = 0; j < nslots; ++j) {
LLVMValueRef index [2], addr, daddr;
int partsize = size > TARGET_SIZEOF_VOID_P ? TARGET_SIZEOF_VOID_P : size;
if (ainfo->pair_storage [j] == LLVMArgNone)
continue;
switch (ainfo->pair_storage [j]) {
case LLVMArgInIReg:
if (MONO_CLASS_IS_SIMD (ctx->cfg, mono_class_from_mono_type_internal (t))) {
index [0] = LLVMConstInt (LLVMInt32Type (), j * TARGET_SIZEOF_VOID_P, FALSE);
addr = LLVMBuildGEP (builder, address, index, 1, "");
} else {
daddr = LLVMBuildBitCast (ctx->builder, address, LLVMPointerType (IntPtrType (), 0), "");
index [0] = LLVMConstInt (LLVMInt32Type (), j, FALSE);
addr = LLVMBuildGEP (builder, daddr, index, 1, "");
}
args [pindex ++] = convert (ctx, LLVMBuildLoad (builder, LLVMBuildBitCast (ctx->builder, addr, LLVMPointerType (LLVMIntType (partsize * 8), 0), ""), ""), IntPtrType ());
break;
case LLVMArgInFPReg:
if (ainfo->esize == 8)
arg_type = LLVMDoubleType ();
else
arg_type = LLVMFloatType ();
daddr = LLVMBuildBitCast (ctx->builder, address, LLVMPointerType (arg_type, 0), "");
index [0] = LLVMConstInt (LLVMInt32Type (), j, FALSE);
addr = LLVMBuildGEP (builder, daddr, index, 1, "");
args [pindex ++] = LLVMBuildLoad (builder, addr, "");
break;
case LLVMArgNone:
break;
default:
g_assert_not_reached ();
}
size -= TARGET_SIZEOF_VOID_P;
}
*nargs = pindex;
}
static LLVMValueRef
build_alloca_llvm_type_name (EmitContext *ctx, LLVMTypeRef t, int align, const char *name)
{
/*
* Have to place all allocas at the end of the entry bb, since otherwise they would
* get executed every time control reaches them.
*/
LLVMPositionBuilder (ctx->alloca_builder, get_bb (ctx, ctx->cfg->bb_entry), ctx->last_alloca);
ctx->last_alloca = mono_llvm_build_alloca (ctx->alloca_builder, t, NULL, align, name);
return ctx->last_alloca;
}
static LLVMValueRef
build_alloca_llvm_type (EmitContext *ctx, LLVMTypeRef t, int align)
{
return build_alloca_llvm_type_name (ctx, t, align, "");
}
static LLVMValueRef
build_named_alloca (EmitContext *ctx, MonoType *t, char const *name)
{
MonoClass *k = mono_class_from_mono_type_internal (t);
int align;
g_assert (!mini_is_gsharedvt_variable_type (t));
if (MONO_CLASS_IS_SIMD (ctx->cfg, k))
align = mono_class_value_size (k, NULL);
else
align = mono_class_min_align (k);
/* Sometimes align is not a power of 2; round it up to the next one */
while (mono_is_power_of_two (align) == -1)
align ++;
return build_alloca_llvm_type_name (ctx, type_to_llvm_type (ctx, t), align, name);
}
static LLVMValueRef
build_alloca (EmitContext *ctx, MonoType *t)
{
return build_named_alloca (ctx, t, "");
}
static LLVMValueRef
emit_gsharedvt_ldaddr (EmitContext *ctx, int vreg)
{
/*
* gsharedvt local.
* Compute the address of the local as gsharedvt_locals_var + gsharedvt_info_var->locals_offsets [idx].
*/
MonoCompile *cfg = ctx->cfg;
LLVMBuilderRef builder = ctx->builder;
LLVMValueRef offset, offset_var;
LLVMValueRef info_var = ctx->values [cfg->gsharedvt_info_var->dreg];
LLVMValueRef locals_var = ctx->values [cfg->gsharedvt_locals_var->dreg];
LLVMValueRef ptr;
char *name;
g_assert (info_var);
g_assert (locals_var);
int idx = cfg->gsharedvt_vreg_to_idx [vreg] - 1;
offset = LLVMConstInt (LLVMInt32Type (), MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * TARGET_SIZEOF_VOID_P), FALSE);
ptr = LLVMBuildAdd (builder, convert (ctx, info_var, IntPtrType ()), convert (ctx, offset, IntPtrType ()), "");
name = g_strdup_printf ("gsharedvt_local_%d_offset", vreg);
offset_var = LLVMBuildLoad (builder, convert (ctx, ptr, LLVMPointerType (LLVMInt32Type (), 0)), name);
return LLVMBuildAdd (builder, convert (ctx, locals_var, IntPtrType ()), convert (ctx, offset_var, IntPtrType ()), "");
}
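/*
 * In C terms the address computed above is roughly (a sketch):
 *   locals + ((gint32*)((guint8*)info
 *       + MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries)
 *       + idx * TARGET_SIZEOF_VOID_P)) [0]
 * i.e. the per-local offset is read out of the runtime info blob and added
 * to the base of the gsharedvt locals area.
 */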
/*
* Put the global into the 'llvm.used' array to prevent it from being optimized away.
*/
static void
mark_as_used (MonoLLVMModule *module, LLVMValueRef global)
{
if (!module->used)
module->used = g_ptr_array_sized_new (16);
g_ptr_array_add (module->used, global);
}
static void
emit_llvm_used (MonoLLVMModule *module)
{
LLVMModuleRef lmodule = module->lmodule;
LLVMTypeRef used_type;
LLVMValueRef used, *used_elem;
int i;
if (!module->used)
return;
used_type = LLVMArrayType (LLVMPointerType (LLVMInt8Type (), 0), module->used->len);
used = LLVMAddGlobal (lmodule, used_type, "llvm.used");
used_elem = g_new0 (LLVMValueRef, module->used->len);
for (i = 0; i < module->used->len; ++i)
used_elem [i] = LLVMConstBitCast ((LLVMValueRef)g_ptr_array_index (module->used, i), LLVMPointerType (LLVMInt8Type (), 0));
LLVMSetInitializer (used, LLVMConstArray (LLVMPointerType (LLVMInt8Type (), 0), used_elem, module->used->len));
LLVMSetLinkage (used, LLVMAppendingLinkage);
LLVMSetSection (used, "llvm.metadata");
}
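/*
 * The resulting global looks like (illustrative; exact syntax varies by
 * LLVM version):
 *   @llvm.used = appending global [2 x i8*]
 *       [i8* bitcast (void ()* @f1 to i8*), i8* bitcast (void ()* @f2 to i8*)],
 *       section "llvm.metadata"
 * which is the standard LLVM idiom for keeping symbols alive through opt/LTO.
 */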
/*
* emit_get_method:
*
* Emit a function mapping method indexes to their code
*/
static void
emit_get_method (MonoLLVMModule *module)
{
LLVMModuleRef lmodule = module->lmodule;
LLVMValueRef func, switch_ins, m;
LLVMBasicBlockRef entry_bb, fail_bb, bb, code_start_bb, code_end_bb, main_bb;
LLVMBasicBlockRef *bbs = NULL;
LLVMTypeRef rtype;
LLVMBuilderRef builder = LLVMCreateBuilder ();
LLVMValueRef table = NULL;
char *name;
int i;
gboolean emit_table = FALSE;
#ifdef TARGET_WASM
/*
* Emit a table of functions instead of a switch statement,
* it's very efficient on wasm. This might be usable on
* other platforms too.
*/
emit_table = TRUE;
#endif
rtype = LLVMPointerType (LLVMInt8Type (), 0);
int table_len = module->max_method_idx + 1;
if (emit_table) {
LLVMTypeRef table_type;
LLVMValueRef *table_elems;
char *table_name;
table_type = LLVMArrayType (rtype, table_len);
table_name = g_strdup_printf ("%s_method_table", module->global_prefix);
table = LLVMAddGlobal (lmodule, table_type, table_name);
table_elems = g_new0 (LLVMValueRef, table_len);
for (i = 0; i < table_len; ++i) {
m = (LLVMValueRef)g_hash_table_lookup (module->idx_to_lmethod, GINT_TO_POINTER (i));
if (m && !g_hash_table_lookup (module->no_method_table_lmethods, m))
table_elems [i] = LLVMBuildBitCast (builder, m, rtype, "");
else
table_elems [i] = LLVMConstNull (rtype);
}
LLVMSetInitializer (table, LLVMConstArray (LLVMPointerType (LLVMInt8Type (), 0), table_elems, table_len));
}
/*
* Emit a switch statement. Emitting a table of function addresses is smaller/faster,
* but generating code seems safer.
*/
func = LLVMAddFunction (lmodule, module->get_method_symbol, LLVMFunctionType1 (rtype, LLVMInt32Type (), FALSE));
LLVMSetLinkage (func, LLVMExternalLinkage);
LLVMSetVisibility (func, LLVMHiddenVisibility);
mono_llvm_add_func_attr (func, LLVM_ATTR_NO_UNWIND);
module->get_method = func;
entry_bb = LLVMAppendBasicBlock (func, "ENTRY");
/*
* Return llvm_code_start/llvm_code_end when called with -1/-2.
* Hopefully, the toolchain doesn't reorder these functions. If it does,
* then we will have to find another solution.
*/
name = g_strdup_printf ("BB_CODE_START");
code_start_bb = LLVMAppendBasicBlock (func, name);
g_free (name);
LLVMPositionBuilderAtEnd (builder, code_start_bb);
LLVMBuildRet (builder, LLVMBuildBitCast (builder, module->code_start, rtype, ""));
name = g_strdup_printf ("BB_CODE_END");
code_end_bb = LLVMAppendBasicBlock (func, name);
g_free (name);
LLVMPositionBuilderAtEnd (builder, code_end_bb);
LLVMBuildRet (builder, LLVMBuildBitCast (builder, module->code_end, rtype, ""));
if (emit_table) {
/*
* Because table_len is computed using the method indexes available for us, it
* might not include methods which are not compiled because of AOT profiles.
* So table_len can be smaller than info->nmethods. Add a bounds check because
* of that.
* switch (index) {
* case -1: return code_start;
* case -2: return code_end;
* default: return index < table_len ? method_table [index] : 0;
* }
*/
fail_bb = LLVMAppendBasicBlock (func, "FAIL");
LLVMPositionBuilderAtEnd (builder, fail_bb);
LLVMBuildRet (builder, LLVMBuildIntToPtr (builder, LLVMConstInt (LLVMInt32Type (), 0, FALSE), rtype, ""));
main_bb = LLVMAppendBasicBlock (func, "MAIN");
LLVMPositionBuilderAtEnd (builder, main_bb);
LLVMValueRef base = table;
LLVMValueRef indexes [2];
indexes [0] = LLVMConstInt (LLVMInt32Type (), 0, FALSE);
indexes [1] = LLVMGetParam (func, 0);
LLVMValueRef addr = LLVMBuildGEP (builder, base, indexes, 2, "");
LLVMValueRef res = mono_llvm_build_load (builder, addr, "", FALSE);
LLVMBuildRet (builder, res);
LLVMBasicBlockRef default_bb = LLVMAppendBasicBlock (func, "DEFAULT");
LLVMPositionBuilderAtEnd (builder, default_bb);
LLVMValueRef cmp = LLVMBuildICmp (builder, LLVMIntSGE, LLVMGetParam (func, 0), LLVMConstInt (LLVMInt32Type (), table_len, FALSE), "");
LLVMBuildCondBr (builder, cmp, fail_bb, main_bb);
LLVMPositionBuilderAtEnd (builder, entry_bb);
switch_ins = LLVMBuildSwitch (builder, LLVMGetParam (func, 0), default_bb, 0);
LLVMAddCase (switch_ins, LLVMConstInt (LLVMInt32Type (), -1, FALSE), code_start_bb);
LLVMAddCase (switch_ins, LLVMConstInt (LLVMInt32Type (), -2, FALSE), code_end_bb);
} else {
bbs = g_new0 (LLVMBasicBlockRef, module->max_method_idx + 1);
for (i = 0; i < module->max_method_idx + 1; ++i) {
name = g_strdup_printf ("BB_%d", i);
bb = LLVMAppendBasicBlock (func, name);
g_free (name);
bbs [i] = bb;
LLVMPositionBuilderAtEnd (builder, bb);
m = (LLVMValueRef)g_hash_table_lookup (module->idx_to_lmethod, GINT_TO_POINTER (i));
if (m && !g_hash_table_lookup (module->no_method_table_lmethods, m))
LLVMBuildRet (builder, LLVMBuildBitCast (builder, m, rtype, ""));
else
LLVMBuildRet (builder, LLVMConstNull (rtype));
}
fail_bb = LLVMAppendBasicBlock (func, "FAIL");
LLVMPositionBuilderAtEnd (builder, fail_bb);
LLVMBuildRet (builder, LLVMConstNull (rtype));
LLVMPositionBuilderAtEnd (builder, entry_bb);
switch_ins = LLVMBuildSwitch (builder, LLVMGetParam (func, 0), fail_bb, 0);
LLVMAddCase (switch_ins, LLVMConstInt (LLVMInt32Type (), -1, FALSE), code_start_bb);
LLVMAddCase (switch_ins, LLVMConstInt (LLVMInt32Type (), -2, FALSE), code_end_bb);
for (i = 0; i < module->max_method_idx + 1; ++i) {
LLVMAddCase (switch_ins, LLVMConstInt (LLVMInt32Type (), i, FALSE), bbs [i]);
}
}
mark_as_used (module, func);
LLVMDisposeBuilder (builder);
}
/*
* emit_get_unbox_tramp:
*
* Emit a function mapping method indexes to their unbox trampoline
*/
static void
emit_get_unbox_tramp (MonoLLVMModule *module)
{
LLVMModuleRef lmodule = module->lmodule;
LLVMValueRef func, switch_ins, m;
LLVMBasicBlockRef entry_bb, fail_bb, bb;
LLVMBasicBlockRef *bbs;
LLVMTypeRef rtype;
LLVMBuilderRef builder = LLVMCreateBuilder ();
char *name;
int i;
gboolean emit_table = FALSE;
/* Similar to emit_get_method () */
#ifndef TARGET_WATCHOS
emit_table = TRUE;
#endif
rtype = LLVMPointerType (LLVMInt8Type (), 0);
if (emit_table) {
// About 10% of methods have an unbox tramp, so emit a table of indexes for them
// that the runtime can search using a binary search
int len = 0;
for (i = 0; i < module->max_method_idx + 1; ++i) {
m = (LLVMValueRef)g_hash_table_lookup (module->idx_to_unbox_tramp, GINT_TO_POINTER (i));
if (m)
len ++;
}
LLVMTypeRef table_type, elemtype;
LLVMValueRef *table_elems;
LLVMValueRef table;
char *table_name;
int table_len;
int elemsize;
table_len = len;
elemsize = module->max_method_idx < 65000 ? 2 : 4;
// The index table
elemtype = elemsize == 2 ? LLVMInt16Type () : LLVMInt32Type ();
table_type = LLVMArrayType (elemtype, table_len);
table_name = g_strdup_printf ("%s_unbox_tramp_indexes", module->global_prefix);
table = LLVMAddGlobal (lmodule, table_type, table_name);
table_elems = g_new0 (LLVMValueRef, table_len);
int idx = 0;
for (i = 0; i < module->max_method_idx + 1; ++i) {
m = (LLVMValueRef)g_hash_table_lookup (module->idx_to_unbox_tramp, GINT_TO_POINTER (i));
if (m)
table_elems [idx ++] = LLVMConstInt (elemtype, i, FALSE);
}
LLVMSetInitializer (table, LLVMConstArray (elemtype, table_elems, table_len));
module->unbox_tramp_indexes = table;
// The trampoline table
elemtype = rtype;
table_type = LLVMArrayType (elemtype, table_len);
table_name = g_strdup_printf ("%s_unbox_trampolines", module->global_prefix);
table = LLVMAddGlobal (lmodule, table_type, table_name);
table_elems = g_new0 (LLVMValueRef, table_len);
idx = 0;
for (i = 0; i < module->max_method_idx + 1; ++i) {
m = (LLVMValueRef)g_hash_table_lookup (module->idx_to_unbox_tramp, GINT_TO_POINTER (i));
if (m)
table_elems [idx ++] = LLVMBuildBitCast (builder, m, rtype, "");
}
LLVMSetInitializer (table, LLVMConstArray (elemtype, table_elems, table_len));
module->unbox_trampolines = table;
module->unbox_tramp_num = table_len;
module->unbox_tramp_elemsize = elemsize;
return;
}
func = LLVMAddFunction (lmodule, module->get_unbox_tramp_symbol, LLVMFunctionType1 (rtype, LLVMInt32Type (), FALSE));
LLVMSetLinkage (func, LLVMExternalLinkage);
LLVMSetVisibility (func, LLVMHiddenVisibility);
mono_llvm_add_func_attr (func, LLVM_ATTR_NO_UNWIND);
module->get_unbox_tramp = func;
entry_bb = LLVMAppendBasicBlock (func, "ENTRY");
bbs = g_new0 (LLVMBasicBlockRef, module->max_method_idx + 1);
for (i = 0; i < module->max_method_idx + 1; ++i) {
m = (LLVMValueRef)g_hash_table_lookup (module->idx_to_unbox_tramp, GINT_TO_POINTER (i));
if (!m)
continue;
name = g_strdup_printf ("BB_%d", i);
bb = LLVMAppendBasicBlock (func, name);
g_free (name);
bbs [i] = bb;
LLVMPositionBuilderAtEnd (builder, bb);
LLVMBuildRet (builder, LLVMBuildBitCast (builder, m, rtype, ""));
}
fail_bb = LLVMAppendBasicBlock (func, "FAIL");
LLVMPositionBuilderAtEnd (builder, fail_bb);
LLVMBuildRet (builder, LLVMConstNull (rtype));
LLVMPositionBuilderAtEnd (builder, entry_bb);
switch_ins = LLVMBuildSwitch (builder, LLVMGetParam (func, 0), fail_bb, 0);
for (i = 0; i < module->max_method_idx + 1; ++i) {
m = (LLVMValueRef)g_hash_table_lookup (module->idx_to_unbox_tramp, GINT_TO_POINTER (i));
if (!m)
continue;
LLVMAddCase (switch_ins, LLVMConstInt (LLVMInt32Type (), i, FALSE), bbs [i]);
}
mark_as_used (module, func);
LLVMDisposeBuilder (builder);
}
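/*
 * Illustrative layout of the two parallel tables emitted above: if only
 * methods 3, 10 and 57 have unbox trampolines, then
 *   <prefix>_unbox_tramp_indexes = { 3, 10, 57 }             (i16/i32 entries)
 *   <prefix>_unbox_trampolines   = { &tramp3, &tramp10, &tramp57 }
 * and the runtime binary-searches the index table, then picks the trampoline
 * at the matching position in the second table.
 */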
/*
* emit_init_aotconst:
*
* Emit a function to initialize the aotconst_ variables. Called by the runtime.
*/
static void
emit_init_aotconst (MonoLLVMModule *module)
{
LLVMModuleRef lmodule = module->lmodule;
LLVMValueRef func;
LLVMBasicBlockRef entry_bb;
LLVMBuilderRef builder = LLVMCreateBuilder ();
func = LLVMAddFunction (lmodule, module->init_aotconst_symbol, LLVMFunctionType2 (LLVMVoidType (), LLVMInt32Type (), IntPtrType (), FALSE));
LLVMSetLinkage (func, LLVMExternalLinkage);
LLVMSetVisibility (func, LLVMHiddenVisibility);
mono_llvm_add_func_attr (func, LLVM_ATTR_NO_UNWIND);
module->init_aotconst_func = func;
entry_bb = LLVMAppendBasicBlock (func, "ENTRY");
LLVMPositionBuilderAtEnd (builder, entry_bb);
#ifdef TARGET_WASM
/* Emit a table of aotconst addresses instead of a switch statement to save space */
LLVMValueRef aotconsts;
LLVMTypeRef aotconst_addr_type = LLVMPointerType (module->ptr_type, 0);
int table_size = module->max_got_offset + 1;
LLVMTypeRef aotconst_arr_type = LLVMArrayType (aotconst_addr_type, table_size);
LLVMValueRef aotconst_dummy = LLVMAddGlobal (module->lmodule, module->ptr_type, "aotconst_dummy");
LLVMSetInitializer (aotconst_dummy, LLVMConstNull (module->ptr_type));
LLVMSetVisibility (aotconst_dummy, LLVMHiddenVisibility);
LLVMSetLinkage (aotconst_dummy, LLVMInternalLinkage);
aotconsts = LLVMAddGlobal (module->lmodule, aotconst_arr_type, "aotconsts");
LLVMValueRef *aotconst_init = g_new0 (LLVMValueRef, table_size);
for (int i = 0; i < table_size; ++i) {
LLVMValueRef aotconst = (LLVMValueRef)g_hash_table_lookup (module->aotconst_vars, GINT_TO_POINTER (i));
if (aotconst)
aotconst_init [i] = LLVMConstBitCast (aotconst, aotconst_addr_type);
else
aotconst_init [i] = LLVMConstBitCast (aotconst_dummy, aotconst_addr_type);
}
LLVMSetInitializer (aotconsts, LLVMConstArray (aotconst_addr_type, aotconst_init, table_size));
LLVMSetVisibility (aotconsts, LLVMHiddenVisibility);
LLVMSetLinkage (aotconsts, LLVMInternalLinkage);
LLVMBasicBlockRef exit_bb = LLVMAppendBasicBlock (func, "EXIT_BB");
LLVMBasicBlockRef main_bb = LLVMAppendBasicBlock (func, "BB");
LLVMValueRef cmp = LLVMBuildICmp (builder, LLVMIntSGE, LLVMGetParam (func, 0), LLVMConstInt (LLVMInt32Type (), table_size, FALSE), "");
LLVMBuildCondBr (builder, cmp, exit_bb, main_bb);
LLVMPositionBuilderAtEnd (builder, main_bb);
LLVMValueRef indexes [2];
indexes [0] = LLVMConstInt (LLVMInt32Type (), 0, FALSE);
indexes [1] = LLVMGetParam (func, 0);
LLVMValueRef aotconst_addr = LLVMBuildLoad (builder, LLVMBuildGEP (builder, aotconsts, indexes, 2, ""), "");
LLVMBuildStore (builder, LLVMBuildIntToPtr (builder, LLVMGetParam (func, 1), module->ptr_type, ""), aotconst_addr);
LLVMBuildBr (builder, exit_bb);
LLVMPositionBuilderAtEnd (builder, exit_bb);
LLVMBuildRetVoid (builder);
#else
LLVMValueRef switch_ins;
LLVMBasicBlockRef fail_bb, bb;
LLVMBasicBlockRef *bbs = NULL;
char *name;
bbs = g_new0 (LLVMBasicBlockRef, module->max_got_offset + 1);
for (int i = 0; i < module->max_got_offset + 1; ++i) {
name = g_strdup_printf ("BB_%d", i);
bb = LLVMAppendBasicBlock (func, name);
g_free (name);
bbs [i] = bb;
LLVMPositionBuilderAtEnd (builder, bb);
LLVMValueRef var = g_hash_table_lookup (module->aotconst_vars, GINT_TO_POINTER (i));
if (var) {
LLVMValueRef addr = LLVMBuildBitCast (builder, var, LLVMPointerType (IntPtrType (), 0), "");
LLVMBuildStore (builder, LLVMGetParam (func, 1), addr);
}
LLVMBuildRetVoid (builder);
}
fail_bb = LLVMAppendBasicBlock (func, "FAIL");
LLVMPositionBuilderAtEnd (builder, fail_bb);
LLVMBuildRetVoid (builder);
LLVMPositionBuilderAtEnd (builder, entry_bb);
switch_ins = LLVMBuildSwitch (builder, LLVMGetParam (func, 0), fail_bb, 0);
for (int i = 0; i < module->max_got_offset + 1; ++i)
LLVMAddCase (switch_ins, LLVMConstInt (LLVMInt32Type (), i, FALSE), bbs [i]);
#endif
LLVMDisposeBuilder (builder);
}
/* Add a function to mark the beginning of LLVM code */
static void
emit_llvm_code_start (MonoLLVMModule *module)
{
LLVMModuleRef lmodule = module->lmodule;
LLVMValueRef func;
LLVMBasicBlockRef entry_bb;
LLVMBuilderRef builder;
func = LLVMAddFunction (lmodule, "llvm_code_start", LLVMFunctionType (LLVMVoidType (), NULL, 0, FALSE));
LLVMSetLinkage (func, LLVMInternalLinkage);
mono_llvm_add_func_attr (func, LLVM_ATTR_NO_UNWIND);
module->code_start = func;
entry_bb = LLVMAppendBasicBlock (func, "ENTRY");
builder = LLVMCreateBuilder ();
LLVMPositionBuilderAtEnd (builder, entry_bb);
LLVMBuildRetVoid (builder);
LLVMDisposeBuilder (builder);
}
/*
* emit_init_func:
*
* Emit functions to initialize LLVM methods.
* These are wrappers around the mini_llvm_init_method () JIT icall.
* The wrappers handle adding the 'amodule' argument, loading the vtable from different locations, and they have
* a cold calling convention.
*/
static LLVMValueRef
emit_init_func (MonoLLVMModule *module, MonoAotInitSubtype subtype)
{
LLVMModuleRef lmodule = module->lmodule;
LLVMValueRef func, indexes [2], args [16], callee, info_var, index_var, inited_var, cmp;
LLVMBasicBlockRef entry_bb, inited_bb, notinited_bb;
LLVMBuilderRef builder;
LLVMTypeRef icall_sig;
const char *wrapper_name = mono_marshal_get_aot_init_wrapper_name (subtype);
LLVMTypeRef func_type = NULL;
LLVMTypeRef arg_type = module->ptr_type;
char *name = g_strdup_printf ("%s_%s", module->global_prefix, wrapper_name);
switch (subtype) {
case AOT_INIT_METHOD:
func_type = LLVMFunctionType1 (LLVMVoidType (), arg_type, FALSE);
break;
case AOT_INIT_METHOD_GSHARED_MRGCTX:
case AOT_INIT_METHOD_GSHARED_VTABLE:
func_type = LLVMFunctionType2 (LLVMVoidType (), arg_type, IntPtrType (), FALSE);
break;
case AOT_INIT_METHOD_GSHARED_THIS:
func_type = LLVMFunctionType2 (LLVMVoidType (), arg_type, ObjRefType (), FALSE);
break;
default:
g_assert_not_reached ();
}
func = LLVMAddFunction (lmodule, name, func_type);
info_var = LLVMGetParam (func, 0);
LLVMSetLinkage (func, LLVMInternalLinkage);
mono_llvm_add_func_attr (func, LLVM_ATTR_NO_INLINE);
set_cold_cconv (func);
entry_bb = LLVMAppendBasicBlock (func, "ENTRY");
builder = LLVMCreateBuilder ();
LLVMPositionBuilderAtEnd (builder, entry_bb);
/* Load method_index which is emitted at the start of the method info */
indexes [0] = const_int32 (0);
indexes [1] = const_int32 (0);
// FIXME: Make sure it's aligned
index_var = LLVMBuildLoad (builder, LLVMBuildGEP (builder, LLVMBuildBitCast (builder, info_var, LLVMPointerType (LLVMInt32Type (), 0), ""), indexes, 1, ""), "method_index");
/* Check for is_inited here as well, since this can be called from JITted code which might not check it */
indexes [0] = const_int32 (0);
indexes [1] = index_var;
inited_var = LLVMBuildLoad (builder, LLVMBuildGEP (builder, module->inited_var, indexes, 2, ""), "is_inited");
cmp = LLVMBuildICmp (builder, LLVMIntEQ, inited_var, LLVMConstInt (LLVMTypeOf (inited_var), 0, FALSE), "");
inited_bb = LLVMAppendBasicBlock (func, "INITED");
notinited_bb = LLVMAppendBasicBlock (func, "NOT_INITED");
LLVMBuildCondBr (builder, cmp, notinited_bb, inited_bb);
LLVMPositionBuilderAtEnd (builder, notinited_bb);
LLVMValueRef amodule_var = get_aotconst_module (module, builder, MONO_PATCH_INFO_AOT_MODULE, NULL, LLVMPointerType (IntPtrType (), 0), NULL, NULL);
args [0] = LLVMBuildPtrToInt (builder, module->info_var, IntPtrType (), "");
args [1] = LLVMBuildPtrToInt (builder, amodule_var, IntPtrType (), "");
args [2] = info_var;
switch (subtype) {
case AOT_INIT_METHOD:
args [3] = LLVMConstNull (IntPtrType ());
break;
case AOT_INIT_METHOD_GSHARED_VTABLE:
args [3] = LLVMGetParam (func, 1);
break;
case AOT_INIT_METHOD_GSHARED_THIS:
/* Load this->vtable */
args [3] = LLVMBuildBitCast (builder, LLVMGetParam (func, 1), LLVMPointerType (IntPtrType (), 0), "");
indexes [0] = const_int32 (MONO_STRUCT_OFFSET (MonoObject, vtable) / SIZEOF_VOID_P);
args [3] = LLVMBuildLoad (builder, LLVMBuildGEP (builder, args [3], indexes, 1, ""), "vtable");
break;
case AOT_INIT_METHOD_GSHARED_MRGCTX:
/* Load mrgctx->vtable */
args [3] = LLVMBuildIntToPtr (builder, LLVMGetParam (func, 1), LLVMPointerType (IntPtrType (), 0), "");
indexes [0] = const_int32 (MONO_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable) / SIZEOF_VOID_P);
args [3] = LLVMBuildLoad (builder, LLVMBuildGEP (builder, args [3], indexes, 1, ""), "vtable");
break;
default:
g_assert_not_reached ();
break;
}
/* Call the mini_llvm_init_method JIT icall */
icall_sig = LLVMFunctionType4 (LLVMVoidType (), IntPtrType (), IntPtrType (), arg_type, IntPtrType (), FALSE);
callee = get_aotconst_module (module, builder, MONO_PATCH_INFO_JIT_ICALL_ID, GINT_TO_POINTER (MONO_JIT_ICALL_mini_llvm_init_method), LLVMPointerType (icall_sig, 0), NULL, NULL);
LLVMBuildCall (builder, callee, args, LLVMCountParamTypes (icall_sig), "");
/*
* Set the inited flag
* This is already done by the LLVM methods themselves, but it's needed by JITted methods.
*/
indexes [0] = const_int32 (0);
indexes [1] = index_var;
LLVMBuildStore (builder, LLVMConstInt (LLVMInt8Type (), 1, FALSE), LLVMBuildGEP (builder, module->inited_var, indexes, 2, ""));
LLVMBuildBr (builder, inited_bb);
LLVMPositionBuilderAtEnd (builder, inited_bb);
LLVMBuildRetVoid (builder);
LLVMVerifyFunction (func, LLVMAbortProcessAction);
LLVMDisposeBuilder (builder);
g_free (name);
return func;
}
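/*
 * Pseudo-code of the generated wrapper (a sketch of the
 * AOT_INIT_METHOD_GSHARED_THIS variant; names are illustrative):
 *   void init_method_gshared_this (gpointer info, MonoObject *this_obj) {
 *       guint32 idx = *(guint32*)info;
 *       if (!inited [idx]) {
 *           mini_llvm_init_method (module_info, amodule, info, this_obj->vtable);
 *           inited [idx] = 1;
 *       }
 *   }
 */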
/* Emit a wrapper around the parameterless JIT icall ICALL_ID with a cold calling convention */
static LLVMValueRef
emit_icall_cold_wrapper (MonoLLVMModule *module, LLVMModuleRef lmodule, MonoJitICallId icall_id, gboolean aot)
{
LLVMValueRef func, callee;
LLVMBasicBlockRef entry_bb;
LLVMBuilderRef builder;
LLVMTypeRef sig;
char *name;
name = g_strdup_printf ("%s_icall_cold_wrapper_%d", module->global_prefix, icall_id);
func = LLVMAddFunction (lmodule, name, LLVMFunctionType (LLVMVoidType (), NULL, 0, FALSE));
sig = LLVMFunctionType (LLVMVoidType (), NULL, 0, FALSE);
LLVMSetLinkage (func, LLVMInternalLinkage);
mono_llvm_add_func_attr (func, LLVM_ATTR_NO_INLINE);
set_cold_cconv (func);
entry_bb = LLVMAppendBasicBlock (func, "ENTRY");
builder = LLVMCreateBuilder ();
LLVMPositionBuilderAtEnd (builder, entry_bb);
if (aot) {
callee = get_aotconst_module (module, builder, MONO_PATCH_INFO_JIT_ICALL_ID, GUINT_TO_POINTER (icall_id), LLVMPointerType (sig, 0), NULL, NULL);
} else {
MonoJitICallInfo * const info = mono_find_jit_icall_info (icall_id);
gpointer target = (gpointer)mono_icall_get_wrapper_full (info, TRUE);
LLVMValueRef tramp_var = LLVMAddGlobal (lmodule, LLVMPointerType (sig, 0), name);
LLVMSetInitializer (tramp_var, LLVMConstIntToPtr (LLVMConstInt (LLVMInt64Type (), (guint64)(size_t)target, FALSE), LLVMPointerType (sig, 0)));
LLVMSetLinkage (tramp_var, LLVMExternalLinkage);
callee = LLVMBuildLoad (builder, tramp_var, "");
}
LLVMBuildCall (builder, callee, NULL, 0, "");
LLVMBuildRetVoid (builder);
LLVMVerifyFunction(func, LLVMAbortProcessAction);
LLVMDisposeBuilder (builder);
return func;
}
/*
* Emit wrappers around the C icalls used to initialize llvm methods, to
* make the calling code smaller and to enable usage of the llvm
* cold calling convention.
*/
static void
emit_init_funcs (MonoLLVMModule *module)
{
for (int i = 0; i < AOT_INIT_METHOD_NUM; ++i)
module->init_methods [i] = emit_init_func (module, i);
}
static LLVMValueRef
get_init_func (MonoLLVMModule *module, MonoAotInitSubtype subtype)
{
return module->init_methods [subtype];
}
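/*
 * emit_gc_safepoint_poll:
 *
 * Emit the gc.safepoint_poll () function used by LLVM's safepoint insertion pass.
 * It loads the global polling flag and, when the flag is set, calls
 * mono_threads_state_poll () through a cold-cconv wrapper; the branch weights
 * mark the no-poll path as the hot one.
 */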
static void
emit_gc_safepoint_poll (MonoLLVMModule *module, LLVMModuleRef lmodule, MonoCompile *cfg)
{
gboolean is_aot = cfg == NULL || cfg->compile_aot;
LLVMValueRef func = mono_llvm_get_or_insert_gc_safepoint_poll (lmodule);
mono_llvm_add_func_attr (func, LLVM_ATTR_NO_UNWIND);
if (is_aot) {
#if TARGET_WIN32
if (module->static_link) {
LLVMSetLinkage (func, LLVMInternalLinkage);
/* Prevent it from being optimized away, leading to asserts inside 'opt' */
mark_as_used (module, func);
} else {
LLVMSetLinkage (func, LLVMWeakODRLinkage);
}
#else
LLVMSetLinkage (func, LLVMWeakODRLinkage);
#endif
} else {
mono_llvm_add_func_attr (func, LLVM_ATTR_OPTIMIZE_NONE); // no need to waste time here, the function is already optimized and will be inlined.
mono_llvm_add_func_attr (func, LLVM_ATTR_NO_INLINE); // optnone attribute requires noinline (but it will be inlined anyway)
if (!module->gc_poll_cold_wrapper_compiled) {
ERROR_DECL (error);
/* Compiling a method here is a bit ugly, but it works */
MonoMethod *wrapper = mono_marshal_get_llvm_func_wrapper (LLVM_FUNC_WRAPPER_GC_POLL);
module->gc_poll_cold_wrapper_compiled = mono_jit_compile_method (wrapper, error);
mono_error_assert_ok (error);
}
}
LLVMBasicBlockRef entry_bb = LLVMAppendBasicBlock (func, "gc.safepoint_poll.entry");
LLVMBasicBlockRef poll_bb = LLVMAppendBasicBlock (func, "gc.safepoint_poll.poll");
LLVMBasicBlockRef exit_bb = LLVMAppendBasicBlock (func, "gc.safepoint_poll.exit");
LLVMTypeRef ptr_type = LLVMPointerType (IntPtrType (), 0);
LLVMBuilderRef builder = LLVMCreateBuilder ();
/* entry: */
LLVMPositionBuilderAtEnd (builder, entry_bb);
LLVMValueRef poll_val_ptr;
if (is_aot) {
poll_val_ptr = get_aotconst_module (module, builder, MONO_PATCH_INFO_GC_SAFE_POINT_FLAG, NULL, ptr_type, NULL, NULL);
} else {
LLVMValueRef poll_val_int = LLVMConstInt (IntPtrType (), (guint64) &mono_polling_required, FALSE);
poll_val_ptr = LLVMBuildIntToPtr (builder, poll_val_int, ptr_type, "");
}
LLVMValueRef poll_val_ptr_load = LLVMBuildLoad (builder, poll_val_ptr, ""); // probably needs to be volatile
LLVMValueRef poll_val = LLVMBuildPtrToInt (builder, poll_val_ptr_load, IntPtrType (), "");
LLVMValueRef poll_val_zero = LLVMConstNull (LLVMTypeOf (poll_val));
LLVMValueRef cmp = LLVMBuildICmp (builder, LLVMIntEQ, poll_val, poll_val_zero, "");
mono_llvm_build_weighted_branch (builder, cmp, exit_bb, poll_bb, 1000 /* weight for exit_bb */, 1 /* weight for poll_bb */);
/* poll: */
LLVMPositionBuilderAtEnd (builder, poll_bb);
LLVMValueRef call;
if (is_aot) {
LLVMValueRef icall_wrapper = emit_icall_cold_wrapper (module, lmodule, MONO_JIT_ICALL_mono_threads_state_poll, TRUE);
module->gc_poll_cold_wrapper = icall_wrapper;
call = LLVMBuildCall (builder, icall_wrapper, NULL, 0, "");
} else {
// In JIT mode we have to emit the @gc.safepoint_poll function for each method (module).
// This function calls gc_poll_cold_wrapper_compiled via a global variable.
// @gc.safepoint_poll will be inlined and can be deleted after the -place-safepoints pass.
LLVMTypeRef poll_sig = LLVMFunctionType0 (LLVMVoidType (), FALSE);
LLVMTypeRef poll_sig_ptr = LLVMPointerType (poll_sig, 0);
gpointer target = resolve_patch (cfg, MONO_PATCH_INFO_ABS, module->gc_poll_cold_wrapper_compiled);
LLVMValueRef tramp_var = LLVMAddGlobal (lmodule, poll_sig_ptr, "mono_threads_state_poll");
LLVMValueRef target_val = LLVMConstInt (LLVMInt64Type (), (guint64) target, FALSE);
LLVMSetInitializer (tramp_var, LLVMConstIntToPtr (target_val, poll_sig_ptr));
LLVMSetLinkage (tramp_var, LLVMExternalLinkage);
LLVMValueRef callee = LLVMBuildLoad (builder, tramp_var, "");
call = LLVMBuildCall (builder, callee, NULL, 0, "");
}
set_call_cold_cconv (call);
LLVMBuildBr (builder, exit_bb);
/* exit: */
LLVMPositionBuilderAtEnd (builder, exit_bb);
LLVMBuildRetVoid (builder);
LLVMDisposeBuilder (builder);
}
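/*
 * emit_llvm_code_end:
 *
 * Emit an empty llvm_code_end () marker function and store it into module->code_end.
 */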
static void
emit_llvm_code_end (MonoLLVMModule *module)
{
LLVMModuleRef lmodule = module->lmodule;
LLVMValueRef func;
LLVMBasicBlockRef entry_bb;
LLVMBuilderRef builder;
func = LLVMAddFunction (lmodule, "llvm_code_end", LLVMFunctionType (LLVMVoidType (), NULL, 0, FALSE));
LLVMSetLinkage (func, LLVMInternalLinkage);
mono_llvm_add_func_attr (func, LLVM_ATTR_NO_UNWIND);
module->code_end = func;
entry_bb = LLVMAppendBasicBlock (func, "ENTRY");
builder = LLVMCreateBuilder ();
LLVMPositionBuilderAtEnd (builder, entry_bb);
LLVMBuildRetVoid (builder);
LLVMDisposeBuilder (builder);
}
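/*
 * emit_div_check:
 *
 * For division/remainder opcodes, emit an explicit check throwing
 * DivideByZeroException when the divisor is zero and, for the signed variants,
 * OverflowException for MIN_VALUE / -1, since the LLVM division instructions
 * have undefined behavior in these cases.
 */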
static void
emit_div_check (EmitContext *ctx, LLVMBuilderRef builder, MonoBasicBlock *bb, MonoInst *ins, LLVMValueRef lhs, LLVMValueRef rhs)
{
gboolean need_div_check = ctx->cfg->backend->need_div_check;
if (bb->region)
/* LLVM doesn't know that these can throw an exception since they are not called through an intrinsic */
need_div_check = TRUE;
if (!need_div_check)
return;
switch (ins->opcode) {
case OP_IDIV:
case OP_LDIV:
case OP_IREM:
case OP_LREM:
case OP_IDIV_UN:
case OP_LDIV_UN:
case OP_IREM_UN:
case OP_LREM_UN:
case OP_IDIV_IMM:
case OP_LDIV_IMM:
case OP_IREM_IMM:
case OP_LREM_IMM:
case OP_IDIV_UN_IMM:
case OP_LDIV_UN_IMM:
case OP_IREM_UN_IMM:
case OP_LREM_UN_IMM: {
LLVMValueRef cmp;
gboolean is_signed = (ins->opcode == OP_IDIV || ins->opcode == OP_LDIV || ins->opcode == OP_IREM || ins->opcode == OP_LREM ||
ins->opcode == OP_IDIV_IMM || ins->opcode == OP_LDIV_IMM || ins->opcode == OP_IREM_IMM || ins->opcode == OP_LREM_IMM);
cmp = LLVMBuildICmp (builder, LLVMIntEQ, rhs, LLVMConstInt (LLVMTypeOf (rhs), 0, FALSE), "");
emit_cond_system_exception (ctx, bb, "DivideByZeroException", cmp, FALSE);
if (!ctx_ok (ctx))
break;
builder = ctx->builder;
/* b == -1 && a == 0x80000000 */
if (is_signed) {
LLVMValueRef c = (LLVMTypeOf (lhs) == LLVMInt32Type ()) ? LLVMConstInt (LLVMTypeOf (lhs), 0x80000000, FALSE) : LLVMConstInt (LLVMTypeOf (lhs), 0x8000000000000000LL, FALSE);
LLVMValueRef cond1 = LLVMBuildICmp (builder, LLVMIntEQ, rhs, LLVMConstInt (LLVMTypeOf (rhs), -1, FALSE), "");
LLVMValueRef cond2 = LLVMBuildICmp (builder, LLVMIntEQ, lhs, c, "");
cmp = LLVMBuildICmp (builder, LLVMIntEQ, LLVMBuildAnd (builder, cond1, cond2, ""), LLVMConstInt (LLVMInt1Type (), 1, FALSE), "");
emit_cond_system_exception (ctx, bb, "OverflowException", cmp, FALSE);
if (!ctx_ok (ctx))
break;
builder = ctx->builder;
}
break;
}
default:
break;
}
}
/*
* emit_method_init:
*
* Emit code to initialize the GOT slots used by the method.
*/
static void
emit_method_init (EmitContext *ctx)
{
LLVMValueRef indexes [16], args [16];
LLVMValueRef inited_var, cmp, call;
LLVMBasicBlockRef inited_bb, notinited_bb;
LLVMBuilderRef builder = ctx->builder;
MonoCompile *cfg = ctx->cfg;
MonoAotInitSubtype subtype;
ctx->module->max_inited_idx = MAX (ctx->module->max_inited_idx, cfg->method_index);
indexes [0] = const_int32 (0);
indexes [1] = const_int32 (cfg->method_index);
inited_var = LLVMBuildLoad (builder, LLVMBuildGEP (builder, ctx->module->inited_var, indexes, 2, ""), "is_inited");
args [0] = inited_var;
args [1] = LLVMConstInt (LLVMInt8Type (), 1, FALSE);
inited_var = LLVMBuildCall (ctx->builder, get_intrins (ctx, INTRINS_EXPECT_I8), args, 2, "");
cmp = LLVMBuildICmp (builder, LLVMIntEQ, inited_var, LLVMConstInt (LLVMTypeOf (inited_var), 0, FALSE), "");
inited_bb = ctx->inited_bb;
notinited_bb = gen_bb (ctx, "NOTINITED_BB");
ctx->cfg->llvmonly_init_cond = LLVMBuildCondBr (ctx->builder, cmp, notinited_bb, inited_bb);
builder = ctx->builder = create_builder (ctx);
LLVMPositionBuilderAtEnd (ctx->builder, notinited_bb);
LLVMTypeRef type = LLVMArrayType (LLVMInt8Type (), 0);
char *symbol = g_strdup_printf ("info_dummy_%s", cfg->llvm_method_name);
LLVMValueRef info_var = LLVMAddGlobal (ctx->lmodule, type, symbol);
g_free (symbol);
cfg->llvm_dummy_info_var = info_var;
int nargs = 0;
args [nargs ++] = convert (ctx, info_var, ctx->module->ptr_type);
switch (cfg->rgctx_access) {
case MONO_RGCTX_ACCESS_MRGCTX:
if (ctx->rgctx_arg) {
args [nargs ++] = convert (ctx, ctx->rgctx_arg, IntPtrType ());
subtype = AOT_INIT_METHOD_GSHARED_MRGCTX;
} else {
g_assert (ctx->this_arg);
args [nargs ++] = convert (ctx, ctx->this_arg, ObjRefType ());
subtype = AOT_INIT_METHOD_GSHARED_THIS;
}
break;
case MONO_RGCTX_ACCESS_VTABLE:
args [nargs ++] = convert (ctx, ctx->rgctx_arg, IntPtrType ());
subtype = AOT_INIT_METHOD_GSHARED_VTABLE;
break;
case MONO_RGCTX_ACCESS_THIS:
args [nargs ++] = convert (ctx, ctx->this_arg, ObjRefType ());
subtype = AOT_INIT_METHOD_GSHARED_THIS;
break;
case MONO_RGCTX_ACCESS_NONE:
subtype = AOT_INIT_METHOD;
break;
default:
g_assert_not_reached ();
}
call = LLVMBuildCall (builder, ctx->module->init_methods [subtype], args, nargs, "");
/*
* This enables llvm to keep arguments in their original registers/
* scratch registers, since the call will not clobber them.
*/
set_call_cold_cconv (call);
// Set the inited flag
indexes [0] = const_int32 (0);
indexes [1] = const_int32 (cfg->method_index);
LLVMBuildStore (builder, LLVMConstInt (LLVMInt8Type (), 1, FALSE), LLVMBuildGEP (builder, ctx->module->inited_var, indexes, 2, ""));
LLVMBuildBr (builder, inited_bb);
ctx->bblocks [cfg->bb_entry->block_num].end_bblock = inited_bb;
builder = ctx->builder = create_builder (ctx);
LLVMPositionBuilderAtEnd (ctx->builder, inited_bb);
}
static void
emit_unbox_tramp (EmitContext *ctx, const char *method_name, LLVMTypeRef method_type, LLVMValueRef method, int method_index)
{
/*
* Emit an unbox trampoline using a tailcall
*/
LLVMValueRef tramp, call, *args;
LLVMBuilderRef builder;
LLVMBasicBlockRef lbb;
LLVMCallInfo *linfo;
char *tramp_name;
int i, nargs;
tramp_name = g_strdup_printf ("ut_%s", method_name);
tramp = LLVMAddFunction (ctx->module->lmodule, tramp_name, method_type);
LLVMSetLinkage (tramp, LLVMInternalLinkage);
mono_llvm_add_func_attr (tramp, LLVM_ATTR_OPTIMIZE_FOR_SIZE);
//mono_llvm_add_func_attr (tramp, LLVM_ATTR_NO_UNWIND);
linfo = ctx->linfo;
// FIXME: Reduce code duplication with mono_llvm_compile_method () etc.
if (!ctx->llvm_only && ctx->rgctx_arg_pindex != -1)
mono_llvm_add_param_attr (LLVMGetParam (tramp, ctx->rgctx_arg_pindex), LLVM_ATTR_IN_REG);
if (ctx->cfg->vret_addr) {
LLVMSetValueName (LLVMGetParam (tramp, linfo->vret_arg_pindex), "vret");
if (linfo->ret.storage == LLVMArgVtypeByRef) {
mono_llvm_add_param_attr (LLVMGetParam (tramp, linfo->vret_arg_pindex), LLVM_ATTR_STRUCT_RET);
mono_llvm_add_param_attr (LLVMGetParam (tramp, linfo->vret_arg_pindex), LLVM_ATTR_NO_ALIAS);
}
}
lbb = LLVMAppendBasicBlock (tramp, "");
builder = LLVMCreateBuilder ();
LLVMPositionBuilderAtEnd (builder, lbb);
nargs = LLVMCountParamTypes (method_type);
args = g_new0 (LLVMValueRef, nargs);
for (i = 0; i < nargs; ++i) {
args [i] = LLVMGetParam (tramp, i);
if (i == ctx->this_arg_pindex) {
LLVMTypeRef arg_type = LLVMTypeOf (args [i]);
args [i] = LLVMBuildPtrToInt (builder, args [i], IntPtrType (), "");
args [i] = LLVMBuildAdd (builder, args [i], LLVMConstInt (IntPtrType (), MONO_ABI_SIZEOF (MonoObject), FALSE), "");
args [i] = LLVMBuildIntToPtr (builder, args [i], arg_type, "");
}
}
call = LLVMBuildCall (builder, method, args, nargs, "");
if (!ctx->llvm_only && ctx->rgctx_arg_pindex != -1)
mono_llvm_add_instr_attr (call, 1 + ctx->rgctx_arg_pindex, LLVM_ATTR_IN_REG);
if (linfo->ret.storage == LLVMArgVtypeByRef)
mono_llvm_add_instr_attr (call, 1 + linfo->vret_arg_pindex, LLVM_ATTR_STRUCT_RET);
// FIXME: This causes assertions in clang
//mono_llvm_set_must_tailcall (call);
if (LLVMGetReturnType (method_type) == LLVMVoidType ())
LLVMBuildRetVoid (builder);
else
LLVMBuildRet (builder, call);
g_hash_table_insert (ctx->module->idx_to_unbox_tramp, GINT_TO_POINTER (method_index), tramp);
LLVMDisposeBuilder (builder);
}
#ifdef TARGET_WASM
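/*
 * emit_gc_pin:
 *
 * Store the value of the ref vreg VREG into its slot in the stack allocated
 * gc pin area, so conservative stack scanning will pin the object it references.
 */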
static void
emit_gc_pin (EmitContext *ctx, LLVMBuilderRef builder, int vreg)
{
LLVMValueRef index0 = LLVMConstInt (LLVMInt32Type (), 0, FALSE);
LLVMValueRef index1 = LLVMConstInt (LLVMInt32Type (), ctx->gc_var_indexes [vreg] - 1, FALSE);
LLVMValueRef indexes [] = { index0, index1 };
LLVMValueRef addr = LLVMBuildGEP (builder, ctx->gc_pin_area, indexes, 2, "");
mono_llvm_build_store (builder, convert (ctx, ctx->values [vreg], IntPtrType ()), addr, TRUE, LLVM_BARRIER_NONE);
}
#endif
/*
* emit_entry_bb:
*
* Emit code to load/convert arguments.
*/
static void
emit_entry_bb (EmitContext *ctx, LLVMBuilderRef builder)
{
int i, j, pindex;
MonoCompile *cfg = ctx->cfg;
MonoMethodSignature *sig = ctx->sig;
LLVMCallInfo *linfo = ctx->linfo;
MonoBasicBlock *bb;
char **names;
LLVMBuilderRef old_builder = ctx->builder;
ctx->builder = builder;
ctx->alloca_builder = create_builder (ctx);
#ifdef TARGET_WASM
/*
* For GC stack scanning to work, allocate an area on the stack and store
* every ref vreg into it after it's written. Because the stack is scanned
* conservatively, the objects will be pinned, so the vregs can directly
* reference the objects; there is no need to load them from the stack
* on every access.
*/
ctx->gc_var_indexes = g_new0 (int, cfg->next_vreg);
int ngc_vars = 0;
for (i = 0; i < cfg->next_vreg; ++i) {
if (vreg_is_ref (cfg, i)) {
ctx->gc_var_indexes [i] = ngc_vars + 1;
ngc_vars ++;
}
}
// FIXME: Count only live vregs
ctx->gc_pin_area = build_alloca_llvm_type_name (ctx, LLVMArrayType (IntPtrType (), ngc_vars), 0, "gc_pin");
#endif
/*
* Handle indirect/volatile variables by allocating memory for them
* using 'alloca', and storing their address in a temporary.
*/
for (i = 0; i < cfg->num_varinfo; ++i) {
MonoInst *var = cfg->varinfo [i];
if ((var->opcode == OP_GSHAREDVT_LOCAL || var->opcode == OP_GSHAREDVT_ARG_REGOFFSET))
continue;
if (var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT) || (mini_type_is_vtype (var->inst_vtype) && !MONO_CLASS_IS_SIMD (ctx->cfg, var->klass))) {
if (!ctx_ok (ctx))
return;
/* Could be already created by an OP_VPHI */
if (!ctx->addresses [var->dreg]) {
if (var->flags & MONO_INST_LMF) {
// FIXME: Allocate a smaller struct in the deopt case
int size = cfg->deopt ? MONO_ABI_SIZEOF (MonoLMFExt) : MONO_ABI_SIZEOF (MonoLMF);
ctx->addresses [var->dreg] = build_alloca_llvm_type_name (ctx, LLVMArrayType (LLVMInt8Type (), size), sizeof (target_mgreg_t), "lmf");
} else {
char *name = g_strdup_printf ("vreg_loc_%d", var->dreg);
ctx->addresses [var->dreg] = build_named_alloca (ctx, var->inst_vtype, name);
g_free (name);
}
}
ctx->vreg_cli_types [var->dreg] = var->inst_vtype;
}
}
names = g_new (char *, sig->param_count);
mono_method_get_param_names (cfg->method, (const char **) names);
for (i = 0; i < sig->param_count; ++i) {
LLVMArgInfo *ainfo = &linfo->args [i + sig->hasthis];
int reg = cfg->args [i + sig->hasthis]->dreg;
char *name;
pindex = ainfo->pindex;
LLVMValueRef arg = LLVMGetParam (ctx->lmethod, pindex);
switch (ainfo->storage) {
case LLVMArgVtypeInReg:
case LLVMArgAsFpArgs: {
LLVMValueRef args [8];
int j;
pindex += ainfo->ndummy_fpargs;
/* The argument is received as a set of int/fp arguments, store them into the real argument */
memset (args, 0, sizeof (args));
if (ainfo->storage == LLVMArgVtypeInReg) {
args [0] = LLVMGetParam (ctx->lmethod, pindex);
if (ainfo->pair_storage [1] != LLVMArgNone)
args [1] = LLVMGetParam (ctx->lmethod, pindex + 1);
} else {
g_assert (ainfo->nslots <= 8);
for (j = 0; j < ainfo->nslots; ++j)
args [j] = LLVMGetParam (ctx->lmethod, pindex + j);
}
ctx->addresses [reg] = build_alloca (ctx, ainfo->type);
emit_args_to_vtype (ctx, builder, ainfo->type, ctx->addresses [reg], ainfo, args);
break;
}
case LLVMArgVtypeByVal: {
ctx->addresses [reg] = LLVMGetParam (ctx->lmethod, pindex);
break;
}
case LLVMArgVtypeAddr:
case LLVMArgVtypeByRef: {
/* The argument is passed by ref */
ctx->addresses [reg] = LLVMGetParam (ctx->lmethod, pindex);
break;
}
case LLVMArgAsIArgs: {
LLVMValueRef arg = LLVMGetParam (ctx->lmethod, pindex);
int size;
MonoType *t = mini_get_underlying_type (ainfo->type);
/* The argument is received as an array of ints, store it into the real argument */
ctx->addresses [reg] = build_alloca (ctx, t);
size = mono_class_value_size (mono_class_from_mono_type_internal (t), NULL);
if (size == 0) {
} else if (size < TARGET_SIZEOF_VOID_P) {
/* The upper bits of the registers might not be valid */
LLVMValueRef val = LLVMBuildExtractValue (builder, arg, 0, "");
LLVMValueRef dest = convert (ctx, ctx->addresses [reg], LLVMPointerType (LLVMIntType (size * 8), 0));
LLVMBuildStore (ctx->builder, LLVMBuildTrunc (builder, val, LLVMIntType (size * 8), ""), dest);
} else {
LLVMBuildStore (ctx->builder, arg, convert (ctx, ctx->addresses [reg], LLVMPointerType (LLVMTypeOf (arg), 0)));
}
break;
}
case LLVMArgVtypeAsScalar:
g_assert_not_reached ();
break;
case LLVMArgWasmVtypeAsScalar: {
MonoType *t = mini_get_underlying_type (ainfo->type);
/* The argument is received as a scalar */
ctx->addresses [reg] = build_alloca (ctx, t);
LLVMValueRef dest = convert (ctx, ctx->addresses [reg], LLVMPointerType (LLVMIntType (ainfo->esize * 8), 0));
LLVMBuildStore (ctx->builder, arg, dest);
break;
}
case LLVMArgGsharedvtFixed: {
/* These are non-gsharedvt arguments passed by ref, the rest of the IR treats them as scalars */
LLVMValueRef arg = LLVMGetParam (ctx->lmethod, pindex);
if (names [i])
name = g_strdup_printf ("arg_%s", names [i]);
else
name = g_strdup_printf ("arg_%d", i);
ctx->values [reg] = LLVMBuildLoad (builder, convert (ctx, arg, LLVMPointerType (type_to_llvm_type (ctx, ainfo->type), 0)), name);
break;
}
case LLVMArgGsharedvtFixedVtype: {
LLVMValueRef arg = LLVMGetParam (ctx->lmethod, pindex);
if (names [i])
name = g_strdup_printf ("vtype_arg_%s", names [i]);
else
name = g_strdup_printf ("vtype_arg_%d", i);
/* Non-gsharedvt vtype argument passed by ref, the rest of the IR treats it as a vtype */
g_assert (ctx->addresses [reg]);
LLVMSetValueName (ctx->addresses [reg], name);
LLVMBuildStore (builder, LLVMBuildLoad (builder, convert (ctx, arg, LLVMPointerType (type_to_llvm_type (ctx, ainfo->type), 0)), ""), ctx->addresses [reg]);
break;
}
case LLVMArgGsharedvtVariable:
/* The IR treats these as variables with addresses */
if (!ctx->addresses [reg])
ctx->addresses [reg] = LLVMGetParam (ctx->lmethod, pindex);
break;
default: {
LLVMTypeRef t;
/* Needed to avoid phi argument mismatch errors since operations on pointers produce i32/i64 */
if (m_type_is_byref (ainfo->type))
t = IntPtrType ();
else
t = type_to_llvm_type (ctx, ainfo->type);
ctx->values [reg] = convert_full (ctx, ctx->values [reg], llvm_type_to_stack_type (cfg, t), type_is_unsigned (ctx, ainfo->type));
break;
}
}
switch (ainfo->storage) {
case LLVMArgVtypeInReg:
case LLVMArgVtypeByVal:
case LLVMArgAsIArgs:
// FIXME: Enabling this fails on windows
case LLVMArgVtypeAddr:
case LLVMArgVtypeByRef:
{
if (MONO_CLASS_IS_SIMD (ctx->cfg, mono_class_from_mono_type_internal (ainfo->type)))
/* Treat these as normal values */
ctx->values [reg] = LLVMBuildLoad (builder, ctx->addresses [reg], "simd_vtype");
break;
}
default:
break;
}
}
g_free (names);
if (sig->hasthis) {
/* Handle this arguments as inputs to phi nodes */
int reg = cfg->args [0]->dreg;
if (ctx->vreg_types [reg])
ctx->values [reg] = convert (ctx, ctx->values [reg], ctx->vreg_types [reg]);
}
if (cfg->vret_addr)
emit_volatile_store (ctx, cfg->vret_addr->dreg);
if (sig->hasthis)
emit_volatile_store (ctx, cfg->args [0]->dreg);
for (i = 0; i < sig->param_count; ++i)
if (!mini_type_is_vtype (sig->params [i]))
emit_volatile_store (ctx, cfg->args [i + sig->hasthis]->dreg);
if (sig->hasthis && !cfg->rgctx_var && cfg->gshared && !cfg->llvm_only) {
LLVMValueRef this_alloc;
/*
* The exception handling code needs the location where the this argument was
* stored for gshared methods. We create a separate alloca to hold it, and mark it
* with the "mono.this" custom metadata to tell llvm that it needs to save its
* location into the LSDA.
*/
this_alloc = mono_llvm_build_alloca (builder, ThisType (), LLVMConstInt (LLVMInt32Type (), 1, FALSE), 0, "");
/* This volatile store will keep the alloca alive */
mono_llvm_build_store (builder, ctx->values [cfg->args [0]->dreg], this_alloc, TRUE, LLVM_BARRIER_NONE);
set_metadata_flag (this_alloc, "mono.this");
}
if (cfg->rgctx_var) {
if (!(cfg->rgctx_var->flags & MONO_INST_VOLATILE)) {
/* FIXME: This could be volatile even in llvmonly mode if used inside a clause etc. */
g_assert (!ctx->addresses [cfg->rgctx_var->dreg]);
ctx->values [cfg->rgctx_var->dreg] = ctx->rgctx_arg;
} else {
LLVMValueRef rgctx_alloc, store;
/*
* We handle the rgctx arg similarly to the this pointer.
*/
g_assert (ctx->addresses [cfg->rgctx_var->dreg]);
rgctx_alloc = ctx->addresses [cfg->rgctx_var->dreg];
/* This volatile store will keep the alloca alive */
store = mono_llvm_build_store (builder, convert (ctx, ctx->rgctx_arg, IntPtrType ()), rgctx_alloc, TRUE, LLVM_BARRIER_NONE);
(void)store; /* unused */
set_metadata_flag (rgctx_alloc, "mono.this");
}
}
#ifdef TARGET_WASM
/*
* Store ref arguments to the pin area.
* FIXME: This might not be needed, since the caller already does it?
*/
for (i = 0; i < cfg->num_varinfo; ++i) {
MonoInst *var = cfg->varinfo [i];
if (var->opcode == OP_ARG && vreg_is_ref (cfg, var->dreg) && ctx->values [var->dreg])
emit_gc_pin (ctx, builder, var->dreg);
}
#endif
if (cfg->deopt) {
LLVMValueRef addr, index [2];
MonoMethodHeader *header = cfg->header;
int nfields = (sig->ret->type != MONO_TYPE_VOID ? 1 : 0) + sig->hasthis + sig->param_count + header->num_locals + 2;
LLVMTypeRef *types = g_alloca (nfields * sizeof (LLVMTypeRef));
int findex = 0;
/* method */
types [findex ++] = IntPtrType ();
/* il_offset */
types [findex ++] = LLVMInt32Type ();
int data_start = findex;
/* data */
if (sig->ret->type != MONO_TYPE_VOID)
types [findex ++] = IntPtrType ();
if (sig->hasthis)
types [findex ++] = IntPtrType ();
for (int i = 0; i < sig->param_count; ++i)
types [findex ++] = LLVMPointerType (type_to_llvm_type (ctx, sig->params [i]), 0);
for (int i = 0; i < header->num_locals; ++i)
types [findex ++] = LLVMPointerType (type_to_llvm_type (ctx, header->locals [i]), 0);
g_assert (findex == nfields);
char *name = g_strdup_printf ("%s_il_state", ctx->method_name);
LLVMTypeRef il_state_type = LLVMStructCreateNamed (ctx->module->context, name);
LLVMStructSetBody (il_state_type, types, nfields, FALSE);
g_free (name);
ctx->il_state = build_alloca_llvm_type_name (ctx, il_state_type, 0, "il_state");
g_assert (cfg->il_state_var);
ctx->addresses [cfg->il_state_var->dreg] = ctx->il_state;
/* Set il_state->il_offset = -1 */
index [0] = LLVMConstInt (LLVMInt32Type (), 0, FALSE);
index [1] = LLVMConstInt (LLVMInt32Type (), 1, FALSE);
addr = LLVMBuildGEP (builder, ctx->il_state, index, 2, "");
LLVMBuildStore (ctx->builder, LLVMConstInt (types [1], -1, FALSE), addr);
/*
* Set il_state->data [i] to either the address of the arg/local, or NULL.
* Because of mono_liveness_handle_exception_clauses (), all locals used/reachable from
* clauses are supposed to be volatile, so they have an address.
*/
findex = data_start;
if (sig->ret->type != MONO_TYPE_VOID) {
LLVMTypeRef ret_type = type_to_llvm_type (ctx, sig->ret);
ctx->il_state_ret = build_alloca_llvm_type_name (ctx, ret_type, 0, "il_state_ret");
index [0] = LLVMConstInt (LLVMInt32Type (), 0, FALSE);
index [1] = LLVMConstInt (LLVMInt32Type (), findex, FALSE);
addr = LLVMBuildGEP (builder, ctx->il_state, index, 2, "");
LLVMBuildStore (ctx->builder, ctx->il_state_ret, convert (ctx, addr, LLVMPointerType (LLVMTypeOf (ctx->il_state_ret), 0)));
findex ++;
}
for (int i = 0; i < sig->hasthis + sig->param_count; ++i) {
LLVMValueRef var_addr = ctx->addresses [cfg->args [i]->dreg];
index [0] = LLVMConstInt (LLVMInt32Type (), 0, FALSE);
index [1] = LLVMConstInt (LLVMInt32Type (), findex, FALSE);
addr = LLVMBuildGEP (builder, ctx->il_state, index, 2, "");
if (var_addr)
LLVMBuildStore (ctx->builder, var_addr, convert (ctx, addr, LLVMPointerType (LLVMTypeOf (var_addr), 0)));
else
LLVMBuildStore (ctx->builder, LLVMConstNull (types [findex]), addr);
findex ++;
}
for (int i = 0; i < header->num_locals; ++i) {
LLVMValueRef var_addr = ctx->addresses [cfg->locals [i]->dreg];
index [0] = LLVMConstInt (LLVMInt32Type (), 0, FALSE);
index [1] = LLVMConstInt (LLVMInt32Type (), findex, FALSE);
addr = LLVMBuildGEP (builder, ctx->il_state, index, 2, "");
if (var_addr)
LLVMBuildStore (ctx->builder, LLVMBuildBitCast (builder, var_addr, types [findex], ""), addr);
else
LLVMBuildStore (ctx->builder, LLVMConstNull (types [findex]), addr);
findex ++;
}
}
/* Initialize the method if needed */
if (cfg->compile_aot) {
/* Emit a location for the initialization code */
ctx->init_bb = gen_bb (ctx, "INIT_BB");
ctx->inited_bb = gen_bb (ctx, "INITED_BB");
LLVMBuildBr (ctx->builder, ctx->init_bb);
builder = ctx->builder = create_builder (ctx);
LLVMPositionBuilderAtEnd (ctx->builder, ctx->inited_bb);
ctx->bblocks [cfg->bb_entry->block_num].end_bblock = ctx->inited_bb;
}
/* Compute nesting between clauses */
ctx->nested_in = (GSList**)mono_mempool_alloc0 (cfg->mempool, sizeof (GSList*) * cfg->header->num_clauses);
for (i = 0; i < cfg->header->num_clauses; ++i) {
for (j = 0; j < cfg->header->num_clauses; ++j) {
MonoExceptionClause *clause1 = &cfg->header->clauses [i];
MonoExceptionClause *clause2 = &cfg->header->clauses [j];
if (i != j && clause1->try_offset >= clause2->try_offset && clause1->handler_offset <= clause2->handler_offset)
ctx->nested_in [i] = g_slist_prepend_mempool (cfg->mempool, ctx->nested_in [i], GINT_TO_POINTER (j));
}
}
/*
* For finally clauses, create an indicator variable telling OP_ENDFINALLY whether
* it needs to continue normally or return to the exception handling system.
*/
for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
char name [128];
if (!(bb->region != -1 && (bb->flags & BB_EXCEPTION_HANDLER)))
continue;
if (bb->in_scount == 0) {
LLVMValueRef val;
sprintf (name, "finally_ind_bb%d", bb->block_num);
val = LLVMBuildAlloca (builder, LLVMInt32Type (), name);
LLVMBuildStore (builder, LLVMConstInt (LLVMInt32Type (), 0, FALSE), val);
ctx->bblocks [bb->block_num].finally_ind = val;
} else {
/* Create a variable to hold the exception var */
if (!ctx->ex_var)
ctx->ex_var = LLVMBuildAlloca (builder, ObjRefType (), "exvar");
}
}
ctx->builder = old_builder;
}
static gboolean
needs_extra_arg (EmitContext *ctx, MonoMethod *method)
{
WrapperInfo *info = NULL;
/*
* When targeting wasm, the caller and callee signatures have to match exactly. This means
* that every method which can be called indirectly needs an extra arg, since the caller
* will call it through an ftnptr and will pass an extra arg.
*/
if (!ctx->cfg->llvm_only || !ctx->emit_dummy_arg)
return FALSE;
if (method->wrapper_type)
info = mono_marshal_get_wrapper_info (method);
switch (method->wrapper_type) {
case MONO_WRAPPER_OTHER:
if (info->subtype == WRAPPER_SUBTYPE_GSHAREDVT_IN_SIG || info->subtype == WRAPPER_SUBTYPE_GSHAREDVT_OUT_SIG)
/* Already have an explicit extra arg */
return FALSE;
break;
case MONO_WRAPPER_MANAGED_TO_NATIVE:
if (strstr (method->name, "icall_wrapper"))
/* These are JIT icall wrappers which are only called from JITted code directly */
return FALSE;
/* Normal icalls can be virtual methods which need an extra arg */
break;
case MONO_WRAPPER_RUNTIME_INVOKE:
case MONO_WRAPPER_ALLOC:
case MONO_WRAPPER_CASTCLASS:
case MONO_WRAPPER_WRITE_BARRIER:
case MONO_WRAPPER_NATIVE_TO_MANAGED:
return FALSE;
case MONO_WRAPPER_STELEMREF:
if (info->subtype != WRAPPER_SUBTYPE_VIRTUAL_STELEMREF)
return FALSE;
break;
case MONO_WRAPPER_MANAGED_TO_MANAGED:
if (info->subtype == WRAPPER_SUBTYPE_STRING_CTOR)
return FALSE;
break;
default:
break;
}
if (method->string_ctor)
return FALSE;
/* These are called from gsharedvt code with an indirect call which doesn't pass an extra arg */
if (method->klass == mono_get_string_class () && (strstr (method->name, "memcpy") || strstr (method->name, "bzero")))
return FALSE;
return TRUE;
}
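/* Return whether the calling convention of CALL is supported by the LLVM backend on this target */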
static inline gboolean
is_supported_callconv (EmitContext *ctx, MonoCallInst *call)
{
#if defined(TARGET_WIN32) && defined(TARGET_AMD64)
gboolean result = (call->signature->call_convention == MONO_CALL_DEFAULT) ||
(call->signature->call_convention == MONO_CALL_C) ||
(call->signature->call_convention == MONO_CALL_STDCALL);
#else
gboolean result = (call->signature->call_convention == MONO_CALL_DEFAULT) || ((call->signature->call_convention == MONO_CALL_C) && ctx->llvm_only);
#endif
return result;
}
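/*
 * process_call:
 *
 * Emit code for the call instruction INS: resolve the callee, marshal the
 * arguments according to the precomputed LLVMCallInfo, emit the call, then
 * convert the return value into the representation the rest of the IR expects.
 */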
static void
process_call (EmitContext *ctx, MonoBasicBlock *bb, LLVMBuilderRef *builder_ref, MonoInst *ins)
{
MonoCompile *cfg = ctx->cfg;
LLVMValueRef *values = ctx->values;
LLVMValueRef *addresses = ctx->addresses;
MonoCallInst *call = (MonoCallInst*)ins;
MonoMethodSignature *sig = call->signature;
LLVMValueRef callee = NULL, lcall;
LLVMValueRef *args;
LLVMCallInfo *cinfo;
GSList *l;
int i, len, nargs;
gboolean vretaddr;
LLVMTypeRef llvm_sig;
gpointer target;
gboolean is_virtual, calli;
LLVMBuilderRef builder = *builder_ref;
/* If both imt and rgctx arg are required, only pass the imt arg, the rgctx trampoline will pass the rgctx */
if (call->imt_arg_reg)
call->rgctx_arg_reg = 0;
if (!is_supported_callconv (ctx, call)) {
set_failure (ctx, "non-default callconv");
return;
}
cinfo = call->cinfo;
g_assert (cinfo);
if (call->rgctx_arg_reg)
cinfo->rgctx_arg = TRUE;
if (call->imt_arg_reg)
cinfo->imt_arg = TRUE;
if (!call->rgctx_arg_reg && call->method && needs_extra_arg (ctx, call->method))
cinfo->dummy_arg = TRUE;
vretaddr = (cinfo->ret.storage == LLVMArgVtypeRetAddr || cinfo->ret.storage == LLVMArgVtypeByRef || cinfo->ret.storage == LLVMArgGsharedvtFixed || cinfo->ret.storage == LLVMArgGsharedvtVariable || cinfo->ret.storage == LLVMArgGsharedvtFixedVtype);
llvm_sig = sig_to_llvm_sig_full (ctx, sig, cinfo);
if (!ctx_ok (ctx))
return;
int const opcode = ins->opcode;
is_virtual = opcode == OP_VOIDCALL_MEMBASE || opcode == OP_CALL_MEMBASE
|| opcode == OP_VCALL_MEMBASE || opcode == OP_LCALL_MEMBASE
|| opcode == OP_FCALL_MEMBASE || opcode == OP_RCALL_MEMBASE
|| opcode == OP_TAILCALL_MEMBASE;
calli = !call->fptr_is_patch && (opcode == OP_VOIDCALL_REG || opcode == OP_CALL_REG
|| opcode == OP_VCALL_REG || opcode == OP_LCALL_REG || opcode == OP_FCALL_REG
|| opcode == OP_RCALL_REG || opcode == OP_TAILCALL_REG);
/* FIXME: Avoid creating duplicate methods */
if (ins->flags & MONO_INST_HAS_METHOD) {
if (is_virtual) {
callee = NULL;
} else {
if (cfg->compile_aot) {
callee = get_callee (ctx, llvm_sig, MONO_PATCH_INFO_METHOD, call->method);
if (!callee) {
set_failure (ctx, "can't encode patch");
return;
}
} else if (cfg->method == call->method) {
callee = ctx->lmethod;
} else {
ERROR_DECL (error);
static int tramp_index;
char *name;
name = g_strdup_printf ("[tramp_%d] %s", tramp_index, mono_method_full_name (call->method, TRUE));
tramp_index ++;
/*
* Use our trampoline infrastructure for lazy compilation instead of llvm's.
* Make all calls through a global. The address of the global will be saved in
* MonoJitDomainInfo.llvm_jit_callees and updated when the method it refers to is
* compiled.
*/
LLVMValueRef tramp_var = (LLVMValueRef)g_hash_table_lookup (ctx->jit_callees, call->method);
if (!tramp_var) {
target =
mono_create_jit_trampoline (call->method, error);
if (!is_ok (error)) {
set_failure (ctx, mono_error_get_message (error));
mono_error_cleanup (error);
return;
}
tramp_var = LLVMAddGlobal (ctx->lmodule, LLVMPointerType (llvm_sig, 0), name);
LLVMSetInitializer (tramp_var, LLVMConstIntToPtr (LLVMConstInt (LLVMInt64Type (), (guint64)(size_t)target, FALSE), LLVMPointerType (llvm_sig, 0)));
LLVMSetLinkage (tramp_var, LLVMExternalLinkage);
g_hash_table_insert (ctx->jit_callees, call->method, tramp_var);
}
callee = LLVMBuildLoad (builder, tramp_var, "");
}
}
if (!cfg->llvm_only && call->method && strstr (m_class_get_name (call->method->klass), "AsyncVoidMethodBuilder")) {
/* LLVM miscompiles async methods */
set_failure (ctx, "#13734");
return;
}
} else if (calli) {
} else {
const MonoJitICallId jit_icall_id = call->jit_icall_id;
if (jit_icall_id) {
if (cfg->compile_aot) {
callee = get_callee (ctx, llvm_sig, MONO_PATCH_INFO_JIT_ICALL_ID, GUINT_TO_POINTER (jit_icall_id));
if (!callee) {
set_failure (ctx, "can't encode patch");
return;
}
} else {
callee = get_jit_callee (ctx, "", llvm_sig, MONO_PATCH_INFO_JIT_ICALL_ID, GUINT_TO_POINTER (jit_icall_id));
}
} else {
if (cfg->compile_aot) {
callee = NULL;
if (cfg->abs_patches) {
MonoJumpInfo *abs_ji = (MonoJumpInfo*)g_hash_table_lookup (cfg->abs_patches, call->fptr);
if (abs_ji) {
callee = get_callee (ctx, llvm_sig, abs_ji->type, abs_ji->data.target);
if (!callee) {
set_failure (ctx, "can't encode patch");
return;
}
}
}
if (!callee) {
set_failure (ctx, "aot");
return;
}
} else {
if (cfg->abs_patches) {
MonoJumpInfo *abs_ji = (MonoJumpInfo*)g_hash_table_lookup (cfg->abs_patches, call->fptr);
if (abs_ji) {
ERROR_DECL (error);
target = mono_resolve_patch_target (cfg->method, NULL, abs_ji, FALSE, error);
mono_error_assert_ok (error);
callee = get_jit_callee (ctx, "", llvm_sig, abs_ji->type, abs_ji->data.target);
} else {
g_assert_not_reached ();
}
} else {
g_assert_not_reached ();
}
}
}
}
if (is_virtual) {
int size = TARGET_SIZEOF_VOID_P;
LLVMValueRef index;
g_assert (ins->inst_offset % size == 0);
index = LLVMConstInt (LLVMInt32Type (), ins->inst_offset / size, FALSE);
callee = convert (ctx, LLVMBuildLoad (builder, LLVMBuildGEP (builder, convert (ctx, values [ins->inst_basereg], LLVMPointerType (LLVMPointerType (IntPtrType (), 0), 0)), &index, 1, ""), ""), LLVMPointerType (llvm_sig, 0));
} else if (calli) {
callee = convert (ctx, values [ins->sreg1], LLVMPointerType (llvm_sig, 0));
} else {
if (ins->flags & MONO_INST_HAS_METHOD) {
}
}
/*
* Collect and convert arguments
*/
nargs = (sig->param_count * 16) + sig->hasthis + vretaddr + call->rgctx_reg + call->imt_arg_reg + call->cinfo->dummy_arg + 1;
len = sizeof (LLVMValueRef) * nargs;
args = g_newa (LLVMValueRef, nargs);
memset (args, 0, len);
l = call->out_ireg_args;
if (call->rgctx_arg_reg) {
g_assert (values [call->rgctx_arg_reg]);
g_assert (cinfo->rgctx_arg_pindex < nargs);
/*
* On ARM, the imt/rgctx argument is passed in a caller save register, but some of our trampolines etc. clobber it, leading to
* problems if LLVM moves the arg assignment earlier. To work around this, save the argument into a stack slot and load
* it using a volatile load.
*/
#ifdef TARGET_ARM
if (!ctx->imt_rgctx_loc)
ctx->imt_rgctx_loc = build_alloca_llvm_type (ctx, ctx->module->ptr_type, TARGET_SIZEOF_VOID_P);
LLVMBuildStore (builder, convert (ctx, ctx->values [call->rgctx_arg_reg], ctx->module->ptr_type), ctx->imt_rgctx_loc);
args [cinfo->rgctx_arg_pindex] = mono_llvm_build_load (builder, ctx->imt_rgctx_loc, "", TRUE);
#else
args [cinfo->rgctx_arg_pindex] = convert (ctx, values [call->rgctx_arg_reg], ctx->module->ptr_type);
#endif
}
if (call->imt_arg_reg) {
g_assert (!ctx->llvm_only);
g_assert (values [call->imt_arg_reg]);
g_assert (cinfo->imt_arg_pindex < nargs);
#ifdef TARGET_ARM
if (!ctx->imt_rgctx_loc)
ctx->imt_rgctx_loc = build_alloca_llvm_type (ctx, ctx->module->ptr_type, TARGET_SIZEOF_VOID_P);
LLVMBuildStore (builder, convert (ctx, ctx->values [call->imt_arg_reg], ctx->module->ptr_type), ctx->imt_rgctx_loc);
args [cinfo->imt_arg_pindex] = mono_llvm_build_load (builder, ctx->imt_rgctx_loc, "", TRUE);
#else
args [cinfo->imt_arg_pindex] = convert (ctx, values [call->imt_arg_reg], ctx->module->ptr_type);
#endif
}
switch (cinfo->ret.storage) {
case LLVMArgGsharedvtVariable: {
MonoInst *var = get_vreg_to_inst (cfg, call->inst.dreg);
if (var && var->opcode == OP_GSHAREDVT_LOCAL) {
args [cinfo->vret_arg_pindex] = convert (ctx, emit_gsharedvt_ldaddr (ctx, var->dreg), IntPtrType ());
} else {
g_assert (addresses [call->inst.dreg]);
args [cinfo->vret_arg_pindex] = convert (ctx, addresses [call->inst.dreg], IntPtrType ());
}
break;
}
default:
if (vretaddr) {
if (!addresses [call->inst.dreg])
addresses [call->inst.dreg] = build_alloca (ctx, sig->ret);
g_assert (cinfo->vret_arg_pindex < nargs);
if (cinfo->ret.storage == LLVMArgVtypeByRef)
args [cinfo->vret_arg_pindex] = addresses [call->inst.dreg];
else
args [cinfo->vret_arg_pindex] = LLVMBuildPtrToInt (builder, addresses [call->inst.dreg], IntPtrType (), "");
}
break;
}
/*
* Sometimes the same method is called with two different signatures (i.e. with and without 'this'), so
* use the real callee for argument type conversion.
*/
LLVMTypeRef callee_type = LLVMGetElementType (LLVMTypeOf (callee));
LLVMTypeRef *param_types = (LLVMTypeRef*)g_alloca (sizeof (LLVMTypeRef) * LLVMCountParamTypes (callee_type));
LLVMGetParamTypes (callee_type, param_types);
for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
guint32 regpair;
int reg, pindex;
LLVMArgInfo *ainfo = &call->cinfo->args [i];
pindex = ainfo->pindex;
regpair = (guint32)(gssize)(l->data);
reg = regpair & 0xffffff;
args [pindex] = values [reg];
switch (ainfo->storage) {
case LLVMArgVtypeInReg:
case LLVMArgAsFpArgs: {
guint32 nargs;
int j;
for (j = 0; j < ainfo->ndummy_fpargs; ++j)
args [pindex + j] = LLVMConstNull (LLVMDoubleType ());
pindex += ainfo->ndummy_fpargs;
g_assert (addresses [reg]);
emit_vtype_to_args (ctx, builder, ainfo->type, addresses [reg], ainfo, args + pindex, &nargs);
pindex += nargs;
// FIXME: alignment
// FIXME: Get rid of the VMOVE
break;
}
case LLVMArgVtypeByVal:
g_assert (addresses [reg]);
args [pindex] = addresses [reg];
break;
case LLVMArgVtypeAddr:
case LLVMArgVtypeByRef: {
g_assert (addresses [reg]);
args [pindex] = convert (ctx, addresses [reg], LLVMPointerType (type_to_llvm_arg_type (ctx, ainfo->type), 0));
break;
}
case LLVMArgAsIArgs:
g_assert (addresses [reg]);
if (ainfo->esize == 8)
args [pindex] = LLVMBuildLoad (ctx->builder, convert (ctx, addresses [reg], LLVMPointerType (LLVMArrayType (LLVMInt64Type (), ainfo->nslots), 0)), "");
else
args [pindex] = LLVMBuildLoad (ctx->builder, convert (ctx, addresses [reg], LLVMPointerType (LLVMArrayType (IntPtrType (), ainfo->nslots), 0)), "");
break;
case LLVMArgVtypeAsScalar:
g_assert_not_reached ();
break;
case LLVMArgWasmVtypeAsScalar:
g_assert (addresses [reg]);
args [pindex] = LLVMBuildLoad (ctx->builder, convert (ctx, addresses [reg], LLVMPointerType (LLVMIntType (ainfo->esize * 8), 0)), "");
break;
case LLVMArgGsharedvtFixed:
case LLVMArgGsharedvtFixedVtype:
g_assert (addresses [reg]);
args [pindex] = convert (ctx, addresses [reg], LLVMPointerType (type_to_llvm_arg_type (ctx, ainfo->type), 0));
break;
case LLVMArgGsharedvtVariable:
g_assert (addresses [reg]);
args [pindex] = convert (ctx, addresses [reg], LLVMPointerType (IntPtrType (), 0));
break;
default:
g_assert (args [pindex]);
if (i == 0 && sig->hasthis)
args [pindex] = convert (ctx, args [pindex], param_types [pindex]);
else
args [pindex] = convert (ctx, args [pindex], type_to_llvm_arg_type (ctx, ainfo->type));
break;
}
g_assert (pindex <= nargs);
l = l->next;
}
if (call->cinfo->dummy_arg) {
g_assert (call->cinfo->dummy_arg_pindex < nargs);
args [call->cinfo->dummy_arg_pindex] = LLVMConstNull (ctx->module->ptr_type);
}
// FIXME: Align call sites
/*
* Emit the call
*/
lcall = emit_call (ctx, bb, &builder, callee, args, LLVMCountParamTypes (llvm_sig));
mono_llvm_nonnull_state_update (ctx, lcall, call->method, args, LLVMCountParamTypes (llvm_sig));
// If we just allocated an object, it's not null.
if (call->method && call->method->wrapper_type == MONO_WRAPPER_ALLOC) {
mono_llvm_set_call_nonnull_ret (lcall);
}
if (ins->opcode != OP_TAILCALL && ins->opcode != OP_TAILCALL_MEMBASE && LLVMGetInstructionOpcode (lcall) == LLVMCall)
mono_llvm_set_call_notailcall (lcall);
// Add the managed name of the called method as custom string metadata (the only way to leave comments in LLVM IR)
if (mono_debug_enabled () && call && call->method)
mono_llvm_add_string_metadata (lcall, "managed_name", mono_method_full_name (call->method, TRUE));
// As per the LLVM docs, a function has a noalias return value if and only if
// it is an allocation function. This is an allocation function.
if (call->method && call->method->wrapper_type == MONO_WRAPPER_ALLOC) {
mono_llvm_set_call_noalias_ret (lcall);
// All objects are expected to be 8-byte aligned (SGEN_ALLOC_ALIGN)
mono_llvm_set_alignment_ret (lcall, 8);
}
/*
* Modify cconv and parameter attributes to pass rgctx/imt correctly.
*/
#if defined(MONO_ARCH_IMT_REG) && defined(MONO_ARCH_RGCTX_REG)
g_assert (MONO_ARCH_IMT_REG == MONO_ARCH_RGCTX_REG);
#endif
/* The two can't be used together, so use only one LLVM calling conv to pass them */
g_assert (!(call->rgctx_arg_reg && call->imt_arg_reg));
if (!sig->pinvoke && !cfg->llvm_only)
LLVMSetInstructionCallConv (lcall, LLVMMono1CallConv);
if (cinfo->ret.storage == LLVMArgVtypeByRef)
mono_llvm_add_instr_attr (lcall, 1 + cinfo->vret_arg_pindex, LLVM_ATTR_STRUCT_RET);
if (!ctx->llvm_only && call->rgctx_arg_reg)
mono_llvm_add_instr_attr (lcall, 1 + cinfo->rgctx_arg_pindex, LLVM_ATTR_IN_REG);
if (call->imt_arg_reg)
mono_llvm_add_instr_attr (lcall, 1 + cinfo->imt_arg_pindex, LLVM_ATTR_IN_REG);
/* Add byval attributes if needed */
for (i = 0; i < sig->param_count; ++i) {
LLVMArgInfo *ainfo = &call->cinfo->args [i + sig->hasthis];
if (ainfo && ainfo->storage == LLVMArgVtypeByVal)
mono_llvm_add_instr_attr (lcall, 1 + ainfo->pindex, LLVM_ATTR_BY_VAL);
#ifdef TARGET_WASM
if (ainfo && ainfo->storage == LLVMArgVtypeByRef)
/* This causes llvm to make a copy of the value, which is what we need */
mono_llvm_add_instr_byval_attr (lcall, 1 + ainfo->pindex, LLVMGetElementType (param_types [ainfo->pindex]));
#endif
}
gboolean is_simd = MONO_CLASS_IS_SIMD (ctx->cfg, mono_class_from_mono_type_internal (sig->ret));
gboolean should_promote_to_value = FALSE;
const char *load_name = NULL;
/*
* Convert the result. Non-SIMD value types are manipulated via an
* indirection. SIMD value types are represented directly as LLVM vector
* values, and must have a corresponding LLVM value definition in
* `values`.
*/
switch (cinfo->ret.storage) {
case LLVMArgAsIArgs:
case LLVMArgFpStruct:
if (!addresses [call->inst.dreg])
addresses [call->inst.dreg] = build_alloca (ctx, sig->ret);
LLVMBuildStore (builder, lcall, convert_full (ctx, addresses [call->inst.dreg], LLVMPointerType (LLVMTypeOf (lcall), 0), FALSE));
break;
case LLVMArgVtypeByVal:
/*
* Only used by amd64 and x86. Only ever used when passing
* arguments; never used for return values.
*/
g_assert_not_reached ();
break;
case LLVMArgVtypeInReg: {
if (LLVMTypeOf (lcall) == LLVMVoidType ())
/* Empty struct */
break;
if (!addresses [ins->dreg])
addresses [ins->dreg] = build_alloca (ctx, sig->ret);
LLVMValueRef regs [2] = { 0 };
regs [0] = LLVMBuildExtractValue (builder, lcall, 0, "");
if (cinfo->ret.pair_storage [1] != LLVMArgNone)
regs [1] = LLVMBuildExtractValue (builder, lcall, 1, "");
emit_args_to_vtype (ctx, builder, sig->ret, addresses [ins->dreg], &cinfo->ret, regs);
load_name = "process_call_vtype_in_reg";
should_promote_to_value = is_simd;
break;
}
case LLVMArgVtypeAsScalar:
if (!addresses [call->inst.dreg])
addresses [call->inst.dreg] = build_alloca (ctx, sig->ret);
LLVMBuildStore (builder, lcall, convert_full (ctx, addresses [call->inst.dreg], LLVMPointerType (LLVMTypeOf (lcall), 0), FALSE));
load_name = "process_call_vtype_as_scalar";
should_promote_to_value = is_simd;
break;
case LLVMArgVtypeRetAddr:
case LLVMArgVtypeByRef:
load_name = "process_call_vtype_ret_addr";
should_promote_to_value = is_simd;
break;
case LLVMArgGsharedvtVariable:
break;
case LLVMArgGsharedvtFixed:
case LLVMArgGsharedvtFixedVtype:
values [ins->dreg] = LLVMBuildLoad (builder, convert_full (ctx, addresses [call->inst.dreg], LLVMPointerType (type_to_llvm_type (ctx, sig->ret), 0), FALSE), "");
break;
case LLVMArgWasmVtypeAsScalar:
if (!addresses [call->inst.dreg])
addresses [call->inst.dreg] = build_alloca (ctx, sig->ret);
LLVMBuildStore (builder, lcall, convert_full (ctx, addresses [call->inst.dreg], LLVMPointerType (LLVMTypeOf (lcall), 0), FALSE));
break;
default:
if (sig->ret->type != MONO_TYPE_VOID)
/* If the method returns an unsigned value, need to zext it */
values [ins->dreg] = convert_full (ctx, lcall, llvm_type_to_stack_type (cfg, type_to_llvm_type (ctx, sig->ret)), type_is_unsigned (ctx, sig->ret));
break;
}
if (should_promote_to_value) {
g_assert (addresses [call->inst.dreg]);
LLVMTypeRef addr_type = LLVMPointerType (type_to_llvm_type (ctx, sig->ret), 0);
LLVMValueRef addr = convert_full (ctx, addresses [call->inst.dreg], addr_type, FALSE);
values [ins->dreg] = LLVMBuildLoad (builder, addr, load_name);
}
*builder_ref = ctx->builder;
}
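/*
 * emit_llvmonly_throw:
 *
 * Throw/rethrow EXC in llvmonly mode by calling the
 * mini_llvmonly_throw_exception/mini_llvmonly_rethrow_exception icalls,
 * then terminate the current bblock with an unreachable.
 */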
static void
emit_llvmonly_throw (EmitContext *ctx, MonoBasicBlock *bb, gboolean rethrow, LLVMValueRef exc)
{
MonoJitICallId icall_id = rethrow ? MONO_JIT_ICALL_mini_llvmonly_rethrow_exception : MONO_JIT_ICALL_mini_llvmonly_throw_exception;
LLVMValueRef callee = rethrow ? ctx->module->rethrow : ctx->module->throw_icall;
LLVMTypeRef exc_type = type_to_llvm_type (ctx, m_class_get_byval_arg (mono_get_exception_class ()));
if (!callee) {
LLVMTypeRef fun_sig = LLVMFunctionType1 (LLVMVoidType (), exc_type, FALSE);
g_assert (ctx->cfg->compile_aot);
callee = get_callee (ctx, fun_sig, MONO_PATCH_INFO_JIT_ICALL_ADDR, GUINT_TO_POINTER (icall_id));
}
LLVMValueRef args [2];
args [0] = convert (ctx, exc, exc_type);
emit_call (ctx, bb, &ctx->builder, callee, args, 1);
LLVMBuildUnreachable (ctx->builder);
ctx->builder = create_builder (ctx);
}
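/*
 * emit_throw:
 *
 * Throw/rethrow EXC by calling the mono_arch_throw_exception/
 * mono_arch_rethrow_exception icall trampolines used by JITted code.
 */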
static void
emit_throw (EmitContext *ctx, MonoBasicBlock *bb, gboolean rethrow, LLVMValueRef exc)
{
MonoMethodSignature *throw_sig;
LLVMValueRef * const pcallee = rethrow ? &ctx->module->rethrow : &ctx->module->throw_icall;
LLVMValueRef callee = *pcallee;
char const * const icall_name = rethrow ? "mono_arch_rethrow_exception" : "mono_arch_throw_exception";
#ifndef TARGET_X86
const
#endif
MonoJitICallId icall_id = rethrow ? MONO_JIT_ICALL_mono_arch_rethrow_exception : MONO_JIT_ICALL_mono_arch_throw_exception;
if (!callee) {
throw_sig = mono_metadata_signature_alloc (mono_get_corlib (), 1);
throw_sig->ret = m_class_get_byval_arg (mono_get_void_class ());
throw_sig->params [0] = m_class_get_byval_arg (mono_get_object_class ());
if (ctx->cfg->compile_aot) {
callee = get_callee (ctx, sig_to_llvm_sig (ctx, throw_sig), MONO_PATCH_INFO_JIT_ICALL_ID, GUINT_TO_POINTER (icall_id));
} else {
#ifdef TARGET_X86
/*
* LLVM doesn't push the exception argument, so we need a different
* trampoline.
*/
icall_id = rethrow ? MONO_JIT_ICALL_mono_llvm_rethrow_exception_trampoline : MONO_JIT_ICALL_mono_llvm_throw_exception_trampoline;
#endif
callee = get_jit_callee (ctx, icall_name, sig_to_llvm_sig (ctx, throw_sig), MONO_PATCH_INFO_JIT_ICALL_ID, GUINT_TO_POINTER (icall_id));
}
mono_memory_barrier ();
}
LLVMValueRef arg;
arg = convert (ctx, exc, type_to_llvm_type (ctx, m_class_get_byval_arg (mono_get_object_class ())));
emit_call (ctx, bb, &ctx->builder, callee, &arg, 1);
}
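/* Emit a call to the mini_llvmonly_resume_exception icall to continue propagating the current exception */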
static void
emit_resume_eh (EmitContext *ctx, MonoBasicBlock *bb)
{
const MonoJitICallId icall_id = MONO_JIT_ICALL_mini_llvmonly_resume_exception;
LLVMValueRef callee;
LLVMTypeRef fun_sig = LLVMFunctionType0 (LLVMVoidType (), FALSE);
g_assert (ctx->cfg->compile_aot);
callee = get_callee (ctx, fun_sig, MONO_PATCH_INFO_JIT_ICALL_ID, GUINT_TO_POINTER (icall_id));
emit_call (ctx, bb, &ctx->builder, callee, NULL, 0);
LLVMBuildUnreachable (ctx->builder);
ctx->builder = create_builder (ctx);
}
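/* Emit a call to the mini_llvmonly_clear_exception icall, which clears the currently thrown exception */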
static LLVMValueRef
mono_llvm_emit_clear_exception_call (EmitContext *ctx, LLVMBuilderRef builder)
{
const MonoJitICallId icall_id = MONO_JIT_ICALL_mini_llvmonly_clear_exception;
LLVMTypeRef call_sig = LLVMFunctionType (LLVMVoidType (), NULL, 0, FALSE);
LLVMValueRef callee = NULL;
if (!callee) {
callee = get_callee (ctx, call_sig, MONO_PATCH_INFO_JIT_ICALL_ID, GUINT_TO_POINTER (icall_id));
}
g_assert (builder && callee);
return LLVMBuildCall (builder, callee, NULL, 0, "");
}
static LLVMValueRef
mono_llvm_emit_load_exception_call (EmitContext *ctx, LLVMBuilderRef builder)
{
const MonoJitICallId icall_id = MONO_JIT_ICALL_mini_llvmonly_load_exception;
LLVMTypeRef call_sig = LLVMFunctionType (ObjRefType (), NULL, 0, FALSE);
LLVMValueRef callee = NULL;
g_assert (ctx->cfg->compile_aot);
if (!callee) {
callee = get_callee (ctx, call_sig, MONO_PATCH_INFO_JIT_ICALL_ID, GUINT_TO_POINTER (icall_id));
}
g_assert (builder && callee);
return LLVMBuildCall (builder, callee, NULL, 0, "load_exception");
}
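/*
 * mono_llvm_emit_match_exception_call:
 *
 * Emit a call to the mini_llvmonly_match_exception icall, which returns the index
 * of the clause in [REGION_START, REGION_END] matching the current exception,
 * or -1 if none does.
 */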
static LLVMValueRef
mono_llvm_emit_match_exception_call (EmitContext *ctx, LLVMBuilderRef builder, gint32 region_start, gint32 region_end)
{
const char *icall_name = "mini_llvmonly_match_exception";
const MonoJitICallId icall_id = MONO_JIT_ICALL_mini_llvmonly_match_exception;
ctx->builder = builder;
LLVMValueRef args [5];
const int num_args = G_N_ELEMENTS (args);
args [0] = convert (ctx, get_aotconst (ctx, MONO_PATCH_INFO_AOT_JIT_INFO, GINT_TO_POINTER (ctx->cfg->method_index), LLVMPointerType (IntPtrType (), 0)), IntPtrType ());
args [1] = LLVMConstInt (LLVMInt32Type (), region_start, 0);
args [2] = LLVMConstInt (LLVMInt32Type (), region_end, 0);
if (ctx->cfg->rgctx_var) {
if (ctx->cfg->llvm_only) {
args [3] = convert (ctx, ctx->rgctx_arg, IntPtrType ());
} else {
LLVMValueRef rgctx_alloc = ctx->addresses [ctx->cfg->rgctx_var->dreg];
g_assert (rgctx_alloc);
args [3] = LLVMBuildLoad (builder, convert (ctx, rgctx_alloc, LLVMPointerType (IntPtrType (), 0)), "");
}
} else {
args [3] = LLVMConstInt (IntPtrType (), 0, 0);
}
if (ctx->this_arg)
args [4] = convert (ctx, ctx->this_arg, IntPtrType ());
else
args [4] = LLVMConstInt (IntPtrType (), 0, 0);
LLVMTypeRef match_sig = LLVMFunctionType5 (LLVMInt32Type (), IntPtrType (), LLVMInt32Type (), LLVMInt32Type (), IntPtrType (), IntPtrType (), FALSE);
LLVMValueRef callee;
g_assert (ctx->cfg->compile_aot);
ctx->builder = builder;
// get_callee expects ctx->builder to be the emitting builder
callee = get_callee (ctx, match_sig, MONO_PATCH_INFO_JIT_ICALL_ID, GUINT_TO_POINTER (icall_id));
g_assert (builder && callee);
g_assert (ctx->ex_var);
return LLVMBuildCall (builder, callee, args, num_args, icall_name);
}
// FIXME: This won't work because the code-finding makes this
// not a constant.
/*#define MONO_PERSONALITY_DEBUG*/
#ifdef MONO_PERSONALITY_DEBUG
static const gboolean use_mono_personality_debug = TRUE;
static const char *default_personality_name = "mono_debug_personality";
#else
static const gboolean use_mono_personality_debug = FALSE;
static const char *default_personality_name = "__gxx_personality_v0";
#endif
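/* Return the { i8*, i32 } struct type produced by C++ landing pads */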
static LLVMTypeRef
default_cpp_lpad_exc_signature (void)
{
static LLVMTypeRef sig;
if (!sig) {
LLVMTypeRef signature [2];
signature [0] = LLVMPointerType (LLVMInt8Type (), 0);
signature [1] = LLVMInt32Type ();
sig = LLVMStructType (signature, 2, FALSE);
}
return sig;
}
static LLVMValueRef
get_mono_personality (EmitContext *ctx)
{
LLVMValueRef personality = NULL;
LLVMTypeRef personality_type = LLVMFunctionType (LLVMInt32Type (), NULL, 0, TRUE);
g_assert (ctx->cfg->compile_aot);
if (!use_mono_personality_debug) {
personality = LLVMGetNamedFunction (ctx->lmodule, default_personality_name);
} else {
personality = get_callee (ctx, personality_type, MONO_PATCH_INFO_JIT_ICALL_ID, GUINT_TO_POINTER (MONO_JIT_ICALL_mono_debug_personality));
}
g_assert (personality);
return personality;
}
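/*
 * emit_landing_pad:
 *
 * Emit a landing pad for the group of GROUP_SIZE clauses starting at GROUP_INDEX.
 * In the deopt case it calls mini_llvmonly_resume_exception_il_state (), which runs
 * the handler and the rest of the method from the saved IL state; otherwise it
 * matches the exception against the clauses of the group and branches to the
 * matching handler, resuming exception propagation if nothing matches.
 */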
static LLVMBasicBlockRef
emit_landing_pad (EmitContext *ctx, int group_index, int group_size)
{
MonoCompile *cfg = ctx->cfg;
LLVMBuilderRef old_builder = ctx->builder;
MonoExceptionClause *group_start = cfg->header->clauses + group_index;
LLVMBuilderRef lpadBuilder = create_builder (ctx);
ctx->builder = lpadBuilder;
MonoBasicBlock *handler_bb = cfg->cil_offset_to_bb [CLAUSE_START (group_start)];
g_assert (handler_bb);
// <resultval> = landingpad <somety> personality <type> <pers_fn> <clause>+
LLVMValueRef personality = get_mono_personality (ctx);
g_assert (personality);
char *bb_name = g_strdup_printf ("LPAD%d_BB", group_index);
LLVMBasicBlockRef lpad_bb = gen_bb (ctx, bb_name);
g_free (bb_name);
LLVMPositionBuilderAtEnd (lpadBuilder, lpad_bb);
LLVMValueRef landing_pad = LLVMBuildLandingPad (lpadBuilder, default_cpp_lpad_exc_signature (), personality, 0, "");
g_assert (landing_pad);
LLVMValueRef cast = LLVMBuildBitCast (lpadBuilder, ctx->module->sentinel_exception, LLVMPointerType (LLVMInt8Type (), 0), "int8TypeInfo");
LLVMAddClause (landing_pad, cast);
if (ctx->cfg->deopt) {
/*
* Call mini_llvmonly_resume_exception_il_state (lmf, il_state)
*
* The call will execute the catch clause and the rest of the method and store the return
* value into ctx->il_state_ret.
*/
if (!ctx->has_catch) {
/* Unused */
LLVMBuildUnreachable (lpadBuilder);
return lpad_bb;
}
const MonoJitICallId icall_id = MONO_JIT_ICALL_mini_llvmonly_resume_exception_il_state;
LLVMValueRef callee;
LLVMValueRef args [2];
LLVMTypeRef fun_sig = LLVMFunctionType2 (LLVMVoidType (), IntPtrType (), IntPtrType (), FALSE);
callee = get_callee (ctx, fun_sig, MONO_PATCH_INFO_JIT_ICALL_ID, GUINT_TO_POINTER (icall_id));
g_assert (ctx->cfg->lmf_var);
g_assert (ctx->addresses [ctx->cfg->lmf_var->dreg]);
args [0] = LLVMBuildPtrToInt (ctx->builder, ctx->addresses [ctx->cfg->lmf_var->dreg], IntPtrType (), "");
args [1] = LLVMBuildPtrToInt (ctx->builder, ctx->il_state, IntPtrType (), "");
emit_call (ctx, NULL, &ctx->builder, callee, args, 2);
/* Return the value set in ctx->il_state_ret */
LLVMTypeRef ret_type = LLVMGetReturnType (LLVMGetElementType (LLVMTypeOf (ctx->lmethod)));
LLVMBuilderRef builder = ctx->builder;
LLVMValueRef addr, retval, gep, indexes [2];
switch (ctx->linfo->ret.storage) {
case LLVMArgNone:
LLVMBuildRetVoid (builder);
break;
case LLVMArgNormal:
case LLVMArgWasmVtypeAsScalar:
case LLVMArgVtypeInReg: {
if (ctx->sig->ret->type == MONO_TYPE_VOID) {
LLVMBuildRetVoid (builder);
break;
}
addr = ctx->il_state_ret;
g_assert (addr);
addr = convert (ctx, ctx->il_state_ret, LLVMPointerType (ret_type, 0));
indexes [0] = LLVMConstInt (LLVMInt32Type (), 0, FALSE);
indexes [1] = LLVMConstInt (LLVMInt32Type (), 0, FALSE);
gep = LLVMBuildGEP (builder, addr, indexes, 1, "");
LLVMBuildRet (builder, LLVMBuildLoad (builder, gep, ""));
break;
}
case LLVMArgVtypeRetAddr: {
LLVMValueRef ret_addr;
g_assert (cfg->vret_addr);
ret_addr = ctx->values [cfg->vret_addr->dreg];
addr = ctx->il_state_ret;
g_assert (addr);
/* The ret value is in il_state_ret, copy it to the memory pointed to by the vret arg */
ret_type = type_to_llvm_type (ctx, ctx->sig->ret);
indexes [0] = LLVMConstInt (LLVMInt32Type (), 0, FALSE);
indexes [1] = LLVMConstInt (LLVMInt32Type (), 0, FALSE);
gep = LLVMBuildGEP (builder, addr, indexes, 1, "");
retval = convert (ctx, LLVMBuildLoad (builder, gep, ""), ret_type);
LLVMBuildStore (builder, retval, convert (ctx, ret_addr, LLVMPointerType (ret_type, 0)));
LLVMBuildRetVoid (builder);
break;
}
default:
g_assert_not_reached ();
break;
}
return lpad_bb;
}
LLVMBasicBlockRef resume_bb = gen_bb (ctx, "RESUME_BB");
LLVMBuilderRef resume_builder = create_builder (ctx);
ctx->builder = resume_builder;
LLVMPositionBuilderAtEnd (resume_builder, resume_bb);
emit_resume_eh (ctx, handler_bb);
// Build match
ctx->builder = lpadBuilder;
LLVMPositionBuilderAtEnd (lpadBuilder, lpad_bb);
gboolean finally_only = TRUE;
MonoExceptionClause *group_cursor = group_start;
for (int i = 0; i < group_size; i ++) {
if (!(group_cursor->flags & MONO_EXCEPTION_CLAUSE_FINALLY || group_cursor->flags & MONO_EXCEPTION_CLAUSE_FAULT))
finally_only = FALSE;
group_cursor++;
}
// FIXME:
// Handle landing pad inlining
if (!finally_only) {
// So at each level of the exception stack we will match the exception again.
// During that match, we need to compare against the handler types for the current
// protected region. We send the try start and end so that we can only check against
// handlers for this lexical protected region.
LLVMValueRef match = mono_llvm_emit_match_exception_call (ctx, lpadBuilder, group_start->try_offset, group_start->try_offset + group_start->try_len);
// if it returns -1, resume
LLVMValueRef switch_ins = LLVMBuildSwitch (lpadBuilder, match, resume_bb, group_size);
// else move to that target bb
for (int i = 0; i < group_size; i++) {
MonoExceptionClause *clause = group_start + i;
int clause_index = clause - cfg->header->clauses;
MonoBasicBlock *handler_bb = (MonoBasicBlock*)g_hash_table_lookup (ctx->clause_to_handler, GINT_TO_POINTER (clause_index));
g_assert (handler_bb);
g_assert (ctx->bblocks [handler_bb->block_num].call_handler_target_bb);
LLVMAddCase (switch_ins, LLVMConstInt (LLVMInt32Type (), clause_index, FALSE), ctx->bblocks [handler_bb->block_num].call_handler_target_bb);
}
} else {
int clause_index = group_start - cfg->header->clauses;
MonoBasicBlock *finally_bb = (MonoBasicBlock*)g_hash_table_lookup (ctx->clause_to_handler, GINT_TO_POINTER (clause_index));
g_assert (finally_bb);
LLVMBuildBr (ctx->builder, ctx->bblocks [finally_bb->block_num].call_handler_target_bb);
}
ctx->builder = old_builder;
return lpad_bb;
}
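/*
 * create_const_vector:
 *
 *   Return a constant vector with COUNT elements of type T, initialized from VALS.
 */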
static LLVMValueRef
create_const_vector (LLVMTypeRef t, const int *vals, int count)
{
g_assert (count <= MAX_VECTOR_ELEMS);
LLVMValueRef llvm_vals [MAX_VECTOR_ELEMS];
for (int i = 0; i < count; i++)
llvm_vals [i] = LLVMConstInt (t, vals [i], FALSE);
return LLVMConstVector (llvm_vals, count);
}
static LLVMValueRef
create_const_vector_i32 (const int *mask, int count)
{
return create_const_vector (LLVMInt32Type (), mask, count);
}
static LLVMValueRef
create_const_vector_4_i32 (int v0, int v1, int v2, int v3)
{
LLVMValueRef mask [4];
mask [0] = LLVMConstInt (LLVMInt32Type (), v0, FALSE);
mask [1] = LLVMConstInt (LLVMInt32Type (), v1, FALSE);
mask [2] = LLVMConstInt (LLVMInt32Type (), v2, FALSE);
mask [3] = LLVMConstInt (LLVMInt32Type (), v3, FALSE);
return LLVMConstVector (mask, 4);
}
static LLVMValueRef
create_const_vector_2_i32 (int v0, int v1)
{
LLVMValueRef mask [2];
mask [0] = LLVMConstInt (LLVMInt32Type (), v0, FALSE);
mask [1] = LLVMConstInt (LLVMInt32Type (), v1, FALSE);
return LLVMConstVector (mask, 2);
}
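/*
 * broadcast_element:
 *
 *   Return a COUNT element vector with ELEM in every lane, built with an
 * insertelement followed by a zero-mask shufflevector.
 */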
static LLVMValueRef
broadcast_element (EmitContext *ctx, LLVMValueRef elem, int count)
{
LLVMTypeRef t = LLVMTypeOf (elem);
LLVMTypeRef init_vec_t = LLVMVectorType (t, 1);
LLVMValueRef undef = LLVMGetUndef (init_vec_t);
LLVMValueRef vec = LLVMBuildInsertElement (ctx->builder, undef, elem, const_int32 (0), "");
LLVMValueRef select_zero = LLVMConstNull (LLVMVectorType (LLVMInt32Type (), count));
return LLVMBuildShuffleVector (ctx->builder, vec, undef, select_zero, "broadcast");
}
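/*
 * broadcast_constant:
 *
 *   Return a COUNT element constant vector with CONST_VAL in every lane.
 */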
static LLVMValueRef
broadcast_constant (int const_val, LLVMTypeRef elem_t, int count)
{
int vals [MAX_VECTOR_ELEMS];
for (int i = 0; i < count; ++i)
vals [i] = const_val;
return create_const_vector (elem_t, vals, count);
}
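/*
 * create_shift_vector:
 *
 *   Convert SHIFTAMT to the element type of TYPE_DONOR and broadcast it to every
 * lane, since the LLVM vector shift instructions take a vector shift amount.
 */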
static LLVMValueRef
create_shift_vector (EmitContext *ctx, LLVMValueRef type_donor, LLVMValueRef shiftamt)
{
LLVMTypeRef t = LLVMTypeOf (type_donor);
unsigned int elems = LLVMGetVectorSize (t);
LLVMTypeRef elem_t = LLVMGetElementType (t);
shiftamt = convert_full (ctx, shiftamt, elem_t, TRUE);
shiftamt = broadcast_element (ctx, shiftamt, elems);
return shiftamt;
}
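/*
 * to_integral_vector_type:
 *
 *   Return an integer vector type with the same element count and element width as T.
 */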
static LLVMTypeRef
to_integral_vector_type (LLVMTypeRef t)
{
unsigned int elems = LLVMGetVectorSize (t);
LLVMTypeRef elem_t = LLVMGetElementType (t);
unsigned int bits = mono_llvm_get_prim_size_bits (elem_t);
return LLVMVectorType (LLVMIntType (bits), elems);
}
static LLVMValueRef
bitcast_to_integral (EmitContext *ctx, LLVMValueRef vec)
{
LLVMTypeRef src_t = LLVMTypeOf (vec);
LLVMTypeRef dst_t = to_integral_vector_type (src_t);
if (dst_t != src_t)
return LLVMBuildBitCast (ctx->builder, vec, dst_t, "bc2i");
return vec;
}
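/*
 * extract_high_elements:
 *
 *   Return the upper half of the elements of SRC_VEC as a vector of half the width.
 */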
static LLVMValueRef
extract_high_elements (EmitContext *ctx, LLVMValueRef src_vec)
{
LLVMTypeRef src_t = LLVMTypeOf (src_vec);
unsigned int src_elems = LLVMGetVectorSize (src_t);
unsigned int dst_elems = src_elems / 2;
int mask [MAX_VECTOR_ELEMS] = { 0 };
for (int i = 0; i < dst_elems; ++i)
mask [i] = dst_elems + i;
return LLVMBuildShuffleVector (ctx->builder, src_vec, LLVMGetUndef (src_t), create_const_vector_i32 (mask, dst_elems), "extract_high");
}
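/*
 * keep_lowest_element:
 *
 *   Return a DST_T vector whose lane 0 is taken from VEC and whose remaining lanes
 * are zero.
 */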
static LLVMValueRef
keep_lowest_element (EmitContext *ctx, LLVMTypeRef dst_t, LLVMValueRef vec)
{
LLVMTypeRef t = LLVMTypeOf (vec);
g_assert (LLVMGetElementType (dst_t) == LLVMGetElementType (t));
unsigned int elems = LLVMGetVectorSize (dst_t);
unsigned int src_elems = LLVMGetVectorSize (t);
int mask [MAX_VECTOR_ELEMS] = { 0 };
mask [0] = 0;
for (unsigned int i = 1; i < elems; ++i)
mask [i] = src_elems;
return LLVMBuildShuffleVector (ctx->builder, vec, LLVMConstNull (t), create_const_vector_i32 (mask, elems), "keep_lowest");
}
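/*
 * concatenate_vectors:
 *
 *   Return a double-width vector containing the elements of XS followed by the
 * elements of YS.
 */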
static LLVMValueRef
concatenate_vectors (EmitContext *ctx, LLVMValueRef xs, LLVMValueRef ys)
{
LLVMTypeRef t = LLVMTypeOf (xs);
unsigned int elems = LLVMGetVectorSize (t) * 2;
int mask [MAX_VECTOR_ELEMS] = { 0 };
for (int i = 0; i < elems; ++i)
mask [i] = i;
return LLVMBuildShuffleVector (ctx->builder, xs, ys, create_const_vector_i32 (mask, elems), "concat_vecs");
}
static LLVMValueRef
scalar_from_vector (EmitContext *ctx, LLVMValueRef xs)
{
return LLVMBuildExtractElement (ctx->builder, xs, const_int32 (0), "v2s");
}
static LLVMValueRef
vector_from_scalar (EmitContext *ctx, LLVMTypeRef type, LLVMValueRef x)
{
return LLVMBuildInsertElement (ctx->builder, LLVMConstNull (type), x, const_int32 (0), "s2v");
}
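/*
 * The immediate_unroll_* helpers unroll an operation which needs a constant argument
 * over all possible values of that argument: they emit a switch on the runtime value
 * with one case per constant, and merge the per-case results with a phi node in a
 * continuation bblock.
 */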
typedef struct {
EmitContext *ctx;
MonoBasicBlock *bb;
LLVMBasicBlockRef continuation;
LLVMValueRef phi;
LLVMValueRef switch_ins;
LLVMBasicBlockRef tmp_block;
LLVMBasicBlockRef default_case;
LLVMTypeRef switch_index_type;
const char *name;
int max_cases;
int i;
} ImmediateUnrollCtx;
static ImmediateUnrollCtx
immediate_unroll_begin (
EmitContext *ctx, MonoBasicBlock *bb, int max_cases,
LLVMValueRef switch_index, LLVMTypeRef return_type, const char *name)
{
LLVMBasicBlockRef default_case = gen_bb (ctx, name);
LLVMBasicBlockRef continuation = gen_bb (ctx, name);
LLVMValueRef switch_ins = LLVMBuildSwitch (ctx->builder, switch_index, default_case, max_cases);
LLVMPositionBuilderAtEnd (ctx->builder, continuation);
LLVMValueRef phi = LLVMBuildPhi (ctx->builder, return_type, name);
ImmediateUnrollCtx ictx = { 0 };
ictx.ctx = ctx;
ictx.bb = bb;
ictx.continuation = continuation;
ictx.phi = phi;
ictx.switch_ins = switch_ins;
ictx.default_case = default_case;
ictx.switch_index_type = LLVMTypeOf (switch_index);
ictx.name = name;
ictx.max_cases = max_cases;
return ictx;
}
static gboolean
immediate_unroll_next (ImmediateUnrollCtx *ictx, int *i)
{
if (ictx->i >= ictx->max_cases)
return FALSE;
ictx->tmp_block = gen_bb (ictx->ctx, ictx->name);
LLVMPositionBuilderAtEnd (ictx->ctx->builder, ictx->tmp_block);
*i = ictx->i;
++ictx->i;
return TRUE;
}
static void
immediate_unroll_commit (ImmediateUnrollCtx *ictx, int switch_const, LLVMValueRef value)
{
LLVMBuildBr (ictx->ctx->builder, ictx->continuation);
LLVMAddCase (ictx->switch_ins, LLVMConstInt (ictx->switch_index_type, switch_const, FALSE), ictx->tmp_block);
LLVMAddIncoming (ictx->phi, &value, &ictx->tmp_block, 1);
}
static void
immediate_unroll_default (ImmediateUnrollCtx *ictx)
{
LLVMPositionBuilderAtEnd (ictx->ctx->builder, ictx->default_case);
}
static void
immediate_unroll_commit_default (ImmediateUnrollCtx *ictx, LLVMValueRef value)
{
LLVMBuildBr (ictx->ctx->builder, ictx->continuation);
LLVMAddIncoming (ictx->phi, &value, &ictx->default_case, 1);
}
static void
immediate_unroll_unreachable_default (ImmediateUnrollCtx *ictx)
{
immediate_unroll_default (ictx);
LLVMBuildUnreachable (ictx->ctx->builder);
}
static LLVMValueRef
immediate_unroll_end (ImmediateUnrollCtx *ictx, LLVMBasicBlockRef *continuation)
{
EmitContext *ctx = ictx->ctx;
LLVMBuilderRef builder = ctx->builder;
LLVMPositionBuilderAtEnd (builder, ictx->continuation);
*continuation = ictx->continuation;
ctx->bblocks [ictx->bb->block_num].end_bblock = ictx->continuation;
return ictx->phi;
}
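/*
 * The scalar_op_from_vector_op_* helpers perform a vector operation on a single
 * element. On targets which lack the scalar instruction for narrow element types
 * (see check_needs_fake_scalar_op ()), the full vector operation is used instead
 * and only the lowest lane of the result is kept.
 */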
typedef struct {
EmitContext *ctx;
LLVMTypeRef intermediate_type;
LLVMTypeRef return_type;
gboolean needs_fake_scalar_op;
llvm_ovr_tag_t ovr_tag;
} ScalarOpFromVectorOpCtx;
static inline gboolean
check_needs_fake_scalar_op (MonoTypeEnum type)
{
#if defined(TARGET_ARM64)
switch (type) {
case MONO_TYPE_U1:
case MONO_TYPE_I1:
case MONO_TYPE_U2:
case MONO_TYPE_I2:
return TRUE;
}
#endif
return FALSE;
}
static ScalarOpFromVectorOpCtx
scalar_op_from_vector_op (EmitContext *ctx, LLVMTypeRef return_type, MonoInst *ins)
{
ScalarOpFromVectorOpCtx ret = { 0 };
ret.ctx = ctx;
ret.intermediate_type = return_type;
ret.return_type = return_type;
ret.needs_fake_scalar_op = check_needs_fake_scalar_op (inst_c1_type (ins));
ret.ovr_tag = ovr_tag_from_llvm_type (return_type);
if (!ret.needs_fake_scalar_op) {
ret.ovr_tag = ovr_tag_force_scalar (ret.ovr_tag);
ret.intermediate_type = ovr_tag_to_llvm_type (ret.ovr_tag);
}
return ret;
}
static void
scalar_op_from_vector_op_process_args (ScalarOpFromVectorOpCtx *sctx, LLVMValueRef *args, int num_args)
{
if (!sctx->needs_fake_scalar_op)
for (int i = 0; i < num_args; ++i)
args [i] = scalar_from_vector (sctx->ctx, args [i]);
}
static LLVMValueRef
scalar_op_from_vector_op_process_result (ScalarOpFromVectorOpCtx *sctx, LLVMValueRef result)
{
if (sctx->needs_fake_scalar_op)
return keep_lowest_element (sctx->ctx, LLVMTypeOf (result), result);
return vector_from_scalar (sctx->ctx, sctx->return_type, result);
}
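/*
 * emit_llvmonly_handler_start:
 *
 *   Emit the start of an exception handler bblock in llvmonly mode: for catch
 * clauses, load the pending exception, store it into the IL level exvar and clear
 * it, then make the handler code branch to CBB.
 */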
static void
emit_llvmonly_handler_start (EmitContext *ctx, MonoBasicBlock *bb, LLVMBasicBlockRef cbb)
{
int clause_index = MONO_REGION_CLAUSE_INDEX (bb->region);
MonoExceptionClause *clause = &ctx->cfg->header->clauses [clause_index];
// Make exception available to catch blocks
if (!(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY || clause->flags & MONO_EXCEPTION_CLAUSE_FAULT)) {
LLVMValueRef mono_exc = mono_llvm_emit_load_exception_call (ctx, ctx->builder);
g_assert (ctx->ex_var);
LLVMBuildStore (ctx->builder, LLVMBuildBitCast (ctx->builder, mono_exc, ObjRefType (), ""), ctx->ex_var);
if (bb->in_scount == 1) {
MonoInst *exvar = bb->in_stack [0];
g_assert (!ctx->values [exvar->dreg]);
g_assert (ctx->ex_var);
ctx->values [exvar->dreg] = LLVMBuildLoad (ctx->builder, ctx->ex_var, "save_exception");
emit_volatile_store (ctx, exvar->dreg);
}
mono_llvm_emit_clear_exception_call (ctx, ctx->builder);
}
#ifdef TARGET_WASM
if (ctx->cfg->lmf_var && !ctx->cfg->deopt) {
LLVMValueRef callee;
LLVMValueRef args [1];
LLVMTypeRef sig = LLVMFunctionType1 (LLVMVoidType (), ctx->module->ptr_type, FALSE);
/*
* There might be an LMF on the stack inserted to enable stack walking, see
* method_needs_stack_walk (). If an exception is thrown, the LMF popping code
* is not executed, so do it here.
*/
g_assert (ctx->addresses [ctx->cfg->lmf_var->dreg]);
callee = get_callee (ctx, sig, MONO_PATCH_INFO_JIT_ICALL_ADDR, GUINT_TO_POINTER (MONO_JIT_ICALL_mini_llvmonly_pop_lmf));
args [0] = convert (ctx, ctx->addresses [ctx->cfg->lmf_var->dreg], ctx->module->ptr_type);
emit_call (ctx, bb, &ctx->builder, callee, args, 1);
}
#endif
LLVMBuilderRef handler_builder = create_builder (ctx);
LLVMBasicBlockRef target_bb = ctx->bblocks [bb->block_num].call_handler_target_bb;
LLVMPositionBuilderAtEnd (handler_builder, target_bb);
// Make the handler code end with a jump to cbb
LLVMBuildBr (handler_builder, cbb);
}
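/*
 * emit_handler_start:
 *
 *   Emit a landing pad for BB using the mono personality function, store the
 * exception into the exvar, then switch on the exception selector to branch to
 * the landing pads of nested clauses.
 */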
static void
emit_handler_start (EmitContext *ctx, MonoBasicBlock *bb, LLVMBuilderRef builder)
{
MonoCompile *cfg = ctx->cfg;
LLVMValueRef *values = ctx->values;
LLVMModuleRef lmodule = ctx->lmodule;
BBInfo *bblocks = ctx->bblocks;
LLVMTypeRef i8ptr;
LLVMValueRef personality;
LLVMValueRef landing_pad;
LLVMBasicBlockRef target_bb;
MonoInst *exvar;
static int ti_generator;
char ti_name [128];
LLVMValueRef type_info;
int clause_index;
GSList *l;
// <resultval> = landingpad <somety> personality <type> <pers_fn> <clause>+
if (cfg->compile_aot) {
/* Use a dummy personality function */
personality = LLVMGetNamedFunction (lmodule, "mono_personality");
g_assert (personality);
} else {
/* Can't cache this as each method is in its own llvm module */
LLVMTypeRef personality_type = LLVMFunctionType (LLVMInt32Type (), NULL, 0, TRUE);
personality = LLVMAddFunction (ctx->lmodule, "mono_personality", personality_type);
mono_llvm_add_func_attr (personality, LLVM_ATTR_NO_UNWIND);
LLVMBasicBlockRef entry_bb = LLVMAppendBasicBlock (personality, "ENTRY");
LLVMBuilderRef builder2 = LLVMCreateBuilder ();
LLVMPositionBuilderAtEnd (builder2, entry_bb);
LLVMBuildRet (builder2, LLVMConstInt (LLVMInt32Type (), 0, FALSE));
LLVMDisposeBuilder (builder2);
}
i8ptr = LLVMPointerType (LLVMInt8Type (), 0);
clause_index = (mono_get_block_region_notry (cfg, bb->region) >> 8) - 1;
/*
* Create the type info
*/
sprintf (ti_name, "type_info_%d", ti_generator);
ti_generator ++;
if (cfg->compile_aot) {
/* decode_eh_frame () in aot-runtime.c will decode this */
type_info = LLVMAddGlobal (lmodule, LLVMInt32Type (), ti_name);
LLVMSetInitializer (type_info, LLVMConstInt (LLVMInt32Type (), clause_index, FALSE));
/*
* These symbols are not really used; the clause_index is embedded into the EH tables generated by DwarfMonoException in LLVM.
*/
LLVMSetLinkage (type_info, LLVMInternalLinkage);
} else {
type_info = LLVMAddGlobal (lmodule, LLVMInt32Type (), ti_name);
LLVMSetInitializer (type_info, LLVMConstInt (LLVMInt32Type (), clause_index, FALSE));
}
{
LLVMTypeRef members [2], ret_type;
members [0] = i8ptr;
members [1] = LLVMInt32Type ();
ret_type = LLVMStructType (members, 2, FALSE);
landing_pad = LLVMBuildLandingPad (builder, ret_type, personality, 1, "");
LLVMAddClause (landing_pad, type_info);
/* Store the exception into the exvar */
if (ctx->ex_var)
LLVMBuildStore (builder, convert (ctx, LLVMBuildExtractValue (builder, landing_pad, 0, "ex_obj"), ObjRefType ()), ctx->ex_var);
}
/*
* LLVM throw sites are associated with one landing pad, and LLVM-generated
* code expects control to be transferred to this landing pad even in the
* presence of nested clauses. The landing pad needs to branch to the landing
* pads belonging to nested clauses based on the selector value returned by
* the landing pad instruction, which is passed to the landing pad in a
* register by the EH code.
*/
target_bb = bblocks [bb->block_num].call_handler_target_bb;
g_assert (target_bb);
/*
* Branch to the correct landing pad
*/
LLVMValueRef ex_selector = LLVMBuildExtractValue (builder, landing_pad, 1, "ex_selector");
LLVMValueRef switch_ins = LLVMBuildSwitch (builder, ex_selector, target_bb, 0);
for (l = ctx->nested_in [clause_index]; l; l = l->next) {
int nesting_clause_index = GPOINTER_TO_INT (l->data);
MonoBasicBlock *handler_bb;
handler_bb = (MonoBasicBlock*)g_hash_table_lookup (ctx->clause_to_handler, GINT_TO_POINTER (nesting_clause_index));
g_assert (handler_bb);
g_assert (ctx->bblocks [handler_bb->block_num].call_handler_target_bb);
LLVMAddCase (switch_ins, LLVMConstInt (LLVMInt32Type (), nesting_clause_index, FALSE), ctx->bblocks [handler_bb->block_num].call_handler_target_bb);
}
/* Start a new bblock which CALL_HANDLER can branch to */
ctx->builder = builder = create_builder (ctx);
LLVMPositionBuilderAtEnd (ctx->builder, target_bb);
ctx->bblocks [bb->block_num].end_bblock = target_bb;
/* Store the exception into the IL level exvar */
if (bb->in_scount == 1) {
exvar = bb->in_stack [0];
// FIXME: This is shared with filter clauses ?
g_assert (!values [exvar->dreg]);
g_assert (ctx->ex_var);
values [exvar->dreg] = LLVMBuildLoad (builder, ctx->ex_var, "");
emit_volatile_store (ctx, exvar->dreg);
}
/* Make normal branches to the start of the clause branch to the new bblock */
bblocks [bb->block_num].bblock = target_bb;
}
static LLVMValueRef
get_double_const (MonoCompile *cfg, double val)
{
//#ifdef TARGET_WASM
#if 0
//Wasm requires us to canonicalize NaNs.
if (mono_isnan (val))
*(gint64 *)&val = 0x7FF8000000000000ll;
#endif
return LLVMConstReal (LLVMDoubleType (), val);
}
static LLVMValueRef
get_float_const (MonoCompile *cfg, float val)
{
//#ifdef TARGET_WASM
#if 0
if (mono_isnan (val))
*(int *)&val = 0x7FC00000;
#endif
return LLVMConstReal (LLVMFloatType (), val);
}
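/*
 * call_overloaded_intrins:
 *
 *   Emit a call to the intrinsic identified by ID and the overload tag OVR_TAG,
 * converting ARGS to the parameter types of the intrinsic when needed.
 */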
static LLVMValueRef
call_overloaded_intrins (EmitContext *ctx, int id, llvm_ovr_tag_t ovr_tag, LLVMValueRef *args, const char *name)
{
int key = key_from_id_and_tag (id, ovr_tag);
LLVMValueRef intrins = get_intrins (ctx, key);
int nargs = LLVMCountParamTypes (LLVMGetElementType (LLVMTypeOf (intrins)));
for (int i = 0; i < nargs; ++i) {
LLVMTypeRef t1 = LLVMTypeOf (args [i]);
LLVMTypeRef t2 = LLVMTypeOf (LLVMGetParam (intrins, i));
if (t1 != t2)
args [i] = convert (ctx, args [i], t2);
}
return LLVMBuildCall (ctx->builder, intrins, args, nargs, name);
}
static LLVMValueRef
call_intrins (EmitContext *ctx, int id, LLVMValueRef *args, const char *name)
{
return call_overloaded_intrins (ctx, id, 0, args, name);
}
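/*
 * process_bb:
 *
 *   Emit LLVM IR for the instructions in BB: phi nodes are handled first, then the
 * remaining instructions are translated one by one, loading volatile sregs and
 * storing volatile dregs as needed.
 */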
static void
process_bb (EmitContext *ctx, MonoBasicBlock *bb)
{
MonoCompile *cfg = ctx->cfg;
MonoMethodSignature *sig = ctx->sig;
LLVMValueRef method = ctx->lmethod;
LLVMValueRef *values = ctx->values;
LLVMValueRef *addresses = ctx->addresses;
LLVMCallInfo *linfo = ctx->linfo;
BBInfo *bblocks = ctx->bblocks;
MonoInst *ins;
LLVMBasicBlockRef cbb;
LLVMBuilderRef builder;
gboolean has_terminator;
LLVMValueRef v;
LLVMValueRef lhs, rhs, arg3;
int nins = 0;
cbb = get_end_bb (ctx, bb);
builder = create_builder (ctx);
ctx->builder = builder;
LLVMPositionBuilderAtEnd (builder, cbb);
if (!ctx_ok (ctx))
return;
if (cfg->interp_entry_only && bb != cfg->bb_init && bb != cfg->bb_entry && bb != cfg->bb_exit) {
/* The interp entry code is in bb_entry, skip the rest as we might not be able to compile it */
LLVMBuildUnreachable (builder);
return;
}
if (bb->flags & BB_EXCEPTION_HANDLER) {
if (!ctx->llvm_only && !bblocks [bb->block_num].invoke_target) {
set_failure (ctx, "handler without invokes");
return;
}
if (ctx->llvm_only)
emit_llvmonly_handler_start (ctx, bb, cbb);
else
emit_handler_start (ctx, bb, builder);
if (!ctx_ok (ctx))
return;
builder = ctx->builder;
}
/* Handle PHI nodes first */
/* They should be grouped at the start of the bb */
for (ins = bb->code; ins; ins = ins->next) {
emit_dbg_loc (ctx, builder, ins->cil_code);
if (ins->opcode == OP_NOP)
continue;
if (!MONO_IS_PHI (ins))
break;
if (cfg->interp_entry_only)
break;
int i;
gboolean empty = TRUE;
/* Check that all input bblocks really branch to us */
for (i = 0; i < bb->in_count; ++i) {
if (bb->in_bb [i]->last_ins && bb->in_bb [i]->last_ins->opcode == OP_NOT_REACHED)
ins->inst_phi_args [i + 1] = -1;
else
empty = FALSE;
}
if (empty) {
/* LLVM doesn't like phi instructions with zero operands */
ctx->is_dead [ins->dreg] = TRUE;
continue;
}
/* Created earlier, insert it now */
LLVMInsertIntoBuilder (builder, values [ins->dreg]);
for (i = 0; i < ins->inst_phi_args [0]; i++) {
int sreg1 = ins->inst_phi_args [i + 1];
int count, j;
/*
* Count the number of times the incoming bblock branches to us,
* since llvm requires a separate entry for each.
*/
if (bb->in_bb [i]->last_ins && bb->in_bb [i]->last_ins->opcode == OP_SWITCH) {
MonoInst *switch_ins = bb->in_bb [i]->last_ins;
count = 0;
for (j = 0; j < GPOINTER_TO_UINT (switch_ins->klass); ++j) {
if (switch_ins->inst_many_bb [j] == bb)
count ++;
}
} else {
count = 1;
}
/* Remember for later */
for (j = 0; j < count; ++j) {
PhiNode *node = (PhiNode*)mono_mempool_alloc0 (ctx->mempool, sizeof (PhiNode));
node->bb = bb;
node->phi = ins;
node->in_bb = bb->in_bb [i];
node->sreg = sreg1;
bblocks [bb->in_bb [i]->block_num].phi_nodes = g_slist_prepend_mempool (ctx->mempool, bblocks [bb->in_bb [i]->block_num].phi_nodes, node);
}
}
}
// Add volatile stores for PHI nodes
// These need to be emitted after the PHI nodes
for (ins = bb->code; ins; ins = ins->next) {
const char *spec = LLVM_INS_INFO (ins->opcode);
if (ins->opcode == OP_NOP)
continue;
if (!MONO_IS_PHI (ins))
break;
if (spec [MONO_INST_DEST] != 'v')
emit_volatile_store (ctx, ins->dreg);
}
has_terminator = FALSE;
for (ins = bb->code; ins; ins = ins->next) {
const char *spec = LLVM_INS_INFO (ins->opcode);
char *dname = NULL;
char dname_buf [128];
emit_dbg_loc (ctx, builder, ins->cil_code);
nins ++;
if (nins > 1000) {
/*
* Some steps in llc are non-linear in the size of basic blocks, see #5714.
* Start a new bblock.
* Prevent the bblocks from being merged by doing a volatile load + cond branch
* from localloc-ed memory.
*/
if (!cfg->llvm_only)
;//set_failure (ctx, "basic block too long");
if (!ctx->long_bb_break_var) {
ctx->long_bb_break_var = build_alloca_llvm_type_name (ctx, LLVMInt32Type (), 0, "long_bb_break");
mono_llvm_build_store (ctx->alloca_builder, LLVMConstInt (LLVMInt32Type (), 0, FALSE), ctx->long_bb_break_var, TRUE, LLVM_BARRIER_NONE);
}
cbb = gen_bb (ctx, "CONT_LONG_BB");
LLVMBasicBlockRef dummy_bb = gen_bb (ctx, "CONT_LONG_BB_DUMMY");
LLVMValueRef load = mono_llvm_build_load (builder, ctx->long_bb_break_var, "", TRUE);
/*
* The long_bb_break_var is initialized to 0 in the prolog, so this branch will always go to 'cbb',
* but llvm doesn't know that, so the branch is not going to be eliminated.
*/
LLVMValueRef cmp = LLVMBuildICmp (builder, LLVMIntEQ, load, LLVMConstInt (LLVMInt32Type (), 0, FALSE), "");
LLVMBuildCondBr (builder, cmp, cbb, dummy_bb);
/* Emit a dummy false bblock which does nothing but contains a volatile store so it cannot be eliminated */
ctx->builder = builder = create_builder (ctx);
LLVMPositionBuilderAtEnd (builder, dummy_bb);
mono_llvm_build_store (builder, LLVMConstInt (LLVMInt32Type (), 1, FALSE), ctx->long_bb_break_var, TRUE, LLVM_BARRIER_NONE);
LLVMBuildBr (builder, cbb);
ctx->builder = builder = create_builder (ctx);
LLVMPositionBuilderAtEnd (builder, cbb);
ctx->bblocks [bb->block_num].end_bblock = cbb;
nins = 0;
emit_dbg_loc (ctx, builder, ins->cil_code);
}
if (has_terminator)
/* There could be instructions after a terminator, skip them */
break;
if (spec [MONO_INST_DEST] != ' ' && !MONO_IS_STORE_MEMBASE (ins)) {
sprintf (dname_buf, "t%d", ins->dreg);
dname = dname_buf;
}
if (spec [MONO_INST_SRC1] != ' ' && spec [MONO_INST_SRC1] != 'v') {
MonoInst *var = get_vreg_to_inst (cfg, ins->sreg1);
if (var && var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT) && var->opcode != OP_GSHAREDVT_ARG_REGOFFSET) {
lhs = emit_volatile_load (ctx, ins->sreg1);
} else {
/* It is ok for SETRET to have an uninitialized argument */
if (!values [ins->sreg1] && ins->opcode != OP_SETRET) {
set_failure (ctx, "sreg1");
return;
}
lhs = values [ins->sreg1];
}
} else {
lhs = NULL;
}
if (spec [MONO_INST_SRC2] != ' ' && spec [MONO_INST_SRC2] != 'v') {
MonoInst *var = get_vreg_to_inst (cfg, ins->sreg2);
if (var && var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) {
rhs = emit_volatile_load (ctx, ins->sreg2);
} else {
if (!values [ins->sreg2]) {
set_failure (ctx, "sreg2");
return;
}
rhs = values [ins->sreg2];
}
} else {
rhs = NULL;
}
if (spec [MONO_INST_SRC3] != ' ' && spec [MONO_INST_SRC3] != 'v') {
MonoInst *var = get_vreg_to_inst (cfg, ins->sreg3);
if (var && var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) {
arg3 = emit_volatile_load (ctx, ins->sreg3);
} else {
if (!values [ins->sreg3]) {
set_failure (ctx, "sreg3");
return;
}
arg3 = values [ins->sreg3];
}
} else {
arg3 = NULL;
}
//mono_print_ins (ins);
gboolean skip_volatile_store = FALSE;
switch (ins->opcode) {
case OP_NOP:
case OP_NOT_NULL:
case OP_LIVERANGE_START:
case OP_LIVERANGE_END:
break;
case OP_ICONST:
values [ins->dreg] = LLVMConstInt (LLVMInt32Type (), ins->inst_c0, FALSE);
break;
case OP_I8CONST:
#if TARGET_SIZEOF_VOID_P == 4
values [ins->dreg] = LLVMConstInt (LLVMInt64Type (), GET_LONG_IMM (ins), FALSE);
#else
values [ins->dreg] = LLVMConstInt (LLVMInt64Type (), (gint64)ins->inst_c0, FALSE);
#endif
break;
case OP_R8CONST:
values [ins->dreg] = get_double_const (cfg, *(double*)ins->inst_p0);
break;
case OP_R4CONST:
values [ins->dreg] = get_float_const (cfg, *(float*)ins->inst_p0);
break;
case OP_DUMMY_ICONST:
values [ins->dreg] = LLVMConstInt (LLVMInt32Type (), 0, FALSE);
break;
case OP_DUMMY_I8CONST:
values [ins->dreg] = LLVMConstInt (LLVMInt64Type (), 0, FALSE);
break;
case OP_DUMMY_R8CONST:
values [ins->dreg] = LLVMConstReal (LLVMDoubleType (), 0.0f);
break;
case OP_BR: {
LLVMBasicBlockRef target_bb = get_bb (ctx, ins->inst_target_bb);
LLVMBuildBr (builder, target_bb);
has_terminator = TRUE;
break;
}
case OP_SWITCH: {
int i;
LLVMValueRef v;
char bb_name [128];
LLVMBasicBlockRef new_bb;
LLVMBuilderRef new_builder;
// The default branch is already handled
// FIXME: Handle it here
/* Start new bblock */
sprintf (bb_name, "SWITCH_DEFAULT_BB%d", ctx->default_index ++);
new_bb = LLVMAppendBasicBlock (ctx->lmethod, bb_name);
lhs = convert (ctx, lhs, LLVMInt32Type ());
v = LLVMBuildSwitch (builder, lhs, new_bb, GPOINTER_TO_UINT (ins->klass));
for (i = 0; i < GPOINTER_TO_UINT (ins->klass); ++i) {
MonoBasicBlock *target_bb = ins->inst_many_bb [i];
LLVMAddCase (v, LLVMConstInt (LLVMInt32Type (), i, FALSE), get_bb (ctx, target_bb));
}
new_builder = create_builder (ctx);
LLVMPositionBuilderAtEnd (new_builder, new_bb);
LLVMBuildUnreachable (new_builder);
has_terminator = TRUE;
g_assert (!ins->next);
break;
}
case OP_SETRET:
switch (linfo->ret.storage) {
case LLVMArgNormal:
case LLVMArgVtypeInReg:
case LLVMArgVtypeAsScalar:
case LLVMArgWasmVtypeAsScalar: {
LLVMTypeRef ret_type = LLVMGetReturnType (LLVMGetElementType (LLVMTypeOf (method)));
LLVMValueRef retval = LLVMGetUndef (ret_type);
gboolean src_in_reg = FALSE;
gboolean is_simd = MONO_CLASS_IS_SIMD (ctx->cfg, mono_class_from_mono_type_internal (sig->ret));
switch (linfo->ret.storage) {
case LLVMArgNormal: src_in_reg = TRUE; break;
case LLVMArgVtypeInReg: case LLVMArgVtypeAsScalar: src_in_reg = is_simd; break;
}
if (src_in_reg && (!lhs || ctx->is_dead [ins->sreg1])) {
/*
* The method did not set its return value, probably because it
* ends with a throw.
*/
LLVMBuildRet (builder, retval);
break;
}
switch (linfo->ret.storage) {
case LLVMArgNormal:
retval = convert (ctx, lhs, type_to_llvm_type (ctx, sig->ret));
break;
case LLVMArgVtypeInReg:
if (is_simd) {
/* The return type is an LLVM aggregate type, so a bare bitcast cannot be used to do this conversion. */
int width = mono_type_size (sig->ret, NULL);
int elems = width / TARGET_SIZEOF_VOID_P;
/* The return value might not be set if there is a throw */
LLVMValueRef val = LLVMBuildBitCast (builder, lhs, LLVMVectorType (IntPtrType (), elems), "");
for (int i = 0; i < elems; ++i) {
LLVMValueRef element = LLVMBuildExtractElement (builder, val, const_int32 (i), "");
retval = LLVMBuildInsertValue (builder, retval, element, i, "setret_simd_vtype_in_reg");
}
} else {
LLVMValueRef addr = LLVMBuildBitCast (builder, addresses [ins->sreg1], LLVMPointerType (ret_type, 0), "");
for (int i = 0; i < 2; ++i) {
if (linfo->ret.pair_storage [i] == LLVMArgInIReg) {
LLVMValueRef indexes [2], part_addr;
indexes [0] = LLVMConstInt (LLVMInt32Type (), 0, FALSE);
indexes [1] = LLVMConstInt (LLVMInt32Type (), i, FALSE);
part_addr = LLVMBuildGEP (builder, addr, indexes, 2, "");
retval = LLVMBuildInsertValue (builder, retval, LLVMBuildLoad (builder, part_addr, ""), i, "");
} else {
g_assert (linfo->ret.pair_storage [i] == LLVMArgNone);
}
}
}
break;
case LLVMArgVtypeAsScalar:
if (is_simd) {
retval = LLVMBuildBitCast (builder, values [ins->sreg1], ret_type, "setret_simd_vtype_as_scalar");
} else {
g_assert (addresses [ins->sreg1]);
retval = LLVMBuildLoad (builder, LLVMBuildBitCast (builder, addresses [ins->sreg1], LLVMPointerType (ret_type, 0), ""), "");
}
break;
case LLVMArgWasmVtypeAsScalar:
g_assert (addresses [ins->sreg1]);
retval = LLVMBuildLoad (builder, LLVMBuildBitCast (builder, addresses [ins->sreg1], LLVMPointerType (ret_type, 0), ""), "");
break;
}
LLVMBuildRet (builder, retval);
break;
}
case LLVMArgVtypeByRef: {
LLVMBuildRetVoid (builder);
break;
}
case LLVMArgGsharedvtFixed: {
LLVMTypeRef ret_type = type_to_llvm_type (ctx, sig->ret);
/* The return value is in lhs, need to store to the vret argument */
/* sreg1 might not be set */
if (lhs) {
g_assert (cfg->vret_addr);
g_assert (values [cfg->vret_addr->dreg]);
LLVMBuildStore (builder, convert (ctx, lhs, ret_type), convert (ctx, values [cfg->vret_addr->dreg], LLVMPointerType (ret_type, 0)));
}
LLVMBuildRetVoid (builder);
break;
}
case LLVMArgGsharedvtFixedVtype: {
/* Already set */
LLVMBuildRetVoid (builder);
break;
}
case LLVMArgGsharedvtVariable: {
/* Already set */
LLVMBuildRetVoid (builder);
break;
}
case LLVMArgVtypeRetAddr: {
LLVMBuildRetVoid (builder);
break;
}
case LLVMArgAsIArgs:
case LLVMArgFpStruct: {
LLVMTypeRef ret_type = LLVMGetReturnType (LLVMGetElementType (LLVMTypeOf (method)));
LLVMValueRef retval;
g_assert (addresses [ins->sreg1]);
retval = LLVMBuildLoad (builder, convert (ctx, addresses [ins->sreg1], LLVMPointerType (ret_type, 0)), "");
LLVMBuildRet (builder, retval);
break;
}
case LLVMArgNone:
LLVMBuildRetVoid (builder);
break;
default:
g_assert_not_reached ();
break;
}
has_terminator = TRUE;
break;
case OP_ICOMPARE:
case OP_FCOMPARE:
case OP_RCOMPARE:
case OP_LCOMPARE:
case OP_COMPARE:
case OP_ICOMPARE_IMM:
case OP_LCOMPARE_IMM:
case OP_COMPARE_IMM: {
CompRelation rel;
LLVMValueRef cmp, args [16];
gboolean likely = (ins->flags & MONO_INST_LIKELY) != 0;
gboolean unlikely = FALSE;
if (MONO_IS_COND_BRANCH_OP (ins->next)) {
if (ins->next->inst_false_bb->out_of_line)
likely = TRUE;
else if (ins->next->inst_true_bb->out_of_line)
unlikely = TRUE;
}
if (ins->next->opcode == OP_NOP)
break;
if (ins->next->opcode == OP_BR)
/* The comparison result is not needed */
continue;
rel = mono_opcode_to_cond (ins->next->opcode);
if (ins->opcode == OP_ICOMPARE_IMM) {
lhs = convert (ctx, lhs, LLVMInt32Type ());
rhs = LLVMConstInt (LLVMInt32Type (), ins->inst_imm, FALSE);
}
if (ins->opcode == OP_LCOMPARE_IMM) {
lhs = convert (ctx, lhs, LLVMInt64Type ());
rhs = LLVMConstInt (LLVMInt64Type (), GET_LONG_IMM (ins), FALSE);
}
if (ins->opcode == OP_LCOMPARE) {
lhs = convert (ctx, lhs, LLVMInt64Type ());
rhs = convert (ctx, rhs, LLVMInt64Type ());
}
if (ins->opcode == OP_ICOMPARE) {
lhs = convert (ctx, lhs, LLVMInt32Type ());
rhs = convert (ctx, rhs, LLVMInt32Type ());
}
if (lhs && rhs) {
if (LLVMGetTypeKind (LLVMTypeOf (lhs)) == LLVMPointerTypeKind)
rhs = convert (ctx, rhs, LLVMTypeOf (lhs));
else if (LLVMGetTypeKind (LLVMTypeOf (rhs)) == LLVMPointerTypeKind)
lhs = convert (ctx, lhs, LLVMTypeOf (rhs));
}
/* We use COMPARE+SETcc/Bcc, llvm uses SETcc+br cond */
if (ins->opcode == OP_FCOMPARE) {
cmp = LLVMBuildFCmp (builder, fpcond_to_llvm_cond [rel], convert (ctx, lhs, LLVMDoubleType ()), convert (ctx, rhs, LLVMDoubleType ()), "");
} else if (ins->opcode == OP_RCOMPARE) {
cmp = LLVMBuildFCmp (builder, fpcond_to_llvm_cond [rel], convert (ctx, lhs, LLVMFloatType ()), convert (ctx, rhs, LLVMFloatType ()), "");
} else if (ins->opcode == OP_COMPARE_IMM) {
LLVMIntPredicate llvm_pred = cond_to_llvm_cond [rel];
if (LLVMGetTypeKind (LLVMTypeOf (lhs)) == LLVMPointerTypeKind && ins->inst_imm == 0) {
// We are emitting a NULL check for a pointer
gboolean nonnull = mono_llvm_is_nonnull (lhs);
if (nonnull && llvm_pred == LLVMIntEQ)
cmp = LLVMConstInt (LLVMInt1Type (), FALSE, FALSE);
else if (nonnull && llvm_pred == LLVMIntNE)
cmp = LLVMConstInt (LLVMInt1Type (), TRUE, FALSE);
else
cmp = LLVMBuildICmp (builder, llvm_pred, lhs, LLVMConstNull (LLVMTypeOf (lhs)), "");
} else {
cmp = LLVMBuildICmp (builder, llvm_pred, convert (ctx, lhs, IntPtrType ()), LLVMConstInt (IntPtrType (), ins->inst_imm, FALSE), "");
}
} else if (ins->opcode == OP_LCOMPARE_IMM) {
cmp = LLVMBuildICmp (builder, cond_to_llvm_cond [rel], lhs, rhs, "");
} else if (ins->opcode == OP_COMPARE) {
if (LLVMGetTypeKind (LLVMTypeOf (lhs)) == LLVMPointerTypeKind && LLVMTypeOf (lhs) == LLVMTypeOf (rhs))
cmp = LLVMBuildICmp (builder, cond_to_llvm_cond [rel], lhs, rhs, "");
else
cmp = LLVMBuildICmp (builder, cond_to_llvm_cond [rel], convert (ctx, lhs, IntPtrType ()), convert (ctx, rhs, IntPtrType ()), "");
} else
cmp = LLVMBuildICmp (builder, cond_to_llvm_cond [rel], lhs, rhs, "");
if (likely || unlikely) {
args [0] = cmp;
args [1] = LLVMConstInt (LLVMInt1Type (), likely ? 1 : 0, FALSE);
cmp = call_intrins (ctx, INTRINS_EXPECT_I1, args, "");
}
if (MONO_IS_COND_BRANCH_OP (ins->next)) {
if (ins->next->inst_true_bb == ins->next->inst_false_bb) {
/*
* If the target bb contains PHI instructions, LLVM requires
* two PHI entries for this bblock, while we only generate one.
* So convert this to an unconditional bblock. (bxc #171).
*/
LLVMBuildBr (builder, get_bb (ctx, ins->next->inst_true_bb));
} else {
LLVMBuildCondBr (builder, cmp, get_bb (ctx, ins->next->inst_true_bb), get_bb (ctx, ins->next->inst_false_bb));
}
has_terminator = TRUE;
} else if (MONO_IS_SETCC (ins->next)) {
sprintf (dname_buf, "t%d", ins->next->dreg);
dname = dname_buf;
values [ins->next->dreg] = LLVMBuildZExt (builder, cmp, LLVMInt32Type (), dname);
/* Add stores for volatile variables */
emit_volatile_store (ctx, ins->next->dreg);
} else if (MONO_IS_COND_EXC (ins->next)) {
gboolean force_explicit_branch = FALSE;
if (bb->region != -1) {
/* Don't tag null check branches in exception-handling
* regions with `make.implicit`.
*/
force_explicit_branch = TRUE;
}
emit_cond_system_exception (ctx, bb, (const char*)ins->next->inst_p1, cmp, force_explicit_branch);
if (!ctx_ok (ctx))
break;
builder = ctx->builder;
} else {
set_failure (ctx, "next");
break;
}
ins = ins->next;
break;
}
case OP_FCEQ:
case OP_FCNEQ:
case OP_FCLT:
case OP_FCLT_UN:
case OP_FCGT:
case OP_FCGT_UN:
case OP_FCGE:
case OP_FCLE: {
CompRelation rel;
LLVMValueRef cmp;
rel = mono_opcode_to_cond (ins->opcode);
cmp = LLVMBuildFCmp (builder, fpcond_to_llvm_cond [rel], convert (ctx, lhs, LLVMDoubleType ()), convert (ctx, rhs, LLVMDoubleType ()), "");
values [ins->dreg] = LLVMBuildZExt (builder, cmp, LLVMInt32Type (), dname);
break;
}
case OP_RCEQ:
case OP_RCNEQ:
case OP_RCLT:
case OP_RCLT_UN:
case OP_RCGT:
case OP_RCGT_UN: {
CompRelation rel;
LLVMValueRef cmp;
rel = mono_opcode_to_cond (ins->opcode);
cmp = LLVMBuildFCmp (builder, fpcond_to_llvm_cond [rel], convert (ctx, lhs, LLVMFloatType ()), convert (ctx, rhs, LLVMFloatType ()), "");
values [ins->dreg] = LLVMBuildZExt (builder, cmp, LLVMInt32Type (), dname);
break;
}
case OP_PHI:
case OP_FPHI:
case OP_VPHI:
case OP_XPHI: {
// Handled above
skip_volatile_store = TRUE;
break;
}
case OP_MOVE:
case OP_LMOVE:
case OP_XMOVE:
case OP_SETFRET:
g_assert (lhs);
values [ins->dreg] = lhs;
break;
case OP_FMOVE:
case OP_RMOVE: {
MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
g_assert (lhs);
values [ins->dreg] = lhs;
if (var && m_class_get_byval_arg (var->klass)->type == MONO_TYPE_R4) {
/*
* This is added by the spilling pass in case of the JIT,
* but we have to do it ourselves.
*/
values [ins->dreg] = convert (ctx, values [ins->dreg], LLVMFloatType ());
}
break;
}
case OP_MOVE_F_TO_I4: {
values [ins->dreg] = LLVMBuildBitCast (builder, LLVMBuildFPTrunc (builder, lhs, LLVMFloatType (), ""), LLVMInt32Type (), "");
break;
}
case OP_MOVE_I4_TO_F: {
values [ins->dreg] = LLVMBuildFPExt (builder, LLVMBuildBitCast (builder, lhs, LLVMFloatType (), ""), LLVMDoubleType (), "");
break;
}
case OP_MOVE_F_TO_I8: {
values [ins->dreg] = LLVMBuildBitCast (builder, lhs, LLVMInt64Type (), "");
break;
}
case OP_MOVE_I8_TO_F: {
values [ins->dreg] = LLVMBuildBitCast (builder, lhs, LLVMDoubleType (), "");
break;
}
case OP_IADD:
case OP_ISUB:
case OP_IAND:
case OP_IMUL:
case OP_IDIV:
case OP_IDIV_UN:
case OP_IREM:
case OP_IREM_UN:
case OP_IOR:
case OP_IXOR:
case OP_ISHL:
case OP_ISHR:
case OP_ISHR_UN:
case OP_FADD:
case OP_FSUB:
case OP_FMUL:
case OP_FDIV:
case OP_LADD:
case OP_LSUB:
case OP_LMUL:
case OP_LDIV:
case OP_LDIV_UN:
case OP_LREM:
case OP_LREM_UN:
case OP_LAND:
case OP_LOR:
case OP_LXOR:
case OP_LSHL:
case OP_LSHR:
case OP_LSHR_UN:
lhs = convert (ctx, lhs, regtype_to_llvm_type (spec [MONO_INST_DEST]));
rhs = convert (ctx, rhs, regtype_to_llvm_type (spec [MONO_INST_DEST]));
emit_div_check (ctx, builder, bb, ins, lhs, rhs);
if (!ctx_ok (ctx))
break;
builder = ctx->builder;
switch (ins->opcode) {
case OP_IADD:
case OP_LADD:
values [ins->dreg] = LLVMBuildAdd (builder, lhs, rhs, dname);
break;
case OP_ISUB:
case OP_LSUB:
values [ins->dreg] = LLVMBuildSub (builder, lhs, rhs, dname);
break;
case OP_IMUL:
case OP_LMUL:
values [ins->dreg] = LLVMBuildMul (builder, lhs, rhs, dname);
break;
case OP_IREM:
case OP_LREM:
values [ins->dreg] = LLVMBuildSRem (builder, lhs, rhs, dname);
break;
case OP_IREM_UN:
case OP_LREM_UN:
values [ins->dreg] = LLVMBuildURem (builder, lhs, rhs, dname);
break;
case OP_IDIV:
case OP_LDIV:
values [ins->dreg] = LLVMBuildSDiv (builder, lhs, rhs, dname);
break;
case OP_IDIV_UN:
case OP_LDIV_UN:
values [ins->dreg] = LLVMBuildUDiv (builder, lhs, rhs, dname);
break;
case OP_FDIV:
case OP_RDIV:
values [ins->dreg] = LLVMBuildFDiv (builder, lhs, rhs, dname);
break;
case OP_IAND:
case OP_LAND:
values [ins->dreg] = LLVMBuildAnd (builder, lhs, rhs, dname);
break;
case OP_IOR:
case OP_LOR:
values [ins->dreg] = LLVMBuildOr (builder, lhs, rhs, dname);
break;
case OP_IXOR:
case OP_LXOR:
values [ins->dreg] = LLVMBuildXor (builder, lhs, rhs, dname);
break;
case OP_ISHL:
case OP_LSHL:
values [ins->dreg] = LLVMBuildShl (builder, lhs, rhs, dname);
break;
case OP_ISHR:
case OP_LSHR:
values [ins->dreg] = LLVMBuildAShr (builder, lhs, rhs, dname);
break;
case OP_ISHR_UN:
case OP_LSHR_UN:
values [ins->dreg] = LLVMBuildLShr (builder, lhs, rhs, dname);
break;
case OP_FADD:
values [ins->dreg] = LLVMBuildFAdd (builder, lhs, rhs, dname);
break;
case OP_FSUB:
values [ins->dreg] = LLVMBuildFSub (builder, lhs, rhs, dname);
break;
case OP_FMUL:
values [ins->dreg] = LLVMBuildFMul (builder, lhs, rhs, dname);
break;
default:
g_assert_not_reached ();
}
break;
case OP_RADD:
case OP_RSUB:
case OP_RMUL:
case OP_RDIV: {
lhs = convert (ctx, lhs, LLVMFloatType ());
rhs = convert (ctx, rhs, LLVMFloatType ());
switch (ins->opcode) {
case OP_RADD:
values [ins->dreg] = LLVMBuildFAdd (builder, lhs, rhs, dname);
break;
case OP_RSUB:
values [ins->dreg] = LLVMBuildFSub (builder, lhs, rhs, dname);
break;
case OP_RMUL:
values [ins->dreg] = LLVMBuildFMul (builder, lhs, rhs, dname);
break;
case OP_RDIV:
values [ins->dreg] = LLVMBuildFDiv (builder, lhs, rhs, dname);
break;
default:
g_assert_not_reached ();
break;
}
break;
}
case OP_IADD_IMM:
case OP_ISUB_IMM:
case OP_IMUL_IMM:
case OP_IREM_IMM:
case OP_IREM_UN_IMM:
case OP_IDIV_IMM:
case OP_IDIV_UN_IMM:
case OP_IAND_IMM:
case OP_IOR_IMM:
case OP_IXOR_IMM:
case OP_ISHL_IMM:
case OP_ISHR_IMM:
case OP_ISHR_UN_IMM:
case OP_LADD_IMM:
case OP_LSUB_IMM:
case OP_LMUL_IMM:
case OP_LREM_IMM:
case OP_LAND_IMM:
case OP_LOR_IMM:
case OP_LXOR_IMM:
case OP_LSHL_IMM:
case OP_LSHR_IMM:
case OP_LSHR_UN_IMM:
case OP_ADD_IMM:
case OP_AND_IMM:
case OP_MUL_IMM:
case OP_SHL_IMM:
case OP_SHR_IMM:
case OP_SHR_UN_IMM: {
LLVMValueRef imm;
if (spec [MONO_INST_SRC1] == 'l') {
imm = LLVMConstInt (LLVMInt64Type (), GET_LONG_IMM (ins), FALSE);
} else {
imm = LLVMConstInt (LLVMInt32Type (), ins->inst_imm, FALSE);
}
emit_div_check (ctx, builder, bb, ins, lhs, imm);
if (!ctx_ok (ctx))
break;
builder = ctx->builder;
#if TARGET_SIZEOF_VOID_P == 4
if (ins->opcode == OP_LSHL_IMM || ins->opcode == OP_LSHR_IMM || ins->opcode == OP_LSHR_UN_IMM)
imm = LLVMConstInt (LLVMInt32Type (), ins->inst_imm, FALSE);
#endif
if (LLVMGetTypeKind (LLVMTypeOf (lhs)) == LLVMPointerTypeKind)
lhs = convert (ctx, lhs, IntPtrType ());
imm = convert (ctx, imm, LLVMTypeOf (lhs));
switch (ins->opcode) {
case OP_IADD_IMM:
case OP_LADD_IMM:
case OP_ADD_IMM:
values [ins->dreg] = LLVMBuildAdd (builder, lhs, imm, dname);
break;
case OP_ISUB_IMM:
case OP_LSUB_IMM:
values [ins->dreg] = LLVMBuildSub (builder, lhs, imm, dname);
break;
case OP_IMUL_IMM:
case OP_MUL_IMM:
case OP_LMUL_IMM:
values [ins->dreg] = LLVMBuildMul (builder, lhs, imm, dname);
break;
case OP_IDIV_IMM:
case OP_LDIV_IMM:
values [ins->dreg] = LLVMBuildSDiv (builder, lhs, imm, dname);
break;
case OP_IDIV_UN_IMM:
case OP_LDIV_UN_IMM:
values [ins->dreg] = LLVMBuildUDiv (builder, lhs, imm, dname);
break;
case OP_IREM_IMM:
case OP_LREM_IMM:
values [ins->dreg] = LLVMBuildSRem (builder, lhs, imm, dname);
break;
case OP_IREM_UN_IMM:
values [ins->dreg] = LLVMBuildURem (builder, lhs, imm, dname);
break;
case OP_IAND_IMM:
case OP_LAND_IMM:
case OP_AND_IMM:
values [ins->dreg] = LLVMBuildAnd (builder, lhs, imm, dname);
break;
case OP_IOR_IMM:
case OP_LOR_IMM:
values [ins->dreg] = LLVMBuildOr (builder, lhs, imm, dname);
break;
case OP_IXOR_IMM:
case OP_LXOR_IMM:
values [ins->dreg] = LLVMBuildXor (builder, lhs, imm, dname);
break;
case OP_ISHL_IMM:
case OP_LSHL_IMM:
values [ins->dreg] = LLVMBuildShl (builder, lhs, imm, dname);
break;
case OP_SHL_IMM:
if (TARGET_SIZEOF_VOID_P == 8) {
/* The IL is not regular */
lhs = convert (ctx, lhs, LLVMInt64Type ());
imm = convert (ctx, imm, LLVMInt64Type ());
}
values [ins->dreg] = LLVMBuildShl (builder, lhs, imm, dname);
break;
case OP_ISHR_IMM:
case OP_LSHR_IMM:
case OP_SHR_IMM:
values [ins->dreg] = LLVMBuildAShr (builder, lhs, imm, dname);
break;
case OP_ISHR_UN_IMM:
/* This is used to implement conv.u4, so the lhs could be an i8 */
lhs = convert (ctx, lhs, LLVMInt32Type ());
imm = convert (ctx, imm, LLVMInt32Type ());
values [ins->dreg] = LLVMBuildLShr (builder, lhs, imm, dname);
break;
case OP_LSHR_UN_IMM:
case OP_SHR_UN_IMM:
values [ins->dreg] = LLVMBuildLShr (builder, lhs, imm, dname);
break;
default:
g_assert_not_reached ();
}
break;
}
case OP_INEG:
values [ins->dreg] = LLVMBuildSub (builder, LLVMConstInt (LLVMInt32Type (), 0, FALSE), convert (ctx, lhs, LLVMInt32Type ()), dname);
break;
case OP_LNEG:
if (LLVMTypeOf (lhs) != LLVMInt64Type ())
lhs = convert (ctx, lhs, LLVMInt64Type ());
values [ins->dreg] = LLVMBuildSub (builder, LLVMConstInt (LLVMInt64Type (), 0, FALSE), lhs, dname);
break;
case OP_FNEG:
lhs = convert (ctx, lhs, LLVMDoubleType ());
values [ins->dreg] = LLVMBuildFNeg (builder, lhs, dname);
break;
case OP_RNEG:
lhs = convert (ctx, lhs, LLVMFloatType ());
values [ins->dreg] = LLVMBuildFNeg (builder, lhs, dname);
break;
case OP_INOT: {
guint32 v = 0xffffffff;
values [ins->dreg] = LLVMBuildXor (builder, LLVMConstInt (LLVMInt32Type (), v, FALSE), convert (ctx, lhs, LLVMInt32Type ()), dname);
break;
}
case OP_LNOT: {
if (LLVMTypeOf (lhs) != LLVMInt64Type ())
lhs = convert (ctx, lhs, LLVMInt64Type ());
guint64 v = 0xffffffffffffffffLL;
values [ins->dreg] = LLVMBuildXor (builder, LLVMConstInt (LLVMInt64Type (), v, FALSE), lhs, dname);
break;
}
#if defined(TARGET_X86) || defined(TARGET_AMD64)
case OP_X86_LEA: {
LLVMValueRef v1, v2;
rhs = LLVMBuildSExt (builder, convert (ctx, rhs, LLVMInt32Type ()), LLVMInt64Type (), "");
v1 = LLVMBuildMul (builder, convert (ctx, rhs, IntPtrType ()), LLVMConstInt (IntPtrType (), ((unsigned long long)1 << ins->backend.shift_amount), FALSE), "");
v2 = LLVMBuildAdd (builder, convert (ctx, lhs, IntPtrType ()), v1, "");
values [ins->dreg] = LLVMBuildAdd (builder, v2, LLVMConstInt (IntPtrType (), ins->inst_imm, FALSE), dname);
break;
}
case OP_X86_BSF32:
case OP_X86_BSF64: {
LLVMValueRef args [] = {
lhs,
LLVMConstInt (LLVMInt1Type (), 1, TRUE),
};
int op = ins->opcode == OP_X86_BSF32 ? INTRINS_CTTZ_I32 : INTRINS_CTTZ_I64;
values [ins->dreg] = call_intrins (ctx, op, args, dname);
break;
}
case OP_X86_BSR32:
case OP_X86_BSR64: {
LLVMValueRef args [] = {
lhs,
LLVMConstInt (LLVMInt1Type (), 1, TRUE),
};
int op = ins->opcode == OP_X86_BSR32 ? INTRINS_CTLZ_I32 : INTRINS_CTLZ_I64;
LLVMValueRef width = ins->opcode == OP_X86_BSR32 ? const_int32 (31) : const_int64 (63);
LLVMValueRef tz = call_intrins (ctx, op, args, "");
values [ins->dreg] = LLVMBuildXor (builder, tz, width, dname);
break;
}
#endif
case OP_ICONV_TO_I1:
case OP_ICONV_TO_I2:
case OP_ICONV_TO_I4:
case OP_ICONV_TO_U1:
case OP_ICONV_TO_U2:
case OP_ICONV_TO_U4:
case OP_LCONV_TO_I1:
case OP_LCONV_TO_I2:
case OP_LCONV_TO_U1:
case OP_LCONV_TO_U2:
case OP_LCONV_TO_U4: {
gboolean sign;
sign = (ins->opcode == OP_ICONV_TO_I1) || (ins->opcode == OP_ICONV_TO_I2) || (ins->opcode == OP_ICONV_TO_I4) || (ins->opcode == OP_LCONV_TO_I1) || (ins->opcode == OP_LCONV_TO_I2);
/* Have to do two casts since our vregs have type int */
v = LLVMBuildTrunc (builder, lhs, op_to_llvm_type (ins->opcode), "");
if (sign)
values [ins->dreg] = LLVMBuildSExt (builder, v, LLVMInt32Type (), dname);
else
values [ins->dreg] = LLVMBuildZExt (builder, v, LLVMInt32Type (), dname);
break;
}
case OP_ICONV_TO_I8:
values [ins->dreg] = LLVMBuildSExt (builder, lhs, LLVMInt64Type (), dname);
break;
case OP_ICONV_TO_U8:
values [ins->dreg] = LLVMBuildZExt (builder, lhs, LLVMInt64Type (), dname);
break;
case OP_FCONV_TO_I4:
case OP_RCONV_TO_I4:
values [ins->dreg] = LLVMBuildFPToSI (builder, lhs, LLVMInt32Type (), dname);
break;
case OP_FCONV_TO_I1:
case OP_RCONV_TO_I1:
values [ins->dreg] = LLVMBuildSExt (builder, LLVMBuildFPToSI (builder, lhs, LLVMInt8Type (), dname), LLVMInt32Type (), "");
break;
case OP_FCONV_TO_U1:
case OP_RCONV_TO_U1:
values [ins->dreg] = LLVMBuildZExt (builder, LLVMBuildTrunc (builder, LLVMBuildFPToUI (builder, lhs, IntPtrType (), dname), LLVMInt8Type (), ""), LLVMInt32Type (), "");
break;
case OP_FCONV_TO_I2:
case OP_RCONV_TO_I2:
values [ins->dreg] = LLVMBuildSExt (builder, LLVMBuildFPToSI (builder, lhs, LLVMInt16Type (), dname), LLVMInt32Type (), "");
break;
case OP_FCONV_TO_U2:
case OP_RCONV_TO_U2:
values [ins->dreg] = LLVMBuildZExt (builder, LLVMBuildFPToUI (builder, lhs, LLVMInt16Type (), dname), LLVMInt32Type (), "");
break;
case OP_FCONV_TO_U4:
case OP_RCONV_TO_U4:
values [ins->dreg] = LLVMBuildFPToUI (builder, lhs, LLVMInt32Type (), dname);
break;
case OP_FCONV_TO_U8:
case OP_RCONV_TO_U8:
values [ins->dreg] = LLVMBuildFPToUI (builder, lhs, LLVMInt64Type (), dname);
break;
case OP_FCONV_TO_I8:
case OP_RCONV_TO_I8:
values [ins->dreg] = LLVMBuildFPToSI (builder, lhs, LLVMInt64Type (), dname);
break;
case OP_FCONV_TO_I:
case OP_RCONV_TO_I:
values [ins->dreg] = LLVMBuildFPToSI (builder, lhs, IntPtrType (), dname);
break;
case OP_ICONV_TO_R8:
case OP_LCONV_TO_R8:
values [ins->dreg] = LLVMBuildSIToFP (builder, lhs, LLVMDoubleType (), dname);
break;
case OP_ICONV_TO_R_UN:
case OP_LCONV_TO_R_UN:
values [ins->dreg] = LLVMBuildUIToFP (builder, lhs, LLVMDoubleType (), dname);
break;
#if TARGET_SIZEOF_VOID_P == 4
case OP_LCONV_TO_U:
#endif
case OP_LCONV_TO_I4:
values [ins->dreg] = LLVMBuildTrunc (builder, lhs, LLVMInt32Type (), dname);
break;
case OP_ICONV_TO_R4:
case OP_LCONV_TO_R4:
v = LLVMBuildSIToFP (builder, lhs, LLVMFloatType (), "");
values [ins->dreg] = v;
break;
case OP_FCONV_TO_R4:
v = LLVMBuildFPTrunc (builder, lhs, LLVMFloatType (), "");
values [ins->dreg] = v;
break;
case OP_RCONV_TO_R8:
values [ins->dreg] = LLVMBuildFPExt (builder, lhs, LLVMDoubleType (), dname);
break;
case OP_RCONV_TO_R4:
values [ins->dreg] = lhs;
break;
case OP_SEXT_I4:
values [ins->dreg] = LLVMBuildSExt (builder, convert (ctx, lhs, LLVMInt32Type ()), LLVMInt64Type (), dname);
break;
case OP_ZEXT_I4:
values [ins->dreg] = LLVMBuildZExt (builder, convert (ctx, lhs, LLVMInt32Type ()), LLVMInt64Type (), dname);
break;
case OP_TRUNC_I4:
values [ins->dreg] = LLVMBuildTrunc (builder, lhs, LLVMInt32Type (), dname);
break;
case OP_LOCALLOC_IMM: {
LLVMValueRef v;
guint32 size = ins->inst_imm;
size = (size + (MONO_ARCH_FRAME_ALIGNMENT - 1)) & ~ (MONO_ARCH_FRAME_ALIGNMENT - 1);
v = mono_llvm_build_alloca (builder, LLVMInt8Type (), LLVMConstInt (LLVMInt32Type (), size, FALSE), MONO_ARCH_FRAME_ALIGNMENT, "");
if (ins->flags & MONO_INST_INIT)
emit_memset (ctx, builder, v, const_int32 (size), MONO_ARCH_FRAME_ALIGNMENT);
values [ins->dreg] = v;
break;
}
case OP_LOCALLOC: {
LLVMValueRef v, size;
size = LLVMBuildAnd (builder, LLVMBuildAdd (builder, convert (ctx, lhs, LLVMInt32Type ()), LLVMConstInt (LLVMInt32Type (), MONO_ARCH_FRAME_ALIGNMENT - 1, FALSE), ""), LLVMConstInt (LLVMInt32Type (), ~ (MONO_ARCH_FRAME_ALIGNMENT - 1), FALSE), "");
v = mono_llvm_build_alloca (builder, LLVMInt8Type (), size, MONO_ARCH_FRAME_ALIGNMENT, "");
if (ins->flags & MONO_INST_INIT)
emit_memset (ctx, builder, v, size, MONO_ARCH_FRAME_ALIGNMENT);
values [ins->dreg] = v;
break;
}
case OP_LOADI1_MEMBASE:
case OP_LOADU1_MEMBASE:
case OP_LOADI2_MEMBASE:
case OP_LOADU2_MEMBASE:
case OP_LOADI4_MEMBASE:
case OP_LOADU4_MEMBASE:
case OP_LOADI8_MEMBASE:
case OP_LOADR4_MEMBASE:
case OP_LOADR8_MEMBASE:
case OP_LOAD_MEMBASE:
case OP_LOADI8_MEM:
case OP_LOADU1_MEM:
case OP_LOADU2_MEM:
case OP_LOADI4_MEM:
case OP_LOADU4_MEM:
case OP_LOAD_MEM: {
int size = 8;
LLVMValueRef base, index, addr;
LLVMTypeRef t;
gboolean sext = FALSE, zext = FALSE;
gboolean is_faulting = (ins->flags & MONO_INST_FAULT) != 0;
gboolean is_volatile = (ins->flags & MONO_INST_VOLATILE) != 0;
gboolean is_unaligned = (ins->flags & MONO_INST_UNALIGNED) != 0;
t = load_store_to_llvm_type (ins->opcode, &size, &sext, &zext);
if (sext || zext)
dname = (char*)"";
if ((ins->opcode == OP_LOADI8_MEM) || (ins->opcode == OP_LOAD_MEM) || (ins->opcode == OP_LOADI4_MEM) || (ins->opcode == OP_LOADU4_MEM) || (ins->opcode == OP_LOADU1_MEM) || (ins->opcode == OP_LOADU2_MEM)) {
addr = LLVMConstInt (IntPtrType (), ins->inst_imm, FALSE);
base = addr;
} else {
/* _MEMBASE */
base = lhs;
if (ins->inst_offset == 0) {
LLVMValueRef gep_base, gep_offset;
if (mono_llvm_can_be_gep (base, &gep_base, &gep_offset)) {
addr = LLVMBuildGEP (builder, convert (ctx, gep_base, LLVMPointerType (LLVMInt8Type (), 0)), &gep_offset, 1, "");
} else {
addr = base;
}
} else if (ins->inst_offset % size != 0) {
/* Unaligned load */
index = LLVMConstInt (LLVMInt32Type (), ins->inst_offset, FALSE);
addr = LLVMBuildGEP (builder, convert (ctx, base, LLVMPointerType (LLVMInt8Type (), 0)), &index, 1, "");
} else {
index = LLVMConstInt (LLVMInt32Type (), ins->inst_offset / size, FALSE);
addr = LLVMBuildGEP (builder, convert (ctx, base, LLVMPointerType (t, 0)), &index, 1, "");
}
}
addr = convert (ctx, addr, LLVMPointerType (t, 0));
if (is_unaligned)
values [ins->dreg] = mono_llvm_build_aligned_load (builder, addr, dname, is_volatile, 1);
else
values [ins->dreg] = emit_load (ctx, bb, &builder, size, addr, base, dname, is_faulting, is_volatile, LLVM_BARRIER_NONE);
if (!(is_faulting || is_volatile) && (ins->flags & MONO_INST_INVARIANT_LOAD)) {
/*
* These will signal LLVM that these loads do not alias any stores, and
* they can't fail, allowing them to be hoisted out of loops.
*/
set_invariant_load_flag (values [ins->dreg]);
}
if (sext)
values [ins->dreg] = LLVMBuildSExt (builder, values [ins->dreg], LLVMInt32Type (), dname);
else if (zext)
values [ins->dreg] = LLVMBuildZExt (builder, values [ins->dreg], LLVMInt32Type (), dname);
break;
}
case OP_STOREI1_MEMBASE_REG:
case OP_STOREI2_MEMBASE_REG:
case OP_STOREI4_MEMBASE_REG:
case OP_STOREI8_MEMBASE_REG:
case OP_STORER4_MEMBASE_REG:
case OP_STORER8_MEMBASE_REG:
case OP_STORE_MEMBASE_REG: {
int size = 8;
LLVMValueRef index, addr, base;
LLVMTypeRef t;
gboolean sext = FALSE, zext = FALSE;
gboolean is_faulting = (ins->flags & MONO_INST_FAULT) != 0;
gboolean is_volatile = (ins->flags & MONO_INST_VOLATILE) != 0;
gboolean is_unaligned = (ins->flags & MONO_INST_UNALIGNED) != 0;
if (!values [ins->inst_destbasereg]) {
set_failure (ctx, "inst_destbasereg");
break;
}
t = load_store_to_llvm_type (ins->opcode, &size, &sext, &zext);
base = values [ins->inst_destbasereg];
LLVMValueRef gep_base, gep_offset;
if (ins->inst_offset == 0 && mono_llvm_can_be_gep (base, &gep_base, &gep_offset)) {
addr = LLVMBuildGEP (builder, convert (ctx, gep_base, LLVMPointerType (LLVMInt8Type (), 0)), &gep_offset, 1, "");
} else if (ins->inst_offset % size != 0) {
/* Unaligned store */
index = LLVMConstInt (LLVMInt32Type (), ins->inst_offset, FALSE);
addr = LLVMBuildGEP (builder, convert (ctx, base, LLVMPointerType (LLVMInt8Type (), 0)), &index, 1, "");
} else {
index = LLVMConstInt (LLVMInt32Type (), ins->inst_offset / size, FALSE);
addr = LLVMBuildGEP (builder, convert (ctx, base, LLVMPointerType (t, 0)), &index, 1, "");
}
if (is_faulting && LLVMGetInstructionOpcode (base) == LLVMAlloca && !(ins->flags & MONO_INST_VOLATILE))
/* Storing to an alloca cannot fail */
is_faulting = FALSE;
LLVMValueRef srcval = convert (ctx, values [ins->sreg1], t);
LLVMValueRef ptrdst = convert (ctx, addr, LLVMPointerType (t, 0));
if (is_unaligned)
mono_llvm_build_aligned_store (builder, srcval, ptrdst, is_volatile, 1);
else
emit_store (ctx, bb, &builder, size, srcval, ptrdst, base, is_faulting, is_volatile);
break;
}
case OP_STOREI1_MEMBASE_IMM:
case OP_STOREI2_MEMBASE_IMM:
case OP_STOREI4_MEMBASE_IMM:
case OP_STOREI8_MEMBASE_IMM:
case OP_STORE_MEMBASE_IMM: {
int size = 8;
LLVMValueRef index, addr, base;
LLVMTypeRef t;
gboolean sext = FALSE, zext = FALSE;
gboolean is_faulting = (ins->flags & MONO_INST_FAULT) != 0;
gboolean is_volatile = (ins->flags & MONO_INST_VOLATILE) != 0;
gboolean is_unaligned = (ins->flags & MONO_INST_UNALIGNED) != 0;
t = load_store_to_llvm_type (ins->opcode, &size, &sext, &zext);
base = values [ins->inst_destbasereg];
LLVMValueRef gep_base, gep_offset;
if (ins->inst_offset == 0 && mono_llvm_can_be_gep (base, &gep_base, &gep_offset)) {
addr = LLVMBuildGEP (builder, convert (ctx, gep_base, LLVMPointerType (LLVMInt8Type (), 0)), &gep_offset, 1, "");
} else if (ins->inst_offset % size != 0) {
/* Unaligned store */
index = LLVMConstInt (LLVMInt32Type (), ins->inst_offset, FALSE);
addr = LLVMBuildGEP (builder, convert (ctx, base, LLVMPointerType (LLVMInt8Type (), 0)), &index, 1, "");
} else {
index = LLVMConstInt (LLVMInt32Type (), ins->inst_offset / size, FALSE);
addr = LLVMBuildGEP (builder, convert (ctx, base, LLVMPointerType (t, 0)), &index, 1, "");
}
LLVMValueRef srcval = convert (ctx, LLVMConstInt (IntPtrType (), ins->inst_imm, FALSE), t);
LLVMValueRef ptrdst = convert (ctx, addr, LLVMPointerType (t, 0));
if (is_unaligned)
mono_llvm_build_aligned_store (builder, srcval, ptrdst, is_volatile, 1);
else
emit_store (ctx, bb, &builder, size, srcval, ptrdst, base, is_faulting, is_volatile);
break;
}
case OP_CHECK_THIS:
emit_load (ctx, bb, &builder, TARGET_SIZEOF_VOID_P, convert (ctx, lhs, LLVMPointerType (IntPtrType (), 0)), lhs, "", TRUE, FALSE, LLVM_BARRIER_NONE);
break;
case OP_OUTARG_VTRETADDR:
break;
case OP_VOIDCALL:
case OP_CALL:
case OP_LCALL:
case OP_FCALL:
case OP_RCALL:
case OP_VCALL:
case OP_VOIDCALL_MEMBASE:
case OP_CALL_MEMBASE:
case OP_LCALL_MEMBASE:
case OP_FCALL_MEMBASE:
case OP_RCALL_MEMBASE:
case OP_VCALL_MEMBASE:
case OP_VOIDCALL_REG:
case OP_CALL_REG:
case OP_LCALL_REG:
case OP_FCALL_REG:
case OP_RCALL_REG:
case OP_VCALL_REG: {
process_call (ctx, bb, &builder, ins);
break;
}
case OP_AOTCONST: {
MonoJumpInfoType ji_type = ins->inst_c1;
gpointer ji_data = ins->inst_p0;
if (ji_type == MONO_PATCH_INFO_ICALL_ADDR) {
char *symbol = mono_aot_get_direct_call_symbol (MONO_PATCH_INFO_ICALL_ADDR_CALL, ji_data);
if (symbol) {
/*
* Avoid emitting a got entry for these since the method is directly called, and it might not be
* resolvable at runtime using dlsym ().
*/
g_free (symbol);
values [ins->dreg] = LLVMConstInt (IntPtrType (), 0, FALSE);
break;
}
}
values [ins->dreg] = get_aotconst (ctx, ji_type, ji_data, LLVMPointerType (IntPtrType (), 0));
break;
}
case OP_MEMMOVE: {
int argn = 0;
LLVMValueRef args [5];
args [argn++] = convert (ctx, values [ins->sreg1], LLVMPointerType (LLVMInt8Type (), 0));
args [argn++] = convert (ctx, values [ins->sreg2], LLVMPointerType (LLVMInt8Type (), 0));
args [argn++] = convert (ctx, values [ins->sreg3], LLVMInt64Type ());
args [argn++] = LLVMConstInt (LLVMInt1Type (), 0, FALSE); // is_volatile
call_intrins (ctx, INTRINS_MEMMOVE, args, "");
break;
}
case OP_NOT_REACHED:
LLVMBuildUnreachable (builder);
has_terminator = TRUE;
g_assert (bb->block_num < cfg->max_block_num);
ctx->unreachable [bb->block_num] = TRUE;
/* Might have instructions after this */
while (ins->next) {
MonoInst *next = ins->next;
/*
* FIXME: If later code uses the regs defined by these instructions,
* compilation will fail.
*/
const char *spec = INS_INFO (next->opcode);
if (spec [MONO_INST_DEST] == 'i' && !MONO_IS_STORE_MEMBASE (next))
ctx->values [next->dreg] = LLVMConstNull (LLVMInt32Type ());
MONO_DELETE_INS (bb, next);
}
break;
case OP_LDADDR: {
MonoInst *var = ins->inst_i0;
MonoClass *klass = var->klass;
if (var->opcode == OP_VTARG_ADDR && !MONO_CLASS_IS_SIMD(cfg, klass)) {
/* The variable contains the vtype address */
values [ins->dreg] = values [var->dreg];
} else if (var->opcode == OP_GSHAREDVT_LOCAL) {
values [ins->dreg] = emit_gsharedvt_ldaddr (ctx, var->dreg);
} else {
values [ins->dreg] = addresses [var->dreg];
}
break;
}
case OP_SIN: {
LLVMValueRef args [1];
args [0] = convert (ctx, lhs, LLVMDoubleType ());
values [ins->dreg] = call_intrins (ctx, INTRINS_SIN, args, dname);
break;
}
case OP_SINF: {
LLVMValueRef args [1];
args [0] = convert (ctx, lhs, LLVMFloatType ());
values [ins->dreg] = call_intrins (ctx, INTRINS_SINF, args, dname);
break;
}
case OP_EXP: {
LLVMValueRef args [1];
args [0] = convert (ctx, lhs, LLVMDoubleType ());
values [ins->dreg] = call_intrins (ctx, INTRINS_EXP, args, dname);
break;
}
case OP_EXPF: {
LLVMValueRef args [1];
args [0] = convert (ctx, lhs, LLVMFloatType ());
values [ins->dreg] = call_intrins (ctx, INTRINS_EXPF, args, dname);
break;
}
case OP_LOG2: {
LLVMValueRef args [1];
args [0] = convert (ctx, lhs, LLVMDoubleType ());
values [ins->dreg] = call_intrins (ctx, INTRINS_LOG2, args, dname);
break;
}
case OP_LOG2F: {
LLVMValueRef args [1];
args [0] = convert (ctx, lhs, LLVMFloatType ());
values [ins->dreg] = call_intrins (ctx, INTRINS_LOG2F, args, dname);
break;
}
case OP_LOG10: {
LLVMValueRef args [1];
args [0] = convert (ctx, lhs, LLVMDoubleType ());
values [ins->dreg] = call_intrins (ctx, INTRINS_LOG10, args, dname);
break;
}
case OP_LOG10F: {
LLVMValueRef args [1];
args [0] = convert (ctx, lhs, LLVMFloatType ());
values [ins->dreg] = call_intrins (ctx, INTRINS_LOG10F, args, dname);
break;
}
case OP_LOG: {
LLVMValueRef args [1];
args [0] = convert (ctx, lhs, LLVMDoubleType ());
values [ins->dreg] = call_intrins (ctx, INTRINS_LOG, args, dname);
break;
}
case OP_TRUNC: {
LLVMValueRef args [1];
args [0] = convert (ctx, lhs, LLVMDoubleType ());
values [ins->dreg] = call_intrins (ctx, INTRINS_TRUNC, args, dname);
break;
}
case OP_TRUNCF: {
LLVMValueRef args [1];
args [0] = convert (ctx, lhs, LLVMFloatType ());
values [ins->dreg] = call_intrins (ctx, INTRINS_TRUNCF, args, dname);
break;
}
case OP_COS: {
LLVMValueRef args [1];
args [0] = convert (ctx, lhs, LLVMDoubleType ());
values [ins->dreg] = call_intrins (ctx, INTRINS_COS, args, dname);
break;
}
case OP_COSF: {
LLVMValueRef args [1];
args [0] = convert (ctx, lhs, LLVMFloatType ());
values [ins->dreg] = call_intrins (ctx, INTRINS_COSF, args, dname);
break;
}
case OP_SQRT: {
LLVMValueRef args [1];
args [0] = convert (ctx, lhs, LLVMDoubleType ());
values [ins->dreg] = call_intrins (ctx, INTRINS_SQRT, args, dname);
break;
}
case OP_SQRTF: {
LLVMValueRef args [1];
args [0] = convert (ctx, lhs, LLVMFloatType ());
values [ins->dreg] = call_intrins (ctx, INTRINS_SQRTF, args, dname);
break;
}
case OP_FLOOR: {
LLVMValueRef args [1];
args [0] = convert (ctx, lhs, LLVMDoubleType ());
values [ins->dreg] = call_intrins (ctx, INTRINS_FLOOR, args, dname);
break;
}
case OP_FLOORF: {
LLVMValueRef args [1];
args [0] = convert (ctx, lhs, LLVMFloatType ());
values [ins->dreg] = call_intrins (ctx, INTRINS_FLOORF, args, dname);
break;
}
case OP_CEIL: {
LLVMValueRef args [1];
args [0] = convert (ctx, lhs, LLVMDoubleType ());
values [ins->dreg] = call_intrins (ctx, INTRINS_CEIL, args, dname);
break;
}
case OP_CEILF: {
LLVMValueRef args [1];
args [0] = convert (ctx, lhs, LLVMFloatType ());
values [ins->dreg] = call_intrins (ctx, INTRINS_CEILF, args, dname);
break;
}
case OP_FMA: {
LLVMValueRef args [3];
args [0] = convert (ctx, values [ins->sreg1], LLVMDoubleType ());
args [1] = convert (ctx, values [ins->sreg2], LLVMDoubleType ());
args [2] = convert (ctx, values [ins->sreg3], LLVMDoubleType ());
values [ins->dreg] = call_intrins (ctx, INTRINS_FMA, args, dname);
break;
}
case OP_FMAF: {
LLVMValueRef args [3];
args [0] = convert (ctx, values [ins->sreg1], LLVMFloatType ());
args [1] = convert (ctx, values [ins->sreg2], LLVMFloatType ());
args [2] = convert (ctx, values [ins->sreg3], LLVMFloatType ());
values [ins->dreg] = call_intrins (ctx, INTRINS_FMAF, args, dname);
break;
}
case OP_ABS: {
LLVMValueRef args [1];
args [0] = convert (ctx, lhs, LLVMDoubleType ());
values [ins->dreg] = call_intrins (ctx, INTRINS_FABS, args, dname);
break;
}
case OP_ABSF: {
LLVMValueRef args [1];
#ifdef TARGET_AMD64
args [0] = convert (ctx, lhs, LLVMFloatType ());
values [ins->dreg] = call_intrins (ctx, INTRINS_ABSF, args, dname);
#else
/* llvm.fabs not supported on all platforms */
args [0] = convert (ctx, lhs, LLVMDoubleType ());
values [ins->dreg] = call_intrins (ctx, INTRINS_FABS, args, dname);
values [ins->dreg] = convert (ctx, values [ins->dreg], LLVMFloatType ());
#endif
break;
}
case OP_RPOW: {
LLVMValueRef args [2];
args [0] = convert (ctx, lhs, LLVMFloatType ());
args [1] = convert (ctx, rhs, LLVMFloatType ());
values [ins->dreg] = call_intrins (ctx, INTRINS_POWF, args, dname);
break;
}
case OP_FPOW: {
LLVMValueRef args [2];
args [0] = convert (ctx, lhs, LLVMDoubleType ());
args [1] = convert (ctx, rhs, LLVMDoubleType ());
values [ins->dreg] = call_intrins (ctx, INTRINS_POW, args, dname);
break;
}
case OP_FCOPYSIGN: {
LLVMValueRef args [2];
args [0] = convert (ctx, lhs, LLVMDoubleType ());
args [1] = convert (ctx, rhs, LLVMDoubleType ());
values [ins->dreg] = call_intrins (ctx, INTRINS_COPYSIGN, args, dname);
break;
}
case OP_RCOPYSIGN: {
LLVMValueRef args [2];
args [0] = convert (ctx, lhs, LLVMFloatType ());
args [1] = convert (ctx, rhs, LLVMFloatType ());
values [ins->dreg] = call_intrins (ctx, INTRINS_COPYSIGNF, args, dname);
break;
}
case OP_IMIN:
case OP_LMIN:
case OP_IMAX:
case OP_LMAX:
case OP_IMIN_UN:
case OP_LMIN_UN:
case OP_IMAX_UN:
case OP_LMAX_UN:
case OP_FMIN:
case OP_FMAX:
case OP_RMIN:
case OP_RMAX: {
LLVMValueRef v;
lhs = convert (ctx, lhs, regtype_to_llvm_type (spec [MONO_INST_DEST]));
rhs = convert (ctx, rhs, regtype_to_llvm_type (spec [MONO_INST_DEST]));
switch (ins->opcode) {
case OP_IMIN:
case OP_LMIN:
v = LLVMBuildICmp (builder, LLVMIntSLE, lhs, rhs, "");
break;
case OP_IMAX:
case OP_LMAX:
v = LLVMBuildICmp (builder, LLVMIntSGE, lhs, rhs, "");
break;
case OP_IMIN_UN:
case OP_LMIN_UN:
v = LLVMBuildICmp (builder, LLVMIntULE, lhs, rhs, "");
break;
case OP_IMAX_UN:
case OP_LMAX_UN:
v = LLVMBuildICmp (builder, LLVMIntUGE, lhs, rhs, "");
break;
case OP_FMAX:
case OP_RMAX:
v = LLVMBuildFCmp (builder, LLVMRealUGE, lhs, rhs, "");
break;
case OP_FMIN:
case OP_RMIN:
v = LLVMBuildFCmp (builder, LLVMRealULE, lhs, rhs, "");
break;
default:
g_assert_not_reached ();
break;
}
values [ins->dreg] = LLVMBuildSelect (builder, v, lhs, rhs, dname);
break;
}
/*
* See the ARM64 comment in mono/utils/atomic.h for an explanation of why this
* hack is necessary (for now).
*/
#ifdef TARGET_ARM64
#define ARM64_ATOMIC_FENCE_FIX mono_llvm_build_fence (builder, LLVM_BARRIER_SEQ)
#else
#define ARM64_ATOMIC_FENCE_FIX
#endif
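/* Atomic opcodes: lowered to LLVM atomicrmw/cmpxchg; on ARM64 they are bracketed by explicit fences (see above) */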
case OP_ATOMIC_EXCHANGE_I4:
case OP_ATOMIC_EXCHANGE_I8: {
LLVMValueRef args [2];
LLVMTypeRef t;
if (ins->opcode == OP_ATOMIC_EXCHANGE_I4)
t = LLVMInt32Type ();
else
t = LLVMInt64Type ();
g_assert (ins->inst_offset == 0);
args [0] = convert (ctx, lhs, LLVMPointerType (t, 0));
args [1] = convert (ctx, rhs, t);
ARM64_ATOMIC_FENCE_FIX;
values [ins->dreg] = mono_llvm_build_atomic_rmw (builder, LLVM_ATOMICRMW_OP_XCHG, args [0], args [1]);
ARM64_ATOMIC_FENCE_FIX;
break;
}
case OP_ATOMIC_ADD_I4:
case OP_ATOMIC_ADD_I8:
case OP_ATOMIC_AND_I4:
case OP_ATOMIC_AND_I8:
case OP_ATOMIC_OR_I4:
case OP_ATOMIC_OR_I8: {
LLVMValueRef args [2];
LLVMTypeRef t;
if (ins->type == STACK_I4)
t = LLVMInt32Type ();
else
t = LLVMInt64Type ();
g_assert (ins->inst_offset == 0);
args [0] = convert (ctx, lhs, LLVMPointerType (t, 0));
args [1] = convert (ctx, rhs, t);
ARM64_ATOMIC_FENCE_FIX;
if (ins->opcode == OP_ATOMIC_ADD_I4 || ins->opcode == OP_ATOMIC_ADD_I8)
// Interlocked.Add returns the new value, while LLVM's atomicrmw add returns the old one,
// so we emit an additional Add here; see https://github.com/dotnet/runtime/pull/33102
values [ins->dreg] = LLVMBuildAdd (builder, mono_llvm_build_atomic_rmw (builder, LLVM_ATOMICRMW_OP_ADD, args [0], args [1]), args [1], dname);
else if (ins->opcode == OP_ATOMIC_AND_I4 || ins->opcode == OP_ATOMIC_AND_I8)
values [ins->dreg] = mono_llvm_build_atomic_rmw (builder, LLVM_ATOMICRMW_OP_AND, args [0], args [1]);
else if (ins->opcode == OP_ATOMIC_OR_I4 || ins->opcode == OP_ATOMIC_OR_I8)
values [ins->dreg] = mono_llvm_build_atomic_rmw (builder, LLVM_ATOMICRMW_OP_OR, args [0], args [1]);
else
g_assert_not_reached ();
ARM64_ATOMIC_FENCE_FIX;
break;
}
case OP_ATOMIC_CAS_I4:
case OP_ATOMIC_CAS_I8: {
LLVMValueRef args [3], val;
LLVMTypeRef t;
if (ins->opcode == OP_ATOMIC_CAS_I4)
t = LLVMInt32Type ();
else
t = LLVMInt64Type ();
args [0] = convert (ctx, lhs, LLVMPointerType (t, 0));
/* comparand */
args [1] = convert (ctx, values [ins->sreg3], t);
/* new value */
args [2] = convert (ctx, values [ins->sreg2], t);
ARM64_ATOMIC_FENCE_FIX;
val = mono_llvm_build_cmpxchg (builder, args [0], args [1], args [2]);
ARM64_ATOMIC_FENCE_FIX;
/* cmpxchg returns a pair */
values [ins->dreg] = LLVMBuildExtractValue (builder, val, 0, "");
break;
}
case OP_MEMORY_BARRIER: {
mono_llvm_build_fence (builder, (BarrierKind) ins->backend.memory_barrier_kind);
break;
}
case OP_ATOMIC_LOAD_I1:
case OP_ATOMIC_LOAD_I2:
case OP_ATOMIC_LOAD_I4:
case OP_ATOMIC_LOAD_I8:
case OP_ATOMIC_LOAD_U1:
case OP_ATOMIC_LOAD_U2:
case OP_ATOMIC_LOAD_U4:
case OP_ATOMIC_LOAD_U8:
case OP_ATOMIC_LOAD_R4:
case OP_ATOMIC_LOAD_R8: {
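/* Atomic loads: emit the load with the requested barrier, then sign/zero-extend sub-word results to i32 */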
int size;
gboolean sext, zext;
LLVMTypeRef t;
gboolean is_faulting = (ins->flags & MONO_INST_FAULT) != 0;
gboolean is_volatile = (ins->flags & MONO_INST_VOLATILE) != 0;
BarrierKind barrier = (BarrierKind) ins->backend.memory_barrier_kind;
LLVMValueRef index, addr;
t = load_store_to_llvm_type (ins->opcode, &size, &sext, &zext);
if (sext || zext)
dname = (char *)"";
if (ins->inst_offset != 0) {
index = LLVMConstInt (LLVMInt32Type (), ins->inst_offset / size, FALSE);
addr = LLVMBuildGEP (builder, convert (ctx, lhs, LLVMPointerType (t, 0)), &index, 1, "");
} else {
addr = lhs;
}
addr = convert (ctx, addr, LLVMPointerType (t, 0));
ARM64_ATOMIC_FENCE_FIX;
values [ins->dreg] = emit_load (ctx, bb, &builder, size, addr, lhs, dname, is_faulting, is_volatile, barrier);
ARM64_ATOMIC_FENCE_FIX;
if (sext)
values [ins->dreg] = LLVMBuildSExt (builder, values [ins->dreg], LLVMInt32Type (), dname);
else if (zext)
values [ins->dreg] = LLVMBuildZExt (builder, values [ins->dreg], LLVMInt32Type (), dname);
break;
}
case OP_ATOMIC_STORE_I1:
case OP_ATOMIC_STORE_I2:
case OP_ATOMIC_STORE_I4:
case OP_ATOMIC_STORE_I8:
case OP_ATOMIC_STORE_U1:
case OP_ATOMIC_STORE_U2:
case OP_ATOMIC_STORE_U4:
case OP_ATOMIC_STORE_U8:
case OP_ATOMIC_STORE_R4:
case OP_ATOMIC_STORE_R8: {
int size;
gboolean sext, zext;
LLVMTypeRef t;
gboolean is_faulting = (ins->flags & MONO_INST_FAULT) != 0;
gboolean is_volatile = (ins->flags & MONO_INST_VOLATILE) != 0;
BarrierKind barrier = (BarrierKind) ins->backend.memory_barrier_kind;
LLVMValueRef index, addr, value, base;
if (!values [ins->inst_destbasereg]) {
set_failure (ctx, "inst_destbasereg");
break;
}
t = load_store_to_llvm_type (ins->opcode, &size, &sext, &zext);
base = values [ins->inst_destbasereg];
index = LLVMConstInt (LLVMInt32Type (), ins->inst_offset / size, FALSE);
addr = LLVMBuildGEP (builder, convert (ctx, base, LLVMPointerType (t, 0)), &index, 1, "");
value = convert (ctx, values [ins->sreg1], t);
ARM64_ATOMIC_FENCE_FIX;
emit_store_general (ctx, bb, &builder, size, value, addr, base, is_faulting, is_volatile, barrier);
ARM64_ATOMIC_FENCE_FIX;
break;
}
case OP_RELAXED_NOP: {
#if defined(TARGET_AMD64) || defined(TARGET_X86)
call_intrins (ctx, INTRINS_SSE_PAUSE, NULL, "");
break;
#else
break;
#endif
}
case OP_TLS_GET: {
#if (defined(TARGET_AMD64) || defined(TARGET_X86)) && defined(__linux__)
#ifdef TARGET_AMD64
// 257 == FS segment register
LLVMTypeRef ptrtype = LLVMPointerType (IntPtrType (), 257);
#else
// 256 == GS segment register
LLVMTypeRef ptrtype = LLVMPointerType (IntPtrType (), 256);
#endif
// FIXME: XEN
values [ins->dreg] = LLVMBuildLoad (builder, LLVMBuildIntToPtr (builder, LLVMConstInt (IntPtrType (), ins->inst_offset, TRUE), ptrtype, ""), "");
#elif defined(TARGET_AMD64) && defined(TARGET_OSX)
/* See mono_amd64_emit_tls_get () */
int offset = mono_amd64_get_tls_gs_offset () + (ins->inst_offset * 8);
// 256 == GS segment register
LLVMTypeRef ptrtype = LLVMPointerType (IntPtrType (), 256);
values [ins->dreg] = LLVMBuildLoad (builder, LLVMBuildIntToPtr (builder, LLVMConstInt (IntPtrType (), offset, TRUE), ptrtype, ""), "");
#else
set_failure (ctx, "opcode tls-get");
break;
#endif
break;
}
case OP_GC_SAFE_POINT: {
LLVMValueRef val, cmp, callee, call;
LLVMBasicBlockRef poll_bb, cont_bb;
LLVMValueRef args [2];
static LLVMTypeRef sig;
const char *icall_name = "mono_threads_state_poll";
/*
* Create the cold wrapper around the icall, along with a managed method for it so
* unwinding works.
*/
if (!cfg->compile_aot && !ctx->module->gc_poll_cold_wrapper_compiled) {
ERROR_DECL (error);
/* Compiling a method here is a bit ugly, but it works */
MonoMethod *wrapper = mono_marshal_get_llvm_func_wrapper (LLVM_FUNC_WRAPPER_GC_POLL);
ctx->module->gc_poll_cold_wrapper_compiled = mono_jit_compile_method (wrapper, error);
mono_error_assert_ok (error);
}
if (!sig)
sig = LLVMFunctionType0 (LLVMVoidType (), FALSE);
/*
* if (!*sreg1)
* mono_threads_state_poll ();
*/
val = mono_llvm_build_load (builder, convert (ctx, lhs, LLVMPointerType (IntPtrType (), 0)), "", TRUE);
cmp = LLVMBuildICmp (builder, LLVMIntEQ, val, LLVMConstNull (LLVMTypeOf (val)), "");
poll_bb = gen_bb (ctx, "POLL_BB");
cont_bb = gen_bb (ctx, "CONT_BB");
args [0] = cmp;
args [1] = LLVMConstInt (LLVMInt1Type (), 1, FALSE);
cmp = call_intrins (ctx, INTRINS_EXPECT_I1, args, "");
mono_llvm_build_weighted_branch (builder, cmp, cont_bb, poll_bb, 1000, 1);
ctx->builder = builder = create_builder (ctx);
LLVMPositionBuilderAtEnd (builder, poll_bb);
if (ctx->cfg->compile_aot) {
callee = get_callee (ctx, sig, MONO_PATCH_INFO_JIT_ICALL_ID, GUINT_TO_POINTER (MONO_JIT_ICALL_mono_threads_state_poll));
call = LLVMBuildCall (builder, callee, NULL, 0, "");
} else {
callee = get_jit_callee (ctx, icall_name, sig, MONO_PATCH_INFO_ABS, ctx->module->gc_poll_cold_wrapper_compiled);
call = LLVMBuildCall (builder, callee, NULL, 0, "");
set_call_cold_cconv (call);
}
LLVMBuildBr (builder, cont_bb);
ctx->builder = builder = create_builder (ctx);
LLVMPositionBuilderAtEnd (builder, cont_bb);
ctx->bblocks [bb->block_num].end_bblock = cont_bb;
break;
}
/*
* Overflow opcodes.
*/
case OP_IADD_OVF:
case OP_IADD_OVF_UN:
case OP_ISUB_OVF:
case OP_ISUB_OVF_UN:
case OP_IMUL_OVF:
case OP_IMUL_OVF_UN:
case OP_LADD_OVF:
case OP_LADD_OVF_UN:
case OP_LSUB_OVF:
case OP_LSUB_OVF_UN:
case OP_LMUL_OVF:
case OP_LMUL_OVF_UN: {
LLVMValueRef args [2], val, ovf;
IntrinsicId intrins;
args [0] = convert (ctx, lhs, op_to_llvm_type (ins->opcode));
args [1] = convert (ctx, rhs, op_to_llvm_type (ins->opcode));
intrins = ovf_op_to_intrins (ins->opcode);
val = call_intrins (ctx, intrins, args, "");
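/* The llvm.*.with.overflow intrinsics return a { result, overflow-flag } pair */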
values [ins->dreg] = LLVMBuildExtractValue (builder, val, 0, dname);
ovf = LLVMBuildExtractValue (builder, val, 1, "");
emit_cond_system_exception (ctx, bb, ins->inst_exc_name, ovf, FALSE);
if (!ctx_ok (ctx))
break;
builder = ctx->builder;
break;
}
/*
* Valuetypes.
* We currently model them using arrays. Promotion to local vregs is
* disabled for them in mono_handle_global_vregs () in the LLVM case,
* so we always have an entry in cfg->varinfo for them.
* FIXME: Is this needed ?
*/
case OP_VZERO: {
MonoClass *klass = ins->klass;
if (!klass) {
// FIXME:
set_failure (ctx, "!klass");
break;
}
if (!addresses [ins->dreg])
addresses [ins->dreg] = build_named_alloca (ctx, m_class_get_byval_arg (klass), "vzero");
LLVMValueRef ptr = LLVMBuildBitCast (builder, addresses [ins->dreg], LLVMPointerType (LLVMInt8Type (), 0), "");
emit_memset (ctx, builder, ptr, const_int32 (mono_class_value_size (klass, NULL)), 0);
break;
}
case OP_DUMMY_VZERO:
break;
case OP_STOREV_MEMBASE:
case OP_LOADV_MEMBASE:
case OP_VMOVE: {
MonoClass *klass = ins->klass;
LLVMValueRef src = NULL, dst, args [5];
gboolean done = FALSE;
gboolean is_volatile = FALSE;
if (!klass) {
// FIXME:
set_failure (ctx, "!klass");
break;
}
if (mini_is_gsharedvt_klass (klass)) {
// FIXME:
set_failure (ctx, "gsharedvt");
break;
}
switch (ins->opcode) {
case OP_STOREV_MEMBASE:
if (cfg->gen_write_barriers && m_class_has_references (klass) && ins->inst_destbasereg != cfg->frame_reg &&
LLVMGetInstructionOpcode (values [ins->inst_destbasereg]) != LLVMAlloca) {
/* Decomposed earlier */
g_assert_not_reached ();
break;
}
if (!addresses [ins->sreg1]) {
/* SIMD */
g_assert (values [ins->sreg1]);
dst = convert (ctx, LLVMBuildAdd (builder, convert (ctx, values [ins->inst_destbasereg], IntPtrType ()), LLVMConstInt (IntPtrType (), ins->inst_offset, FALSE), ""), LLVMPointerType (type_to_llvm_type (ctx, m_class_get_byval_arg (klass)), 0));
LLVMBuildStore (builder, values [ins->sreg1], dst);
done = TRUE;
} else {
src = LLVMBuildBitCast (builder, addresses [ins->sreg1], LLVMPointerType (LLVMInt8Type (), 0), "");
dst = convert (ctx, LLVMBuildAdd (builder, convert (ctx, values [ins->inst_destbasereg], IntPtrType ()), LLVMConstInt (IntPtrType (), ins->inst_offset, FALSE), ""), LLVMPointerType (LLVMInt8Type (), 0));
}
break;
case OP_LOADV_MEMBASE:
if (!addresses [ins->dreg])
addresses [ins->dreg] = build_alloca (ctx, m_class_get_byval_arg (klass));
src = convert (ctx, LLVMBuildAdd (builder, convert (ctx, values [ins->inst_basereg], IntPtrType ()), LLVMConstInt (IntPtrType (), ins->inst_offset, FALSE), ""), LLVMPointerType (LLVMInt8Type (), 0));
dst = LLVMBuildBitCast (builder, addresses [ins->dreg], LLVMPointerType (LLVMInt8Type (), 0), "");
break;
case OP_VMOVE:
if (!addresses [ins->sreg1])
addresses [ins->sreg1] = build_alloca (ctx, m_class_get_byval_arg (klass));
if (!addresses [ins->dreg])
addresses [ins->dreg] = build_alloca (ctx, m_class_get_byval_arg (klass));
src = LLVMBuildBitCast (builder, addresses [ins->sreg1], LLVMPointerType (LLVMInt8Type (), 0), "");
dst = LLVMBuildBitCast (builder, addresses [ins->dreg], LLVMPointerType (LLVMInt8Type (), 0), "");
break;
default:
g_assert_not_reached ();
}
if (!ctx_ok (ctx))
break;
if (done)
break;
#ifdef TARGET_WASM
is_volatile = m_class_has_references (klass);
#endif
int aindex = 0;
args [aindex ++] = dst;
args [aindex ++] = src;
args [aindex ++] = LLVMConstInt (LLVMInt32Type (), mono_class_value_size (klass, NULL), FALSE);
args [aindex ++] = LLVMConstInt (LLVMInt1Type (), is_volatile ? 1 : 0, FALSE);
call_intrins (ctx, INTRINS_MEMCPY, args, "");
break;
}
case OP_LLVM_OUTARG_VT: {
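/* Prepare a vtype call argument: pick or materialize an address for it according to the LLVM storage kind in ainfo */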
LLVMArgInfo *ainfo = (LLVMArgInfo*)ins->inst_p0;
MonoType *t = mini_get_underlying_type (ins->inst_vtype);
if (ainfo->storage == LLVMArgGsharedvtVariable) {
MonoInst *var = get_vreg_to_inst (cfg, ins->sreg1);
if (var && var->opcode == OP_GSHAREDVT_LOCAL) {
addresses [ins->dreg] = convert (ctx, emit_gsharedvt_ldaddr (ctx, var->dreg), LLVMPointerType (IntPtrType (), 0));
} else {
g_assert (addresses [ins->sreg1]);
addresses [ins->dreg] = addresses [ins->sreg1];
}
} else if (ainfo->storage == LLVMArgGsharedvtFixed) {
if (!addresses [ins->sreg1]) {
addresses [ins->sreg1] = build_alloca (ctx, t);
g_assert (values [ins->sreg1]);
}
LLVMBuildStore (builder, convert (ctx, values [ins->sreg1], LLVMGetElementType (LLVMTypeOf (addresses [ins->sreg1]))), addresses [ins->sreg1]);
addresses [ins->dreg] = addresses [ins->sreg1];
} else {
if (!addresses [ins->sreg1]) {
addresses [ins->sreg1] = build_named_alloca (ctx, t, "llvm_outarg_vt");
g_assert (values [ins->sreg1]);
LLVMBuildStore (builder, convert (ctx, values [ins->sreg1], type_to_llvm_type (ctx, t)), addresses [ins->sreg1]);
addresses [ins->dreg] = addresses [ins->sreg1];
} else if (ainfo->storage == LLVMArgVtypeAddr || values [ins->sreg1] == addresses [ins->sreg1]) {
/* LLVMArgVtypeByRef/LLVMArgVtypeAddr, have to make a copy */
addresses [ins->dreg] = build_alloca (ctx, t);
LLVMValueRef v = LLVMBuildLoad (builder, addresses [ins->sreg1], "llvm_outarg_vt_copy");
LLVMBuildStore (builder, convert (ctx, v, type_to_llvm_type (ctx, t)), addresses [ins->dreg]);
} else {
if (values [ins->sreg1]) {
LLVMTypeRef src_t = LLVMTypeOf (values [ins->sreg1]);
LLVMValueRef dst = convert (ctx, addresses [ins->sreg1], LLVMPointerType (src_t, 0));
LLVMBuildStore (builder, values [ins->sreg1], dst);
}
addresses [ins->dreg] = addresses [ins->sreg1];
}
}
break;
}
case OP_OBJC_GET_SELECTOR: {
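/* Lazily create the Objective-C image-info global, plus a methname/selref global pair per selector, cached in objc_selector_to_var */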
const char *name = (const char*)ins->inst_p0;
LLVMValueRef var;
if (!ctx->module->objc_selector_to_var) {
ctx->module->objc_selector_to_var = g_hash_table_new_full (g_str_hash, g_str_equal, g_free, NULL);
LLVMValueRef info_var = LLVMAddGlobal (ctx->lmodule, LLVMArrayType (LLVMInt8Type (), 8), "@OBJC_IMAGE_INFO");
int32_t objc_imageinfo [] = { 0, 16 };
LLVMSetInitializer (info_var, mono_llvm_create_constant_data_array ((uint8_t *) &objc_imageinfo, 8));
LLVMSetLinkage (info_var, LLVMPrivateLinkage);
LLVMSetExternallyInitialized (info_var, TRUE);
LLVMSetSection (info_var, "__DATA, __objc_imageinfo,regular,no_dead_strip");
LLVMSetAlignment (info_var, sizeof (target_mgreg_t));
mark_as_used (ctx->module, info_var);
}
var = (LLVMValueRef)g_hash_table_lookup (ctx->module->objc_selector_to_var, name);
if (!var) {
LLVMValueRef indexes [16];
LLVMValueRef name_var = LLVMAddGlobal (ctx->lmodule, LLVMArrayType (LLVMInt8Type (), strlen (name) + 1), "@OBJC_METH_VAR_NAME_");
LLVMSetInitializer (name_var, mono_llvm_create_constant_data_array ((const uint8_t*)name, strlen (name) + 1));
LLVMSetLinkage (name_var, LLVMPrivateLinkage);
LLVMSetSection (name_var, "__TEXT,__objc_methname,cstring_literals");
mark_as_used (ctx->module, name_var);
LLVMValueRef ref_var = LLVMAddGlobal (ctx->lmodule, LLVMPointerType (LLVMInt8Type (), 0), "@OBJC_SELECTOR_REFERENCES_");
indexes [0] = LLVMConstInt (LLVMInt32Type (), 0, 0);
indexes [1] = LLVMConstInt (LLVMInt32Type (), 0, 0);
LLVMSetInitializer (ref_var, LLVMConstGEP (name_var, indexes, 2));
LLVMSetLinkage (ref_var, LLVMPrivateLinkage);
LLVMSetExternallyInitialized (ref_var, TRUE);
LLVMSetSection (ref_var, "__DATA, __objc_selrefs, literal_pointers, no_dead_strip");
LLVMSetAlignment (ref_var, sizeof (target_mgreg_t));
mark_as_used (ctx->module, ref_var);
g_hash_table_insert (ctx->module->objc_selector_to_var, g_strdup (name), ref_var);
var = ref_var;
}
values [ins->dreg] = LLVMBuildLoad (builder, var, "");
break;
}
#if defined(TARGET_X86) || defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_WASM)
case OP_EXTRACTX_U2:
case OP_XEXTRACT_I1:
case OP_XEXTRACT_I2:
case OP_XEXTRACT_I4:
case OP_XEXTRACT_I8:
case OP_XEXTRACT_R4:
case OP_XEXTRACT_R8:
case OP_EXTRACT_I1:
case OP_EXTRACT_I2:
case OP_EXTRACT_I4:
case OP_EXTRACT_I8:
case OP_EXTRACT_R4:
case OP_EXTRACT_R8: {
MonoTypeEnum mono_elt_t = inst_c1_type (ins);
LLVMTypeRef elt_t = primitive_type_to_llvm_type (mono_elt_t);
gboolean sext = FALSE;
gboolean zext = FALSE;
switch (mono_elt_t) {
case MONO_TYPE_I1: case MONO_TYPE_I2: sext = TRUE; break;
case MONO_TYPE_U1: case MONO_TYPE_U2: zext = TRUE; break;
}
LLVMValueRef element_ix = NULL;
switch (ins->opcode) {
case OP_XEXTRACT_I1:
case OP_XEXTRACT_I2:
case OP_XEXTRACT_I4:
case OP_XEXTRACT_R4:
case OP_XEXTRACT_R8:
case OP_XEXTRACT_I8:
element_ix = rhs;
break;
default:
element_ix = const_int32 (ins->inst_c0);
}
LLVMTypeRef lhs_t = LLVMTypeOf (lhs);
int vec_width = mono_llvm_get_prim_size_bits (lhs_t);
int elem_width = mono_llvm_get_prim_size_bits (elt_t);
int elements = vec_width / elem_width;
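/* Mask the index into range so an out-of-bounds index wraps instead of producing undefined extractelement behavior */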
element_ix = LLVMBuildAnd (builder, element_ix, const_int32 (elements - 1), "extract");
LLVMTypeRef ret_t = LLVMVectorType (elt_t, elements);
LLVMValueRef src = LLVMBuildBitCast (builder, lhs, ret_t, "extract");
LLVMValueRef result = LLVMBuildExtractElement (builder, src, element_ix, "extract");
if (zext)
result = LLVMBuildZExt (builder, result, i4_t, "extract_zext");
else if (sext)
result = LLVMBuildSExt (builder, result, i4_t, "extract_sext");
values [ins->dreg] = result;
break;
}
case OP_XINSERT_I1:
case OP_XINSERT_I2:
case OP_XINSERT_I4:
case OP_XINSERT_I8:
case OP_XINSERT_R4:
case OP_XINSERT_R8: {
MonoTypeEnum primty = inst_c1_type (ins);
LLVMTypeRef ret_t = simd_class_to_llvm_type (ctx, ins->klass);
LLVMTypeRef elem_t = LLVMGetElementType (ret_t);
int elements = LLVMGetVectorSize (ret_t);
LLVMValueRef element_ix = LLVMBuildAnd (builder, arg3, const_int32 (elements - 1), "xinsert");
LLVMValueRef vec = convert (ctx, lhs, ret_t);
LLVMValueRef val = convert_full (ctx, rhs, elem_t, primitive_type_is_unsigned (primty));
LLVMValueRef result = LLVMBuildInsertElement (builder, vec, val, element_ix, "xinsert");
values [ins->dreg] = result;
break;
}
case OP_EXPAND_I1:
case OP_EXPAND_I2:
case OP_EXPAND_I4:
case OP_EXPAND_I8:
case OP_EXPAND_R4:
case OP_EXPAND_R8: {
LLVMTypeRef t;
LLVMValueRef mask [MAX_VECTOR_ELEMS], v;
int i;
t = simd_class_to_llvm_type (ctx, ins->klass);
for (i = 0; i < MAX_VECTOR_ELEMS; ++i)
mask [i] = LLVMConstInt (LLVMInt32Type (), 0, FALSE);
v = convert (ctx, values [ins->sreg1], LLVMGetElementType (t));
values [ins->dreg] = LLVMBuildInsertElement (builder, LLVMConstNull (t), v, LLVMConstInt (LLVMInt32Type (), 0, FALSE), "");
values [ins->dreg] = LLVMBuildShuffleVector (builder, values [ins->dreg], LLVMGetUndef (t), LLVMConstVector (mask, LLVMGetVectorSize (t)), "");
break;
}
case OP_XZERO: {
values [ins->dreg] = LLVMConstNull (type_to_llvm_type (ctx, m_class_get_byval_arg (ins->klass)));
break;
}
case OP_LOADX_MEMBASE: {
LLVMTypeRef t = type_to_llvm_type (ctx, m_class_get_byval_arg (ins->klass));
LLVMValueRef src;
src = convert (ctx, LLVMBuildAdd (builder, convert (ctx, values [ins->inst_basereg], IntPtrType ()), LLVMConstInt (IntPtrType (), ins->inst_offset, FALSE), ""), LLVMPointerType (t, 0));
values [ins->dreg] = mono_llvm_build_aligned_load (builder, src, "", FALSE, 1);
break;
}
case OP_STOREX_MEMBASE: {
LLVMTypeRef t = LLVMTypeOf (values [ins->sreg1]);
LLVMValueRef dest;
dest = convert (ctx, LLVMBuildAdd (builder, convert (ctx, values [ins->inst_destbasereg], IntPtrType ()), LLVMConstInt (IntPtrType (), ins->inst_offset, FALSE), ""), LLVMPointerType (t, 0));
mono_llvm_build_aligned_store (builder, values [ins->sreg1], dest, FALSE, 1);
break;
}
case OP_XBINOP:
case OP_XBINOP_SCALAR:
case OP_XBINOP_BYSCALAR: {
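/* Element-wise vector binop; the SCALAR variant operates on the lowest elements only, BYSCALAR broadcasts the rhs scalar across all lanes */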
gboolean scalar = ins->opcode == OP_XBINOP_SCALAR;
gboolean byscalar = ins->opcode == OP_XBINOP_BYSCALAR;
LLVMValueRef result = NULL;
LLVMValueRef args [] = { lhs, rhs };
if (scalar)
for (int i = 0; i < 2; ++i)
args [i] = scalar_from_vector (ctx, args [i]);
if (byscalar) {
LLVMTypeRef t = LLVMTypeOf (args [0]);
unsigned int elems = LLVMGetVectorSize (t);
args [1] = broadcast_element (ctx, scalar_from_vector (ctx, args [1]), elems);
}
LLVMValueRef l = args [0];
LLVMValueRef r = args [1];
switch (ins->inst_c0) {
case OP_IADD:
result = LLVMBuildAdd (builder, l, r, "");
break;
case OP_ISUB:
result = LLVMBuildSub (builder, l, r, "");
break;
case OP_IMUL:
result = LLVMBuildMul (builder, l, r, "");
break;
case OP_IAND:
result = LLVMBuildAnd (builder, l, r, "");
break;
case OP_IOR:
result = LLVMBuildOr (builder, l, r, "");
break;
case OP_IXOR:
result = LLVMBuildXor (builder, l, r, "");
break;
case OP_FADD:
result = LLVMBuildFAdd (builder, l, r, "");
break;
case OP_FSUB:
result = LLVMBuildFSub (builder, l, r, "");
break;
case OP_FMUL:
result = LLVMBuildFMul (builder, l, r, "");
break;
case OP_FDIV:
result = LLVMBuildFDiv (builder, l, r, "");
break;
case OP_FMAX:
case OP_FMIN: {
LLVMValueRef args [] = { l, r };
#if defined(TARGET_X86) || defined(TARGET_AMD64)
LLVMTypeRef t = LLVMTypeOf (l);
LLVMTypeRef elem_t = LLVMGetElementType (t);
unsigned int elems = LLVMGetVectorSize (t);
unsigned int elem_bits = mono_llvm_get_prim_size_bits (elem_t);
unsigned int v_size = elems * elem_bits;
if (v_size == 128) {
gboolean is_r4 = ins->inst_c1 == MONO_TYPE_R4;
int iid = -1;
if (ins->inst_c0 == OP_FMAX) {
if (elems == 1)
iid = is_r4 ? INTRINS_SSE_MAXSS : INTRINS_SSE_MAXSD;
else
iid = is_r4 ? INTRINS_SSE_MAXPS : INTRINS_SSE_MAXPD;
} else {
if (elems == 1)
iid = is_r4 ? INTRINS_SSE_MINSS : INTRINS_SSE_MINSD;
else
iid = is_r4 ? INTRINS_SSE_MINPS : INTRINS_SSE_MINPD;
}
result = call_intrins (ctx, iid, args, dname);
} else {
LLVMRealPredicate op = ins->inst_c0 == OP_FMAX ? LLVMRealUGE : LLVMRealULE;
LLVMValueRef cmp = LLVMBuildFCmp (builder, op, l, r, "");
result = LLVMBuildSelect (builder, cmp, l, r, "");
}
#elif defined(TARGET_ARM64)
IntrinsicId iid = ins->inst_c0 == OP_FMAX ? INTRINS_AARCH64_ADV_SIMD_FMAX : INTRINS_AARCH64_ADV_SIMD_FMIN;
llvm_ovr_tag_t ovr_tag = ovr_tag_from_mono_vector_class (ins->klass);
result = call_overloaded_intrins (ctx, iid, ovr_tag, args, "");
#else
NOT_IMPLEMENTED;
#endif
break;
}
case OP_IMAX:
case OP_IMIN: {
gboolean is_unsigned = ins->inst_c1 == MONO_TYPE_U1 || ins->inst_c1 == MONO_TYPE_U2 || ins->inst_c1 == MONO_TYPE_U4 || ins->inst_c1 == MONO_TYPE_U8;
LLVMIntPredicate op;
switch (ins->inst_c0) {
case OP_IMAX:
op = is_unsigned ? LLVMIntUGT : LLVMIntSGT;
break;
case OP_IMIN:
op = is_unsigned ? LLVMIntULT : LLVMIntSLT;
break;
default:
g_assert_not_reached ();
}
#if defined(TARGET_ARM64)
if ((ins->inst_c1 == MONO_TYPE_U8) || (ins->inst_c1 == MONO_TYPE_I8)) {
LLVMValueRef cmp = LLVMBuildICmp (builder, op, l, r, "");
result = LLVMBuildSelect (builder, cmp, l, r, "");
} else {
IntrinsicId iid;
switch (ins->inst_c0) {
case OP_IMAX:
iid = is_unsigned ? INTRINS_AARCH64_ADV_SIMD_UMAX : INTRINS_AARCH64_ADV_SIMD_SMAX;
break;
case OP_IMIN:
iid = is_unsigned ? INTRINS_AARCH64_ADV_SIMD_UMIN : INTRINS_AARCH64_ADV_SIMD_SMIN;
break;
default:
g_assert_not_reached ();
}
LLVMValueRef args [] = { l, r };
llvm_ovr_tag_t ovr_tag = ovr_tag_from_mono_vector_class (ins->klass);
result = call_overloaded_intrins (ctx, iid, ovr_tag, args, "");
}
#else
LLVMValueRef cmp = LLVMBuildICmp (builder, op, l, r, "");
result = LLVMBuildSelect (builder, cmp, l, r, "");
#endif
break;
}
default:
g_assert_not_reached ();
}
if (scalar)
result = vector_from_scalar (ctx, LLVMTypeOf (lhs), result);
values [ins->dreg] = result;
break;
}
case OP_XBINOP_FORCEINT: {
LLVMTypeRef t = LLVMTypeOf (lhs);
LLVMTypeRef elem_t = LLVMGetElementType (t);
unsigned int elems = LLVMGetVectorSize (t);
unsigned int elem_bits = mono_llvm_get_prim_size_bits (elem_t);
LLVMTypeRef intermediate_elem_t = LLVMIntType (elem_bits);
LLVMTypeRef intermediate_t = LLVMVectorType (intermediate_elem_t, elems);
LLVMValueRef lhs_int = convert (ctx, lhs, intermediate_t);
LLVMValueRef rhs_int = convert (ctx, rhs, intermediate_t);
LLVMValueRef result = NULL;
switch (ins->inst_c0) {
case XBINOP_FORCEINT_and:
result = LLVMBuildAnd (builder, lhs_int, rhs_int, "");
break;
case XBINOP_FORCEINT_or:
result = LLVMBuildOr (builder, lhs_int, rhs_int, "");
break;
case XBINOP_FORCEINT_ornot:
result = LLVMBuildNot (builder, rhs_int, "");
result = LLVMBuildOr (builder, result, lhs_int, "");
break;
case XBINOP_FORCEINT_xor:
result = LLVMBuildXor (builder, lhs_int, rhs_int, "");
break;
default:
g_assert_not_reached ();
}
values [ins->dreg] = LLVMBuildBitCast (builder, result, t, "");
break;
}
case OP_CREATE_SCALAR:
case OP_CREATE_SCALAR_UNSAFE: {
MonoTypeEnum primty = inst_c1_type (ins);
LLVMTypeRef type = simd_class_to_llvm_type (ctx, ins->klass);
// use an undef vector (unspecified contents, possibly garbage) for OP_CREATE_SCALAR_UNSAFE
// and a zero-initialized one for OP_CREATE_SCALAR
LLVMValueRef vector = (ins->opcode == OP_CREATE_SCALAR) ? LLVMConstNull (type) : LLVMGetUndef (type);
LLVMValueRef val = convert_full (ctx, lhs, primitive_type_to_llvm_type (primty), primitive_type_is_unsigned (primty));
values [ins->dreg] = LLVMBuildInsertElement (builder, vector, val, const_int32 (0), "");
break;
}
case OP_INSERT_I1:
values [ins->dreg] = LLVMBuildInsertElement (builder, values [ins->sreg1], convert (ctx, values [ins->sreg2], LLVMInt8Type ()), LLVMConstInt (LLVMInt32Type (), ins->inst_c0, FALSE), dname);
break;
case OP_INSERT_I2:
values [ins->dreg] = LLVMBuildInsertElement (builder, values [ins->sreg1], convert (ctx, values [ins->sreg2], LLVMInt16Type ()), LLVMConstInt (LLVMInt32Type (), ins->inst_c0, FALSE), dname);
break;
case OP_INSERT_I4:
values [ins->dreg] = LLVMBuildInsertElement (builder, values [ins->sreg1], convert (ctx, values [ins->sreg2], LLVMInt32Type ()), LLVMConstInt (LLVMInt32Type (), ins->inst_c0, FALSE), dname);
break;
case OP_INSERT_I8:
values [ins->dreg] = LLVMBuildInsertElement (builder, values [ins->sreg1], convert (ctx, values [ins->sreg2], LLVMInt64Type ()), LLVMConstInt (LLVMInt32Type (), ins->inst_c0, FALSE), dname);
break;
case OP_INSERT_R4:
values [ins->dreg] = LLVMBuildInsertElement (builder, values [ins->sreg1], convert (ctx, values [ins->sreg2], LLVMFloatType ()), LLVMConstInt (LLVMInt32Type (), ins->inst_c0, FALSE), dname);
break;
case OP_INSERT_R8:
values [ins->dreg] = LLVMBuildInsertElement (builder, values [ins->sreg1], convert (ctx, values [ins->sreg2], LLVMDoubleType ()), LLVMConstInt (LLVMInt32Type (), ins->inst_c0, FALSE), dname);
break;
case OP_XCAST: {
LLVMTypeRef t = simd_class_to_llvm_type (ctx, ins->klass);
values [ins->dreg] = LLVMBuildBitCast (builder, lhs, t, "");
break;
}
case OP_XCONCAT: {
values [ins->dreg] = concatenate_vectors (ctx, lhs, rhs);
break;
}
case OP_XINSERT_LOWER:
case OP_XINSERT_UPPER: {
const char *oname = ins->opcode == OP_XINSERT_LOWER ? "xinsert_lower" : "xinsert_upper";
int ix = ins->opcode == OP_XINSERT_LOWER ? 0 : 1;
LLVMTypeRef src_t = LLVMTypeOf (lhs);
unsigned int width = mono_llvm_get_prim_size_bits (src_t);
LLVMTypeRef int_t = LLVMIntType (width / 2);
LLVMTypeRef intvec_t = LLVMVectorType (int_t, 2);
LLVMValueRef insval = LLVMBuildBitCast (builder, rhs, int_t, oname);
LLVMValueRef val = LLVMBuildBitCast (builder, lhs, intvec_t, oname);
val = LLVMBuildInsertElement (builder, val, insval, const_int32 (ix), oname);
val = LLVMBuildBitCast (builder, val, src_t, oname);
values [ins->dreg] = val;
break;
}
case OP_XLOWER:
case OP_XUPPER: {
const char *oname = ins->opcode == OP_XLOWER ? "xlower" : "xupper";
LLVMTypeRef src_t = LLVMTypeOf (lhs);
unsigned int elems = LLVMGetVectorSize (src_t);
g_assert (elems >= 2 && elems <= MAX_VECTOR_ELEMS);
unsigned int ret_elems = elems / 2;
int startix = ins->opcode == OP_XLOWER ? 0 : ret_elems;
LLVMValueRef val = LLVMBuildShuffleVector (builder, lhs, LLVMGetUndef (src_t), create_const_vector_i32 (&mask_0_incr_1 [startix], ret_elems), oname);
values [ins->dreg] = val;
break;
}
case OP_XWIDEN:
case OP_XWIDEN_UNSAFE: {
const char *oname = ins->opcode == OP_XWIDEN ? "xwiden" : "xwiden_unsafe";
LLVMTypeRef src_t = LLVMTypeOf (lhs);
unsigned int elems = LLVMGetVectorSize (src_t);
g_assert (elems <= MAX_VECTOR_ELEMS / 2);
unsigned int ret_elems = elems * 2;
LLVMValueRef upper = ins->opcode == OP_XWIDEN ? LLVMConstNull (src_t) : LLVMGetUndef (src_t);
LLVMValueRef val = LLVMBuildShuffleVector (builder, lhs, upper, create_const_vector_i32 (mask_0_incr_1, ret_elems), oname);
values [ins->dreg] = val;
break;
}
#endif // defined(TARGET_X86) || defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_WASM)
#if defined(TARGET_X86) || defined(TARGET_AMD64) || defined(TARGET_WASM)
case OP_PADDB:
case OP_PADDW:
case OP_PADDD:
case OP_PADDQ:
values [ins->dreg] = LLVMBuildAdd (builder, lhs, rhs, "");
break;
case OP_ADDPD:
case OP_ADDPS:
values [ins->dreg] = LLVMBuildFAdd (builder, lhs, rhs, "");
break;
case OP_PSUBB:
case OP_PSUBW:
case OP_PSUBD:
case OP_PSUBQ:
values [ins->dreg] = LLVMBuildSub (builder, lhs, rhs, "");
break;
case OP_SUBPD:
case OP_SUBPS:
values [ins->dreg] = LLVMBuildFSub (builder, lhs, rhs, "");
break;
case OP_MULPD:
case OP_MULPS:
values [ins->dreg] = LLVMBuildFMul (builder, lhs, rhs, "");
break;
case OP_DIVPD:
case OP_DIVPS:
values [ins->dreg] = LLVMBuildFDiv (builder, lhs, rhs, "");
break;
case OP_PAND:
values [ins->dreg] = LLVMBuildAnd (builder, lhs, rhs, "");
break;
case OP_POR:
values [ins->dreg] = LLVMBuildOr (builder, lhs, rhs, "");
break;
case OP_PXOR:
values [ins->dreg] = LLVMBuildXor (builder, lhs, rhs, "");
break;
case OP_PMULW:
case OP_PMULD:
values [ins->dreg] = LLVMBuildMul (builder, lhs, rhs, "");
break;
case OP_ANDPS:
case OP_ANDNPS:
case OP_ORPS:
case OP_XORPS:
case OP_ANDPD:
case OP_ANDNPD:
case OP_ORPD:
case OP_XORPD: {
LLVMTypeRef t, rt;
LLVMValueRef v = NULL;
switch (ins->opcode) {
case OP_ANDPS:
case OP_ANDNPS:
case OP_ORPS:
case OP_XORPS:
t = LLVMVectorType (LLVMInt32Type (), 4);
rt = LLVMVectorType (LLVMFloatType (), 4);
break;
case OP_ANDPD:
case OP_ANDNPD:
case OP_ORPD:
case OP_XORPD:
t = LLVMVectorType (LLVMInt64Type (), 2);
rt = LLVMVectorType (LLVMDoubleType (), 2);
break;
default:
t = LLVMInt32Type ();
rt = LLVMInt32Type ();
g_assert_not_reached ();
}
lhs = LLVMBuildBitCast (builder, lhs, t, "");
rhs = LLVMBuildBitCast (builder, rhs, t, "");
switch (ins->opcode) {
case OP_ANDPS:
case OP_ANDPD:
v = LLVMBuildAnd (builder, lhs, rhs, "");
break;
case OP_ORPS:
case OP_ORPD:
v = LLVMBuildOr (builder, lhs, rhs, "");
break;
case OP_XORPS:
case OP_XORPD:
v = LLVMBuildXor (builder, lhs, rhs, "");
break;
case OP_ANDNPS:
case OP_ANDNPD:
v = LLVMBuildAnd (builder, rhs, LLVMBuildNot (builder, lhs, ""), "");
break;
}
values [ins->dreg] = LLVMBuildBitCast (builder, v, rt, "");
break;
}
case OP_PMIND_UN:
case OP_PMINW_UN:
case OP_PMINB_UN: {
LLVMValueRef cmp = LLVMBuildICmp (builder, LLVMIntULT, lhs, rhs, "");
values [ins->dreg] = LLVMBuildSelect (builder, cmp, lhs, rhs, "");
break;
}
case OP_PMAXD_UN:
case OP_PMAXW_UN:
case OP_PMAXB_UN: {
LLVMValueRef cmp = LLVMBuildICmp (builder, LLVMIntUGT, lhs, rhs, "");
values [ins->dreg] = LLVMBuildSelect (builder, cmp, lhs, rhs, "");
break;
}
case OP_PMINW: {
LLVMValueRef cmp = LLVMBuildICmp (builder, LLVMIntSLT, lhs, rhs, "");
values [ins->dreg] = LLVMBuildSelect (builder, cmp, lhs, rhs, "");
break;
}
case OP_PMAXW: {
LLVMValueRef cmp = LLVMBuildICmp (builder, LLVMIntSGT, lhs, rhs, "");
values [ins->dreg] = LLVMBuildSelect (builder, cmp, lhs, rhs, "");
break;
}
case OP_PAVGB_UN:
case OP_PAVGW_UN: {
LLVMValueRef ones_vec;
LLVMValueRef ones [MAX_VECTOR_ELEMS];
int vector_size = LLVMGetVectorSize (LLVMTypeOf (lhs));
LLVMTypeRef ext_elem_type = vector_size == 16 ? LLVMInt16Type () : LLVMInt32Type ();
for (int i = 0; i < MAX_VECTOR_ELEMS; ++i)
ones [i] = LLVMConstInt (ext_elem_type, 1, FALSE);
ones_vec = LLVMConstVector (ones, vector_size);
LLVMValueRef val;
LLVMTypeRef ext_type = LLVMVectorType (ext_elem_type, vector_size);
/* Have to increase the vector element size to prevent overflows */
/* res = trunc ((zext (lhs) + zext (rhs) + 1) >> 1) */
val = LLVMBuildAdd (builder, LLVMBuildZExt (builder, lhs, ext_type, ""), LLVMBuildZExt (builder, rhs, ext_type, ""), "");
val = LLVMBuildAdd (builder, val, ones_vec, "");
val = LLVMBuildLShr (builder, val, ones_vec, "");
values [ins->dreg] = LLVMBuildTrunc (builder, val, LLVMTypeOf (lhs), "");
break;
}
case OP_PCMPEQB:
case OP_PCMPEQW:
case OP_PCMPEQD:
case OP_PCMPEQQ:
case OP_PCMPGTB: {
LLVMValueRef pcmp;
LLVMTypeRef retType;
LLVMIntPredicate cmpOp;
if (ins->opcode == OP_PCMPGTB)
cmpOp = LLVMIntSGT;
else
cmpOp = LLVMIntEQ;
if (LLVMTypeOf (lhs) == LLVMTypeOf (rhs)) {
pcmp = LLVMBuildICmp (builder, cmpOp, lhs, rhs, "");
retType = LLVMTypeOf (lhs);
} else {
LLVMTypeRef flatType = LLVMVectorType (LLVMInt8Type (), 16);
LLVMValueRef flatRHS = convert (ctx, rhs, flatType);
LLVMValueRef flatLHS = convert (ctx, lhs, flatType);
pcmp = LLVMBuildICmp (builder, cmpOp, flatLHS, flatRHS, "");
retType = flatType;
}
values [ins->dreg] = LLVMBuildSExt (builder, pcmp, retType, "");
break;
}
case OP_CVTDQ2PS: {
LLVMValueRef i4 = LLVMBuildBitCast (builder, lhs, sse_i4_t, "");
values [ins->dreg] = LLVMBuildSIToFP (builder, i4, sse_r4_t, dname);
break;
}
case OP_CVTDQ2PD: {
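/* cvtdq2pd: convert the two low i32 lanes to double */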
LLVMValueRef indexes [16];
indexes [0] = LLVMConstInt (LLVMInt32Type (), 0, FALSE);
indexes [1] = LLVMConstInt (LLVMInt32Type (), 1, FALSE);
LLVMValueRef mask = LLVMConstVector (indexes, 2);
LLVMValueRef shuffle = LLVMBuildShuffleVector (builder, lhs, LLVMConstNull (LLVMTypeOf (lhs)), mask, "");
values [ins->dreg] = LLVMBuildSIToFP (builder, shuffle, LLVMVectorType (LLVMDoubleType (), 2), dname);
break;
}
case OP_SSE2_CVTSS2SD: {
LLVMValueRef rhs_elem = LLVMBuildExtractElement (builder, rhs, const_int32 (0), "");
LLVMValueRef fpext = LLVMBuildFPExt (builder, rhs_elem, LLVMDoubleType (), dname);
values [ins->dreg] = LLVMBuildInsertElement (builder, lhs, fpext, const_int32 (0), "");
break;
}
case OP_CVTPS2PD: {
LLVMValueRef indexes [16];
indexes [0] = LLVMConstInt (LLVMInt32Type (), 0, FALSE);
indexes [1] = LLVMConstInt (LLVMInt32Type (), 1, FALSE);
LLVMValueRef mask = LLVMConstVector (indexes, 2);
LLVMValueRef shuffle = LLVMBuildShuffleVector (builder, lhs, LLVMConstNull (LLVMTypeOf (lhs)), mask, "");
values [ins->dreg] = LLVMBuildFPExt (builder, shuffle, LLVMVectorType (LLVMDoubleType (), 2), dname);
break;
}
case OP_CVTTPS2DQ:
values [ins->dreg] = LLVMBuildFPToSI (builder, lhs, LLVMVectorType (LLVMInt32Type (), 4), dname);
break;
case OP_CVTPD2DQ:
case OP_CVTPS2DQ:
case OP_CVTPD2PS:
case OP_CVTTPD2DQ: {
LLVMValueRef v;
v = convert (ctx, values [ins->sreg1], simd_op_to_llvm_type (ins->opcode));
values [ins->dreg] = call_intrins (ctx, simd_ins_to_intrins (ins->opcode), &v, dname);
break;
}
case OP_COMPPS:
case OP_COMPPD: {
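/* cmpps/cmppd: map the x86 comparison immediate to an LLVM real predicate; the i1 results are sign-extended into a full-width mask below */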
LLVMRealPredicate op;
switch (ins->inst_c0) {
case SIMD_COMP_EQ:
op = LLVMRealOEQ;
break;
case SIMD_COMP_LT:
op = LLVMRealOLT;
break;
case SIMD_COMP_LE:
op = LLVMRealOLE;
break;
case SIMD_COMP_UNORD:
op = LLVMRealUNO;
break;
case SIMD_COMP_NEQ:
op = LLVMRealUNE;
break;
case SIMD_COMP_NLT:
op = LLVMRealUGE;
break;
case SIMD_COMP_NLE:
op = LLVMRealUGT;
break;
case SIMD_COMP_ORD:
op = LLVMRealORD;
break;
default:
g_assert_not_reached ();
}
LLVMValueRef cmp = LLVMBuildFCmp (builder, op, lhs, rhs, "");
if (ins->opcode == OP_COMPPD)
values [ins->dreg] = LLVMBuildBitCast (builder, LLVMBuildSExt (builder, cmp, LLVMVectorType (LLVMInt64Type (), 2), ""), LLVMTypeOf (lhs), "");
else
values [ins->dreg] = LLVMBuildBitCast (builder, LLVMBuildSExt (builder, cmp, LLVMVectorType (LLVMInt32Type (), 4), ""), LLVMTypeOf (lhs), "");
break;
}
case OP_ICONV_TO_X:
/* Only used to implement shifts by a non-immediate amount */
values [ins->dreg] = lhs;
break;
case OP_SHUFPS:
case OP_SHUFPD:
case OP_PSHUFLED:
case OP_PSHUFLEW_LOW:
case OP_PSHUFLEW_HIGH: {
int mask [16];
LLVMValueRef v1 = NULL, v2 = NULL, mask_values [16];
int i, mask_size = 0;
int imask = ins->inst_c0;
/* Convert the x86 shuffle mask to LLVM's */
switch (ins->opcode) {
case OP_SHUFPS:
mask_size = 4;
mask [0] = ((imask >> 0) & 3);
mask [1] = ((imask >> 2) & 3);
mask [2] = ((imask >> 4) & 3) + 4;
mask [3] = ((imask >> 6) & 3) + 4;
v1 = values [ins->sreg1];
v2 = values [ins->sreg2];
break;
case OP_SHUFPD:
mask_size = 2;
mask [0] = ((imask >> 0) & 1);
mask [1] = ((imask >> 1) & 1) + 2;
v1 = values [ins->sreg1];
v2 = values [ins->sreg2];
break;
case OP_PSHUFLEW_LOW:
mask_size = 8;
mask [0] = ((imask >> 0) & 3);
mask [1] = ((imask >> 2) & 3);
mask [2] = ((imask >> 4) & 3);
mask [3] = ((imask >> 6) & 3);
mask [4] = 4 + 0;
mask [5] = 4 + 1;
mask [6] = 4 + 2;
mask [7] = 4 + 3;
v1 = values [ins->sreg1];
v2 = LLVMGetUndef (LLVMTypeOf (v1));
break;
case OP_PSHUFLEW_HIGH:
mask_size = 8;
mask [0] = 0;
mask [1] = 1;
mask [2] = 2;
mask [3] = 3;
mask [4] = 4 + ((imask >> 0) & 3);
mask [5] = 4 + ((imask >> 2) & 3);
mask [6] = 4 + ((imask >> 4) & 3);
mask [7] = 4 + ((imask >> 6) & 3);
v1 = values [ins->sreg1];
v2 = LLVMGetUndef (LLVMTypeOf (v1));
break;
case OP_PSHUFLED:
mask_size = 4;
mask [0] = ((imask >> 0) & 3);
mask [1] = ((imask >> 2) & 3);
mask [2] = ((imask >> 4) & 3);
mask [3] = ((imask >> 6) & 3);
v1 = values [ins->sreg1];
v2 = LLVMGetUndef (LLVMTypeOf (v1));
break;
default:
g_assert_not_reached ();
}
for (i = 0; i < mask_size; ++i)
mask_values [i] = LLVMConstInt (LLVMInt32Type (), mask [i], FALSE);
values [ins->dreg] =
LLVMBuildShuffleVector (builder, v1, v2,
LLVMConstVector (mask_values, mask_size), dname);
break;
}
case OP_UNPACK_LOWB:
case OP_UNPACK_LOWW:
case OP_UNPACK_LOWD:
case OP_UNPACK_LOWQ:
case OP_UNPACK_LOWPS:
case OP_UNPACK_LOWPD:
case OP_UNPACK_HIGHB:
case OP_UNPACK_HIGHW:
case OP_UNPACK_HIGHD:
case OP_UNPACK_HIGHQ:
case OP_UNPACK_HIGHPS:
case OP_UNPACK_HIGHPD: {
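/* unpckl/unpckh: interleave the low or high halves of the two sources, expressed as a shufflevector mask */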
int mask [16];
LLVMValueRef mask_values [16];
int i, mask_size = 0;
gboolean low = FALSE;
switch (ins->opcode) {
case OP_UNPACK_LOWB:
mask_size = 16;
low = TRUE;
break;
case OP_UNPACK_LOWW:
mask_size = 8;
low = TRUE;
break;
case OP_UNPACK_LOWD:
case OP_UNPACK_LOWPS:
mask_size = 4;
low = TRUE;
break;
case OP_UNPACK_LOWQ:
case OP_UNPACK_LOWPD:
mask_size = 2;
low = TRUE;
break;
case OP_UNPACK_HIGHB:
mask_size = 16;
break;
case OP_UNPACK_HIGHW:
mask_size = 8;
break;
case OP_UNPACK_HIGHD:
case OP_UNPACK_HIGHPS:
mask_size = 4;
break;
case OP_UNPACK_HIGHQ:
case OP_UNPACK_HIGHPD:
mask_size = 2;
break;
default:
g_assert_not_reached ();
}
if (low) {
for (i = 0; i < (mask_size / 2); ++i) {
mask [(i * 2)] = i;
mask [(i * 2) + 1] = mask_size + i;
}
} else {
for (i = 0; i < (mask_size / 2); ++i) {
mask [(i * 2)] = (mask_size / 2) + i;
mask [(i * 2) + 1] = mask_size + (mask_size / 2) + i;
}
}
for (i = 0; i < mask_size; ++i)
mask_values [i] = LLVMConstInt (LLVMInt32Type (), mask [i], FALSE);
values [ins->dreg] =
LLVMBuildShuffleVector (builder, values [ins->sreg1], values [ins->sreg2],
LLVMConstVector (mask_values, mask_size), dname);
break;
}
case OP_DUPPD: {
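/* Duplicate the low double across both lanes (movddup-style) */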
LLVMTypeRef t = simd_op_to_llvm_type (ins->opcode);
LLVMValueRef v, val;
v = LLVMBuildExtractElement (builder, lhs, LLVMConstInt (LLVMInt32Type (), 0, FALSE), "");
val = LLVMConstNull (t);
val = LLVMBuildInsertElement (builder, val, v, LLVMConstInt (LLVMInt32Type (), 0, FALSE), "");
val = LLVMBuildInsertElement (builder, val, v, LLVMConstInt (LLVMInt32Type (), 1, FALSE), dname);
values [ins->dreg] = val;
break;
}
case OP_DUPPS_LOW:
case OP_DUPPS_HIGH: {
LLVMTypeRef t = simd_op_to_llvm_type (ins->opcode);
LLVMValueRef v1, v2, val;
if (ins->opcode == OP_DUPPS_LOW) {
v1 = LLVMBuildExtractElement (builder, lhs, LLVMConstInt (LLVMInt32Type (), 0, FALSE), "");
v2 = LLVMBuildExtractElement (builder, lhs, LLVMConstInt (LLVMInt32Type (), 2, FALSE), "");
} else {
v1 = LLVMBuildExtractElement (builder, lhs, LLVMConstInt (LLVMInt32Type (), 1, FALSE), "");
v2 = LLVMBuildExtractElement (builder, lhs, LLVMConstInt (LLVMInt32Type (), 3, FALSE), "");
}
val = LLVMConstNull (t);
val = LLVMBuildInsertElement (builder, val, v1, LLVMConstInt (LLVMInt32Type (), 0, FALSE), "");
val = LLVMBuildInsertElement (builder, val, v1, LLVMConstInt (LLVMInt32Type (), 1, FALSE), "");
val = LLVMBuildInsertElement (builder, val, v2, LLVMConstInt (LLVMInt32Type (), 2, FALSE), "");
val = LLVMBuildInsertElement (builder, val, v2, LLVMConstInt (LLVMInt32Type (), 3, FALSE), "");
values [ins->dreg] = val;
break;
}
case OP_FCONV_TO_R8_X: {
values [ins->dreg] = LLVMBuildInsertElement (builder, LLVMConstNull (sse_r8_t), lhs, LLVMConstInt (LLVMInt32Type (), 0, FALSE), "");
break;
}
case OP_FCONV_TO_R4_X: {
values [ins->dreg] = LLVMBuildInsertElement (builder, LLVMConstNull (sse_r4_t), lhs, LLVMConstInt (LLVMInt32Type (), 0, FALSE), "");
break;
}
#if defined(TARGET_X86) || defined(TARGET_AMD64)
case OP_SSE_MOVMSK: {
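/* movmskps/movmskpd/pmovmskb: collect the per-element sign bits into a scalar mask */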
LLVMValueRef args [1];
if (ins->inst_c1 == MONO_TYPE_R4) {
args [0] = lhs;
values [ins->dreg] = call_intrins (ctx, INTRINS_SSE_MOVMSK_PS, args, dname);
} else if (ins->inst_c1 == MONO_TYPE_R8) {
args [0] = lhs;
values [ins->dreg] = call_intrins (ctx, INTRINS_SSE_MOVMSK_PD, args, dname);
} else {
args [0] = convert (ctx, lhs, sse_i1_t);
values [ins->dreg] = call_intrins (ctx, INTRINS_SSE_PMOVMSKB, args, dname);
}
break;
}
case OP_SSE_MOVS:
case OP_SSE_MOVS2: {
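/* movss/movsd register form: low element from rhs, upper elements from lhs; for I8/U8, zero the upper lane of lhs (movq) */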
if (ins->inst_c1 == MONO_TYPE_R4)
values [ins->dreg] = LLVMBuildShuffleVector (builder, rhs, lhs, create_const_vector_4_i32 (0, 5, 6, 7), "");
else if (ins->inst_c1 == MONO_TYPE_R8)
values [ins->dreg] = LLVMBuildShuffleVector (builder, rhs, lhs, create_const_vector_2_i32 (0, 3), "");
else if (ins->inst_c1 == MONO_TYPE_I8 || ins->inst_c1 == MONO_TYPE_U8)
values [ins->dreg] = LLVMBuildInsertElement (builder, lhs,
LLVMConstInt (LLVMInt64Type (), 0, FALSE),
LLVMConstInt (LLVMInt32Type (), 1, FALSE), "");
else
g_assert_not_reached (); // will be needed for other types later
break;
}
case OP_SSE_MOVEHL: {
if (ins->inst_c1 == MONO_TYPE_R4)
values [ins->dreg] = LLVMBuildShuffleVector (builder, lhs, rhs, create_const_vector_4_i32 (6, 7, 2, 3), "");
else
g_assert_not_reached ();
break;
}
case OP_SSE_MOVELH: {
if (ins->inst_c1 == MONO_TYPE_R4)
values [ins->dreg] = LLVMBuildShuffleVector (builder, lhs, rhs, create_const_vector_4_i32 (0, 1, 4, 5), "");
else
g_assert_not_reached ();
break;
}
case OP_SSE_UNPACKLO: {
if (ins->inst_c1 == MONO_TYPE_R8 || ins->inst_c1 == MONO_TYPE_I8 || ins->inst_c1 == MONO_TYPE_U8) {
values [ins->dreg] = LLVMBuildShuffleVector (builder, lhs, rhs, create_const_vector_2_i32 (0, 2), "");
} else if (ins->inst_c1 == MONO_TYPE_R4 || ins->inst_c1 == MONO_TYPE_I4 || ins->inst_c1 == MONO_TYPE_U4) {
values [ins->dreg] = LLVMBuildShuffleVector (builder, lhs, rhs, create_const_vector_4_i32 (0, 4, 1, 5), "");
} else if (ins->inst_c1 == MONO_TYPE_I2 || ins->inst_c1 == MONO_TYPE_U2) {
const int mask_values [] = { 0, 8, 1, 9, 2, 10, 3, 11 };
LLVMValueRef shuffled = LLVMBuildShuffleVector (builder,
convert (ctx, lhs, sse_i2_t),
convert (ctx, rhs, sse_i2_t),
create_const_vector_i32 (mask_values, 8), "");
values [ins->dreg] = convert (ctx, shuffled, type_to_sse_type (ins->inst_c1));
} else if (ins->inst_c1 == MONO_TYPE_I1 || ins->inst_c1 == MONO_TYPE_U1) {
const int mask_values [] = { 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23 };
LLVMValueRef shuffled = LLVMBuildShuffleVector (builder,
convert (ctx, lhs, sse_i1_t),
convert (ctx, rhs, sse_i1_t),
create_const_vector_i32 (mask_values, 16), "");
values [ins->dreg] = convert (ctx, shuffled, type_to_sse_type (ins->inst_c1));
} else {
g_assert_not_reached ();
}
break;
}
case OP_SSE_UNPACKHI: {
if (ins->inst_c1 == MONO_TYPE_R8 || ins->inst_c1 == MONO_TYPE_I8 || ins->inst_c1 == MONO_TYPE_U8) {
values [ins->dreg] = LLVMBuildShuffleVector (builder, lhs, rhs, create_const_vector_2_i32 (1, 3), "");
} else if (ins->inst_c1 == MONO_TYPE_R4 || ins->inst_c1 == MONO_TYPE_I4 || ins->inst_c1 == MONO_TYPE_U4) {
values [ins->dreg] = LLVMBuildShuffleVector (builder, lhs, rhs, create_const_vector_4_i32 (2, 6, 3, 7), "");
} else if (ins->inst_c1 == MONO_TYPE_I2 || ins->inst_c1 == MONO_TYPE_U2) {
const int mask_values [] = { 4, 12, 5, 13, 6, 14, 7, 15 };
LLVMValueRef shuffled = LLVMBuildShuffleVector (builder,
convert (ctx, lhs, sse_i2_t),
convert (ctx, rhs, sse_i2_t),
create_const_vector_i32 (mask_values, 8), "");
values [ins->dreg] = convert (ctx, shuffled, type_to_sse_type (ins->inst_c1));
} else if (ins->inst_c1 == MONO_TYPE_I1 || ins->inst_c1 == MONO_TYPE_U1) {
const int mask_values [] = { 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31 };
LLVMValueRef shuffled = LLVMBuildShuffleVector (builder,
convert (ctx, lhs, sse_i1_t),
convert (ctx, rhs, sse_i1_t),
create_const_vector_i32 (mask_values, 16), "");
values [ins->dreg] = convert (ctx, shuffled, type_to_sse_type (ins->inst_c1));
} else {
g_assert_not_reached ();
}
break;
}
case OP_SSE_LOADU: {
LLVMValueRef dst_ptr = convert (ctx, lhs, LLVMPointerType (primitive_type_to_llvm_type (inst_c1_type (ins)), 0));
LLVMValueRef dst_vec = LLVMBuildBitCast (builder, dst_ptr, LLVMPointerType (type_to_sse_type (ins->inst_c1), 0), "");
values [ins->dreg] = mono_llvm_build_aligned_load (builder, dst_vec, "", FALSE, ins->inst_c0); // inst_c0 is alignment
break;
}
case OP_SSE_MOVSS: {
LLVMValueRef addr = convert (ctx, lhs, LLVMPointerType (LLVMFloatType (), 0));
LLVMValueRef val = mono_llvm_build_load (builder, addr, "", FALSE);
values [ins->dreg] = LLVMBuildInsertElement (builder, LLVMConstNull (type_to_sse_type (ins->inst_c1)), val, LLVMConstInt (LLVMInt32Type (), 0, FALSE), "");
break;
}
case OP_SSE_MOVSS_STORE: {
LLVMValueRef addr = convert (ctx, lhs, LLVMPointerType (LLVMFloatType (), 0));
LLVMValueRef val = LLVMBuildExtractElement (builder, rhs, LLVMConstInt (LLVMInt32Type (), 0, FALSE), "");
mono_llvm_build_store (builder, val, addr, FALSE, LLVM_BARRIER_NONE);
break;
}
case OP_SSE2_MOVD:
case OP_SSE2_MOVQ:
case OP_SSE2_MOVUPD: {
LLVMTypeRef rty = NULL;
switch (ins->opcode) {
case OP_SSE2_MOVD: rty = sse_i4_t; break;
case OP_SSE2_MOVQ: rty = sse_i8_t; break;
case OP_SSE2_MOVUPD: rty = sse_r8_t; break;
default: g_assert_not_reached ();
}
LLVMTypeRef srcty = LLVMGetElementType (rty);
LLVMValueRef zero = LLVMConstNull (rty);
LLVMValueRef addr = convert (ctx, lhs, LLVMPointerType (srcty, 0));
LLVMValueRef val = mono_llvm_build_aligned_load (builder, addr, "", FALSE, 1);
values [ins->dreg] = LLVMBuildInsertElement (builder, zero, val, const_int32 (0), dname);
break;
}
case OP_SSE_MOVLPS_LOAD:
case OP_SSE_MOVHPS_LOAD: {
LLVMTypeRef t = LLVMFloatType ();
int size = 4;
gboolean high = ins->opcode == OP_SSE_MOVHPS_LOAD;
/* Load two floats from rhs and store them in the low/high part of lhs */
LLVMValueRef addr = rhs;
LLVMValueRef addr1 = convert (ctx, addr, LLVMPointerType (t, 0));
LLVMValueRef addr2 = convert (ctx, LLVMBuildAdd (builder, convert (ctx, addr, IntPtrType ()), convert (ctx, LLVMConstInt (LLVMInt32Type (), size, FALSE), IntPtrType ()), ""), LLVMPointerType (t, 0));
LLVMValueRef val1 = mono_llvm_build_load (builder, addr1, "", FALSE);
LLVMValueRef val2 = mono_llvm_build_load (builder, addr2, "", FALSE);
int index1, index2;
index1 = high ? 2 : 0;
index2 = high ? 3 : 1;
values [ins->dreg] = LLVMBuildInsertElement (builder, LLVMBuildInsertElement (builder, lhs, val1, LLVMConstInt (LLVMInt32Type (), index1, FALSE), ""), val2, LLVMConstInt (LLVMInt32Type (), index2, FALSE), "");
break;
}
case OP_SSE2_MOVLPD_LOAD:
case OP_SSE2_MOVHPD_LOAD: {
LLVMTypeRef t = LLVMDoubleType ();
LLVMValueRef addr = convert (ctx, rhs, LLVMPointerType (t, 0));
LLVMValueRef val = mono_llvm_build_load (builder, addr, "", FALSE);
int index = ins->opcode == OP_SSE2_MOVHPD_LOAD ? 1 : 0;
values [ins->dreg] = LLVMBuildInsertElement (builder, lhs, val, const_int32 (index), "");
break;
}
case OP_SSE_MOVLPS_STORE:
case OP_SSE_MOVHPS_STORE: {
/* Store two floats from the low/high part of rhs to the address in lhs */
LLVMValueRef addr = lhs;
LLVMValueRef addr1 = convert (ctx, addr, LLVMPointerType (LLVMFloatType (), 0));
LLVMValueRef addr2 = convert (ctx, LLVMBuildAdd (builder, convert (ctx, addr, IntPtrType ()), convert (ctx, LLVMConstInt (LLVMInt32Type (), 4, FALSE), IntPtrType ()), ""), LLVMPointerType (LLVMFloatType (), 0));
int index1 = ins->opcode == OP_SSE_MOVLPS_STORE ? 0 : 2;
int index2 = ins->opcode == OP_SSE_MOVLPS_STORE ? 1 : 3;
LLVMValueRef val1 = LLVMBuildExtractElement (builder, rhs, LLVMConstInt (LLVMInt32Type (), index1, FALSE), "");
LLVMValueRef val2 = LLVMBuildExtractElement (builder, rhs, LLVMConstInt (LLVMInt32Type (), index2, FALSE), "");
mono_llvm_build_store (builder, val1, addr1, FALSE, LLVM_BARRIER_NONE);
mono_llvm_build_store (builder, val2, addr2, FALSE, LLVM_BARRIER_NONE);
break;
}
case OP_SSE2_MOVLPD_STORE:
case OP_SSE2_MOVHPD_STORE: {
LLVMTypeRef t = LLVMDoubleType ();
LLVMValueRef addr = convert (ctx, lhs, LLVMPointerType (t, 0));
int index = ins->opcode == OP_SSE2_MOVHPD_STORE ? 1 : 0;
LLVMValueRef val = LLVMBuildExtractElement (builder, rhs, const_int32 (index), "");
mono_llvm_build_store (builder, val, addr, FALSE, LLVM_BARRIER_NONE);
break;
}
case OP_SSE_STORE: {
LLVMValueRef dst_vec = convert (ctx, lhs, LLVMPointerType (LLVMTypeOf (rhs), 0));
mono_llvm_build_aligned_store (builder, rhs, dst_vec, FALSE, ins->inst_c0);
break;
}
case OP_SSE_STORES: {
LLVMValueRef first_elem = LLVMBuildExtractElement (builder, rhs, LLVMConstInt (LLVMInt32Type (), 0, FALSE), "");
LLVMValueRef dst = convert (ctx, lhs, LLVMPointerType (LLVMTypeOf (first_elem), 0));
mono_llvm_build_aligned_store (builder, first_elem, dst, FALSE, 1);
break;
}
case OP_SSE_MOVNTPS: {
LLVMValueRef addr = convert (ctx, lhs, LLVMPointerType (LLVMTypeOf (rhs), 0));
LLVMValueRef store = mono_llvm_build_aligned_store (builder, rhs, addr, FALSE, ins->inst_c0);
set_nontemporal_flag (store);
break;
}
case OP_SSE_PREFETCHT0: {
LLVMValueRef addr = convert (ctx, lhs, LLVMPointerType (LLVMInt8Type (), 0));
LLVMValueRef args [] = { addr, const_int32 (0), const_int32 (3), const_int32 (1) };
call_intrins (ctx, INTRINS_PREFETCH, args, "");
break;
}
case OP_SSE_PREFETCHT1: {
LLVMValueRef addr = convert (ctx, lhs, LLVMPointerType (LLVMInt8Type (), 0));
LLVMValueRef args [] = { addr, const_int32 (0), const_int32 (2), const_int32 (1) };
call_intrins (ctx, INTRINS_PREFETCH, args, "");
break;
}
case OP_SSE_PREFETCHT2: {
LLVMValueRef addr = convert (ctx, lhs, LLVMPointerType (LLVMInt8Type (), 0));
LLVMValueRef args [] = { addr, const_int32 (0), const_int32 (1), const_int32 (1) };
call_intrins (ctx, INTRINS_PREFETCH, args, "");
break;
}
case OP_SSE_PREFETCHNTA: {
LLVMValueRef addr = convert (ctx, lhs, LLVMPointerType (LLVMInt8Type (), 0));
LLVMValueRef args [] = { addr, const_int32 (0), const_int32 (0), const_int32 (1) };
call_intrins (ctx, INTRINS_PREFETCH, args, "");
break;
}
case OP_SSE_OR: {
LLVMValueRef vec_lhs_i64 = convert (ctx, lhs, sse_i8_t);
LLVMValueRef vec_rhs_i64 = convert (ctx, rhs, sse_i8_t);
LLVMValueRef vec_and = LLVMBuildOr (builder, vec_lhs_i64, vec_rhs_i64, "");
values [ins->dreg] = LLVMBuildBitCast (builder, vec_and, type_to_sse_type (ins->inst_c1), "");
break;
}
case OP_SSE_XOR: {
LLVMValueRef vec_lhs_i64 = convert (ctx, lhs, sse_i8_t);
LLVMValueRef vec_rhs_i64 = convert (ctx, rhs, sse_i8_t);
LLVMValueRef vec_and = LLVMBuildXor (builder, vec_lhs_i64, vec_rhs_i64, "");
values [ins->dreg] = LLVMBuildBitCast (builder, vec_and, type_to_sse_type (ins->inst_c1), "");
break;
}
case OP_SSE_AND: {
LLVMValueRef vec_lhs_i64 = convert (ctx, lhs, sse_i8_t);
LLVMValueRef vec_rhs_i64 = convert (ctx, rhs, sse_i8_t);
LLVMValueRef vec_and = LLVMBuildAnd (builder, vec_lhs_i64, vec_rhs_i64, "");
values [ins->dreg] = LLVMBuildBitCast (builder, vec_and, type_to_sse_type (ins->inst_c1), "");
break;
}
case OP_SSE_ANDN: {
LLVMValueRef minus_one [2];
minus_one [0] = LLVMConstInt (LLVMInt64Type (), -1, FALSE);
minus_one [1] = LLVMConstInt (LLVMInt64Type (), -1, FALSE);
LLVMValueRef vec_lhs_i64 = convert (ctx, lhs, sse_i8_t);
LLVMValueRef vec_xor = LLVMBuildXor (builder, vec_lhs_i64, LLVMConstVector (minus_one, 2), "");
LLVMValueRef vec_rhs_i64 = convert (ctx, rhs, sse_i8_t);
LLVMValueRef vec_and = LLVMBuildAnd (builder, vec_rhs_i64, vec_xor, "");
values [ins->dreg] = LLVMBuildBitCast (builder, vec_and, type_to_sse_type (ins->inst_c1), "");
break;
}
case OP_SSE_ADDSS:
case OP_SSE_SUBSS:
case OP_SSE_DIVSS:
case OP_SSE_MULSS:
case OP_SSE2_ADDSD:
case OP_SSE2_SUBSD:
case OP_SSE2_DIVSD:
case OP_SSE2_MULSD: {
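		/* Scalar SSE arithmetic: operate on element 0 of both operands and
		 * reinsert the result into lhs, leaving the upper lanes untouched. */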
LLVMValueRef v1 = LLVMBuildExtractElement (builder, lhs, LLVMConstInt (LLVMInt32Type (), 0, FALSE), "");
LLVMValueRef v2 = LLVMBuildExtractElement (builder, rhs, LLVMConstInt (LLVMInt32Type (), 0, FALSE), "");
LLVMValueRef v = NULL;
switch (ins->opcode) {
case OP_SSE_ADDSS:
case OP_SSE2_ADDSD:
v = LLVMBuildFAdd (builder, v1, v2, "");
break;
case OP_SSE_SUBSS:
case OP_SSE2_SUBSD:
v = LLVMBuildFSub (builder, v1, v2, "");
break;
case OP_SSE_DIVSS:
case OP_SSE2_DIVSD:
v = LLVMBuildFDiv (builder, v1, v2, "");
break;
case OP_SSE_MULSS:
case OP_SSE2_MULSD:
v = LLVMBuildFMul (builder, v1, v2, "");
break;
default:
g_assert_not_reached ();
}
values [ins->dreg] = LLVMBuildInsertElement (builder, lhs, v, LLVMConstInt (LLVMInt32Type (), 0, FALSE), "");
break;
}
case OP_SSE_CMPSS:
case OP_SSE2_CMPSD: {
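		/*
		 * Map the IR condition to an SSE compare immediate. Conditions without
		 * a direct encoding are implemented either by swapping the operands or
		 * by using a negated unordered predicate. Only element 0 of the compare
		 * result is meaningful, so it is shuffled with lhs afterwards to keep
		 * the upper lanes of lhs in the destination.
		 */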
int imm = -1;
gboolean swap = FALSE;
switch (ins->inst_c0) {
case CMP_EQ: imm = SSE_eq_ord_nosignal; break;
case CMP_GT: imm = SSE_lt_ord_signal; swap = TRUE; break;
case CMP_GE: imm = SSE_le_ord_signal; swap = TRUE; break;
case CMP_LT: imm = SSE_lt_ord_signal; break;
case CMP_LE: imm = SSE_le_ord_signal; break;
case CMP_GT_UN: imm = SSE_nle_unord_signal; break;
case CMP_GE_UN: imm = SSE_nlt_unord_signal; break;
case CMP_LT_UN: imm = SSE_nle_unord_signal; swap = TRUE; break;
case CMP_LE_UN: imm = SSE_nlt_unord_signal; swap = TRUE; break;
case CMP_NE: imm = SSE_neq_unord_nosignal; break;
case CMP_ORD: imm = SSE_ord_nosignal; break;
case CMP_UNORD: imm = SSE_unord_nosignal; break;
default: g_assert_not_reached (); break;
}
LLVMValueRef cmp = LLVMConstInt (LLVMInt8Type (), imm, FALSE);
LLVMValueRef args [] = { lhs, rhs, cmp };
if (swap) {
args [0] = rhs;
args [1] = lhs;
}
IntrinsicId id = (IntrinsicId) 0;
switch (ins->opcode) {
case OP_SSE_CMPSS: id = INTRINS_SSE_CMPSS; break;
case OP_SSE2_CMPSD: id = INTRINS_SSE_CMPSD; break;
default: g_assert_not_reached (); break;
}
int elements = LLVMGetVectorSize (LLVMTypeOf (lhs));
int mask_values [MAX_VECTOR_ELEMS] = { 0 };
for (int i = 1; i < elements; ++i) {
mask_values [i] = elements + i;
}
LLVMValueRef result = call_intrins (ctx, id, args, "");
result = LLVMBuildShuffleVector (builder, result, lhs, create_const_vector_i32 (mask_values, elements), "");
values [ins->dreg] = result;
break;
}
case OP_SSE_COMISS: {
LLVMValueRef args [] = { lhs, rhs };
IntrinsicId id = (IntrinsicId)0;
switch (ins->inst_c0) {
case CMP_EQ: id = INTRINS_SSE_COMIEQ_SS; break;
case CMP_GT: id = INTRINS_SSE_COMIGT_SS; break;
case CMP_GE: id = INTRINS_SSE_COMIGE_SS; break;
case CMP_LT: id = INTRINS_SSE_COMILT_SS; break;
case CMP_LE: id = INTRINS_SSE_COMILE_SS; break;
case CMP_NE: id = INTRINS_SSE_COMINEQ_SS; break;
default: g_assert_not_reached (); break;
}
values [ins->dreg] = call_intrins (ctx, id, args, "");
break;
}
case OP_SSE_UCOMISS: {
LLVMValueRef args [] = { lhs, rhs };
IntrinsicId id = (IntrinsicId)0;
switch (ins->inst_c0) {
case CMP_EQ: id = INTRINS_SSE_UCOMIEQ_SS; break;
case CMP_GT: id = INTRINS_SSE_UCOMIGT_SS; break;
case CMP_GE: id = INTRINS_SSE_UCOMIGE_SS; break;
case CMP_LT: id = INTRINS_SSE_UCOMILT_SS; break;
case CMP_LE: id = INTRINS_SSE_UCOMILE_SS; break;
case CMP_NE: id = INTRINS_SSE_UCOMINEQ_SS; break;
default: g_assert_not_reached (); break;
}
values [ins->dreg] = call_intrins (ctx, id, args, "");
break;
}
case OP_SSE2_COMISD: {
LLVMValueRef args [] = { lhs, rhs };
IntrinsicId id = (IntrinsicId)0;
switch (ins->inst_c0) {
case CMP_EQ: id = INTRINS_SSE_COMIEQ_SD; break;
case CMP_GT: id = INTRINS_SSE_COMIGT_SD; break;
case CMP_GE: id = INTRINS_SSE_COMIGE_SD; break;
case CMP_LT: id = INTRINS_SSE_COMILT_SD; break;
case CMP_LE: id = INTRINS_SSE_COMILE_SD; break;
case CMP_NE: id = INTRINS_SSE_COMINEQ_SD; break;
default: g_assert_not_reached (); break;
}
values [ins->dreg] = call_intrins (ctx, id, args, "");
break;
}
case OP_SSE2_UCOMISD: {
LLVMValueRef args [] = { lhs, rhs };
IntrinsicId id = (IntrinsicId)0;
switch (ins->inst_c0) {
case CMP_EQ: id = INTRINS_SSE_UCOMIEQ_SD; break;
case CMP_GT: id = INTRINS_SSE_UCOMIGT_SD; break;
case CMP_GE: id = INTRINS_SSE_UCOMIGE_SD; break;
case CMP_LT: id = INTRINS_SSE_UCOMILT_SD; break;
case CMP_LE: id = INTRINS_SSE_UCOMILE_SD; break;
case CMP_NE: id = INTRINS_SSE_UCOMINEQ_SD; break;
default: g_assert_not_reached (); break;
}
values [ins->dreg] = call_intrins (ctx, id, args, "");
break;
}
case OP_SSE_CVTSI2SS:
case OP_SSE_CVTSI2SS64:
case OP_SSE2_CVTSI2SD:
case OP_SSE2_CVTSI2SD64: {
LLVMTypeRef ty = LLVMFloatType ();
switch (ins->opcode) {
case OP_SSE2_CVTSI2SD:
case OP_SSE2_CVTSI2SD64:
ty = LLVMDoubleType ();
break;
}
LLVMValueRef fp = LLVMBuildSIToFP (builder, rhs, ty, "");
values [ins->dreg] = LLVMBuildInsertElement (builder, lhs, fp, const_int32 (0), dname);
break;
}
case OP_SSE2_PMULUDQ: {
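		/* PMULUDQ multiplies the low 32 bits of each 64-bit lane. Masking both
		 * operands with UINT32_MAX clears the high halves, so the 64-bit
		 * multiply cannot wrap (hence NUW) and yields the full product. */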
LLVMValueRef i32_max = LLVMConstInt (LLVMInt64Type (), UINT32_MAX, FALSE);
LLVMValueRef maskvals [] = { i32_max, i32_max };
LLVMValueRef mask = LLVMConstVector (maskvals, 2);
LLVMValueRef l = LLVMBuildAnd (builder, convert (ctx, lhs, sse_i8_t), mask, "");
LLVMValueRef r = LLVMBuildAnd (builder, convert (ctx, rhs, sse_i8_t), mask, "");
values [ins->dreg] = LLVMBuildNUWMul (builder, l, r, dname);
break;
}
case OP_SSE_SQRTSS:
case OP_SSE2_SQRTSD: {
LLVMValueRef upper = values [ins->sreg1];
LLVMValueRef lower = values [ins->sreg2];
LLVMValueRef scalar = LLVMBuildExtractElement (builder, lower, const_int32 (0), "");
LLVMValueRef result = call_intrins (ctx, simd_ins_to_intrins (ins->opcode), &scalar, dname);
values [ins->dreg] = LLVMBuildInsertElement (builder, upper, result, const_int32 (0), "");
break;
}
case OP_SSE_RCPSS:
case OP_SSE_RSQRTSS: {
IntrinsicId id = (IntrinsicId)0;
switch (ins->opcode) {
case OP_SSE_RCPSS: id = INTRINS_SSE_RCP_SS; break;
case OP_SSE_RSQRTSS: id = INTRINS_SSE_RSQRT_SS; break;
default: g_assert_not_reached (); break;
		}
LLVMValueRef result = call_intrins (ctx, id, &rhs, dname);
const int mask[] = { 0, 5, 6, 7 };
LLVMValueRef shufmask = create_const_vector_i32 (mask, 4);
values [ins->dreg] = LLVMBuildShuffleVector (builder, result, lhs, shufmask, "");
break;
}
case OP_XOP: {
IntrinsicId id = (IntrinsicId)ins->inst_c0;
call_intrins (ctx, id, NULL, "");
break;
}
case OP_XOP_X_I:
case OP_XOP_X_X:
case OP_XOP_I4_X:
case OP_XOP_I8_X:
case OP_XOP_X_X_X:
case OP_XOP_X_X_I4:
case OP_XOP_X_X_I8: {
IntrinsicId id = (IntrinsicId)ins->inst_c0;
LLVMValueRef args [] = { lhs, rhs };
values [ins->dreg] = call_intrins (ctx, id, args, "");
break;
}
case OP_XOP_I4_X_X: {
gboolean to_i8_t = FALSE;
gboolean ret_bool = FALSE;
IntrinsicId id = (IntrinsicId)ins->inst_c0;
switch (ins->inst_c0) {
case INTRINS_SSE_TESTC: to_i8_t = TRUE; ret_bool = TRUE; break;
case INTRINS_SSE_TESTZ: to_i8_t = TRUE; ret_bool = TRUE; break;
case INTRINS_SSE_TESTNZ: to_i8_t = TRUE; ret_bool = TRUE; break;
default: g_assert_not_reached (); break;
}
LLVMValueRef args [] = { lhs, rhs };
if (to_i8_t) {
args [0] = convert (ctx, args [0], sse_i8_t);
args [1] = convert (ctx, args [1], sse_i8_t);
}
LLVMValueRef call = call_intrins (ctx, id, args, "");
if (ret_bool) {
// if return type is bool (it's still i32) we need to normalize it to 1/0
LLVMValueRef cmp_zero = LLVMBuildICmp (builder, LLVMIntNE, call, LLVMConstInt (LLVMInt32Type (), 0, FALSE), "");
values [ins->dreg] = LLVMBuildZExt (builder, cmp_zero, LLVMInt8Type (), "");
} else {
values [ins->dreg] = call;
}
break;
}
case OP_SSE2_MASKMOVDQU: {
LLVMTypeRef i8ptr = LLVMPointerType (LLVMInt8Type (), 0);
LLVMValueRef dstaddr = convert (ctx, values [ins->sreg3], i8ptr);
LLVMValueRef src = convert (ctx, lhs, sse_i1_t);
LLVMValueRef mask = convert (ctx, rhs, sse_i1_t);
LLVMValueRef args[] = { src, mask, dstaddr };
call_intrins (ctx, INTRINS_SSE_MASKMOVDQU, args, "");
break;
}
case OP_PADDB_SAT:
case OP_PADDW_SAT:
case OP_PSUBB_SAT:
case OP_PSUBW_SAT:
case OP_PADDB_SAT_UN:
case OP_PADDW_SAT_UN:
case OP_PSUBB_SAT_UN:
case OP_PSUBW_SAT_UN:
case OP_SSE2_ADDS:
case OP_SSE2_SUBS: {
IntrinsicId id = (IntrinsicId)0;
int type = 0;
gboolean is_add = TRUE;
switch (ins->opcode) {
case OP_PADDB_SAT: type = MONO_TYPE_I1; break;
case OP_PADDW_SAT: type = MONO_TYPE_I2; break;
case OP_PSUBB_SAT: type = MONO_TYPE_I1; is_add = FALSE; break;
case OP_PSUBW_SAT: type = MONO_TYPE_I2; is_add = FALSE; break;
case OP_PADDB_SAT_UN: type = MONO_TYPE_U1; break;
case OP_PADDW_SAT_UN: type = MONO_TYPE_U2; break;
case OP_PSUBB_SAT_UN: type = MONO_TYPE_U1; is_add = FALSE; break;
case OP_PSUBW_SAT_UN: type = MONO_TYPE_U2; is_add = FALSE; break;
case OP_SSE2_ADDS: type = ins->inst_c1; break;
case OP_SSE2_SUBS: type = ins->inst_c1; is_add = FALSE; break;
default: g_assert_not_reached ();
}
if (is_add) {
switch (type) {
case MONO_TYPE_I1: id = INTRINS_SSE_SADD_SATI8; break;
case MONO_TYPE_U1: id = INTRINS_SSE_UADD_SATI8; break;
case MONO_TYPE_I2: id = INTRINS_SSE_SADD_SATI16; break;
case MONO_TYPE_U2: id = INTRINS_SSE_UADD_SATI16; break;
default: g_assert_not_reached (); break;
}
} else {
switch (type) {
case MONO_TYPE_I1: id = INTRINS_SSE_SSUB_SATI8; break;
case MONO_TYPE_U1: id = INTRINS_SSE_USUB_SATI8; break;
case MONO_TYPE_I2: id = INTRINS_SSE_SSUB_SATI16; break;
case MONO_TYPE_U2: id = INTRINS_SSE_USUB_SATI16; break;
default: g_assert_not_reached (); break;
}
}
LLVMTypeRef vecty = type_to_sse_type (type);
LLVMValueRef args [] = { convert (ctx, lhs, vecty), convert (ctx, rhs, vecty) };
LLVMValueRef result = call_intrins (ctx, id, args, dname);
values [ins->dreg] = convert (ctx, result, vecty);
break;
}
case OP_SSE2_PACKUS: {
LLVMValueRef args [2];
args [0] = convert (ctx, lhs, sse_i2_t);
args [1] = convert (ctx, rhs, sse_i2_t);
values [ins->dreg] = convert (ctx,
call_intrins (ctx, INTRINS_SSE_PACKUSWB, args, dname),
type_to_sse_type (ins->inst_c1));
break;
}
case OP_SSE2_SRLI: {
LLVMValueRef args [] = { lhs, rhs };
values [ins->dreg] = convert (ctx,
call_intrins (ctx, INTRINS_SSE_PSRLI_W, args, dname),
type_to_sse_type (ins->inst_c1));
break;
}
case OP_SSE2_PSLLDQ:
case OP_SSE2_PSRLDQ: {
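		/* Whole-register byte shift with a runtime count: emit a 16-way switch,
		 * one shuffle per possible count, joined by a phi. Counts outside 0-15
		 * produce zero (the default case). */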
LLVMBasicBlockRef bbs [16 + 1];
LLVMValueRef switch_ins;
LLVMValueRef value = lhs;
LLVMValueRef index = rhs;
LLVMValueRef phi_values [16 + 1];
LLVMTypeRef t = sse_i1_t;
int nelems = 16;
int i;
gboolean shift_right = (ins->opcode == OP_SSE2_PSRLDQ);
value = convert (ctx, value, t);
// No corresponding LLVM intrinsics
// FIXME: Optimize const count
for (i = 0; i < nelems; ++i)
bbs [i] = gen_bb (ctx, "PSLLDQ_CASE_BB");
bbs [nelems] = gen_bb (ctx, "PSLLDQ_DEF_BB");
cbb = gen_bb (ctx, "PSLLDQ_COND_BB");
switch_ins = LLVMBuildSwitch (builder, index, bbs [nelems], 0);
for (i = 0; i < nelems; ++i) {
LLVMAddCase (switch_ins, LLVMConstInt (LLVMInt32Type (), i, FALSE), bbs [i]);
LLVMPositionBuilderAtEnd (builder, bbs [i]);
int mask_values [16];
// Implement shift using a shuffle
if (shift_right) {
for (int j = 0; j < nelems - i; ++j)
mask_values [j] = i + j;
				for (int j = nelems - i; j < nelems; ++j)
mask_values [j] = nelems;
} else {
for (int j = 0; j < i; ++j)
mask_values [j] = nelems;
for (int j = 0; j < nelems - i; ++j)
mask_values [j + i] = j;
}
phi_values [i] = LLVMBuildShuffleVector (builder, value, LLVMGetUndef (t), create_const_vector_i32 (mask_values, nelems), "");
LLVMBuildBr (builder, cbb);
}
/* Default case */
LLVMPositionBuilderAtEnd (builder, bbs [nelems]);
phi_values [nelems] = LLVMConstNull (t);
LLVMBuildBr (builder, cbb);
LLVMPositionBuilderAtEnd (builder, cbb);
values [ins->dreg] = LLVMBuildPhi (builder, LLVMTypeOf (phi_values [0]), "");
LLVMAddIncoming (values [ins->dreg], phi_values, bbs, nelems + 1);
values [ins->dreg] = convert (ctx, values [ins->dreg], type_to_sse_type (ins->inst_c1));
ctx->bblocks [bb->block_num].end_bblock = cbb;
break;
}
case OP_SSE2_PSRAW_IMM:
case OP_SSE2_PSRAD_IMM:
case OP_SSE2_PSRLW_IMM:
case OP_SSE2_PSRLD_IMM:
case OP_SSE2_PSRLQ_IMM: {
LLVMValueRef value = lhs;
LLVMValueRef index = rhs;
IntrinsicId id;
// FIXME: Optimize const index case
/* Use the non-immediate version */
switch (ins->opcode) {
case OP_SSE2_PSRAW_IMM: id = INTRINS_SSE_PSRA_W; break;
case OP_SSE2_PSRAD_IMM: id = INTRINS_SSE_PSRA_D; break;
case OP_SSE2_PSRLW_IMM: id = INTRINS_SSE_PSRL_W; break;
case OP_SSE2_PSRLD_IMM: id = INTRINS_SSE_PSRL_D; break;
case OP_SSE2_PSRLQ_IMM: id = INTRINS_SSE_PSRL_Q; break;
default: g_assert_not_reached (); break;
}
LLVMTypeRef t = LLVMTypeOf (value);
LLVMValueRef index_vect = LLVMBuildInsertElement (builder, LLVMConstNull (t), convert (ctx, index, LLVMGetElementType (t)), const_int32 (0), "");
LLVMValueRef args [] = { value, index_vect };
values [ins->dreg] = call_intrins (ctx, id, args, "");
break;
}
case OP_SSE_SHUFPS:
case OP_SSE2_SHUFPD:
case OP_SSE2_PSHUFD:
case OP_SSE2_PSHUFHW:
case OP_SSE2_PSHUFLW: {
LLVMTypeRef ret_t = LLVMTypeOf (lhs);
LLVMValueRef l = lhs;
LLVMValueRef r = rhs;
LLVMValueRef ctl = arg3;
const char *oname = "";
int ncases = 0;
switch (ins->opcode) {
case OP_SSE_SHUFPS: ncases = 256; break;
case OP_SSE2_SHUFPD: ncases = 4; break;
case OP_SSE2_PSHUFD: case OP_SSE2_PSHUFHW: case OP_SSE2_PSHUFLW: ncases = 256; r = lhs; ctl = rhs; break;
}
switch (ins->opcode) {
case OP_SSE_SHUFPS: oname = "sse_shufps"; break;
case OP_SSE2_SHUFPD: oname = "sse2_shufpd"; break;
case OP_SSE2_PSHUFD: oname = "sse2_pshufd"; break;
case OP_SSE2_PSHUFHW: oname = "sse2_pshufhw"; break;
case OP_SSE2_PSHUFLW: oname = "sse2_pshuflw"; break;
}
ctl = LLVMBuildAnd (builder, ctl, const_int32 (ncases - 1), "");
ImmediateUnrollCtx ictx = immediate_unroll_begin (ctx, bb, ncases, ctl, ret_t, oname);
int mask_values [8];
int mask_len = 0;
int i = 0;
while (immediate_unroll_next (&ictx, &i)) {
switch (ins->opcode) {
case OP_SSE_SHUFPS:
mask_len = 4;
mask_values [0] = ((i >> 0) & 0x3) + 0; // take two elements from lhs
mask_values [1] = ((i >> 2) & 0x3) + 0;
mask_values [2] = ((i >> 4) & 0x3) + 4; // and two from rhs
mask_values [3] = ((i >> 6) & 0x3) + 4;
break;
case OP_SSE2_SHUFPD:
mask_len = 2;
mask_values [0] = ((i >> 0) & 0x1) + 0;
mask_values [1] = ((i >> 1) & 0x1) + 2;
break;
case OP_SSE2_PSHUFD:
/*
				 * Each 2 bits in mask selects 1 dword from the source and copies it to the
* destination.
*/
mask_len = 4;
for (int j = 0; j < 4; ++j) {
int windex = (i >> (j * 2)) & 0x3;
mask_values [j] = windex;
}
break;
case OP_SSE2_PSHUFHW:
/*
* Each 2 bits in mask selects 1 word from the high quadword of the source and copies it to the
* high quadword of the destination.
*/
mask_len = 8;
/* The low quadword stays the same */
for (int j = 0; j < 4; ++j)
mask_values [j] = j;
for (int j = 0; j < 4; ++j) {
int windex = (i >> (j * 2)) & 0x3;
mask_values [j + 4] = 4 + windex;
}
break;
case OP_SSE2_PSHUFLW:
mask_len = 8;
/* The high quadword stays the same */
for (int j = 0; j < 4; ++j)
mask_values [j + 4] = j + 4;
for (int j = 0; j < 4; ++j) {
int windex = (i >> (j * 2)) & 0x3;
mask_values [j] = windex;
}
break;
}
LLVMValueRef mask = create_const_vector_i32 (mask_values, mask_len);
LLVMValueRef result = LLVMBuildShuffleVector (builder, l, r, mask, oname);
immediate_unroll_commit (&ictx, i, result);
}
immediate_unroll_default (&ictx);
immediate_unroll_commit_default (&ictx, LLVMGetUndef (ret_t));
values [ins->dreg] = immediate_unroll_end (&ictx, &cbb);
break;
}
case OP_SSE3_MOVDDUP: {
int mask [] = { 0, 0 };
values [ins->dreg] = LLVMBuildShuffleVector (builder, lhs,
LLVMGetUndef (LLVMTypeOf (lhs)),
create_const_vector_i32 (mask, 2), "");
break;
}
case OP_SSE3_MOVDDUP_MEM: {
LLVMValueRef undef = LLVMGetUndef (v128_r8_t);
LLVMValueRef addr = convert (ctx, lhs, LLVMPointerType (r8_t, 0));
LLVMValueRef elem = mono_llvm_build_aligned_load (builder, addr, "sse3_movddup_mem", FALSE, 1);
LLVMValueRef val = LLVMBuildInsertElement (builder, undef, elem, const_int32 (0), "sse3_movddup_mem");
values [ins->dreg] = LLVMBuildShuffleVector (builder, val, undef, LLVMConstNull (LLVMVectorType (i4_t, 2)), "sse3_movddup_mem");
break;
}
case OP_SSE3_MOVSHDUP: {
int mask [] = { 1, 1, 3, 3 };
values [ins->dreg] = LLVMBuildShuffleVector (builder, lhs, LLVMConstNull (LLVMTypeOf (lhs)), create_const_vector_i32 (mask, 4), "");
break;
}
case OP_SSE3_MOVSLDUP: {
int mask [] = { 0, 0, 2, 2 };
values [ins->dreg] = LLVMBuildShuffleVector (builder, lhs, LLVMConstNull (LLVMTypeOf (lhs)), create_const_vector_i32 (mask, 4), "");
break;
}
case OP_SSSE3_SHUFFLE: {
LLVMValueRef args [] = { lhs, rhs };
values [ins->dreg] = call_intrins (ctx, INTRINS_SSE_PSHUFB, args, dname);
break;
}
case OP_SSSE3_ABS: {
// %sub = sub <16 x i8> zeroinitializer, %arg
// %cmp = icmp sgt <16 x i8> %arg, zeroinitializer
// %abs = select <16 x i1> %cmp, <16 x i8> %arg, <16 x i8> %sub
LLVMTypeRef typ = type_to_sse_type (ins->inst_c1);
		LLVMValueRef sub = LLVMBuildSub (builder, LLVMConstNull (typ), lhs, "");
		LLVMValueRef cmp = LLVMBuildICmp (builder, LLVMIntSGT, lhs, LLVMConstNull (typ), "");
LLVMValueRef abs = LLVMBuildSelect (builder, cmp, lhs, sub, "");
values [ins->dreg] = convert (ctx, abs, typ);
break;
}
case OP_SSSE3_ALIGNR: {
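		/* PALIGNR extracts a 16-byte window starting at the shift count from the
		 * concatenation of rhs (low) and lhs (high). Counts of 17-31 read past
		 * lhs and shuffle against zero; counts >= 32 hit the default case and
		 * produce zero. */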
LLVMTypeRef ret_t = simd_class_to_llvm_type (ctx, ins->klass);
LLVMValueRef zero = LLVMConstNull (v128_i1_t);
LLVMValueRef hivec = convert (ctx, lhs, v128_i1_t);
LLVMValueRef lovec = convert (ctx, rhs, v128_i1_t);
LLVMValueRef rshift_amount = convert (ctx, arg3, i1_t);
ImmediateUnrollCtx ictx = immediate_unroll_begin (ctx, bb, 32, rshift_amount, v128_i1_t, "ssse3_alignr");
LLVMValueRef mask_values [16]; // 128-bit vector, 8-bit elements, 16 total elements
int i = 0;
while (immediate_unroll_next (&ictx, &i)) {
LLVMValueRef hi = NULL;
LLVMValueRef lo = NULL;
if (i <= 16) {
for (int j = 0; j < 16; j++)
mask_values [j] = const_int32 (i + j);
lo = lovec;
hi = hivec;
} else {
for (int j = 0; j < 16; j++)
mask_values [j] = const_int32 (i + j - 16);
lo = hivec;
hi = zero;
}
LLVMValueRef shuffled = LLVMBuildShuffleVector (builder, lo, hi, LLVMConstVector (mask_values, 16), "ssse3_alignr");
immediate_unroll_commit (&ictx, i, shuffled);
}
immediate_unroll_default (&ictx);
immediate_unroll_commit_default (&ictx, zero);
LLVMValueRef result = immediate_unroll_end (&ictx, &cbb);
values [ins->dreg] = convert (ctx, result, ret_t);
break;
}
case OP_SSE41_ROUNDP: {
LLVMValueRef args [] = { lhs, LLVMConstInt (LLVMInt32Type (), ins->inst_c0, FALSE) };
values [ins->dreg] = call_intrins (ctx, ins->inst_c1 == MONO_TYPE_R4 ? INTRINS_SSE_ROUNDPS : INTRINS_SSE_ROUNDPD, args, dname);
break;
}
case OP_SSE41_ROUNDS: {
LLVMValueRef args [3];
args [0] = lhs;
args [1] = rhs;
args [2] = LLVMConstInt (LLVMInt32Type (), ins->inst_c0, FALSE);
values [ins->dreg] = call_intrins (ctx, ins->inst_c1 == MONO_TYPE_R4 ? INTRINS_SSE_ROUNDSS : INTRINS_SSE_ROUNDSD, args, dname);
break;
}
case OP_SSE41_DPPS:
case OP_SSE41_DPPD: {
/* Bits 0, 1, 4, 5 are meaningful for the control mask
* in dppd; all bits are meaningful for dpps.
*/
LLVMTypeRef ret_t = NULL;
LLVMValueRef mask = NULL;
int mask_bits = 0;
int high_shift = 0;
int low_mask = 0;
IntrinsicId iid = (IntrinsicId) 0;
const char *oname = "";
switch (ins->opcode) {
case OP_SSE41_DPPS:
ret_t = v128_r4_t;
mask = const_int8 (0xff); // 0b11111111
mask_bits = 8;
high_shift = 4;
low_mask = 0xf;
iid = INTRINS_SSE_DPPS;
oname = "sse41_dpps";
break;
case OP_SSE41_DPPD:
ret_t = v128_r8_t;
mask = const_int8 (0x33); // 0b00110011
mask_bits = 4;
high_shift = 2;
low_mask = 0x3;
iid = INTRINS_SSE_DPPD;
oname = "sse41_dppd";
break;
}
LLVMValueRef args [] = { lhs, rhs, NULL };
LLVMValueRef index = LLVMBuildAnd (builder, convert (ctx, arg3, i1_t), mask, oname);
ImmediateUnrollCtx ictx = immediate_unroll_begin (ctx, bb, 1 << mask_bits, index, ret_t, oname);
int i = 0;
while (immediate_unroll_next (&ictx, &i)) {
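			/* Expand the dense unroll counter into the immediate the instruction
			 * actually decodes (only bits 0-1 and 4-5 for dppd); the unrolled
			 * case is keyed on the expanded value. */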
int imm = ((i >> high_shift) << 4) | (i & low_mask);
args [2] = const_int8 (imm);
LLVMValueRef result = call_intrins (ctx, iid, args, dname);
immediate_unroll_commit (&ictx, imm, result);
}
immediate_unroll_default (&ictx);
immediate_unroll_commit_default (&ictx, LLVMGetUndef (ret_t));
values [ins->dreg] = immediate_unroll_end (&ictx, &cbb);
break;
}
case OP_SSE41_MPSADBW: {
LLVMValueRef args [] = {
convert (ctx, lhs, sse_i1_t),
convert (ctx, rhs, sse_i1_t),
NULL,
};
LLVMValueRef ctl = convert (ctx, arg3, i1_t);
// Only 3 bits (bits 0-2) are used by mpsadbw and llvm.x86.sse41.mpsadbw
int used_bits = 0x7;
ctl = LLVMBuildAnd (builder, ctl, const_int8 (used_bits), "sse41_mpsadbw");
ImmediateUnrollCtx ictx = immediate_unroll_begin (ctx, bb, used_bits + 1, ctl, v128_i2_t, "sse41_mpsadbw");
int i = 0;
while (immediate_unroll_next (&ictx, &i)) {
args [2] = const_int8 (i);
LLVMValueRef result = call_intrins (ctx, INTRINS_SSE_MPSADBW, args, "sse41_mpsadbw");
immediate_unroll_commit (&ictx, i, result);
}
immediate_unroll_unreachable_default (&ictx);
values [ins->dreg] = immediate_unroll_end (&ictx, &cbb);
break;
}
case OP_SSE41_INSERTPS: {
LLVMValueRef ctl = convert (ctx, arg3, i1_t);
LLVMValueRef args [] = { lhs, rhs, NULL };
ImmediateUnrollCtx ictx = immediate_unroll_begin (ctx, bb, 256, ctl, v128_r4_t, "sse41_insertps");
int i = 0;
while (immediate_unroll_next (&ictx, &i)) {
args [2] = const_int8 (i);
LLVMValueRef result = call_intrins (ctx, INTRINS_SSE_INSERTPS, args, dname);
immediate_unroll_commit (&ictx, i, result);
}
immediate_unroll_unreachable_default (&ictx);
values [ins->dreg] = immediate_unroll_end (&ictx, &cbb);
break;
}
case OP_SSE41_BLEND: {
LLVMTypeRef ret_t = LLVMTypeOf (lhs);
int nelem = LLVMGetVectorSize (ret_t);
g_assert (nelem >= 2 && nelem <= 8); // I2, U2, R4, R8
int unique_ctl_patterns = 1 << nelem;
int ctlmask = unique_ctl_patterns - 1;
LLVMValueRef ctl = convert (ctx, arg3, i1_t);
ctl = LLVMBuildAnd (builder, ctl, const_int8 (ctlmask), "sse41_blend");
ImmediateUnrollCtx ictx = immediate_unroll_begin (ctx, bb, unique_ctl_patterns, ctl, ret_t, "sse41_blend");
int i = 0;
int mask_values [MAX_VECTOR_ELEMS] = { 0 };
while (immediate_unroll_next (&ictx, &i)) {
for (int lane = 0; lane < nelem; ++lane) {
				// lane n is taken from rhs when bit n of the control byte is set, from lhs otherwise
gboolean bit_set = (i & (1 << lane)) >> lane;
mask_values [lane] = lane + (bit_set ? nelem : 0);
}
LLVMValueRef mask = create_const_vector_i32 (mask_values, nelem);
LLVMValueRef result = LLVMBuildShuffleVector (builder, lhs, rhs, mask, "sse41_blend");
immediate_unroll_commit (&ictx, i, result);
}
immediate_unroll_default (&ictx);
immediate_unroll_commit_default (&ictx, LLVMGetUndef (ret_t));
values [ins->dreg] = immediate_unroll_end (&ictx, &cbb);
break;
}
case OP_SSE41_BLENDV: {
LLVMValueRef args [] = { lhs, rhs, values [ins->sreg3] };
if (ins->inst_c1 == MONO_TYPE_R4) {
values [ins->dreg] = call_intrins (ctx, INTRINS_SSE_BLENDVPS, args, dname);
} else if (ins->inst_c1 == MONO_TYPE_R8) {
values [ins->dreg] = call_intrins (ctx, INTRINS_SSE_BLENDVPD, args, dname);
} else {
			// for other non-fp types just convert to <16 x i8> and pass to @llvm.x86.sse41.pblendvb
args [0] = LLVMBuildBitCast (ctx->builder, args [0], sse_i1_t, "");
args [1] = LLVMBuildBitCast (ctx->builder, args [1], sse_i1_t, "");
args [2] = LLVMBuildBitCast (ctx->builder, args [2], sse_i1_t, "");
values [ins->dreg] = call_intrins (ctx, INTRINS_SSE_PBLENDVB, args, dname);
}
break;
}
case OP_SSE_CVTII: {
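		/* Widening conversion (pmovsx/pmovzx family): a scalar address operand
		 * is first loaded as a vector, then the low source elements are
		 * shuffled out and sign- or zero-extended to the destination size. */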
gboolean is_signed = (ins->inst_c1 == MONO_TYPE_I1) ||
(ins->inst_c1 == MONO_TYPE_I2) || (ins->inst_c1 == MONO_TYPE_I4);
LLVMTypeRef vec_type;
if ((ins->inst_c1 == MONO_TYPE_I1) || (ins->inst_c1 == MONO_TYPE_U1))
vec_type = sse_i1_t;
else if ((ins->inst_c1 == MONO_TYPE_I2) || (ins->inst_c1 == MONO_TYPE_U2))
vec_type = sse_i2_t;
else
vec_type = sse_i4_t;
LLVMValueRef value;
if (LLVMGetTypeKind (LLVMTypeOf (lhs)) != LLVMVectorTypeKind) {
LLVMValueRef bitcasted = LLVMBuildBitCast (ctx->builder, lhs, LLVMPointerType (vec_type, 0), "");
value = mono_llvm_build_aligned_load (builder, bitcasted, "", FALSE, 1);
} else {
value = LLVMBuildBitCast (ctx->builder, lhs, vec_type, "");
}
LLVMValueRef mask_vec;
LLVMTypeRef dst_type;
if (ins->inst_c0 == MONO_TYPE_I2) {
mask_vec = create_const_vector_i32 (mask_0_incr_1, 8);
dst_type = sse_i2_t;
} else if (ins->inst_c0 == MONO_TYPE_I4) {
mask_vec = create_const_vector_i32 (mask_0_incr_1, 4);
dst_type = sse_i4_t;
} else {
g_assert (ins->inst_c0 == MONO_TYPE_I8);
mask_vec = create_const_vector_i32 (mask_0_incr_1, 2);
dst_type = sse_i8_t;
}
LLVMValueRef shuffled = LLVMBuildShuffleVector (builder, value,
LLVMGetUndef (vec_type), mask_vec, "");
if (is_signed)
values [ins->dreg] = LLVMBuildSExt (ctx->builder, shuffled, dst_type, "");
else
values [ins->dreg] = LLVMBuildZExt (ctx->builder, shuffled, dst_type, "");
break;
}
case OP_SSE41_LOADANT: {
LLVMValueRef dst_ptr = convert (ctx, lhs, LLVMPointerType (primitive_type_to_llvm_type (inst_c1_type (ins)), 0));
LLVMValueRef dst_vec = LLVMBuildBitCast (builder, dst_ptr, LLVMPointerType (type_to_sse_type (ins->inst_c1), 0), "");
LLVMValueRef load = mono_llvm_build_aligned_load (builder, dst_vec, "", FALSE, 16);
set_nontemporal_flag (load);
values [ins->dreg] = load;
break;
}
case OP_SSE41_MUL: {
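		/* PMULDQ: sign-extend the low 32 bits of each 64-bit lane in place
		 * (shift left by 32, then exact arithmetic shift right by 32) and do a
		 * signed 64-bit multiply, which cannot overflow (hence NSW). */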
const int shift_vals [] = { 32, 32 };
const LLVMValueRef args [] = {
convert (ctx, lhs, sse_i8_t),
convert (ctx, rhs, sse_i8_t),
};
LLVMValueRef mul_args [2] = { 0 };
LLVMValueRef shift_vec = create_const_vector (LLVMInt64Type (), shift_vals, 2);
for (int i = 0; i < 2; ++i) {
LLVMValueRef padded = LLVMBuildShl (builder, args [i], shift_vec, "");
mul_args[i] = mono_llvm_build_exact_ashr (builder, padded, shift_vec);
}
values [ins->dreg] = LLVMBuildNSWMul (builder, mul_args [0], mul_args [1], dname);
break;
}
case OP_SSE41_MULLO: {
values [ins->dreg] = LLVMBuildMul (ctx->builder, lhs, rhs, "");
break;
}
case OP_SSE42_CRC32:
case OP_SSE42_CRC64: {
LLVMValueRef args [2];
args [0] = lhs;
args [1] = convert (ctx, rhs, primitive_type_to_llvm_type (ins->inst_c0));
IntrinsicId id;
switch (ins->inst_c0) {
case MONO_TYPE_U1: id = INTRINS_SSE_CRC32_32_8; break;
case MONO_TYPE_U2: id = INTRINS_SSE_CRC32_32_16; break;
case MONO_TYPE_U4: id = INTRINS_SSE_CRC32_32_32; break;
case MONO_TYPE_U8: id = INTRINS_SSE_CRC32_64_64; break;
default: g_assert_not_reached (); break;
}
values [ins->dreg] = call_intrins (ctx, id, args, "");
break;
}
case OP_PCLMULQDQ: {
LLVMValueRef args [] = { lhs, rhs, NULL };
LLVMValueRef ctl = convert (ctx, arg3, i1_t);
// Only bits 0 and 4 of the immediate operand are used by PCLMULQDQ.
ctl = LLVMBuildAnd (builder, ctl, const_int8 (0x11), "pclmulqdq");
ImmediateUnrollCtx ictx = immediate_unroll_begin (ctx, bb, 1 << 2, ctl, v128_i8_t, "pclmulqdq");
int i = 0;
while (immediate_unroll_next (&ictx, &i)) {
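			/* Spread the two bits of the unroll counter onto immediate bits 0
			 * and 4, the only ones the instruction decodes, and key the
			 * unrolled case on that value. */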
int imm = ((i & 0x2) << 3) | (i & 0x1);
args [2] = const_int8 (imm);
LLVMValueRef result = call_intrins (ctx, INTRINS_PCLMULQDQ, args, "pclmulqdq");
immediate_unroll_commit (&ictx, imm, result);
}
immediate_unroll_unreachable_default (&ictx);
values [ins->dreg] = immediate_unroll_end (&ictx, &cbb);
break;
}
case OP_AES_KEYGENASSIST: {
LLVMValueRef roundconstant = convert (ctx, rhs, i1_t);
LLVMValueRef args [] = { convert (ctx, lhs, v128_i8_t), NULL };
ImmediateUnrollCtx ictx = immediate_unroll_begin (ctx, bb, 256, roundconstant, v128_i8_t, "aes_keygenassist");
int i = 0;
while (immediate_unroll_next (&ictx, &i)) {
args [1] = const_int8 (i);
LLVMValueRef result = call_intrins (ctx, INTRINS_AESNI_AESKEYGENASSIST, args, "aes_keygenassist");
immediate_unroll_commit (&ictx, i, result);
}
immediate_unroll_unreachable_default (&ictx);
LLVMValueRef result = immediate_unroll_end (&ictx, &cbb);
values [ins->dreg] = convert (ctx, result, v128_i1_t);
break;
}
#endif
case OP_XCOMPARE_FP: {
LLVMRealPredicate pred = fpcond_to_llvm_cond [ins->inst_c0];
LLVMValueRef cmp = LLVMBuildFCmp (builder, pred, lhs, rhs, "");
int nelems = LLVMGetVectorSize (LLVMTypeOf (cmp));
g_assert (LLVMTypeOf (lhs) == LLVMTypeOf (rhs));
if (ins->inst_c1 == MONO_TYPE_R8)
values [ins->dreg] = LLVMBuildBitCast (builder, LLVMBuildSExt (builder, cmp, LLVMVectorType (LLVMInt64Type (), nelems), ""), LLVMTypeOf (lhs), "");
else
values [ins->dreg] = LLVMBuildBitCast (builder, LLVMBuildSExt (builder, cmp, LLVMVectorType (LLVMInt32Type (), nelems), ""), LLVMTypeOf (lhs), "");
break;
}
case OP_XCOMPARE: {
LLVMIntPredicate pred = cond_to_llvm_cond [ins->inst_c0];
LLVMValueRef cmp = LLVMBuildICmp (builder, pred, lhs, rhs, "");
g_assert (LLVMTypeOf (lhs) == LLVMTypeOf (rhs));
values [ins->dreg] = LLVMBuildSExt (builder, cmp, LLVMTypeOf (lhs), "");
break;
}
case OP_POPCNT32:
values [ins->dreg] = call_intrins (ctx, INTRINS_CTPOP_I32, &lhs, "");
break;
case OP_POPCNT64:
values [ins->dreg] = call_intrins (ctx, INTRINS_CTPOP_I64, &lhs, "");
break;
case OP_CTTZ32:
case OP_CTTZ64: {
LLVMValueRef args [2];
args [0] = lhs;
args [1] = LLVMConstInt (LLVMInt1Type (), 0, FALSE);
values [ins->dreg] = call_intrins (ctx, ins->opcode == OP_CTTZ32 ? INTRINS_CTTZ_I32 : INTRINS_CTTZ_I64, args, "");
break;
}
case OP_BMI1_BEXTR32:
case OP_BMI1_BEXTR64: {
LLVMValueRef args [2];
args [0] = lhs;
args [1] = convert (ctx, rhs, ins->opcode == OP_BMI1_BEXTR32 ? i4_t : i8_t); // cast ushort to u32/u64
values [ins->dreg] = call_intrins (ctx, ins->opcode == OP_BMI1_BEXTR32 ? INTRINS_BEXTR_I32 : INTRINS_BEXTR_I64, args, "");
break;
}
case OP_BZHI32:
case OP_BZHI64: {
LLVMValueRef args [2];
args [0] = lhs;
args [1] = rhs;
values [ins->dreg] = call_intrins (ctx, ins->opcode == OP_BZHI32 ? INTRINS_BZHI_I32 : INTRINS_BZHI_I64, args, "");
break;
}
case OP_MULX_H32:
case OP_MULX_H64:
case OP_MULX_HL32:
case OP_MULX_HL64: {
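		/* MULX: widen both operands to i128 and multiply. The HL variants also
		 * store the low half through the pointer in arg3; the high half is the
		 * result in all cases. */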
gboolean is_64 = ins->opcode == OP_MULX_H64 || ins->opcode == OP_MULX_HL64;
gboolean only_high = ins->opcode == OP_MULX_H32 || ins->opcode == OP_MULX_H64;
LLVMValueRef lx = LLVMBuildZExt (ctx->builder, lhs, LLVMInt128Type (), "");
LLVMValueRef rx = LLVMBuildZExt (ctx->builder, rhs, LLVMInt128Type (), "");
LLVMValueRef mulx = LLVMBuildMul (ctx->builder, lx, rx, "");
if (!only_high) {
LLVMValueRef addr = convert (ctx, arg3, LLVMPointerType (is_64 ? i8_t : i4_t, 0));
LLVMValueRef lowx = LLVMBuildTrunc (ctx->builder, mulx, is_64 ? LLVMInt64Type () : LLVMInt32Type (), "");
LLVMBuildStore (ctx->builder, lowx, addr);
}
LLVMValueRef shift = LLVMConstInt (LLVMInt128Type (), is_64 ? 64 : 32, FALSE);
LLVMValueRef highx = LLVMBuildLShr (ctx->builder, mulx, shift, "");
values [ins->dreg] = LLVMBuildTrunc (ctx->builder, highx, is_64 ? LLVMInt64Type () : LLVMInt32Type (), "");
break;
}
case OP_PEXT32:
case OP_PEXT64: {
LLVMValueRef args [2];
args [0] = lhs;
args [1] = rhs;
values [ins->dreg] = call_intrins (ctx, ins->opcode == OP_PEXT32 ? INTRINS_PEXT_I32 : INTRINS_PEXT_I64, args, "");
break;
}
case OP_PDEP32:
case OP_PDEP64: {
LLVMValueRef args [2];
args [0] = lhs;
args [1] = rhs;
values [ins->dreg] = call_intrins (ctx, ins->opcode == OP_PDEP32 ? INTRINS_PDEP_I32 : INTRINS_PDEP_I64, args, "");
break;
}
#endif /* defined(TARGET_X86) || defined(TARGET_AMD64) */
// Shared between ARM64 and X86
#if defined(TARGET_ARM64) || defined(TARGET_X86) || defined(TARGET_AMD64)
case OP_LZCNT32:
case OP_LZCNT64: {
IntrinsicId iid = ins->opcode == OP_LZCNT32 ? INTRINS_CTLZ_I32 : INTRINS_CTLZ_I64;
LLVMValueRef args [] = { lhs, const_int1 (FALSE) };
values [ins->dreg] = call_intrins (ctx, iid, args, "");
break;
}
#endif
#if defined(TARGET_ARM64) || defined(TARGET_X86) || defined(TARGET_AMD64) || defined(TARGET_WASM)
case OP_XEQUAL: {
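		/* XEQUAL: compare all lanes for equality and reduce the lane mask to a
		 * scalar 0/1 by repeatedly ANDing the high half into the low half. */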
LLVMTypeRef t;
LLVMValueRef cmp, mask [MAX_VECTOR_ELEMS], shuffle;
int nelems;
#if defined(TARGET_WASM)
/* The wasm code generator doesn't understand the shuffle/and code sequence below */
LLVMValueRef val;
if (LLVMIsNull (lhs) || LLVMIsNull (rhs)) {
val = LLVMIsNull (lhs) ? rhs : lhs;
nelems = LLVMGetVectorSize (LLVMTypeOf (lhs));
IntrinsicId intrins = (IntrinsicId)0;
switch (nelems) {
case 16:
intrins = INTRINS_WASM_ANYTRUE_V16;
break;
case 8:
intrins = INTRINS_WASM_ANYTRUE_V8;
break;
case 4:
intrins = INTRINS_WASM_ANYTRUE_V4;
break;
case 2:
intrins = INTRINS_WASM_ANYTRUE_V2;
break;
default:
g_assert_not_reached ();
}
/* res = !wasm.anytrue (val) */
values [ins->dreg] = call_intrins (ctx, intrins, &val, "");
values [ins->dreg] = LLVMBuildZExt (builder, LLVMBuildICmp (builder, LLVMIntEQ, values [ins->dreg], LLVMConstInt (LLVMInt32Type (), 0, FALSE), ""), LLVMInt32Type (), dname);
break;
}
#endif
LLVMTypeRef srcelemt = LLVMGetElementType (LLVMTypeOf (lhs));
		// %c = icmp eq <16 x i8> %a0, %a1 (fcmp oeq for floating point element types)
if (srcelemt == LLVMDoubleType () || srcelemt == LLVMFloatType ())
cmp = LLVMBuildFCmp (builder, LLVMRealOEQ, lhs, rhs, "");
else
cmp = LLVMBuildICmp (builder, LLVMIntEQ, lhs, rhs, "");
nelems = LLVMGetVectorSize (LLVMTypeOf (cmp));
LLVMTypeRef elemt;
if (srcelemt == LLVMDoubleType ())
elemt = LLVMInt64Type ();
else if (srcelemt == LLVMFloatType ())
elemt = LLVMInt32Type ();
else
elemt = srcelemt;
t = LLVMVectorType (elemt, nelems);
cmp = LLVMBuildSExt (builder, cmp, t, "");
// cmp is a <nelems x elemt> vector, each element is either 0xff... or 0
int half = nelems / 2;
while (half >= 1) {
			// AND the top and bottom halves into the bottom half
for (int i = 0; i < half; ++i)
mask [i] = LLVMConstInt (LLVMInt32Type (), half + i, FALSE);
for (int i = half; i < nelems; ++i)
mask [i] = LLVMConstInt (LLVMInt32Type (), 0, FALSE);
shuffle = LLVMBuildShuffleVector (builder, cmp, LLVMGetUndef (t), LLVMConstVector (mask, LLVMGetVectorSize (t)), "");
cmp = LLVMBuildAnd (builder, cmp, shuffle, "");
half = half / 2;
}
// Extract [0]
LLVMValueRef first_elem = LLVMBuildExtractElement (builder, cmp, LLVMConstInt (LLVMInt32Type (), 0, FALSE), "");
// convert to 0/1
LLVMValueRef cmp_zero = LLVMBuildICmp (builder, LLVMIntNE, first_elem, LLVMConstInt (elemt, 0, FALSE), "");
values [ins->dreg] = LLVMBuildZExt (builder, cmp_zero, LLVMInt8Type (), "");
break;
}
#endif
#if defined(TARGET_ARM64)
case OP_XOP_I4_I4:
case OP_XOP_I8_I8: {
IntrinsicId id = (IntrinsicId)ins->inst_c0;
values [ins->dreg] = call_intrins (ctx, id, &lhs, "");
break;
}
case OP_XOP_X_X_X:
case OP_XOP_I4_I4_I4:
case OP_XOP_I4_I4_I8: {
IntrinsicId id = (IntrinsicId)ins->inst_c0;
gboolean zext_last = FALSE, bitcast_result = FALSE, getElement = FALSE;
int element_idx = -1;
switch (id) {
case INTRINS_AARCH64_PMULL64:
getElement = TRUE;
bitcast_result = TRUE;
element_idx = ins->inst_c1;
break;
case INTRINS_AARCH64_CRC32B:
case INTRINS_AARCH64_CRC32H:
case INTRINS_AARCH64_CRC32W:
case INTRINS_AARCH64_CRC32CB:
case INTRINS_AARCH64_CRC32CH:
case INTRINS_AARCH64_CRC32CW:
zext_last = TRUE;
break;
default:
break;
}
LLVMValueRef arg1 = rhs;
if (zext_last)
arg1 = LLVMBuildZExt (ctx->builder, arg1, LLVMInt32Type (), "");
LLVMValueRef args [] = { lhs, arg1 };
if (getElement) {
args [0] = LLVMBuildExtractElement (ctx->builder, args [0], const_int32 (element_idx), "");
args [1] = LLVMBuildExtractElement (ctx->builder, args [1], const_int32 (element_idx), "");
}
values [ins->dreg] = call_intrins (ctx, id, args, "");
if (bitcast_result)
values [ins->dreg] = convert (ctx, values [ins->dreg], LLVMVectorType (LLVMInt64Type (), 2));
break;
}
case OP_XOP_X_X_X_X: {
IntrinsicId id = (IntrinsicId)ins->inst_c0;
gboolean getLowerElement = FALSE;
int arg_idx = -1;
switch (id) {
case INTRINS_AARCH64_SHA1C:
case INTRINS_AARCH64_SHA1M:
case INTRINS_AARCH64_SHA1P:
getLowerElement = TRUE;
arg_idx = 1;
break;
default:
break;
}
LLVMValueRef args [] = { lhs, rhs, arg3 };
if (getLowerElement)
args [arg_idx] = LLVMBuildExtractElement (ctx->builder, args [arg_idx], const_int32 (0), "");
values [ins->dreg] = call_intrins (ctx, id, args, "");
break;
}
case OP_XOP_X_X: {
IntrinsicId id = (IntrinsicId)ins->inst_c0;
LLVMTypeRef ret_t = simd_class_to_llvm_type (ctx, ins->klass);
gboolean getLowerElement = FALSE;
switch (id) {
case INTRINS_AARCH64_SHA1H: getLowerElement = TRUE; break;
default: break;
}
LLVMValueRef arg0 = lhs;
if (getLowerElement)
arg0 = LLVMBuildExtractElement (ctx->builder, arg0, const_int32 (0), "");
LLVMValueRef result = call_intrins (ctx, id, &arg0, "");
if (getLowerElement)
result = vector_from_scalar (ctx, ret_t, result);
values [ins->dreg] = result;
break;
}
case OP_XCOMPARE_FP_SCALAR:
case OP_XCOMPARE_FP: {
g_assert (LLVMTypeOf (lhs) == LLVMTypeOf (rhs));
gboolean scalar = ins->opcode == OP_XCOMPARE_FP_SCALAR;
LLVMRealPredicate pred = fpcond_to_llvm_cond [ins->inst_c0];
LLVMTypeRef ret_t = LLVMTypeOf (lhs);
LLVMTypeRef reti_t = to_integral_vector_type (ret_t);
LLVMValueRef args [] = { lhs, rhs };
if (scalar)
for (int i = 0; i < 2; ++i)
args [i] = scalar_from_vector (ctx, args [i]);
LLVMValueRef result = LLVMBuildFCmp (builder, pred, args [0], args [1], "xcompare_fp");
if (scalar)
result = vector_from_scalar (ctx, LLVMVectorType (LLVMIntType (1), LLVMGetVectorSize (reti_t)), result);
result = LLVMBuildSExt (builder, result, reti_t, "");
result = LLVMBuildBitCast (builder, result, ret_t, "");
values [ins->dreg] = result;
break;
}
case OP_XCOMPARE_SCALAR:
case OP_XCOMPARE: {
g_assert (LLVMTypeOf (lhs) == LLVMTypeOf (rhs));
gboolean scalar = ins->opcode == OP_XCOMPARE_SCALAR;
LLVMIntPredicate pred = cond_to_llvm_cond [ins->inst_c0];
LLVMTypeRef ret_t = LLVMTypeOf (lhs);
LLVMValueRef args [] = { lhs, rhs };
if (scalar)
for (int i = 0; i < 2; ++i)
args [i] = scalar_from_vector (ctx, args [i]);
LLVMValueRef result = LLVMBuildICmp (builder, pred, args [0], args [1], "xcompare");
if (scalar)
result = vector_from_scalar (ctx, LLVMVectorType (LLVMIntType (1), LLVMGetVectorSize (ret_t)), result);
values [ins->dreg] = LLVMBuildSExt (builder, result, ret_t, "");
break;
}
case OP_ARM64_EXT: {
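		/* EXT: take a run of consecutive elements starting at the runtime index
		 * from the concatenation of lhs (low) and rhs (high); out-of-range
		 * indices fall back to lhs unchanged (the default case). */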
LLVMTypeRef ret_t = LLVMTypeOf (lhs);
unsigned int elems = LLVMGetVectorSize (ret_t);
g_assert (elems <= ARM64_MAX_VECTOR_ELEMS);
LLVMValueRef index = arg3;
LLVMValueRef default_value = lhs;
ImmediateUnrollCtx ictx = immediate_unroll_begin (ctx, bb, elems, index, ret_t, "arm64_ext");
int i = 0;
while (immediate_unroll_next (&ictx, &i)) {
LLVMValueRef mask = create_const_vector_i32 (&mask_0_incr_1 [i], elems);
LLVMValueRef result = LLVMBuildShuffleVector (builder, lhs, rhs, mask, "arm64_ext");
immediate_unroll_commit (&ictx, i, result);
}
immediate_unroll_default (&ictx);
immediate_unroll_commit_default (&ictx, default_value);
values [ins->dreg] = immediate_unroll_end (&ictx, &cbb);
break;
}
case OP_ARM64_MVN: {
LLVMTypeRef ret_t = LLVMTypeOf (lhs);
LLVMValueRef result = bitcast_to_integral (ctx, lhs);
result = LLVMBuildNot (builder, result, "arm64_mvn");
result = convert (ctx, result, ret_t);
values [ins->dreg] = result;
break;
}
case OP_ARM64_BIC: {
LLVMTypeRef ret_t = LLVMTypeOf (lhs);
LLVMValueRef result = bitcast_to_integral (ctx, lhs);
LLVMValueRef mask = bitcast_to_integral (ctx, rhs);
mask = LLVMBuildNot (builder, mask, "");
result = LLVMBuildAnd (builder, mask, result, "arm64_bic");
result = convert (ctx, result, ret_t);
values [ins->dreg] = result;
break;
}
case OP_ARM64_BSL: {
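		/* Bitwise select: result = (select & left) | (~select & right),
		 * computed in the integral domain and cast back to the vector type. */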
LLVMTypeRef ret_t = LLVMTypeOf (rhs);
LLVMValueRef select = bitcast_to_integral (ctx, lhs);
LLVMValueRef left = bitcast_to_integral (ctx, rhs);
LLVMValueRef right = bitcast_to_integral (ctx, arg3);
LLVMValueRef result1 = LLVMBuildAnd (builder, select, left, "arm64_bsl");
LLVMValueRef result2 = LLVMBuildAnd (builder, LLVMBuildNot (builder, select, ""), right, "");
LLVMValueRef result = LLVMBuildOr (builder, result1, result2, "");
result = convert (ctx, result, ret_t);
values [ins->dreg] = result;
break;
}
case OP_ARM64_CMTST: {
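		/* CMTST: a lane becomes all-ones if (lhs & rhs) is non-zero in that
		 * lane, zero otherwise. */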
LLVMTypeRef ret_t = simd_class_to_llvm_type (ctx, ins->klass);
LLVMValueRef l = bitcast_to_integral (ctx, lhs);
LLVMValueRef r = bitcast_to_integral (ctx, rhs);
LLVMValueRef result = LLVMBuildAnd (builder, l, r, "arm64_cmtst");
LLVMTypeRef t = LLVMTypeOf (l);
result = LLVMBuildICmp (builder, LLVMIntNE, result, LLVMConstNull (t), "");
result = LLVMBuildSExt (builder, result, t, "");
result = convert (ctx, result, ret_t);
values [ins->dreg] = result;
break;
}
case OP_ARM64_FCVTL:
case OP_ARM64_FCVTL2: {
LLVMTypeRef ret_t = simd_class_to_llvm_type (ctx, ins->klass);
gboolean high = ins->opcode == OP_ARM64_FCVTL2;
LLVMValueRef result = lhs;
if (high)
result = extract_high_elements (ctx, result);
result = LLVMBuildFPExt (builder, result, ret_t, "arm64_fcvtl");
values [ins->dreg] = result;
break;
}
case OP_ARM64_FCVTXN:
case OP_ARM64_FCVTXN2:
case OP_ARM64_FCVTN:
case OP_ARM64_FCVTN2: {
gboolean high = FALSE;
int iid = 0;
switch (ins->opcode) {
case OP_ARM64_FCVTXN2: high = TRUE; case OP_ARM64_FCVTXN: iid = INTRINS_AARCH64_ADV_SIMD_FCVTXN; break;
case OP_ARM64_FCVTN2: high = TRUE; break;
}
LLVMValueRef result = lhs;
if (high)
result = rhs;
if (iid)
result = call_intrins (ctx, iid, &result, "");
else
result = LLVMBuildFPTrunc (builder, result, v64_r4_t, "");
if (high)
result = concatenate_vectors (ctx, lhs, result);
values [ins->dreg] = result;
break;
}
case OP_ARM64_UCVTF:
case OP_ARM64_SCVTF:
case OP_ARM64_UCVTF_SCALAR:
case OP_ARM64_SCVTF_SCALAR: {
LLVMTypeRef ret_t = simd_class_to_llvm_type (ctx, ins->klass);
gboolean scalar = FALSE;
gboolean is_unsigned = FALSE;
switch (ins->opcode) {
case OP_ARM64_UCVTF_SCALAR: scalar = TRUE; case OP_ARM64_UCVTF: is_unsigned = TRUE; break;
case OP_ARM64_SCVTF_SCALAR: scalar = TRUE; break;
}
LLVMValueRef result = lhs;
LLVMTypeRef cvt_t = ret_t;
if (scalar) {
result = scalar_from_vector (ctx, result);
cvt_t = LLVMGetElementType (ret_t);
}
if (is_unsigned)
result = LLVMBuildUIToFP (builder, result, cvt_t, "arm64_ucvtf");
else
result = LLVMBuildSIToFP (builder, result, cvt_t, "arm64_scvtf");
if (scalar)
result = vector_from_scalar (ctx, ret_t, result);
values [ins->dreg] = result;
break;
}
case OP_ARM64_FCVTZS:
case OP_ARM64_FCVTZS_SCALAR:
case OP_ARM64_FCVTZU:
case OP_ARM64_FCVTZU_SCALAR: {
LLVMTypeRef ret_t = simd_class_to_llvm_type (ctx, ins->klass);
gboolean scalar = FALSE;
gboolean is_unsigned = FALSE;
switch (ins->opcode) {
case OP_ARM64_FCVTZU_SCALAR: scalar = TRUE; case OP_ARM64_FCVTZU: is_unsigned = TRUE; break;
case OP_ARM64_FCVTZS_SCALAR: scalar = TRUE; break;
}
LLVMValueRef result = lhs;
LLVMTypeRef cvt_t = ret_t;
if (scalar) {
result = scalar_from_vector (ctx, result);
cvt_t = LLVMGetElementType (ret_t);
}
if (is_unsigned)
result = LLVMBuildFPToUI (builder, result, cvt_t, "arm64_fcvtzu");
else
result = LLVMBuildFPToSI (builder, result, cvt_t, "arm64_fcvtzs");
if (scalar)
result = vector_from_scalar (ctx, ret_t, result);
values [ins->dreg] = result;
break;
}
case OP_ARM64_SELECT_SCALAR: {
LLVMValueRef result = LLVMBuildExtractElement (builder, lhs, rhs, "");
LLVMTypeRef elem_t = LLVMTypeOf (result);
unsigned int elem_bits = mono_llvm_get_prim_size_bits (elem_t);
LLVMTypeRef t = LLVMVectorType (elem_t, 64 / elem_bits);
result = vector_from_scalar (ctx, t, result);
values [ins->dreg] = result;
break;
}
case OP_ARM64_SELECT_QUAD: {
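		/* Reinterpret the source as a vector of i32, extract the element
		 * selected by rhs, broadcast it across the result width and cast back
		 * to the destination vector type. */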
LLVMTypeRef src_type = simd_class_to_llvm_type (ctx, ins->data.op [1].klass);
LLVMTypeRef ret_type = simd_class_to_llvm_type (ctx, ins->klass);
unsigned int src_type_bits = mono_llvm_get_prim_size_bits (src_type);
unsigned int ret_type_bits = mono_llvm_get_prim_size_bits (ret_type);
unsigned int src_intermediate_elems = src_type_bits / 32;
unsigned int ret_intermediate_elems = ret_type_bits / 32;
LLVMTypeRef intermediate_type = LLVMVectorType (i4_t, src_intermediate_elems);
LLVMValueRef result = LLVMBuildBitCast (builder, lhs, intermediate_type, "arm64_select_quad");
result = LLVMBuildExtractElement (builder, result, rhs, "arm64_select_quad");
result = broadcast_element (ctx, result, ret_intermediate_elems);
result = LLVMBuildBitCast (builder, result, ret_type, "arm64_select_quad");
values [ins->dreg] = result;
break;
}
case OP_LSCNT32:
case OP_LSCNT64: {
// %shr = ashr i32 %x, 31
// %xor = xor i32 %shr, %x
// %mul = shl i32 %xor, 1
// %add = or i32 %mul, 1
// %0 = tail call i32 @llvm.ctlz.i32(i32 %add, i1 false)
LLVMValueRef shr = LLVMBuildAShr (builder, lhs, ins->opcode == OP_LSCNT32 ?
LLVMConstInt (LLVMInt32Type (), 31, FALSE) :
LLVMConstInt (LLVMInt64Type (), 63, FALSE), "");
LLVMValueRef one = ins->opcode == OP_LSCNT32 ?
LLVMConstInt (LLVMInt32Type (), 1, FALSE) :
LLVMConstInt (LLVMInt64Type (), 1, FALSE);
LLVMValueRef xor = LLVMBuildXor (builder, shr, lhs, "");
LLVMValueRef mul = LLVMBuildShl (builder, xor, one, "");
LLVMValueRef add = LLVMBuildOr (builder, mul, one, "");
LLVMValueRef args [2];
args [0] = add;
args [1] = LLVMConstInt (LLVMInt1Type (), 0, FALSE);
values [ins->dreg] = LLVMBuildCall (builder, get_intrins (ctx, ins->opcode == OP_LSCNT32 ? INTRINS_CTLZ_I32 : INTRINS_CTLZ_I64), args, 2, "");
break;
}
case OP_ARM64_SQRDMLAH:
case OP_ARM64_SQRDMLAH_BYSCALAR:
case OP_ARM64_SQRDMLAH_SCALAR:
case OP_ARM64_SQRDMLSH:
case OP_ARM64_SQRDMLSH_BYSCALAR:
case OP_ARM64_SQRDMLSH_SCALAR: {
gboolean byscalar = FALSE;
gboolean scalar = FALSE;
gboolean subtract = FALSE;
switch (ins->opcode) {
case OP_ARM64_SQRDMLAH_BYSCALAR: byscalar = TRUE; break;
case OP_ARM64_SQRDMLAH_SCALAR: scalar = TRUE; break;
case OP_ARM64_SQRDMLSH: subtract = TRUE; break;
case OP_ARM64_SQRDMLSH_BYSCALAR: subtract = TRUE; byscalar = TRUE; break;
case OP_ARM64_SQRDMLSH_SCALAR: subtract = TRUE; scalar = TRUE; break;
}
int acc_iid = subtract ? INTRINS_AARCH64_ADV_SIMD_SQSUB : INTRINS_AARCH64_ADV_SIMD_SQADD;
LLVMTypeRef ret_t = simd_class_to_llvm_type (ctx, ins->klass);
llvm_ovr_tag_t ovr_tag = ovr_tag_from_llvm_type (ret_t);
ScalarOpFromVectorOpCtx sctx = scalar_op_from_vector_op (ctx, ret_t, ins);
LLVMValueRef args [] = { lhs, rhs, arg3 };
if (byscalar) {
unsigned int elems = LLVMGetVectorSize (ret_t);
args [2] = broadcast_element (ctx, scalar_from_vector (ctx, args [2]), elems);
}
if (scalar) {
ovr_tag = sctx.ovr_tag;
scalar_op_from_vector_op_process_args (&sctx, args, 3);
}
LLVMValueRef result = call_overloaded_intrins (ctx, INTRINS_AARCH64_ADV_SIMD_SQRDMULH, ovr_tag, &args [1], "arm64_sqrdmlxh");
args [1] = result;
result = call_overloaded_intrins (ctx, acc_iid, ovr_tag, &args [0], "arm64_sqrdmlxh");
if (scalar)
result = scalar_op_from_vector_op_process_result (&sctx, result);
values [ins->dreg] = result;
break;
}
case OP_ARM64_SMULH:
case OP_ARM64_UMULH: {
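		/* High 64 bits of a 64x64 multiply: extend both operands to i128
		 * (sign- or zero-extended to match the opcode), multiply, and keep the
		 * top half. */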
LLVMValueRef op1, op2;
if (ins->opcode == OP_ARM64_SMULH) {
op1 = LLVMBuildSExt (builder, lhs, LLVMInt128Type (), "");
op2 = LLVMBuildSExt (builder, rhs, LLVMInt128Type (), "");
} else {
op1 = LLVMBuildZExt (builder, lhs, LLVMInt128Type (), "");
op2 = LLVMBuildZExt (builder, rhs, LLVMInt128Type (), "");
}
LLVMValueRef mul = LLVMBuildMul (builder, op1, op2, "");
LLVMValueRef hi64 = LLVMBuildLShr (builder, mul,
LLVMConstInt (LLVMInt128Type (), 64, FALSE), "");
values [ins->dreg] = LLVMBuildTrunc (builder, hi64, LLVMInt64Type (), "");
break;
}
case OP_ARM64_XNARROW_SCALAR: {
// Unfortunately, @llvm.aarch64.neon.scalar.sqxtun isn't available for i8 or i16.
LLVMTypeRef ret_t = simd_class_to_llvm_type (ctx, ins->klass);
llvm_ovr_tag_t ovr_tag = ovr_tag_from_llvm_type (ret_t);
LLVMTypeRef elem_t = LLVMGetElementType (ret_t);
LLVMValueRef result = NULL;
int iid = ins->inst_c0;
int scalar_iid = 0;
switch (iid) {
case INTRINS_AARCH64_ADV_SIMD_SQXTUN: scalar_iid = INTRINS_AARCH64_ADV_SIMD_SCALAR_SQXTUN; break;
case INTRINS_AARCH64_ADV_SIMD_SQXTN: scalar_iid = INTRINS_AARCH64_ADV_SIMD_SCALAR_SQXTN; break;
case INTRINS_AARCH64_ADV_SIMD_UQXTN: scalar_iid = INTRINS_AARCH64_ADV_SIMD_SCALAR_UQXTN; break;
default: g_assert_not_reached ();
}
if (elem_t == i4_t) {
LLVMValueRef arg = scalar_from_vector (ctx, lhs);
result = call_intrins (ctx, scalar_iid, &arg, "arm64_xnarrow_scalar");
result = vector_from_scalar (ctx, ret_t, result);
} else {
LLVMTypeRef arg_t = LLVMTypeOf (lhs);
LLVMTypeRef argelem_t = LLVMGetElementType (arg_t);
unsigned int argelems = LLVMGetVectorSize (arg_t);
LLVMValueRef arg = keep_lowest_element (ctx, LLVMVectorType (argelem_t, argelems * 2), lhs);
result = call_overloaded_intrins (ctx, iid, ovr_tag, &arg, "arm64_xnarrow_scalar");
result = keep_lowest_element (ctx, LLVMTypeOf (result), result);
}
values [ins->dreg] = result;
break;
}
case OP_ARM64_SQXTUN2:
case OP_ARM64_UQXTN2:
case OP_ARM64_SQXTN2:
case OP_ARM64_XTN:
case OP_ARM64_XTN2: {
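		/* Narrowing moves: truncate (xtn) or saturate (sqxtn/uqxtn/sqxtun) each
		 * element to half width. The *2 variants narrow rhs and append the
		 * result as the high half on top of lhs. */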
llvm_ovr_tag_t ovr_tag = ovr_tag_from_mono_vector_class (ins->klass);
gboolean high = FALSE;
int iid = 0;
switch (ins->opcode) {
case OP_ARM64_SQXTUN2: high = TRUE; iid = INTRINS_AARCH64_ADV_SIMD_SQXTUN; break;
case OP_ARM64_UQXTN2: high = TRUE; iid = INTRINS_AARCH64_ADV_SIMD_UQXTN; break;
case OP_ARM64_SQXTN2: high = TRUE; iid = INTRINS_AARCH64_ADV_SIMD_SQXTN; break;
case OP_ARM64_XTN2: high = TRUE; break;
}
LLVMValueRef result = lhs;
if (high) {
result = rhs;
ovr_tag = ovr_tag_smaller_vector (ovr_tag);
}
LLVMTypeRef t = LLVMTypeOf (result);
LLVMTypeRef elem_t = LLVMGetElementType (t);
unsigned int elems = LLVMGetVectorSize (t);
unsigned int elem_bits = mono_llvm_get_prim_size_bits (elem_t);
LLVMTypeRef result_t = LLVMVectorType (LLVMIntType (elem_bits / 2), elems);
if (iid != 0)
result = call_overloaded_intrins (ctx, iid, ovr_tag, &result, "");
else
result = LLVMBuildTrunc (builder, result, result_t, "arm64_xtn");
if (high)
result = concatenate_vectors (ctx, lhs, result);
values [ins->dreg] = result;
break;
}
case OP_ARM64_CLZ: {
llvm_ovr_tag_t ovr_tag = ovr_tag_from_mono_vector_class (ins->klass);
LLVMValueRef args [] = { lhs, const_int1 (0) };
LLVMValueRef result = call_overloaded_intrins (ctx, INTRINS_AARCH64_ADV_SIMD_CLZ, ovr_tag, args, "");
values [ins->dreg] = result;
break;
}
case OP_ARM64_FMSUB:
case OP_ARM64_FMSUB_BYSCALAR:
case OP_ARM64_FMSUB_SCALAR:
case OP_ARM64_FNMSUB_SCALAR:
case OP_ARM64_FMADD:
case OP_ARM64_FMADD_BYSCALAR:
case OP_ARM64_FMADD_SCALAR:
case OP_ARM64_FNMADD_SCALAR: {
llvm_ovr_tag_t ovr_tag = ovr_tag_from_mono_vector_class (ins->klass);
gboolean scalar = FALSE;
gboolean negate = FALSE;
gboolean subtract = FALSE;
gboolean byscalar = FALSE;
switch (ins->opcode) {
case OP_ARM64_FMSUB: subtract = TRUE; break;
case OP_ARM64_FMSUB_BYSCALAR: subtract = TRUE; byscalar = TRUE; break;
case OP_ARM64_FMSUB_SCALAR: subtract = TRUE; scalar = TRUE; break;
case OP_ARM64_FNMSUB_SCALAR: subtract = TRUE; scalar = TRUE; negate = TRUE; break;
case OP_ARM64_FMADD: break;
case OP_ARM64_FMADD_BYSCALAR: byscalar = TRUE; break;
case OP_ARM64_FMADD_SCALAR: scalar = TRUE; break;
case OP_ARM64_FNMADD_SCALAR: scalar = TRUE; negate = TRUE; break;
}
// llvm.fma argument order: mulop1, mulop2, addend
LLVMValueRef args [] = { rhs, arg3, lhs };
if (byscalar) {
unsigned int elems = LLVMGetVectorSize (LLVMTypeOf (args [0]));
args [1] = broadcast_element (ctx, scalar_from_vector (ctx, args [1]), elems);
}
if (scalar) {
ovr_tag = ovr_tag_force_scalar (ovr_tag);
for (int i = 0; i < 3; ++i)
args [i] = scalar_from_vector (ctx, args [i]);
}
if (subtract)
args [0] = LLVMBuildFNeg (builder, args [0], "arm64_fma_sub");
if (negate) {
args [0] = LLVMBuildFNeg (builder, args [0], "arm64_fma_negate");
args [2] = LLVMBuildFNeg (builder, args [2], "arm64_fma_negate");
}
LLVMValueRef result = call_overloaded_intrins (ctx, INTRINS_AARCH64_ADV_SIMD_FMA, ovr_tag, args, "arm64_fma");
if (scalar)
result = vector_from_scalar (ctx, LLVMTypeOf (lhs), result);
values [ins->dreg] = result;
break;
}
case OP_ARM64_SQDMULL:
case OP_ARM64_SQDMULL_BYSCALAR:
case OP_ARM64_SQDMULL2:
case OP_ARM64_SQDMULL2_BYSCALAR:
case OP_ARM64_SQDMLAL:
case OP_ARM64_SQDMLAL_BYSCALAR:
case OP_ARM64_SQDMLAL2:
case OP_ARM64_SQDMLAL2_BYSCALAR:
case OP_ARM64_SQDMLSL:
case OP_ARM64_SQDMLSL_BYSCALAR:
case OP_ARM64_SQDMLSL2:
case OP_ARM64_SQDMLSL2_BYSCALAR: {
llvm_ovr_tag_t ovr_tag = ovr_tag_from_mono_vector_class (ins->klass);
gboolean scalar = FALSE;
gboolean add = FALSE;
gboolean subtract = FALSE;
gboolean high = FALSE;
switch (ins->opcode) {
case OP_ARM64_SQDMULL_BYSCALAR: scalar = TRUE; case OP_ARM64_SQDMULL: break;
case OP_ARM64_SQDMULL2_BYSCALAR: scalar = TRUE; case OP_ARM64_SQDMULL2: high = TRUE; break;
case OP_ARM64_SQDMLAL_BYSCALAR: scalar = TRUE; case OP_ARM64_SQDMLAL: add = TRUE; break;
case OP_ARM64_SQDMLAL2_BYSCALAR: scalar = TRUE; case OP_ARM64_SQDMLAL2: high = TRUE; add = TRUE; break;
case OP_ARM64_SQDMLSL_BYSCALAR: scalar = TRUE; case OP_ARM64_SQDMLSL: subtract = TRUE; break;
case OP_ARM64_SQDMLSL2_BYSCALAR: scalar = TRUE; case OP_ARM64_SQDMLSL2: high = TRUE; subtract = TRUE; break;
}
int iid = 0;
if (add)
iid = INTRINS_AARCH64_ADV_SIMD_SQADD;
else if (subtract)
iid = INTRINS_AARCH64_ADV_SIMD_SQSUB;
LLVMValueRef mul1 = lhs;
LLVMValueRef mul2 = rhs;
if (iid != 0) {
mul1 = rhs;
mul2 = arg3;
}
if (scalar) {
LLVMTypeRef t = LLVMTypeOf (mul1);
unsigned int elems = LLVMGetVectorSize (t);
mul2 = broadcast_element (ctx, scalar_from_vector (ctx, mul2), elems);
}
LLVMValueRef args [] = { mul1, mul2 };
if (high)
for (int i = 0; i < 2; ++i)
args [i] = extract_high_elements (ctx, args [i]);
LLVMValueRef result = call_overloaded_intrins (ctx, INTRINS_AARCH64_ADV_SIMD_SQDMULL, ovr_tag, args, "");
LLVMValueRef args2 [] = { lhs, result };
if (iid != 0)
result = call_overloaded_intrins (ctx, iid, ovr_tag, args2, "");
values [ins->dreg] = result;
break;
}
case OP_ARM64_SQDMULL_SCALAR:
case OP_ARM64_SQDMLAL_SCALAR:
case OP_ARM64_SQDMLSL_SCALAR: {
/*
* define dso_local i32 @__vqdmlslh_lane_s16(i32, i16, <4 x i16>, i32) local_unnamed_addr #0 {
* %5 = insertelement <4 x i16> undef, i16 %1, i64 0
* %6 = shufflevector <4 x i16> %2, <4 x i16> undef, <4 x i32> <i32 3, i32 undef, i32 undef, i32 undef>
* %7 = tail call <4 x i32> @llvm.aarch64.neon.sqdmull.v4i32(<4 x i16> %5, <4 x i16> %6)
* %8 = extractelement <4 x i32> %7, i64 0
* %9 = tail call i32 @llvm.aarch64.neon.sqsub.i32(i32 %0, i32 %8)
* ret i32 %9
* }
*
* define dso_local i64 @__vqdmlals_s32(i64, i32, i32) local_unnamed_addr #0 {
* %4 = tail call i64 @llvm.aarch64.neon.sqdmulls.scalar(i32 %1, i32 %2) #2
* %5 = tail call i64 @llvm.aarch64.neon.sqadd.i64(i64 %0, i64 %4) #2
* ret i64 %5
* }
*/
int mulid = INTRINS_AARCH64_ADV_SIMD_SQDMULL;
int iid = 0;
gboolean scalar_mul_result = FALSE;
gboolean scalar_acc_result = FALSE;
switch (ins->opcode) {
case OP_ARM64_SQDMLAL_SCALAR: iid = INTRINS_AARCH64_ADV_SIMD_SQADD; break;
case OP_ARM64_SQDMLSL_SCALAR: iid = INTRINS_AARCH64_ADV_SIMD_SQSUB; break;
}
LLVMTypeRef ret_t = simd_class_to_llvm_type (ctx, ins->klass);
LLVMValueRef mularg = lhs;
LLVMValueRef selected_scalar = rhs;
if (iid != 0) {
mularg = rhs;
selected_scalar = arg3;
}
llvm_ovr_tag_t multag = ovr_tag_smaller_elements (ovr_tag_from_llvm_type (ret_t));
llvm_ovr_tag_t iidtag = ovr_tag_force_scalar (ovr_tag_from_llvm_type (ret_t));
LLVMTypeRef mularg_t = ovr_tag_to_llvm_type (multag);
if (multag & INTRIN_int32) {
/* The (i32, i32) -> i64 variant of aarch64_neon_sqdmull has
* a unique, non-overloaded name.
*/
mulid = INTRINS_AARCH64_ADV_SIMD_SQDMULL_SCALAR;
multag = 0;
iidtag = INTRIN_int64 | INTRIN_scalar;
scalar_mul_result = TRUE;
scalar_acc_result = TRUE;
} else if (multag & INTRIN_int16) {
/* We were passed a (<4 x i16>, <4 x i16>) but the
* widening multiplication intrinsic will yield a <4 x i32>.
*/
multag = INTRIN_int32 | INTRIN_vector128;
} else
g_assert_not_reached ();
if (scalar_mul_result) {
mularg = scalar_from_vector (ctx, mularg);
selected_scalar = scalar_from_vector (ctx, selected_scalar);
} else {
mularg = keep_lowest_element (ctx, mularg_t, mularg);
selected_scalar = keep_lowest_element (ctx, mularg_t, selected_scalar);
}
LLVMValueRef mulargs [] = { mularg, selected_scalar };
LLVMValueRef result = call_overloaded_intrins (ctx, mulid, multag, mulargs, "arm64_sqdmull_scalar");
if (iid != 0) {
LLVMValueRef acc = scalar_from_vector (ctx, lhs);
if (!scalar_mul_result)
result = scalar_from_vector (ctx, result);
LLVMValueRef subargs [] = { acc, result };
result = call_overloaded_intrins (ctx, iid, iidtag, subargs, "arm64_sqdmlxl_scalar");
scalar_acc_result = TRUE;
}
if (scalar_acc_result)
result = vector_from_scalar (ctx, ret_t, result);
else
result = keep_lowest_element (ctx, ret_t, result);
values [ins->dreg] = result;
break;
}
case OP_ARM64_FMUL_SEL: {
LLVMValueRef mul2 = LLVMBuildExtractElement (builder, rhs, arg3, "");
LLVMValueRef mul1 = scalar_from_vector (ctx, lhs);
LLVMValueRef result = LLVMBuildFMul (builder, mul1, mul2, "arm64_fmul_sel");
result = vector_from_scalar (ctx, LLVMTypeOf (lhs), result);
values [ins->dreg] = result;
break;
}
case OP_ARM64_MLA:
case OP_ARM64_MLA_SCALAR:
case OP_ARM64_MLS:
case OP_ARM64_MLS_SCALAR: {
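		/* Multiply-accumulate: dreg = lhs +/- (rhs * arg3); the _SCALAR forms
		 * broadcast lane 0 of arg3 first. */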
gboolean scalar = FALSE;
gboolean add = FALSE;
switch (ins->opcode) {
case OP_ARM64_MLA_SCALAR: scalar = TRUE; case OP_ARM64_MLA: add = TRUE; break;
case OP_ARM64_MLS_SCALAR: scalar = TRUE; case OP_ARM64_MLS: break;
}
LLVMTypeRef mul_t = LLVMTypeOf (rhs);
unsigned int elems = LLVMGetVectorSize (mul_t);
LLVMValueRef mul2 = arg3;
if (scalar)
mul2 = broadcast_element (ctx, scalar_from_vector (ctx, mul2), elems);
LLVMValueRef result = LLVMBuildMul (builder, rhs, mul2, "");
if (add)
result = LLVMBuildAdd (builder, lhs, result, "");
else
result = LLVMBuildSub (builder, lhs, result, "");
values [ins->dreg] = result;
break;
}
case OP_ARM64_SMULL:
case OP_ARM64_SMULL_SCALAR:
case OP_ARM64_SMULL2:
case OP_ARM64_SMULL2_SCALAR:
case OP_ARM64_UMULL:
case OP_ARM64_UMULL_SCALAR:
case OP_ARM64_UMULL2:
case OP_ARM64_UMULL2_SCALAR:
case OP_ARM64_SMLAL:
case OP_ARM64_SMLAL_SCALAR:
case OP_ARM64_SMLAL2:
case OP_ARM64_SMLAL2_SCALAR:
case OP_ARM64_UMLAL:
case OP_ARM64_UMLAL_SCALAR:
case OP_ARM64_UMLAL2:
case OP_ARM64_UMLAL2_SCALAR:
case OP_ARM64_SMLSL:
case OP_ARM64_SMLSL_SCALAR:
case OP_ARM64_SMLSL2:
case OP_ARM64_SMLSL2_SCALAR:
case OP_ARM64_UMLSL:
case OP_ARM64_UMLSL_SCALAR:
case OP_ARM64_UMLSL2:
case OP_ARM64_UMLSL2_SCALAR: {
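/* Widening integer multiply ([SU]MULL), optionally accumulating (MLAL)
 * or subtracting (MLSL) into lhs. The "2" forms consume the high halves
 * of their sources; the scalar forms broadcast a single lane. */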
llvm_ovr_tag_t ovr_tag = ovr_tag_from_mono_vector_class (ins->klass);
gboolean is_unsigned = FALSE;
gboolean high = FALSE;
gboolean add = FALSE;
gboolean subtract = FALSE;
gboolean scalar = FALSE;
int opcode = ins->opcode;
switch (opcode) {
case OP_ARM64_SMULL_SCALAR: scalar = TRUE; opcode = OP_ARM64_SMULL; break;
case OP_ARM64_UMULL_SCALAR: scalar = TRUE; opcode = OP_ARM64_UMULL; break;
case OP_ARM64_SMLAL_SCALAR: scalar = TRUE; opcode = OP_ARM64_SMLAL; break;
case OP_ARM64_UMLAL_SCALAR: scalar = TRUE; opcode = OP_ARM64_UMLAL; break;
case OP_ARM64_SMLSL_SCALAR: scalar = TRUE; opcode = OP_ARM64_SMLSL; break;
case OP_ARM64_UMLSL_SCALAR: scalar = TRUE; opcode = OP_ARM64_UMLSL; break;
case OP_ARM64_SMULL2_SCALAR: scalar = TRUE; opcode = OP_ARM64_SMULL2; break;
case OP_ARM64_UMULL2_SCALAR: scalar = TRUE; opcode = OP_ARM64_UMULL2; break;
case OP_ARM64_SMLAL2_SCALAR: scalar = TRUE; opcode = OP_ARM64_SMLAL2; break;
case OP_ARM64_UMLAL2_SCALAR: scalar = TRUE; opcode = OP_ARM64_UMLAL2; break;
case OP_ARM64_SMLSL2_SCALAR: scalar = TRUE; opcode = OP_ARM64_SMLSL2; break;
case OP_ARM64_UMLSL2_SCALAR: scalar = TRUE; opcode = OP_ARM64_UMLSL2; break;
}
switch (opcode) {
case OP_ARM64_SMULL2: high = TRUE; case OP_ARM64_SMULL: break;
case OP_ARM64_UMULL2: high = TRUE; case OP_ARM64_UMULL: is_unsigned = TRUE; break;
case OP_ARM64_SMLAL2: high = TRUE; case OP_ARM64_SMLAL: add = TRUE; break;
case OP_ARM64_UMLAL2: high = TRUE; case OP_ARM64_UMLAL: add = TRUE; is_unsigned = TRUE; break;
case OP_ARM64_SMLSL2: high = TRUE; case OP_ARM64_SMLSL: subtract = TRUE; break;
case OP_ARM64_UMLSL2: high = TRUE; case OP_ARM64_UMLSL: subtract = TRUE; is_unsigned = TRUE; break;
}
int iid = is_unsigned ? INTRINS_AARCH64_ADV_SIMD_UMULL : INTRINS_AARCH64_ADV_SIMD_SMULL;
LLVMValueRef intrin_args [] = { lhs, rhs };
if (add || subtract) {
intrin_args [0] = rhs;
intrin_args [1] = arg3;
}
if (scalar) {
LLVMValueRef sarg = intrin_args [1];
LLVMTypeRef t = LLVMTypeOf (intrin_args [0]);
unsigned int elems = LLVMGetVectorSize (t);
sarg = broadcast_element (ctx, scalar_from_vector (ctx, sarg), elems);
intrin_args [1] = sarg;
}
if (high)
for (int i = 0; i < 2; ++i)
intrin_args [i] = extract_high_elements (ctx, intrin_args [i]);
LLVMValueRef result = call_overloaded_intrins (ctx, iid, ovr_tag, intrin_args, "");
if (add)
result = LLVMBuildAdd (builder, lhs, result, "");
if (subtract)
result = LLVMBuildSub (builder, lhs, result, "");
values [ins->dreg] = result;
break;
}
case OP_ARM64_XNEG:
case OP_ARM64_XNEG_SCALAR: {
gboolean scalar = ins->opcode == OP_ARM64_XNEG_SCALAR;
gboolean is_float = FALSE;
switch (inst_c1_type (ins)) {
case MONO_TYPE_R4: case MONO_TYPE_R8: is_float = TRUE; break;
}
LLVMValueRef result = lhs;
if (scalar)
result = scalar_from_vector (ctx, result);
if (is_float)
result = LLVMBuildFNeg (builder, result, "arm64_xneg");
else
result = LLVMBuildNeg (builder, result, "arm64_xneg");
if (scalar)
result = vector_from_scalar (ctx, LLVMTypeOf (lhs), result);
values [ins->dreg] = result;
break;
}
case OP_ARM64_PMULL:
case OP_ARM64_PMULL2: {
gboolean high = ins->opcode == OP_ARM64_PMULL2;
LLVMValueRef args [] = { lhs, rhs };
if (high)
for (int i = 0; i < 2; ++i)
args [i] = extract_high_elements (ctx, args [i]);
LLVMValueRef result = call_intrins (ctx, INTRINS_AARCH64_ADV_SIMD_PMULL, args, "arm64_pmull");
values [ins->dreg] = result;
break;
}
case OP_ARM64_REVN: {
LLVMTypeRef t = LLVMTypeOf (lhs);
LLVMTypeRef elem_t = LLVMGetElementType (t);
unsigned int group_bits = mono_llvm_get_prim_size_bits (elem_t);
unsigned int vec_bits = mono_llvm_get_prim_size_bits (t);
unsigned int tmp_bits = ins->inst_c0;
unsigned int tmp_elements = vec_bits / tmp_bits;
const int cycle8 [] = { 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8 };
const int cycle4 [] = { 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12 };
const int cycle2 [] = { 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14 };
const int *cycle = NULL;
switch (group_bits / tmp_bits) {
case 2: cycle = cycle2; break;
case 4: cycle = cycle4; break;
case 8: cycle = cycle8; break;
default: g_assert_not_reached ();
}
g_assert (tmp_elements <= ARM64_MAX_VECTOR_ELEMS);
LLVMTypeRef tmp_t = LLVMVectorType (LLVMIntType (tmp_bits), tmp_elements);
LLVMValueRef tmp = LLVMBuildBitCast (builder, lhs, tmp_t, "arm64_revn");
LLVMValueRef result = LLVMBuildShuffleVector (builder, tmp, LLVMGetUndef (tmp_t), create_const_vector_i32 (cycle, tmp_elements), "");
result = LLVMBuildBitCast (builder, result, t, "");
values [ins->dreg] = result;
break;
}
case OP_ARM64_SHL:
case OP_ARM64_SSHR:
case OP_ARM64_SSRA:
case OP_ARM64_USHR:
case OP_ARM64_USRA: {
gboolean right = FALSE;
gboolean add = FALSE;
gboolean arith = FALSE;
switch (ins->opcode) {
case OP_ARM64_USHR: right = TRUE; break;
case OP_ARM64_USRA: right = TRUE; add = TRUE; break;
case OP_ARM64_SSHR: arith = TRUE; break;
case OP_ARM64_SSRA: arith = TRUE; add = TRUE; break;
}
LLVMValueRef shiftarg = lhs;
LLVMValueRef shift = rhs;
if (add) {
shiftarg = rhs;
shift = arg3;
}
shift = create_shift_vector (ctx, shiftarg, shift);
LLVMValueRef result = NULL;
if (right)
result = LLVMBuildLShr (builder, shiftarg, shift, "");
else if (arith)
result = LLVMBuildAShr (builder, shiftarg, shift, "");
else
result = LLVMBuildShl (builder, shiftarg, shift, "");
if (add)
result = LLVMBuildAdd (builder, lhs, result, "arm64_usra");
values [ins->dreg] = result;
break;
}
case OP_ARM64_SHRN:
case OP_ARM64_SHRN2: {
LLVMValueRef shiftarg = lhs;
LLVMValueRef shift = rhs;
gboolean high = ins->opcode == OP_ARM64_SHRN2;
if (high) {
shiftarg = rhs;
shift = arg3;
}
LLVMTypeRef arg_t = LLVMTypeOf (shiftarg);
LLVMTypeRef elem_t = LLVMGetElementType (arg_t);
unsigned int elems = LLVMGetVectorSize (arg_t);
unsigned int bits = mono_llvm_get_prim_size_bits (elem_t);
LLVMTypeRef trunc_t = LLVMVectorType (LLVMIntType (bits / 2), elems);
shift = create_shift_vector (ctx, shiftarg, shift);
LLVMValueRef result = LLVMBuildLShr (builder, shiftarg, shift, "shrn");
result = LLVMBuildTrunc (builder, result, trunc_t, "");
if (high) {
result = concatenate_vectors (ctx, lhs, result);
}
values [ins->dreg] = result;
break;
}
case OP_ARM64_SRSHR:
case OP_ARM64_SRSRA:
case OP_ARM64_URSHR:
case OP_ARM64_URSRA: {
llvm_ovr_tag_t ovr_tag = ovr_tag_from_mono_vector_class (ins->klass);
LLVMValueRef shiftarg = lhs;
LLVMValueRef shift = rhs;
gboolean right = FALSE;
gboolean add = FALSE;
switch (ins->opcode) {
case OP_ARM64_URSRA: add = TRUE; case OP_ARM64_URSHR: right = TRUE; break;
case OP_ARM64_SRSRA: add = TRUE; case OP_ARM64_SRSHR: right = TRUE; break;
}
int iid = 0;
switch (ins->opcode) {
case OP_ARM64_URSRA: case OP_ARM64_URSHR: iid = INTRINS_AARCH64_ADV_SIMD_URSHL; break;
case OP_ARM64_SRSRA: case OP_ARM64_SRSHR: iid = INTRINS_AARCH64_ADV_SIMD_SRSHL; break;
}
if (add) {
shiftarg = rhs;
shift = arg3;
}
if (right)
shift = LLVMBuildNeg (builder, shift, "");
shift = create_shift_vector (ctx, shiftarg, shift);
LLVMValueRef args [] = { shiftarg, shift };
LLVMValueRef result = call_overloaded_intrins (ctx, iid, ovr_tag, args, "");
if (add)
result = LLVMBuildAdd (builder, result, lhs, "");
values [ins->dreg] = result;
break;
}
case OP_ARM64_XNSHIFT_SCALAR:
case OP_ARM64_XNSHIFT:
case OP_ARM64_XNSHIFT2: {
LLVMTypeRef intrin_result_t = simd_class_to_llvm_type (ctx, ins->klass);
llvm_ovr_tag_t ovr_tag = ovr_tag_from_llvm_type (intrin_result_t);
LLVMValueRef shift_arg = lhs;
LLVMValueRef shift_amount = rhs;
gboolean high = FALSE;
gboolean scalar = FALSE;
int iid = ins->inst_c0;
switch (ins->opcode) {
case OP_ARM64_XNSHIFT_SCALAR: scalar = TRUE; break;
case OP_ARM64_XNSHIFT2: high = TRUE; break;
}
if (high) {
shift_arg = rhs;
shift_amount = arg3;
ovr_tag = ovr_tag_smaller_vector (ovr_tag);
intrin_result_t = ovr_tag_to_llvm_type (ovr_tag);
}
LLVMTypeRef shift_arg_t = LLVMTypeOf (shift_arg);
LLVMTypeRef shift_arg_elem_t = LLVMGetElementType (shift_arg_t);
unsigned int element_bits = mono_llvm_get_prim_size_bits (shift_arg_elem_t);
int range_min = 1;
int range_max = element_bits / 2;
if (scalar) {
unsigned int elems = LLVMGetVectorSize (shift_arg_t);
LLVMValueRef lo = scalar_from_vector (ctx, shift_arg);
shift_arg = vector_from_scalar (ctx, LLVMVectorType (shift_arg_elem_t, elems * 2), lo);
}
int max_index = range_max - range_min + 1;
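/* The intrinsic needs an immediate shift amount, but the IR value may
 * not be constant, so unroll over every legal immediate and switch on
 * the runtime value. */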
ImmediateUnrollCtx ictx = immediate_unroll_begin (ctx, bb, max_index, shift_amount, intrin_result_t, "arm64_xnshift");
int i = 0;
while (immediate_unroll_next (&ictx, &i)) {
int shift_const = i + range_min;
LLVMValueRef intrin_args [] = { shift_arg, const_int32 (shift_const) };
LLVMValueRef result = call_overloaded_intrins (ctx, iid, ovr_tag, intrin_args, "");
immediate_unroll_commit (&ictx, shift_const, result);
}
{
immediate_unroll_default (&ictx);
LLVMValueRef intrin_args [] = { shift_arg, const_int32 (range_max) };
LLVMValueRef result = call_overloaded_intrins (ctx, iid, ovr_tag, intrin_args, "");
immediate_unroll_commit_default (&ictx, result);
}
LLVMValueRef result = immediate_unroll_end (&ictx, &cbb);
if (high)
result = concatenate_vectors (ctx, lhs, result);
if (scalar)
result = keep_lowest_element (ctx, LLVMTypeOf (result), result);
values [ins->dreg] = result;
break;
}
case OP_ARM64_SQSHLU:
case OP_ARM64_SQSHLU_SCALAR: {
gboolean scalar = ins->opcode == OP_ARM64_SQSHLU_SCALAR;
LLVMTypeRef intrin_result_t = simd_class_to_llvm_type (ctx, ins->klass);
LLVMTypeRef elem_t = LLVMGetElementType (intrin_result_t);
unsigned int element_bits = mono_llvm_get_prim_size_bits (elem_t);
llvm_ovr_tag_t ovr_tag = ovr_tag_from_llvm_type (intrin_result_t);
int max_index = element_bits;
ScalarOpFromVectorOpCtx sctx = scalar_op_from_vector_op (ctx, intrin_result_t, ins);
intrin_result_t = scalar ? sctx.intermediate_type : intrin_result_t;
ovr_tag = scalar ? sctx.ovr_tag : ovr_tag;
ImmediateUnrollCtx ictx = immediate_unroll_begin (ctx, bb, max_index, rhs, intrin_result_t, "arm64_sqshlu");
int i = 0;
while (immediate_unroll_next (&ictx, &i)) {
int shift_const = i;
LLVMValueRef args [2] = { lhs, create_shift_vector (ctx, lhs, const_int32 (shift_const)) };
if (scalar)
scalar_op_from_vector_op_process_args (&sctx, args, 2);
LLVMValueRef result = call_overloaded_intrins (ctx, INTRINS_AARCH64_ADV_SIMD_SQSHLU, ovr_tag, args, "");
immediate_unroll_commit (&ictx, shift_const, result);
}
{
immediate_unroll_default (&ictx);
LLVMValueRef srcarg = lhs;
if (scalar)
scalar_op_from_vector_op_process_args (&sctx, &srcarg, 1);
immediate_unroll_commit_default (&ictx, srcarg);
}
LLVMValueRef result = immediate_unroll_end (&ictx, &cbb);
if (scalar)
result = scalar_op_from_vector_op_process_result (&sctx, result);
values [ins->dreg] = result;
break;
}
case OP_ARM64_SSHLL:
case OP_ARM64_SSHLL2:
case OP_ARM64_USHLL:
case OP_ARM64_USHLL2: {
LLVMTypeRef ret_t = simd_class_to_llvm_type (ctx, ins->klass);
gboolean high = FALSE;
gboolean is_unsigned = FALSE;
switch (ins->opcode) {
case OP_ARM64_SSHLL2: high = TRUE; break;
case OP_ARM64_USHLL2: high = TRUE; case OP_ARM64_USHLL: is_unsigned = TRUE; break;
}
LLVMValueRef result = lhs;
if (high)
result = extract_high_elements (ctx, result);
if (is_unsigned)
result = LLVMBuildZExt (builder, result, ret_t, "arm64_ushll");
else
result = LLVMBuildSExt (builder, result, ret_t, "arm64_sshll");
result = LLVMBuildShl (builder, result, create_shift_vector (ctx, result, rhs), "");
values [ins->dreg] = result;
break;
}
case OP_ARM64_SLI:
case OP_ARM64_SRI: {
LLVMTypeRef intrin_result_t = simd_class_to_llvm_type (ctx, ins->klass);
llvm_ovr_tag_t ovr_tag = ovr_tag_from_llvm_type (intrin_result_t);
unsigned int element_bits = mono_llvm_get_prim_size_bits (LLVMGetElementType (intrin_result_t));
int range_min = 0;
int range_max = element_bits - 1;
if (ins->opcode == OP_ARM64_SRI) {
++range_min;
++range_max;
}
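/* Valid immediates: SLI takes shifts of 0..bits-1, SRI takes 1..bits. */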
int iid = ins->opcode == OP_ARM64_SRI ? INTRINS_AARCH64_ADV_SIMD_SRI : INTRINS_AARCH64_ADV_SIMD_SLI;
int max_index = range_max - range_min + 1;
ImmediateUnrollCtx ictx = immediate_unroll_begin (ctx, bb, max_index, arg3, intrin_result_t, "arm64_ext");
LLVMValueRef intrin_args [3] = { lhs, rhs, arg3 };
int i = 0;
while (immediate_unroll_next (&ictx, &i)) {
int shift_const = i + range_min;
intrin_args [2] = const_int32 (shift_const);
LLVMValueRef result = call_overloaded_intrins (ctx, iid, ovr_tag, intrin_args, "");
immediate_unroll_commit (&ictx, shift_const, result);
}
immediate_unroll_default (&ictx);
immediate_unroll_commit_default (&ictx, lhs);
LLVMValueRef result = immediate_unroll_end (&ictx, &cbb);
values [ins->dreg] = result;
break;
}
case OP_ARM64_SQRT_SCALAR: {
int iid = ins->inst_c0 == MONO_TYPE_R8 ? INTRINS_SQRT : INTRINS_SQRTF;
LLVMTypeRef t = LLVMTypeOf (lhs);
LLVMValueRef scalar = LLVMBuildExtractElement (builder, lhs, const_int32 (0), "");
LLVMValueRef result = call_intrins (ctx, iid, &scalar, "arm64_sqrt_scalar");
values [ins->dreg] = LLVMBuildInsertElement (builder, LLVMGetUndef (t), result, const_int32 (0), "");
break;
}
case OP_ARM64_STP:
case OP_ARM64_STP_SCALAR:
case OP_ARM64_STNP:
case OP_ARM64_STNP_SCALAR: {
gboolean nontemporal = FALSE;
gboolean scalar = FALSE;
switch (ins->opcode) {
case OP_ARM64_STNP: nontemporal = TRUE; break;
case OP_ARM64_STNP_SCALAR: nontemporal = TRUE; scalar = TRUE; break;
case OP_ARM64_STP_SCALAR: scalar = TRUE; break;
}
LLVMTypeRef rhs_t = LLVMTypeOf (rhs);
LLVMValueRef val = NULL;
LLVMTypeRef dst_t = LLVMPointerType (rhs_t, 0);
if (scalar)
val = LLVMBuildShuffleVector (builder, rhs, arg3, create_const_vector_2_i32 (0, 2), "");
else {
unsigned int rhs_elems = LLVMGetVectorSize (rhs_t);
LLVMTypeRef rhs_elt_t = LLVMGetElementType (rhs_t);
dst_t = LLVMPointerType (LLVMVectorType (rhs_elt_t, rhs_elems * 2), 0);
val = concatenate_vectors (ctx, rhs, arg3);
}
LLVMValueRef address = convert (ctx, lhs, dst_t);
LLVMValueRef store = mono_llvm_build_store (builder, val, address, FALSE, LLVM_BARRIER_NONE);
if (nontemporal)
set_nontemporal_flag (store);
break;
}
case OP_ARM64_LD1_INSERT: {
LLVMTypeRef ret_t = simd_class_to_llvm_type (ctx, ins->klass);
LLVMTypeRef elem_t = LLVMGetElementType (ret_t);
LLVMValueRef address = convert (ctx, arg3, LLVMPointerType (elem_t, 0));
unsigned int alignment = mono_llvm_get_prim_size_bits (ret_t) / 8;
LLVMValueRef result = mono_llvm_build_aligned_load (builder, address, "arm64_ld1_insert", FALSE, alignment);
result = LLVMBuildInsertElement (builder, lhs, result, rhs, "arm64_ld1_insert");
values [ins->dreg] = result;
break;
}
case OP_ARM64_LD1R:
case OP_ARM64_LD1: {
gboolean replicate = ins->opcode == OP_ARM64_LD1R;
LLVMTypeRef ret_t = simd_class_to_llvm_type (ctx, ins->klass);
unsigned int alignment = mono_llvm_get_prim_size_bits (ret_t) / 8;
LLVMValueRef address = lhs;
LLVMTypeRef address_t = LLVMPointerType (ret_t, 0);
if (replicate) {
LLVMTypeRef elem_t = LLVMGetElementType (ret_t);
address_t = LLVMPointerType (elem_t, 0);
}
address = convert (ctx, address, address_t);
LLVMValueRef result = mono_llvm_build_aligned_load (builder, address, "arm64_ld1", FALSE, alignment);
if (replicate) {
unsigned int elems = LLVMGetVectorSize (ret_t);
result = broadcast_element (ctx, result, elems);
}
values [ins->dreg] = result;
break;
}
case OP_ARM64_LDNP:
case OP_ARM64_LDNP_SCALAR:
case OP_ARM64_LDP:
case OP_ARM64_LDP_SCALAR: {
const char *oname = NULL;
gboolean nontemporal = FALSE;
gboolean scalar = FALSE;
switch (ins->opcode) {
case OP_ARM64_LDNP: oname = "arm64_ldnp"; nontemporal = TRUE; break;
case OP_ARM64_LDNP_SCALAR: oname = "arm64_ldnp_scalar"; nontemporal = TRUE; scalar = TRUE; break;
case OP_ARM64_LDP: oname = "arm64_ldp"; break;
case OP_ARM64_LDP_SCALAR: oname = "arm64_ldp_scalar"; scalar = TRUE; break;
}
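/* Load two consecutive values and pack them into a value tuple, which
 * is returned through a stack slot. */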
if (!addresses [ins->dreg])
addresses [ins->dreg] = build_named_alloca (ctx, m_class_get_byval_arg (ins->klass), oname);
LLVMTypeRef ret_t = simd_valuetuple_to_llvm_type (ctx, ins->klass);
LLVMTypeRef vec_t = LLVMGetElementType (ret_t);
LLVMValueRef ix = const_int32 (1);
LLVMTypeRef src_t = LLVMPointerType (scalar ? LLVMGetElementType (vec_t) : vec_t, 0);
LLVMValueRef src0 = convert (ctx, lhs, src_t);
LLVMValueRef src1 = LLVMBuildGEP (builder, src0, &ix, 1, oname);
LLVMValueRef vals [] = { src0, src1 };
for (int i = 0; i < 2; ++i) {
vals [i] = LLVMBuildLoad (builder, vals [i], oname);
if (nontemporal)
set_nontemporal_flag (vals [i]);
}
unsigned int vec_sz = mono_llvm_get_prim_size_bits (vec_t);
if (scalar) {
g_assert (vec_sz == 64);
LLVMValueRef undef = LLVMGetUndef (vec_t);
for (int i = 0; i < 2; ++i)
vals [i] = LLVMBuildInsertElement (builder, undef, vals [i], const_int32 (0), oname);
}
LLVMValueRef val = LLVMGetUndef (ret_t);
for (int i = 0; i < 2; ++i)
val = LLVMBuildInsertValue (builder, val, vals [i], i, oname);
LLVMTypeRef retptr_t = LLVMPointerType (ret_t, 0);
LLVMValueRef dst = convert (ctx, addresses [ins->dreg], retptr_t);
LLVMBuildStore (builder, val, dst);
values [ins->dreg] = vec_sz == 64 ? val : NULL;
break;
}
case OP_ARM64_ST1: {
LLVMTypeRef t = LLVMTypeOf (rhs);
LLVMValueRef address = convert (ctx, lhs, LLVMPointerType (t, 0));
unsigned int alignment = mono_llvm_get_prim_size_bits (t) / 8;
mono_llvm_build_aligned_store (builder, rhs, address, FALSE, alignment);
break;
}
case OP_ARM64_ST1_SCALAR: {
LLVMTypeRef t = LLVMGetElementType (LLVMTypeOf (rhs));
LLVMValueRef val = LLVMBuildExtractElement (builder, rhs, arg3, "arm64_st1_scalar");
LLVMValueRef address = convert (ctx, lhs, LLVMPointerType (t, 0));
unsigned int alignment = mono_llvm_get_prim_size_bits (t) / 8;
mono_llvm_build_aligned_store (builder, val, address, FALSE, alignment);
break;
}
case OP_ARM64_ADDHN:
case OP_ARM64_ADDHN2:
case OP_ARM64_SUBHN:
case OP_ARM64_SUBHN2:
case OP_ARM64_RADDHN:
case OP_ARM64_RADDHN2:
case OP_ARM64_RSUBHN:
case OP_ARM64_RSUBHN2: {
LLVMValueRef args [2] = { lhs, rhs };
gboolean high = FALSE;
gboolean subtract = FALSE;
int iid = 0;
switch (ins->opcode) {
case OP_ARM64_ADDHN2: high = TRUE; case OP_ARM64_ADDHN: break;
case OP_ARM64_SUBHN2: high = TRUE; case OP_ARM64_SUBHN: subtract = TRUE; break;
case OP_ARM64_RSUBHN2: high = TRUE; case OP_ARM64_RSUBHN: iid = INTRINS_AARCH64_ADV_SIMD_RSUBHN; break;
case OP_ARM64_RADDHN2: high = TRUE; case OP_ARM64_RADDHN: iid = INTRINS_AARCH64_ADV_SIMD_RADDHN; break;
}
llvm_ovr_tag_t ovr_tag = ovr_tag_from_mono_vector_class (ins->klass);
if (high) {
args [0] = rhs;
args [1] = arg3;
ovr_tag = ovr_tag_smaller_vector (ovr_tag);
}
LLVMValueRef result = NULL;
if (iid != 0)
result = call_overloaded_intrins (ctx, iid, ovr_tag, args, "");
else {
LLVMTypeRef t = LLVMTypeOf (args [0]);
LLVMTypeRef elt_t = LLVMGetElementType (t);
unsigned int elems = LLVMGetVectorSize (t);
unsigned int elem_bits = mono_llvm_get_prim_size_bits (elt_t);
if (subtract)
result = LLVMBuildSub (builder, args [0], args [1], "");
else
result = LLVMBuildAdd (builder, args [0], args [1], "");
result = LLVMBuildLShr (builder, result, broadcast_constant (elem_bits / 2, elt_t, elems), "");
result = LLVMBuildTrunc (builder, result, LLVMVectorType (LLVMIntType (elem_bits / 2), elems), "");
}
if (high)
result = concatenate_vectors (ctx, lhs, result);
values [ins->dreg] = result;
break;
}
case OP_ARM64_SADD:
case OP_ARM64_UADD:
case OP_ARM64_SADD2:
case OP_ARM64_UADD2:
case OP_ARM64_SSUB:
case OP_ARM64_USUB:
case OP_ARM64_SSUB2:
case OP_ARM64_USUB2: {
LLVMTypeRef ret_t = simd_class_to_llvm_type (ctx, ins->klass);
gboolean is_unsigned = FALSE;
gboolean high = FALSE;
gboolean subtract = FALSE;
switch (ins->opcode) {
case OP_ARM64_SADD2: high = TRUE; case OP_ARM64_SADD: break;
case OP_ARM64_UADD2: high = TRUE; case OP_ARM64_UADD: is_unsigned = TRUE; break;
case OP_ARM64_SSUB2: high = TRUE; case OP_ARM64_SSUB: subtract = TRUE; break;
case OP_ARM64_USUB2: high = TRUE; case OP_ARM64_USUB: subtract = TRUE; is_unsigned = TRUE; break;
}
LLVMValueRef args [] = { lhs, rhs };
for (int i = 0; i < 2; ++i) {
LLVMValueRef arg = args [i];
LLVMTypeRef arg_t = LLVMTypeOf (arg);
if (high && arg_t != ret_t)
arg = extract_high_elements (ctx, arg);
if (is_unsigned)
arg = LLVMBuildZExt (builder, arg, ret_t, "");
else
arg = LLVMBuildSExt (builder, arg, ret_t, "");
args [i] = arg;
}
LLVMValueRef result = NULL;
if (subtract)
result = LLVMBuildSub (builder, args [0], args [1], "arm64_sub");
else
result = LLVMBuildAdd (builder, args [0], args [1], "arm64_add");
values [ins->dreg] = result;
break;
}
case OP_ARM64_SABAL:
case OP_ARM64_SABAL2:
case OP_ARM64_UABAL:
case OP_ARM64_UABAL2:
case OP_ARM64_SABDL:
case OP_ARM64_SABDL2:
case OP_ARM64_UABDL:
case OP_ARM64_UABDL2:
case OP_ARM64_SABA:
case OP_ARM64_UABA:
case OP_ARM64_SABD:
case OP_ARM64_UABD: {
LLVMTypeRef ret_t = simd_class_to_llvm_type (ctx, ins->klass);
gboolean is_unsigned = FALSE;
gboolean high = FALSE;
gboolean add = FALSE;
gboolean widen = FALSE;
switch (ins->opcode) {
case OP_ARM64_SABAL2: high = TRUE; case OP_ARM64_SABAL: widen = TRUE; add = TRUE; break;
case OP_ARM64_UABAL2: high = TRUE; case OP_ARM64_UABAL: widen = TRUE; add = TRUE; is_unsigned = TRUE; break;
case OP_ARM64_SABDL2: high = TRUE; case OP_ARM64_SABDL: widen = TRUE; break;
case OP_ARM64_UABDL2: high = TRUE; case OP_ARM64_UABDL: widen = TRUE; is_unsigned = TRUE; break;
case OP_ARM64_SABA: add = TRUE; break;
case OP_ARM64_UABA: add = TRUE; is_unsigned = TRUE; break;
case OP_ARM64_UABD: is_unsigned = TRUE; break;
}
LLVMValueRef args [] = { lhs, rhs };
if (add) {
args [0] = rhs;
args [1] = arg3;
}
if (high)
for (int i = 0; i < 2; ++i)
args [i] = extract_high_elements (ctx, args [i]);
int iid = is_unsigned ? INTRINS_AARCH64_ADV_SIMD_UABD : INTRINS_AARCH64_ADV_SIMD_SABD;
llvm_ovr_tag_t ovr_tag = ovr_tag_from_llvm_type (LLVMTypeOf (args [0]));
LLVMValueRef result = call_overloaded_intrins (ctx, iid, ovr_tag, args, "");
if (widen)
result = LLVMBuildZExt (builder, result, ret_t, "");
if (add)
result = LLVMBuildAdd (builder, result, lhs, "");
values [ins->dreg] = result;
break;
}
case OP_ARM64_XHORIZ: {
gboolean truncate = FALSE;
LLVMTypeRef arg_t = LLVMTypeOf (lhs);
LLVMTypeRef elem_t = LLVMGetElementType (arg_t);
LLVMTypeRef ret_t = simd_class_to_llvm_type (ctx, ins->klass);
llvm_ovr_tag_t ovr_tag = ovr_tag_from_llvm_type (arg_t);
if (elem_t == i1_t || elem_t == i2_t)
truncate = TRUE;
LLVMValueRef result = call_overloaded_intrins (ctx, ins->inst_c0, ovr_tag, &lhs, "");
if (truncate) {
// @llvm.aarch64.neon.saddv.i32.v8i16 ought to return an i16, but doesn't in LLVM 9.
result = LLVMBuildTrunc (builder, result, elem_t, "");
}
result = vector_from_scalar (ctx, ret_t, result);
values [ins->dreg] = result;
break;
}
case OP_ARM64_SADDLV:
case OP_ARM64_UADDLV: {
LLVMTypeRef arg_t = LLVMTypeOf (lhs);
LLVMTypeRef elem_t = LLVMGetElementType (arg_t);
LLVMTypeRef ret_t = simd_class_to_llvm_type (ctx, ins->klass);
llvm_ovr_tag_t ovr_tag = ovr_tag_from_llvm_type (arg_t);
gboolean truncate = elem_t == i1_t;
int iid = ins->opcode == OP_ARM64_UADDLV ? INTRINS_AARCH64_ADV_SIMD_UADDLV : INTRINS_AARCH64_ADV_SIMD_SADDLV;
LLVMValueRef result = call_overloaded_intrins (ctx, iid, ovr_tag, &lhs, "");
if (truncate) {
// @llvm.aarch64.neon.saddlv.i32.v16i8 ought to return an i16, but doesn't in LLVM 9.
result = LLVMBuildTrunc (builder, result, i2_t, "");
}
result = vector_from_scalar (ctx, ret_t, result);
values [ins->dreg] = result;
break;
}
case OP_ARM64_UADALP:
case OP_ARM64_SADALP: {
llvm_ovr_tag_t ovr_tag = ovr_tag_from_mono_vector_class (ins->klass);
int iid = ins->opcode == OP_ARM64_UADALP ? INTRINS_AARCH64_ADV_SIMD_UADDLP : INTRINS_AARCH64_ADV_SIMD_SADDLP;
LLVMValueRef result = call_overloaded_intrins (ctx, iid, ovr_tag, &rhs, "");
result = LLVMBuildAdd (builder, result, lhs, "");
values [ins->dreg] = result;
break;
}
case OP_ARM64_ADDP_SCALAR: {
llvm_ovr_tag_t ovr_tag = INTRIN_vector128 | INTRIN_int64;
LLVMValueRef result = call_overloaded_intrins (ctx, INTRINS_AARCH64_ADV_SIMD_UADDV, ovr_tag, &lhs, "arm64_addp_scalar");
result = LLVMBuildInsertElement (builder, LLVMConstNull (v64_i8_t), result, const_int32 (0), "");
values [ins->dreg] = result;
break;
}
case OP_ARM64_FADDP_SCALAR: {
LLVMTypeRef ret_t = simd_class_to_llvm_type (ctx, ins->klass);
LLVMValueRef hi = LLVMBuildExtractElement (builder, lhs, const_int32 (0), "");
LLVMValueRef lo = LLVMBuildExtractElement (builder, lhs, const_int32 (1), "");
LLVMValueRef result = LLVMBuildFAdd (builder, hi, lo, "arm64_faddp_scalar");
result = LLVMBuildInsertElement (builder, LLVMConstNull (ret_t), result, const_int32 (0), "");
values [ins->dreg] = result;
break;
}
case OP_ARM64_SXTL:
case OP_ARM64_SXTL2:
case OP_ARM64_UXTL:
case OP_ARM64_UXTL2: {
gboolean high = FALSE;
gboolean is_unsigned = FALSE;
switch (ins->opcode) {
case OP_ARM64_SXTL2: high = TRUE; break;
case OP_ARM64_UXTL2: high = TRUE; case OP_ARM64_UXTL: is_unsigned = TRUE; break;
}
LLVMTypeRef t = LLVMTypeOf (lhs);
unsigned int elem_bits = LLVMGetIntTypeWidth (LLVMGetElementType (t));
unsigned int src_elems = LLVMGetVectorSize (t);
unsigned int dst_elems = src_elems;
LLVMValueRef arg = lhs;
if (high) {
arg = extract_high_elements (ctx, lhs);
dst_elems = LLVMGetVectorSize (LLVMTypeOf (arg));
}
LLVMTypeRef result_t = LLVMVectorType (LLVMIntType (elem_bits * 2), dst_elems);
LLVMValueRef result = NULL;
if (is_unsigned)
result = LLVMBuildZExt (builder, arg, result_t, "arm64_uxtl");
else
result = LLVMBuildSExt (builder, arg, result_t, "arm64_sxtl");
values [ins->dreg] = result;
break;
}
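/* TRN/UZP/ZIP are pure lane permutations, lowered to shufflevector with
 * precomputed masks. */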
case OP_ARM64_TRN1:
case OP_ARM64_TRN2: {
gboolean high = ins->opcode == OP_ARM64_TRN2;
LLVMTypeRef t = LLVMTypeOf (lhs);
unsigned int src_elems = LLVMGetVectorSize (t);
int mask [MAX_VECTOR_ELEMS] = { 0 };
int laneix = high ? 1 : 0;
for (unsigned int i = 0; i < src_elems; i += 2) {
mask [i] = laneix;
mask [i + 1] = laneix + src_elems;
laneix += 2;
}
values [ins->dreg] = LLVMBuildShuffleVector (builder, lhs, rhs, create_const_vector_i32 (mask, src_elems), "arm64_trn");
break;
}
case OP_ARM64_UZP1:
case OP_ARM64_UZP2: {
gboolean high = ins->opcode == OP_ARM64_UZP2;
LLVMTypeRef t = LLVMTypeOf (lhs);
unsigned int src_elems = LLVMGetVectorSize (t);
int mask [MAX_VECTOR_ELEMS] = { 0 };
int laneix = high ? 1 : 0;
for (unsigned int i = 0; i < src_elems; ++i) {
mask [i] = laneix;
laneix += 2;
}
values [ins->dreg] = LLVMBuildShuffleVector (builder, lhs, rhs, create_const_vector_i32 (mask, src_elems), "arm64_uzp");
break;
}
case OP_ARM64_ZIP1:
case OP_ARM64_ZIP2: {
gboolean high = ins->opcode == OP_ARM64_ZIP2;
LLVMTypeRef t = LLVMTypeOf (lhs);
unsigned int src_elems = LLVMGetVectorSize (t);
int mask [MAX_VECTOR_ELEMS] = { 0 };
int laneix = high ? src_elems / 2 : 0;
for (unsigned int i = 0; i < src_elems; i += 2) {
mask [i] = laneix;
mask [i + 1] = laneix + src_elems;
++laneix;
}
values [ins->dreg] = LLVMBuildShuffleVector (builder, lhs, rhs, create_const_vector_i32 (mask, src_elems), "arm64_zip");
break;
}
case OP_ARM64_ABSCOMPARE: {
IntrinsicId iid = (IntrinsicId) ins->inst_c0;
gboolean scalar = ins->inst_c1;
LLVMTypeRef ret_t = simd_class_to_llvm_type (ctx, ins->klass);
LLVMTypeRef elem_t = LLVMGetElementType (ret_t);
llvm_ovr_tag_t ovr_tag = ovr_tag_from_mono_vector_class (ins->klass);
ovr_tag = ovr_tag_corresponding_integer (ovr_tag);
LLVMValueRef args [] = { lhs, rhs };
LLVMTypeRef result_t = ret_t;
if (scalar) {
ovr_tag = ovr_tag_force_scalar (ovr_tag);
result_t = elem_t;
for (int i = 0; i < 2; ++i)
args [i] = scalar_from_vector (ctx, args [i]);
}
LLVMValueRef result = call_overloaded_intrins (ctx, iid, ovr_tag, args, "");
result = LLVMBuildBitCast (builder, result, result_t, "");
if (scalar)
result = vector_from_scalar (ctx, ret_t, result);
values [ins->dreg] = result;
break;
}
case OP_XOP_OVR_X_X: {
IntrinsicId iid = (IntrinsicId) ins->inst_c0;
llvm_ovr_tag_t ovr_tag = ovr_tag_from_mono_vector_class (ins->klass);
values [ins->dreg] = call_overloaded_intrins (ctx, iid, ovr_tag, &lhs, "");
break;
}
case OP_XOP_OVR_X_X_X: {
IntrinsicId iid = (IntrinsicId) ins->inst_c0;
llvm_ovr_tag_t ovr_tag = ovr_tag_from_mono_vector_class (ins->klass);
LLVMValueRef args [] = { lhs, rhs };
values [ins->dreg] = call_overloaded_intrins (ctx, iid, ovr_tag, args, "");
break;
}
case OP_XOP_OVR_X_X_X_X: {
IntrinsicId iid = (IntrinsicId) ins->inst_c0;
llvm_ovr_tag_t ovr_tag = ovr_tag_from_mono_vector_class (ins->klass);
LLVMValueRef args [] = { lhs, rhs, arg3 };
values [ins->dreg] = call_overloaded_intrins (ctx, iid, ovr_tag, args, "");
break;
}
case OP_XOP_OVR_BYSCALAR_X_X_X: {
IntrinsicId iid = (IntrinsicId) ins->inst_c0;
llvm_ovr_tag_t ovr_tag = ovr_tag_from_mono_vector_class (ins->klass);
LLVMTypeRef t = LLVMTypeOf (lhs);
unsigned int elems = LLVMGetVectorSize (t);
LLVMValueRef arg2 = broadcast_element (ctx, scalar_from_vector (ctx, rhs), elems);
LLVMValueRef args [] = { lhs, arg2 };
values [ins->dreg] = call_overloaded_intrins (ctx, iid, ovr_tag, args, "");
break;
}
case OP_XOP_OVR_SCALAR_X_X:
case OP_XOP_OVR_SCALAR_X_X_X:
case OP_XOP_OVR_SCALAR_X_X_X_X: {
int num_args = 0;
IntrinsicId iid = (IntrinsicId) ins->inst_c0;
LLVMTypeRef ret_t = simd_class_to_llvm_type (ctx, ins->klass);
switch (ins->opcode) {
case OP_XOP_OVR_SCALAR_X_X: num_args = 1; break;
case OP_XOP_OVR_SCALAR_X_X_X: num_args = 2; break;
case OP_XOP_OVR_SCALAR_X_X_X_X: num_args = 3; break;
}
/* LLVM 9 NEON intrinsic functions have scalar overloads. Unfortunately
* only overloads for 32- and 64-bit integers and floating point types are
* supported. 8- and 16-bit integers are unsupported, and will fail during
* instruction selection. This is worked around by using a vector
* operation and then explicitly clearing the upper bits of the register.
*/
ScalarOpFromVectorOpCtx sctx = scalar_op_from_vector_op (ctx, ret_t, ins);
LLVMValueRef args [3] = { lhs, rhs, arg3 };
scalar_op_from_vector_op_process_args (&sctx, args, num_args);
LLVMValueRef result = call_overloaded_intrins (ctx, iid, sctx.ovr_tag, args, "");
result = scalar_op_from_vector_op_process_result (&sctx, result);
values [ins->dreg] = result;
break;
}
#endif
case OP_DUMMY_USE:
break;
/*
* EXCEPTION HANDLING
*/
case OP_IMPLICIT_EXCEPTION:
/* This marks a place where an implicit exception can happen */
if (bb->region != -1)
set_failure (ctx, "implicit-exception");
break;
case OP_THROW:
case OP_RETHROW: {
gboolean rethrow = (ins->opcode == OP_RETHROW);
if (ctx->llvm_only) {
emit_llvmonly_throw (ctx, bb, rethrow, lhs);
has_terminator = TRUE;
ctx->unreachable [bb->block_num] = TRUE;
} else {
emit_throw (ctx, bb, rethrow, lhs);
builder = ctx->builder;
}
break;
}
case OP_CALL_HANDLER: {
/*
* We don't 'call' handlers, but instead simply branch to them.
* The code generated by ENDFINALLY will branch back to us.
*/
LLVMBasicBlockRef noex_bb;
GSList *bb_list;
BBInfo *info = &bblocks [ins->inst_target_bb->block_num];
bb_list = info->call_handler_return_bbs;
/*
* Set the indicator variable for the finally clause.
*/
lhs = info->finally_ind;
g_assert (lhs);
LLVMBuildStore (builder, LLVMConstInt (LLVMInt32Type (), g_slist_length (bb_list) + 1, FALSE), lhs);
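/* The stored value is the 1-based index of noex_bb among this handler's
 * return bblocks; the ENDFINALLY switch branches on it. */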
/* Branch to the finally clause */
LLVMBuildBr (builder, info->call_handler_target_bb);
noex_bb = gen_bb (ctx, "CALL_HANDLER_CONT_BB");
info->call_handler_return_bbs = g_slist_append_mempool (cfg->mempool, info->call_handler_return_bbs, noex_bb);
builder = ctx->builder = create_builder (ctx);
LLVMPositionBuilderAtEnd (ctx->builder, noex_bb);
bblocks [bb->block_num].end_bblock = noex_bb;
break;
}
case OP_START_HANDLER: {
break;
}
case OP_ENDFINALLY: {
LLVMBasicBlockRef resume_bb;
MonoBasicBlock *handler_bb;
LLVMValueRef val, switch_ins, callee;
GSList *bb_list;
BBInfo *info;
gboolean is_fault = MONO_REGION_FLAGS (bb->region) == MONO_EXCEPTION_CLAUSE_FAULT;
/*
* Fault clauses are like finally clauses, but they are only called if an exception is thrown.
*/
if (!is_fault) {
handler_bb = (MonoBasicBlock*)g_hash_table_lookup (ctx->region_to_handler, GUINT_TO_POINTER (mono_get_block_region_notry (cfg, bb->region)));
g_assert (handler_bb);
info = &bblocks [handler_bb->block_num];
lhs = info->finally_ind;
g_assert (lhs);
bb_list = info->call_handler_return_bbs;
resume_bb = gen_bb (ctx, "ENDFINALLY_RESUME_BB");
/* Load the finally variable */
val = LLVMBuildLoad (builder, lhs, "");
/* Reset the variable */
LLVMBuildStore (builder, LLVMConstInt (LLVMInt32Type (), 0, FALSE), lhs);
/* Branch to either resume_bb, or to the bblocks in bb_list */
switch_ins = LLVMBuildSwitch (builder, val, resume_bb, g_slist_length (bb_list));
/*
* The other targets are added at the end to handle OP_CALL_HANDLER
* opcodes processed later.
*/
info->endfinally_switch_ins_list = g_slist_append_mempool (cfg->mempool, info->endfinally_switch_ins_list, switch_ins);
builder = ctx->builder = create_builder (ctx);
LLVMPositionBuilderAtEnd (ctx->builder, resume_bb);
}
if (ctx->llvm_only) {
if (!cfg->deopt) {
emit_resume_eh (ctx, bb);
} else {
/* Not needed */
LLVMBuildUnreachable (builder);
}
} else {
LLVMTypeRef icall_sig = LLVMFunctionType (LLVMVoidType (), NULL, 0, FALSE);
if (ctx->cfg->compile_aot) {
callee = get_callee (ctx, icall_sig, MONO_PATCH_INFO_JIT_ICALL_ID, GUINT_TO_POINTER (MONO_JIT_ICALL_mono_llvm_resume_unwind_trampoline));
} else {
callee = get_jit_callee (ctx, "llvm_resume_unwind_trampoline", icall_sig, MONO_PATCH_INFO_JIT_ICALL_ID, GUINT_TO_POINTER (MONO_JIT_ICALL_mono_llvm_resume_unwind_trampoline));
}
LLVMBuildCall (builder, callee, NULL, 0, "");
LLVMBuildUnreachable (builder);
}
has_terminator = TRUE;
break;
}
case OP_ENDFILTER: {
g_assert (cfg->llvm_only && cfg->deopt);
LLVMBuildUnreachable (builder);
has_terminator = TRUE;
break;
}
case OP_IL_SEQ_POINT:
break;
default: {
char reason [128];
sprintf (reason, "opcode %s", mono_inst_name (ins->opcode));
set_failure (ctx, reason);
break;
}
}
if (!ctx_ok (ctx))
break;
/* Convert the value to the type required by phi nodes */
if (spec [MONO_INST_DEST] != ' ' && !MONO_IS_STORE_MEMBASE (ins) && ctx->vreg_types [ins->dreg]) {
if (ctx->is_vphi [ins->dreg])
/* vtypes */
values [ins->dreg] = addresses [ins->dreg];
else
values [ins->dreg] = convert (ctx, values [ins->dreg], ctx->vreg_types [ins->dreg]);
}
/* Add stores for volatile/ref variables */
if (spec [MONO_INST_DEST] != ' ' && spec [MONO_INST_DEST] != 'v' && !MONO_IS_STORE_MEMBASE (ins)) {
if (!skip_volatile_store)
emit_volatile_store (ctx, ins->dreg);
#ifdef TARGET_WASM
if (vreg_is_ref (cfg, ins->dreg) && ctx->values [ins->dreg])
emit_gc_pin (ctx, builder, ins->dreg);
#endif
}
}
if (!ctx_ok (ctx))
return;
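/* LLVM bblocks don't fall through, so add an explicit branch to the
 * next bblock when the mono bblock doesn't end in a terminator. */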
if (!has_terminator && bb->next_bb && (bb == cfg->bb_entry || bb->in_count > 0)) {
LLVMBuildBr (builder, get_bb (ctx, bb->next_bb));
}
if (bb == cfg->bb_exit && sig->ret->type == MONO_TYPE_VOID) {
emit_dbg_loc (ctx, builder, cfg->header->code + cfg->header->code_size - 1);
LLVMBuildRetVoid (builder);
}
if (bb == cfg->bb_entry)
ctx->last_alloca = LLVMGetLastInstruction (get_bb (ctx, cfg->bb_entry));
}
/*
* mono_llvm_check_method_supported:
*
* Do some quick checks to decide whether cfg->method can be compiled by LLVM, to avoid
* compiling a method twice.
*/
void
mono_llvm_check_method_supported (MonoCompile *cfg)
{
int i, j;
#ifdef TARGET_WASM
if (mono_method_signature_internal (cfg->method)->call_convention == MONO_CALL_VARARG) {
cfg->exception_message = g_strdup ("vararg callconv");
cfg->disable_llvm = TRUE;
return;
}
#endif
if (cfg->llvm_only)
return;
if (cfg->method->save_lmf) {
cfg->exception_message = g_strdup ("lmf");
cfg->disable_llvm = TRUE;
}
if (cfg->disable_llvm)
return;
/*
* Nested clauses where one of the clauses is a finally clause is
* not supported, because LLVM can't figure out the control flow,
* probably because we resume exception handling by calling our
* own function instead of using the 'resume' llvm instruction.
*/
for (i = 0; i < cfg->header->num_clauses; ++i) {
for (j = 0; j < cfg->header->num_clauses; ++j) {
MonoExceptionClause *clause1 = &cfg->header->clauses [i];
MonoExceptionClause *clause2 = &cfg->header->clauses [j];
// FIXME: Nested try clauses fail in some cases too, i.e. #37273
if (i != j && clause1->try_offset >= clause2->try_offset && clause1->handler_offset <= clause2->handler_offset) {
//(clause1->flags == MONO_EXCEPTION_CLAUSE_FINALLY || clause2->flags == MONO_EXCEPTION_CLAUSE_FINALLY)) {
cfg->exception_message = g_strdup ("nested clauses");
cfg->disable_llvm = TRUE;
break;
}
}
}
if (cfg->disable_llvm)
return;
/* FIXME: */
if (cfg->method->dynamic) {
cfg->exception_message = g_strdup ("dynamic.");
cfg->disable_llvm = TRUE;
}
if (cfg->disable_llvm)
return;
}
static LLVMCallInfo*
get_llvm_call_info (MonoCompile *cfg, MonoMethodSignature *sig)
{
LLVMCallInfo *linfo;
int i;
if (cfg->gsharedvt && cfg->llvm_only && mini_is_gsharedvt_variable_signature (sig)) {
int i, n, pindex;
/*
* Gsharedvt methods have the following calling convention:
* - all arguments are passed by ref, even non generic ones
* - the return value is returned by ref too, using a vret
* argument passed after 'this'.
*/
n = sig->param_count + sig->hasthis;
linfo = (LLVMCallInfo*)mono_mempool_alloc0 (cfg->mempool, sizeof (LLVMCallInfo) + (sizeof (LLVMArgInfo) * n));
pindex = 0;
if (sig->hasthis)
linfo->args [pindex ++].storage = LLVMArgNormal;
if (sig->ret->type != MONO_TYPE_VOID) {
if (mini_is_gsharedvt_variable_type (sig->ret))
linfo->ret.storage = LLVMArgGsharedvtVariable;
else if (mini_type_is_vtype (sig->ret))
linfo->ret.storage = LLVMArgGsharedvtFixedVtype;
else
linfo->ret.storage = LLVMArgGsharedvtFixed;
linfo->vret_arg_index = pindex;
} else {
linfo->ret.storage = LLVMArgNone;
}
for (i = 0; i < sig->param_count; ++i) {
if (m_type_is_byref (sig->params [i]))
linfo->args [pindex].storage = LLVMArgNormal;
else if (mini_is_gsharedvt_variable_type (sig->params [i]))
linfo->args [pindex].storage = LLVMArgGsharedvtVariable;
else if (mini_type_is_vtype (sig->params [i]))
linfo->args [pindex].storage = LLVMArgGsharedvtFixedVtype;
else
linfo->args [pindex].storage = LLVMArgGsharedvtFixed;
linfo->args [pindex].type = sig->params [i];
pindex ++;
}
return linfo;
}
linfo = mono_arch_get_llvm_call_info (cfg, sig);
linfo->dummy_arg_pindex = -1;
for (i = 0; i < sig->param_count; ++i)
linfo->args [i + sig->hasthis].type = sig->params [i];
return linfo;
}
static void
emit_method_inner (EmitContext *ctx);
static void
free_ctx (EmitContext *ctx)
{
GSList *l;
g_free (ctx->values);
g_free (ctx->addresses);
g_free (ctx->vreg_types);
g_free (ctx->is_vphi);
g_free (ctx->vreg_cli_types);
g_free (ctx->is_dead);
g_free (ctx->unreachable);
g_free (ctx->gc_var_indexes);
g_ptr_array_free (ctx->phi_values, TRUE);
g_free (ctx->bblocks);
g_hash_table_destroy (ctx->region_to_handler);
g_hash_table_destroy (ctx->clause_to_handler);
g_hash_table_destroy (ctx->jit_callees);
g_ptr_array_free (ctx->callsite_list, TRUE);
g_free (ctx->method_name);
g_ptr_array_free (ctx->bblock_list, TRUE);
for (l = ctx->builders; l; l = l->next) {
LLVMBuilderRef builder = (LLVMBuilderRef)l->data;
LLVMDisposeBuilder (builder);
}
g_free (ctx);
}
static gboolean
is_linkonce_method (MonoMethod *method)
{
#ifdef TARGET_WASM
/*
* Under wasm, linkonce works, so use it instead of the dedup pass for wrappers at least.
* FIXME: Use for everything, i.e. can_dedup ().
* FIXME: Fails System.Core tests
* -> amodule->sorted_methods contains duplicates, screwing up jit tables.
*/
// FIXME: This works, but the aot data for the methods is still kept, so size still increases
#if 0
if (method->wrapper_type == MONO_WRAPPER_OTHER) {
WrapperInfo *info = mono_marshal_get_wrapper_info (method);
if (info->subtype == WRAPPER_SUBTYPE_GSHAREDVT_IN_SIG || info->subtype == WRAPPER_SUBTYPE_GSHAREDVT_OUT_SIG)
return TRUE;
}
#endif
#endif
return FALSE;
}
/*
* mono_llvm_emit_method:
*
* Emit LLVM IL from the mono IL, and compile it to native code using LLVM.
*/
void
mono_llvm_emit_method (MonoCompile *cfg)
{
EmitContext *ctx;
char *method_name;
gboolean is_linkonce = FALSE;
int i;
if (cfg->skip)
return;
/* The code below might acquire the loader lock, so use it for global locking */
mono_loader_lock ();
ctx = g_new0 (EmitContext, 1);
ctx->cfg = cfg;
ctx->mempool = cfg->mempool;
/*
* This maps vregs to the LLVM instruction defining them
*/
ctx->values = g_new0 (LLVMValueRef, cfg->next_vreg);
/*
* This maps vregs for volatile variables to the LLVM instruction defining their
* address.
*/
ctx->addresses = g_new0 (LLVMValueRef, cfg->next_vreg);
ctx->vreg_types = g_new0 (LLVMTypeRef, cfg->next_vreg);
ctx->is_vphi = g_new0 (gboolean, cfg->next_vreg);
ctx->vreg_cli_types = g_new0 (MonoType*, cfg->next_vreg);
ctx->phi_values = g_ptr_array_sized_new (256);
/*
* This signals whether the vreg was defined by a phi node with no input vars
* (i.e. all its input bblocks end with NOT_REACHABLE).
*/
ctx->is_dead = g_new0 (gboolean, cfg->next_vreg);
/* Whether the bblock is unreachable */
ctx->unreachable = g_new0 (gboolean, cfg->max_block_num);
ctx->bblock_list = g_ptr_array_sized_new (256);
ctx->region_to_handler = g_hash_table_new (NULL, NULL);
ctx->clause_to_handler = g_hash_table_new (NULL, NULL);
ctx->callsite_list = g_ptr_array_new ();
ctx->jit_callees = g_hash_table_new (NULL, NULL);
if (cfg->compile_aot) {
ctx->module = &aot_module;
/*
* Allow the linker to discard duplicate copies of wrappers, generic instances etc. by using the 'linkonce'
* linkage for them. This requires the following:
* - the method needs to have a unique mangled name
* - llvmonly mode, since the code in aot-runtime.c would initialize got slots in the wrong aot image etc.
*/
if (ctx->module->llvm_only && ctx->module->static_link && is_linkonce_method (cfg->method))
is_linkonce = TRUE;
if (is_linkonce || mono_aot_is_externally_callable (cfg->method))
method_name = mono_aot_get_mangled_method_name (cfg->method);
else
method_name = mono_aot_get_method_name (cfg);
cfg->llvm_method_name = g_strdup (method_name);
} else {
ctx->module = init_jit_module ();
method_name = mono_method_full_name (cfg->method, TRUE);
}
ctx->method_name = method_name;
ctx->is_linkonce = is_linkonce;
if (cfg->compile_aot) {
ctx->lmodule = ctx->module->lmodule;
} else {
ctx->lmodule = LLVMModuleCreateWithName (g_strdup_printf ("jit-module-%s", cfg->method->name));
}
ctx->llvm_only = ctx->module->llvm_only;
#ifdef TARGET_WASM
ctx->emit_dummy_arg = TRUE;
#endif
emit_method_inner (ctx);
if (!ctx_ok (ctx)) {
if (ctx->lmethod) {
/* Need to add unused phi nodes as they can be referenced by other values */
LLVMBasicBlockRef phi_bb = LLVMAppendBasicBlock (ctx->lmethod, "PHI_BB");
LLVMBuilderRef builder;
builder = create_builder (ctx);
LLVMPositionBuilderAtEnd (builder, phi_bb);
for (i = 0; i < ctx->phi_values->len; ++i) {
LLVMValueRef v = (LLVMValueRef)g_ptr_array_index (ctx->phi_values, i);
if (LLVMGetInstructionParent (v) == NULL)
LLVMInsertIntoBuilder (builder, v);
}
if (ctx->module->llvm_only && ctx->module->static_link && cfg->interp) {
/* The caller will retry compilation */
LLVMDeleteFunction (ctx->lmethod);
} else if (ctx->module->llvm_only && ctx->module->static_link) {
// Keep a stub for the function since it might be called directly
int nbbs = LLVMCountBasicBlocks (ctx->lmethod);
LLVMBasicBlockRef *bblocks = g_new0 (LLVMBasicBlockRef, nbbs);
LLVMGetBasicBlocks (ctx->lmethod, bblocks);
for (int i = 0; i < nbbs; ++i)
LLVMDeleteBasicBlock (bblocks [i]);
LLVMBasicBlockRef entry_bb = LLVMAppendBasicBlock (ctx->lmethod, "ENTRY");
builder = create_builder (ctx);
LLVMPositionBuilderAtEnd (builder, entry_bb);
ctx->builder = builder;
LLVMTypeRef sig = LLVMFunctionType0 (LLVMVoidType (), FALSE);
LLVMValueRef callee = get_callee (ctx, sig, MONO_PATCH_INFO_JIT_ICALL_ADDR, GUINT_TO_POINTER (MONO_JIT_ICALL_mini_llvmonly_throw_nullref_exception));
LLVMBuildCall (builder, callee, NULL, 0, "");
LLVMBuildUnreachable (builder);
} else {
LLVMDeleteFunction (ctx->lmethod);
}
}
}
free_ctx (ctx);
mono_loader_unlock ();
}
static void
emit_method_inner (EmitContext *ctx)
{
MonoCompile *cfg = ctx->cfg;
MonoMethodSignature *sig;
MonoBasicBlock *bb;
LLVMTypeRef method_type;
LLVMValueRef method = NULL;
LLVMValueRef *values = ctx->values;
int i, max_block_num, bb_index;
gboolean llvmonly_fail = FALSE;
LLVMCallInfo *linfo;
LLVMModuleRef lmodule = ctx->lmodule;
BBInfo *bblocks;
GPtrArray *bblock_list = ctx->bblock_list;
MonoMethodHeader *header;
MonoExceptionClause *clause;
char **names;
LLVMBuilderRef entry_builder = NULL;
LLVMBasicBlockRef entry_bb = NULL;
if (cfg->gsharedvt && !cfg->llvm_only) {
set_failure (ctx, "gsharedvt");
return;
}
#if 0
{
static int count = 0;
count ++;
char *llvm_count_str = g_getenv ("LLVM_COUNT");
if (llvm_count_str) {
int lcount = atoi (llvm_count_str);
g_free (llvm_count_str);
if (count == lcount) {
printf ("LAST: %s\n", mono_method_full_name (cfg->method, TRUE));
fflush (stdout);
}
if (count > lcount) {
set_failure (ctx, "count");
return;
}
}
}
#endif
// If we come upon one of the init_method wrappers, we need to find
// the method that we have already emitted and tell LLVM that this
// managed method info for the wrapper is associated with this method
// we constructed ourselves from LLVM IR.
//
// This is necessary to unwind through the init_method, in the case that
// it has to run a static cctor that throws an exception
if (cfg->method->wrapper_type == MONO_WRAPPER_OTHER) {
WrapperInfo *info = mono_marshal_get_wrapper_info (cfg->method);
if (info->subtype == WRAPPER_SUBTYPE_AOT_INIT) {
method = get_init_func (ctx->module, info->d.aot_init.subtype);
ctx->lmethod = method;
ctx->module->max_method_idx = MAX (ctx->module->max_method_idx, cfg->method_index);
const char *init_name = mono_marshal_get_aot_init_wrapper_name (info->d.aot_init.subtype);
ctx->method_name = g_strdup_printf ("%s_%s", ctx->module->global_prefix, init_name);
ctx->cfg->asm_symbol = g_strdup (ctx->method_name);
if (!cfg->llvm_only && ctx->module->external_symbols) {
LLVMSetLinkage (method, LLVMExternalLinkage);
LLVMSetVisibility (method, LLVMHiddenVisibility);
}
/* Not looked up at runtime */
g_hash_table_insert (ctx->module->no_method_table_lmethods, method, method);
goto after_codegen;
} else if (info->subtype == WRAPPER_SUBTYPE_LLVM_FUNC) {
g_assert (info->d.llvm_func.subtype == LLVM_FUNC_WRAPPER_GC_POLL);
if (cfg->compile_aot) {
method = ctx->module->gc_poll_cold_wrapper;
g_assert (method);
} else {
method = emit_icall_cold_wrapper (ctx->module, lmodule, MONO_JIT_ICALL_mono_threads_state_poll, FALSE);
}
ctx->lmethod = method;
ctx->module->max_method_idx = MAX (ctx->module->max_method_idx, cfg->method_index);
ctx->method_name = g_strdup (LLVMGetValueName (method)); //g_strdup_printf ("%s_%s", ctx->module->global_prefix, LLVMGetValueName (method));
ctx->cfg->asm_symbol = g_strdup (ctx->method_name);
if (!cfg->llvm_only && ctx->module->external_symbols) {
LLVMSetLinkage (method, LLVMExternalLinkage);
LLVMSetVisibility (method, LLVMHiddenVisibility);
}
goto after_codegen;
}
}
sig = mono_method_signature_internal (cfg->method);
ctx->sig = sig;
linfo = get_llvm_call_info (cfg, sig);
ctx->linfo = linfo;
if (!ctx_ok (ctx))
return;
if (cfg->rgctx_var)
linfo->rgctx_arg = TRUE;
else if (needs_extra_arg (ctx, cfg->method))
linfo->dummy_arg = TRUE;
ctx->method_type = method_type = sig_to_llvm_sig_full (ctx, sig, linfo);
if (!ctx_ok (ctx))
return;
method = LLVMAddFunction (lmodule, ctx->method_name, method_type);
ctx->lmethod = method;
if (!cfg->llvm_only)
LLVMSetFunctionCallConv (method, LLVMMono1CallConv);
/* If the method contains
 * (1) no calls (so it's a leaf method) and
 * (2) no loops,
 * we can skip the GC safepoint on method entry. */
gboolean requires_safepoint;
requires_safepoint = cfg->has_calls;
if (!requires_safepoint) {
for (bb = cfg->bb_entry->next_bb; bb; bb = bb->next_bb) {
if (bb->loop_body_start || (bb->flags & BB_EXCEPTION_HANDLER)) {
requires_safepoint = TRUE;
}
}
}
if (cfg->method->wrapper_type) {
if (cfg->method->wrapper_type == MONO_WRAPPER_ALLOC || cfg->method->wrapper_type == MONO_WRAPPER_WRITE_BARRIER) {
requires_safepoint = FALSE;
} else {
WrapperInfo *info = mono_marshal_get_wrapper_info (cfg->method);
switch (info->subtype) {
case WRAPPER_SUBTYPE_GSHAREDVT_IN:
case WRAPPER_SUBTYPE_GSHAREDVT_OUT:
case WRAPPER_SUBTYPE_GSHAREDVT_IN_SIG:
case WRAPPER_SUBTYPE_GSHAREDVT_OUT_SIG:
/* Arguments are not used after the call */
requires_safepoint = FALSE;
break;
}
}
}
ctx->has_safepoints = requires_safepoint;
if (!cfg->llvm_only && mono_threads_are_safepoints_enabled () && requires_safepoint) {
if (!cfg->compile_aot) {
LLVMSetGC (method, "coreclr");
emit_gc_safepoint_poll (ctx->module, ctx->lmodule, cfg);
} else {
LLVMSetGC (method, "coreclr");
}
}
LLVMSetLinkage (method, LLVMPrivateLinkage);
mono_llvm_add_func_attr (method, LLVM_ATTR_UW_TABLE);
if (cfg->disable_omit_fp)
mono_llvm_add_func_attr_nv (method, "frame-pointer", "all");
if (cfg->compile_aot) {
if (mono_aot_is_externally_callable (cfg->method)) {
LLVMSetLinkage (method, LLVMExternalLinkage);
} else {
LLVMSetLinkage (method, LLVMInternalLinkage);
// All methods have internal visibility when doing llvm_only
if (!cfg->llvm_only && ctx->module->external_symbols) {
LLVMSetLinkage (method, LLVMExternalLinkage);
LLVMSetVisibility (method, LLVMHiddenVisibility);
}
}
if (ctx->is_linkonce) {
LLVMSetLinkage (method, LLVMLinkOnceAnyLinkage);
LLVMSetVisibility (method, LLVMDefaultVisibility);
}
} else {
LLVMSetLinkage (method, LLVMExternalLinkage);
}
if (cfg->method->save_lmf && !cfg->llvm_only) {
set_failure (ctx, "lmf");
return;
}
if (sig->pinvoke && cfg->method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE && !cfg->llvm_only) {
set_failure (ctx, "pinvoke signature");
return;
}
#ifdef TARGET_WASM
if (ctx->module->interp && cfg->header->code_size > 100000 && !cfg->interp_entry_only) {
/* Large methods slow down llvm too much */
set_failure (ctx, "il code too large.");
return;
}
#endif
header = cfg->header;
for (i = 0; i < header->num_clauses; ++i) {
clause = &header->clauses [i];
if (clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY && clause->flags != MONO_EXCEPTION_CLAUSE_FAULT && clause->flags != MONO_EXCEPTION_CLAUSE_NONE) {
if (cfg->llvm_only) {
if (!cfg->deopt && !cfg->interp_entry_only)
llvmonly_fail = TRUE;
} else {
set_failure (ctx, "non-finally/catch/fault clause.");
return;
}
}
}
if (header->num_clauses || (cfg->method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) || cfg->no_inline)
/* We can't handle inlined methods with clauses */
mono_llvm_add_func_attr (method, LLVM_ATTR_NO_INLINE);
for (int i = 0; i < cfg->header->num_clauses; i++) {
MonoExceptionClause *clause = &cfg->header->clauses [i];
if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE || clause->flags == MONO_EXCEPTION_CLAUSE_FILTER)
ctx->has_catch = TRUE;
}
if (linfo->rgctx_arg) {
ctx->rgctx_arg = LLVMGetParam (method, linfo->rgctx_arg_pindex);
ctx->rgctx_arg_pindex = linfo->rgctx_arg_pindex;
/*
* We mark the rgctx parameter with the inreg attribute, which is mapped to
* MONO_ARCH_RGCTX_REG in the Mono calling convention in llvm, i.e.
* CC_X86_64_Mono in X86CallingConv.td.
*/
if (!ctx->llvm_only)
mono_llvm_add_param_attr (ctx->rgctx_arg, LLVM_ATTR_IN_REG);
LLVMSetValueName (ctx->rgctx_arg, "rgctx");
} else {
ctx->rgctx_arg_pindex = -1;
}
if (cfg->vret_addr) {
values [cfg->vret_addr->dreg] = LLVMGetParam (method, linfo->vret_arg_pindex);
LLVMSetValueName (values [cfg->vret_addr->dreg], "vret");
if (linfo->ret.storage == LLVMArgVtypeByRef) {
mono_llvm_add_param_attr (LLVMGetParam (method, linfo->vret_arg_pindex), LLVM_ATTR_STRUCT_RET);
mono_llvm_add_param_attr (LLVMGetParam (method, linfo->vret_arg_pindex), LLVM_ATTR_NO_ALIAS);
}
}
if (sig->hasthis) {
ctx->this_arg_pindex = linfo->this_arg_pindex;
ctx->this_arg = LLVMGetParam (method, linfo->this_arg_pindex);
values [cfg->args [0]->dreg] = ctx->this_arg;
LLVMSetValueName (values [cfg->args [0]->dreg], "this");
}
if (linfo->dummy_arg)
LLVMSetValueName (LLVMGetParam (method, linfo->dummy_arg_pindex), "dummy_arg");
names = g_new (char *, sig->param_count);
mono_method_get_param_names (cfg->method, (const char **) names);
/* Set parameter names/attributes */
for (i = 0; i < sig->param_count; ++i) {
LLVMArgInfo *ainfo = &linfo->args [i + sig->hasthis];
char *name;
int pindex = ainfo->pindex + ainfo->ndummy_fpargs;
int j;
for (j = 0; j < ainfo->ndummy_fpargs; ++j) {
name = g_strdup_printf ("dummy_%d_%d", i, j);
LLVMSetValueName (LLVMGetParam (method, ainfo->pindex + j), name);
g_free (name);
}
if (ainfo->storage == LLVMArgVtypeInReg && ainfo->pair_storage [0] == LLVMArgNone && ainfo->pair_storage [1] == LLVMArgNone)
continue;
values [cfg->args [i + sig->hasthis]->dreg] = LLVMGetParam (method, pindex);
if (ainfo->storage == LLVMArgGsharedvtFixed || ainfo->storage == LLVMArgGsharedvtFixedVtype) {
if (names [i] && names [i][0] != '\0')
name = g_strdup_printf ("p_arg_%s", names [i]);
else
name = g_strdup_printf ("p_arg_%d", i);
} else {
if (names [i] && names [i][0] != '\0')
name = g_strdup_printf ("arg_%s", names [i]);
else
name = g_strdup_printf ("arg_%d", i);
}
LLVMSetValueName (LLVMGetParam (method, pindex), name);
g_free (name);
if (ainfo->storage == LLVMArgVtypeByVal)
mono_llvm_add_param_attr (LLVMGetParam (method, pindex), LLVM_ATTR_BY_VAL);
if (ainfo->storage == LLVMArgVtypeByRef || ainfo->storage == LLVMArgVtypeAddr) {
/* For OP_LDADDR */
cfg->args [i + sig->hasthis]->opcode = OP_VTARG_ADDR;
}
#ifdef TARGET_WASM
if (ainfo->storage == LLVMArgVtypeByRef) {
/* This causes llvm to make a copy of the value which is what we need */
mono_llvm_add_param_byval_attr (LLVMGetParam (method, pindex), LLVMGetElementType (LLVMTypeOf (LLVMGetParam (method, pindex))));
}
#endif
}
g_free (names);
if (ctx->module->emit_dwarf && cfg->compile_aot && mono_debug_enabled ()) {
ctx->minfo = mono_debug_lookup_method (cfg->method);
ctx->dbg_md = emit_dbg_subprogram (ctx, cfg, method, ctx->method_name);
}
max_block_num = 0;
for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
max_block_num = MAX (max_block_num, bb->block_num);
ctx->bblocks = bblocks = g_new0 (BBInfo, max_block_num + 1);
/* Add branches between non-consecutive bblocks */
for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
if (bb->last_ins && MONO_IS_COND_BRANCH_OP (bb->last_ins) &&
bb->next_bb != bb->last_ins->inst_false_bb) {
MonoInst *inst = (MonoInst*)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst));
inst->opcode = OP_BR;
inst->inst_target_bb = bb->last_ins->inst_false_bb;
mono_bblock_add_inst (bb, inst);
}
}
/*
* Make a first pass over the code to precreate PHI nodes/set INDIRECT flags.
*/
for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
MonoInst *ins;
LLVMBuilderRef builder;
char *dname;
char dname_buf[128];
builder = create_builder (ctx);
for (ins = bb->code; ins; ins = ins->next) {
switch (ins->opcode) {
case OP_PHI:
case OP_FPHI:
case OP_VPHI:
case OP_XPHI: {
LLVMTypeRef phi_type = llvm_type_to_stack_type (cfg, type_to_llvm_type (ctx, m_class_get_byval_arg (ins->klass)));
if (!ctx_ok (ctx))
return;
if (cfg->interp_entry_only)
break;
if (ins->opcode == OP_VPHI) {
/* Treat valuetype PHI nodes as operating on the address itself */
g_assert (ins->klass);
phi_type = LLVMPointerType (type_to_llvm_type (ctx, m_class_get_byval_arg (ins->klass)), 0);
}
/*
* Have to precreate these, as they can be referenced by
* earlier instructions.
*/
sprintf (dname_buf, "t%d", ins->dreg);
dname = dname_buf;
values [ins->dreg] = LLVMBuildPhi (builder, phi_type, dname);
if (ins->opcode == OP_VPHI)
ctx->addresses [ins->dreg] = values [ins->dreg];
g_ptr_array_add (ctx->phi_values, values [ins->dreg]);
/*
* Set the expected type of the incoming arguments since these have
* to have the same type.
*/
for (i = 0; i < ins->inst_phi_args [0]; i++) {
int sreg1 = ins->inst_phi_args [i + 1];
if (sreg1 != -1) {
if (ins->opcode == OP_VPHI)
ctx->is_vphi [sreg1] = TRUE;
ctx->vreg_types [sreg1] = phi_type;
}
}
break;
}
case OP_LDADDR:
((MonoInst*)ins->inst_p0)->flags |= MONO_INST_INDIRECT;
break;
default:
break;
}
}
}
/*
* Create an ordering for bblocks, use the depth first order first, then
* put the exception handling bblocks last.
*/
for (bb_index = 0; bb_index < cfg->num_bblocks; ++bb_index) {
bb = cfg->bblocks [bb_index];
if (bb->region == -1 || MONO_BBLOCK_IS_IN_REGION (bb, MONO_REGION_TRY)) {
g_ptr_array_add (bblock_list, bb);
bblocks [bb->block_num].added = TRUE;
}
}
for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
if (!bblocks [bb->block_num].added)
g_ptr_array_add (bblock_list, bb);
}
/*
* Second pass: generate code.
*/
// Emit entry point
entry_builder = create_builder (ctx);
entry_bb = get_bb (ctx, cfg->bb_entry);
LLVMPositionBuilderAtEnd (entry_builder, entry_bb);
emit_entry_bb (ctx, entry_builder);
if (llvmonly_fail)
/*
* In llvmonly mode, we want to emit an llvm method for every method even if it fails to compile,
* so direct calls can be made from outside the assembly.
*/
goto after_codegen_1;
for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
int clause_index;
char name [128];
if (ctx->cfg->interp_entry_only || !(bb->region != -1 && (bb->flags & BB_EXCEPTION_HANDLER)))
continue;
if (ctx->cfg->deopt && MONO_REGION_FLAGS (bb->region) == MONO_EXCEPTION_CLAUSE_FILTER)
continue;
clause_index = MONO_REGION_CLAUSE_INDEX (bb->region);
g_hash_table_insert (ctx->region_to_handler, GUINT_TO_POINTER (mono_get_block_region_notry (cfg, bb->region)), bb);
g_hash_table_insert (ctx->clause_to_handler, GINT_TO_POINTER (clause_index), bb);
/*
* Create a new bblock which CALL_HANDLER/landing pads can branch to, because branching to the
* LLVM bblock containing a landing pad causes problems for the
* LLVM optimizer passes.
*/
sprintf (name, "BB%d_CALL_HANDLER_TARGET", bb->block_num);
ctx->bblocks [bb->block_num].call_handler_target_bb = LLVMAppendBasicBlock (ctx->lmethod, name);
}
// Make landing pads first
ctx->exc_meta = g_hash_table_new_full (NULL, NULL, NULL, NULL);
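/*
 * Group EH clauses which share the same try range and emit a single
 * landing pad per group, keyed in exc_meta by the clause end offset.
 */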
if (ctx->llvm_only && !ctx->cfg->interp_entry_only) {
size_t group_index = 0;
while (group_index < cfg->header->num_clauses) {
if (cfg->clause_is_dead [group_index]) {
group_index ++;
continue;
}
int count = 0;
size_t cursor = group_index;
while (cursor < cfg->header->num_clauses &&
CLAUSE_START (&cfg->header->clauses [cursor]) == CLAUSE_START (&cfg->header->clauses [group_index]) &&
CLAUSE_END (&cfg->header->clauses [cursor]) == CLAUSE_END (&cfg->header->clauses [group_index])) {
count++;
cursor++;
}
LLVMBasicBlockRef lpad_bb = emit_landing_pad (ctx, group_index, count);
intptr_t key = CLAUSE_END (&cfg->header->clauses [group_index]);
g_hash_table_insert (ctx->exc_meta, (gpointer)key, lpad_bb);
group_index = cursor;
}
}
for (bb_index = 0; bb_index < bblock_list->len; ++bb_index) {
bb = (MonoBasicBlock*)g_ptr_array_index (bblock_list, bb_index);
// Prune unreachable mono BBs.
if (!(bb == cfg->bb_entry || bb->in_count > 0))
continue;
process_bb (ctx, bb);
if (!ctx_ok (ctx))
return;
}
g_hash_table_destroy (ctx->exc_meta);
mono_memory_barrier ();
/* Add incoming phi values */
for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
GSList *l, *ins_list;
ins_list = bblocks [bb->block_num].phi_nodes;
for (l = ins_list; l; l = l->next) {
PhiNode *node = (PhiNode*)l->data;
MonoInst *phi = node->phi;
int sreg1 = node->sreg;
LLVMBasicBlockRef in_bb;
if (sreg1 == -1)
continue;
in_bb = get_end_bb (ctx, node->in_bb);
if (ctx->unreachable [node->in_bb->block_num])
continue;
if (phi->opcode == OP_VPHI) {
g_assert (LLVMTypeOf (ctx->addresses [sreg1]) == LLVMTypeOf (values [phi->dreg]));
LLVMAddIncoming (values [phi->dreg], &ctx->addresses [sreg1], &in_bb, 1);
} else {
if (!values [sreg1]) {
/* Can happen with values in EH clauses */
set_failure (ctx, "incoming phi sreg1");
return;
}
if (LLVMTypeOf (values [sreg1]) != LLVMTypeOf (values [phi->dreg])) {
set_failure (ctx, "incoming phi arg type mismatch");
return;
}
g_assert (LLVMTypeOf (values [sreg1]) == LLVMTypeOf (values [phi->dreg]));
LLVMAddIncoming (values [phi->dreg], &values [sreg1], &in_bb, 1);
}
}
}
/* Nullify empty phi instructions */
for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
GSList *l, *ins_list;
ins_list = bblocks [bb->block_num].phi_nodes;
for (l = ins_list; l; l = l->next) {
PhiNode *node = (PhiNode*)l->data;
MonoInst *phi = node->phi;
LLVMValueRef phi_ins = values [phi->dreg];
if (!phi_ins)
/* Already removed */
continue;
if (LLVMCountIncoming (phi_ins) == 0) {
mono_llvm_replace_uses_of (phi_ins, LLVMConstNull (LLVMTypeOf (phi_ins)));
LLVMInstructionEraseFromParent (phi_ins);
values [phi->dreg] = NULL;
}
}
}
/* Create the SWITCH statements for ENDFINALLY instructions */
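/* The switch dispatches on a selector value stored before the handler was
 * entered; case i + 1 branches to the i-th CALL_HANDLER return bblock. */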
for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
BBInfo *info = &bblocks [bb->block_num];
GSList *l;
for (l = info->endfinally_switch_ins_list; l; l = l->next) {
LLVMValueRef switch_ins = (LLVMValueRef)l->data;
GSList *bb_list = info->call_handler_return_bbs;
GSList *bb_list_iter;
i = 0;
for (bb_list_iter = bb_list; bb_list_iter; bb_list_iter = g_slist_next (bb_list_iter)) {
LLVMAddCase (switch_ins, LLVMConstInt (LLVMInt32Type (), i + 1, FALSE), (LLVMBasicBlockRef)bb_list_iter->data);
i ++;
}
}
}
ctx->module->max_method_idx = MAX (ctx->module->max_method_idx, cfg->method_index);
after_codegen_1:
if (llvmonly_fail) {
/*
* FIXME: Maybe fallback to interpreter
*/
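/* Emit a stub body which throws an AOT-failed exception naming the method,
 * so direct callers still have a definition to link against. */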
static LLVMTypeRef sig;
ctx->builder = create_builder (ctx);
LLVMPositionBuilderAtEnd (ctx->builder, ctx->inited_bb);
char *name = mono_method_get_full_name (cfg->method);
int len = strlen (name);
LLVMTypeRef type = LLVMArrayType (LLVMInt8Type (), len + 1);
LLVMValueRef name_var = LLVMAddGlobal (ctx->lmodule, type, "missing_method_name");
LLVMSetVisibility (name_var, LLVMHiddenVisibility);
LLVMSetLinkage (name_var, LLVMInternalLinkage);
LLVMSetInitializer (name_var, mono_llvm_create_constant_data_array ((guint8*)name, len + 1));
mono_llvm_set_is_constant (name_var);
g_free (name);
if (!sig)
sig = LLVMFunctionType1 (LLVMVoidType (), ctx->module->ptr_type, FALSE);
LLVMValueRef callee = get_callee (ctx, sig, MONO_PATCH_INFO_JIT_ICALL_ADDR, GUINT_TO_POINTER (MONO_JIT_ICALL_mini_llvmonly_throw_aot_failed_exception));
LLVMValueRef args [] = { convert (ctx, name_var, ctx->module->ptr_type) };
LLVMBuildCall (ctx->builder, callee, args, 1, "");
LLVMBuildUnreachable (ctx->builder);
}
/* Initialize the method if needed */
if (cfg->compile_aot) {
// FIXME: Add more shared got entries
ctx->builder = create_builder (ctx);
LLVMPositionBuilderAtEnd (ctx->builder, ctx->init_bb);
// FIXME: beforefieldinit
/*
* NATIVE_TO_MANAGED methods might be called on a thread not attached to the runtime, so they are initialized when loaded
* in load_method ().
*/
gboolean needs_init = ctx->cfg->got_access_count > 0;
MonoMethod *cctor = NULL;
if (!needs_init && (cctor = mono_class_get_cctor (cfg->method->klass))) {
/* Needs init to run the cctor */
if (cfg->method->flags & METHOD_ATTRIBUTE_STATIC)
needs_init = TRUE;
if (cctor == cfg->method)
needs_init = FALSE;
// If we are a constructor, we need to init so the static
// constructor gets called.
if (!strcmp (cfg->method->name, ".ctor"))
needs_init = TRUE;
}
if (cfg->method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
needs_init = FALSE;
if (needs_init)
emit_method_init (ctx);
else
LLVMBuildBr (ctx->builder, ctx->inited_bb);
// Was observing LLVM moving field accesses into the caller's method
// body before the init call (the inlined one), leading to NULL derefs
// after the init_method returns (GOT is filled out though)
if (needs_init)
mono_llvm_add_func_attr (method, LLVM_ATTR_NO_INLINE);
}
if (mini_get_debug_options ()->llvm_disable_inlining)
mono_llvm_add_func_attr (method, LLVM_ATTR_NO_INLINE);
after_codegen:
if (cfg->compile_aot)
g_ptr_array_add (ctx->module->cfgs, cfg);
if (cfg->llvm_only) {
/*
* Add the contents of ctx->callsite_list to module->callsite_list.
* We can't do this earlier, as it contains llvm instructions which can be
* freed if compilation fails.
* FIXME: Get rid of this when all methods can be llvm compiled.
*/
for (int i = 0; i < ctx->callsite_list->len; ++i)
g_ptr_array_add (ctx->module->callsite_list, g_ptr_array_index (ctx->callsite_list, i));
}
if (cfg->verbose_level > 1) {
g_print ("\n*** Unoptimized LLVM IR for %s ***\n", mono_method_full_name (cfg->method, TRUE));
if (cfg->compile_aot) {
mono_llvm_dump_value (method);
} else {
mono_llvm_dump_module (ctx->lmodule);
}
g_print ("***\n\n");
}
if (cfg->compile_aot && !cfg->llvm_only)
mark_as_used (ctx->module, method);
if (!cfg->llvm_only) {
LLVMValueRef md_args [16];
LLVMValueRef md_node;
int method_index;
if (cfg->compile_aot)
method_index = mono_aot_get_method_index (cfg->orig_method);
else
method_index = 1;
md_args [0] = LLVMMDString (ctx->method_name, strlen (ctx->method_name));
md_args [1] = LLVMConstInt (LLVMInt32Type (), method_index, FALSE);
md_node = LLVMMDNode (md_args, 2);
LLVMAddNamedMetadataOperand (lmodule, "mono.function_indexes", md_node);
//LLVMSetMetadata (method, md_kind, LLVMMDNode (&md_arg, 1));
}
if (cfg->compile_aot) {
/* Don't generate native code, keep the LLVM IR */
if (cfg->verbose_level) {
char *name = mono_method_get_full_name (cfg->method);
printf ("%s emitted as %s\n", name, ctx->method_name);
g_free (name);
}
#if 0
int err = LLVMVerifyFunction (ctx->lmethod, LLVMPrintMessageAction);
if (err != 0)
LLVMDumpValue (ctx->lmethod);
g_assert (err == 0);
#endif
} else {
//LLVMVerifyFunction (method, 0);
llvm_jit_finalize_method (ctx);
}
if (ctx->module->method_to_lmethod)
g_hash_table_insert (ctx->module->method_to_lmethod, cfg->method, ctx->lmethod);
if (ctx->module->idx_to_lmethod)
g_hash_table_insert (ctx->module->idx_to_lmethod, GINT_TO_POINTER (cfg->method_index), ctx->lmethod);
if (ctx->llvm_only && m_class_is_valuetype (cfg->orig_method->klass) && !(cfg->orig_method->flags & METHOD_ATTRIBUTE_STATIC))
emit_unbox_tramp (ctx, ctx->method_name, ctx->method_type, ctx->lmethod, cfg->method_index);
}
/*
* mono_llvm_create_vars:
*
* Same as mono_arch_create_vars () for LLVM.
*/
void
mono_llvm_create_vars (MonoCompile *cfg)
{
MonoMethodSignature *sig;
sig = mono_method_signature_internal (cfg->method);
if (cfg->gsharedvt && cfg->llvm_only) {
gboolean vretaddr = FALSE;
if (mini_is_gsharedvt_variable_signature (sig) && sig->ret->type != MONO_TYPE_VOID) {
vretaddr = TRUE;
} else {
MonoMethodSignature *sig = mono_method_signature_internal (cfg->method);
LLVMCallInfo *linfo;
linfo = get_llvm_call_info (cfg, sig);
vretaddr = (linfo->ret.storage == LLVMArgVtypeRetAddr || linfo->ret.storage == LLVMArgVtypeByRef || linfo->ret.storage == LLVMArgGsharedvtFixed || linfo->ret.storage == LLVMArgGsharedvtVariable || linfo->ret.storage == LLVMArgGsharedvtFixedVtype);
}
if (vretaddr) {
/*
* Creating vret_addr forces CEE_SETRET to store the result into it,
* so we don't have to generate any code in our OP_SETRET case.
*/
cfg->vret_addr = mono_compile_create_var (cfg, m_class_get_byval_arg (mono_get_intptr_class ()), OP_ARG);
if (G_UNLIKELY (cfg->verbose_level > 1)) {
printf ("vret_addr = ");
mono_print_ins (cfg->vret_addr);
}
}
} else {
mono_arch_create_vars (cfg);
}
cfg->lmf_ir = TRUE;
}
/*
* mono_llvm_emit_call:
*
* Same as mono_arch_emit_call () for LLVM.
*/
void
mono_llvm_emit_call (MonoCompile *cfg, MonoCallInst *call)
{
MonoInst *in;
MonoMethodSignature *sig;
int i, n;
LLVMArgInfo *ainfo;
sig = call->signature;
n = sig->param_count + sig->hasthis;
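/* LLVM cannot express mono's vararg call convention, so fall back to the
 * non-LLVM path for such calls. */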
if (sig->call_convention == MONO_CALL_VARARG) {
cfg->exception_message = g_strdup ("varargs");
cfg->disable_llvm = TRUE;
return;
}
call->cinfo = get_llvm_call_info (cfg, sig);
if (cfg->disable_llvm)
return;
for (i = 0; i < n; ++i) {
MonoInst *ins;
ainfo = call->cinfo->args + i;
in = call->args [i];
/* Simply remember the arguments */
switch (ainfo->storage) {
case LLVMArgNormal: {
MonoType *t = (sig->hasthis && i == 0) ? m_class_get_byval_arg (mono_get_intptr_class ()) : ainfo->type;
int opcode;
opcode = mono_type_to_regmove (cfg, t);
if (opcode == OP_FMOVE) {
MONO_INST_NEW (cfg, ins, OP_FMOVE);
ins->dreg = mono_alloc_freg (cfg);
} else if (opcode == OP_LMOVE) {
MONO_INST_NEW (cfg, ins, OP_LMOVE);
ins->dreg = mono_alloc_lreg (cfg);
} else if (opcode == OP_RMOVE) {
MONO_INST_NEW (cfg, ins, OP_RMOVE);
ins->dreg = mono_alloc_freg (cfg);
} else {
MONO_INST_NEW (cfg, ins, OP_MOVE);
ins->dreg = mono_alloc_ireg (cfg);
}
ins->sreg1 = in->dreg;
break;
}
case LLVMArgVtypeByVal:
case LLVMArgVtypeByRef:
case LLVMArgVtypeInReg:
case LLVMArgVtypeAddr:
case LLVMArgVtypeAsScalar:
case LLVMArgAsIArgs:
case LLVMArgAsFpArgs:
case LLVMArgGsharedvtVariable:
case LLVMArgGsharedvtFixed:
case LLVMArgGsharedvtFixedVtype:
case LLVMArgWasmVtypeAsScalar:
MONO_INST_NEW (cfg, ins, OP_LLVM_OUTARG_VT);
ins->dreg = mono_alloc_ireg (cfg);
ins->sreg1 = in->dreg;
ins->inst_p0 = mono_mempool_alloc0 (cfg->mempool, sizeof (LLVMArgInfo));
memcpy (ins->inst_p0, ainfo, sizeof (LLVMArgInfo));
ins->inst_vtype = ainfo->type;
ins->klass = mono_class_from_mono_type_internal (ainfo->type);
break;
default:
cfg->exception_message = g_strdup ("ainfo->storage");
cfg->disable_llvm = TRUE;
return;
}
if (!cfg->disable_llvm) {
MONO_ADD_INS (cfg->cbb, ins);
mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, 0, FALSE);
}
}
}
static inline void
add_func (LLVMModuleRef module, const char *name, LLVMTypeRef ret_type, LLVMTypeRef *param_types, int nparams)
{
LLVMAddFunction (module, name, LLVMFunctionType (ret_type, param_types, nparams, FALSE));
}
static LLVMValueRef
add_intrins (LLVMModuleRef module, IntrinsicId id, LLVMTypeRef *params, int nparams)
{
return mono_llvm_register_overloaded_intrinsic (module, id, params, nparams);
}
static LLVMValueRef
add_intrins1 (LLVMModuleRef module, IntrinsicId id, LLVMTypeRef param1)
{
return mono_llvm_register_overloaded_intrinsic (module, id, &param1, 1);
}
static LLVMValueRef
add_intrins2 (LLVMModuleRef module, IntrinsicId id, LLVMTypeRef param1, LLVMTypeRef param2)
{
LLVMTypeRef params [] = { param1, param2 };
return mono_llvm_register_overloaded_intrinsic (module, id, params, 2);
}
static LLVMValueRef
add_intrins3 (LLVMModuleRef module, IntrinsicId id, LLVMTypeRef param1, LLVMTypeRef param2, LLVMTypeRef param3)
{
LLVMTypeRef params [] = { param1, param2, param3 };
return mono_llvm_register_overloaded_intrinsic (module, id, params, 3);
}
static void
add_intrinsic (LLVMModuleRef module, int id)
{
/* Register simple intrinsics */
LLVMValueRef intrins = mono_llvm_register_intrinsic (module, (IntrinsicId)id);
if (intrins) {
g_hash_table_insert (intrins_id_to_intrins, GINT_TO_POINTER (id), intrins);
return;
}
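/*
 * Overloaded arm64 intrinsics: intrin_arm64_ovr [id] is a bitmask of the
 * supported (vector width, element type) combinations. Register one
 * instantiation per combination whose bits are set, keyed by
 * key_from_id_and_tag () so lookups can find the right overload.
 */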
if (intrin_arm64_ovr [id] != 0) {
llvm_ovr_tag_t spec = intrin_arm64_ovr [id];
for (int vw = 0; vw < INTRIN_vectorwidths; ++vw) {
for (int ew = 0; ew < INTRIN_elementwidths; ++ew) {
llvm_ovr_tag_t vec_bit = INTRIN_vector128 >> ((INTRIN_vectorwidths - 1) - vw);
llvm_ovr_tag_t elem_bit = INTRIN_int8 << ew;
llvm_ovr_tag_t test = vec_bit | elem_bit;
if ((spec & test) == test) {
uint8_t kind = intrin_kind [id];
LLVMTypeRef distinguishing_type = intrin_types [vw][ew];
if (kind == INTRIN_kind_ftoi && (elem_bit & (INTRIN_int32 | INTRIN_int64))) {
/*
* @llvm.aarch64.neon.fcvtas.v4i32.v4f32
* @llvm.aarch64.neon.fcvtas.v2i64.v2f64
*/
intrins = add_intrins2 (module, id, distinguishing_type, intrin_types [vw][ew + 2]);
} else if (kind == INTRIN_kind_widen) {
/*
* @llvm.aarch64.neon.saddlp.v2i64.v4i32
* @llvm.aarch64.neon.saddlp.v4i16.v8i8
*/
intrins = add_intrins2 (module, id, distinguishing_type, intrin_types [vw][ew - 1]);
} else if (kind == INTRIN_kind_widen_across) {
/*
* @llvm.aarch64.neon.saddlv.i64.v4i32
* @llvm.aarch64.neon.saddlv.i32.v8i16
* @llvm.aarch64.neon.saddlv.i32.v16i8
* i8/i16 return types for NEON intrinsics will make isel fail as of LLVM 9.
*/
int associated_prim = MAX(ew + 1, 2);
LLVMTypeRef associated_scalar_type = intrin_types [0][associated_prim];
intrins = add_intrins2 (module, id, associated_scalar_type, distinguishing_type);
} else if (kind == INTRIN_kind_across) {
/*
* @llvm.aarch64.neon.uaddv.i64.v4i64
* @llvm.aarch64.neon.uaddv.i32.v4i32
* @llvm.aarch64.neon.uaddv.i32.v8i16
* @llvm.aarch64.neon.uaddv.i32.v16i8
* i8/i16 return types for NEON intrinsics will make isel fail as of LLVM 9.
*/
int associated_prim = MAX(ew, 2);
LLVMTypeRef associated_scalar_type = intrin_types [0][associated_prim];
intrins = add_intrins2 (module, id, associated_scalar_type, distinguishing_type);
} else if (kind == INTRIN_kind_arm64_dot_prod) {
/*
* @llvm.aarch64.neon.sdot.v2i32.v8i8
* @llvm.aarch64.neon.sdot.v4i32.v16i8
*/
LLVMTypeRef associated_type = intrin_types [vw][0];
intrins = add_intrins2 (module, id, distinguishing_type, associated_type);
} else
intrins = add_intrins1 (module, id, distinguishing_type);
int key = key_from_id_and_tag (id, test);
g_hash_table_insert (intrins_id_to_intrins, GINT_TO_POINTER (key), intrins);
}
}
}
return;
}
/* Register overloaded intrinsics */
switch (id) {
#define INTRINS(intrin_name, llvm_id, arch)
#define INTRINS_OVR(intrin_name, llvm_id, arch, llvm_type) case INTRINS_ ## intrin_name: intrins = add_intrins1(module, id, llvm_type); break;
#define INTRINS_OVR_2_ARG(intrin_name, llvm_id, arch, llvm_type1, llvm_type2) case INTRINS_ ## intrin_name: intrins = add_intrins2(module, id, llvm_type1, llvm_type2); break;
#define INTRINS_OVR_3_ARG(intrin_name, llvm_id, arch, llvm_type1, llvm_type2, llvm_type3) case INTRINS_ ## intrin_name: intrins = add_intrins3(module, id, llvm_type1, llvm_type2, llvm_type3); break;
#define INTRINS_OVR_TAG(...)
#define INTRINS_OVR_TAG_KIND(...)
#include "llvm-intrinsics.h"
default:
g_assert_not_reached ();
break;
}
g_assert (intrins);
g_hash_table_insert (intrins_id_to_intrins, GINT_TO_POINTER (id), intrins);
}
static LLVMValueRef
get_intrins_from_module (LLVMModuleRef lmodule, int id)
{
LLVMValueRef res;
res = (LLVMValueRef)g_hash_table_lookup (intrins_id_to_intrins, GINT_TO_POINTER (id));
g_assert (res);
return res;
}
static LLVMValueRef
get_intrins (EmitContext *ctx, int id)
{
return get_intrins_from_module (ctx->lmodule, id);
}
static void
add_intrinsics (LLVMModuleRef module)
{
int i;
/* Emit declarations of intrinsics */
/*
 * It would be nicer to emit only the intrinsics actually used, but LLVM's Module
 * type doesn't seem to do any locking, so we register them all up front.
 */
for (i = 0; i < INTRINS_NUM; ++i)
add_intrinsic (module, i);
/* EH intrinsics */
add_func (module, "mono_personality", LLVMVoidType (), NULL, 0);
add_func (module, "llvm_resume_unwind_trampoline", LLVMVoidType (), NULL, 0);
}
static void
add_types (MonoLLVMModule *module)
{
module->ptr_type = LLVMPointerType (TARGET_SIZEOF_VOID_P == 8 ? LLVMInt64Type () : LLVMInt32Type (), 0);
}
void
mono_llvm_init (gboolean enable_jit)
{
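/*
 * intrin_types [vw][ew]: row 0 holds the scalar types, row 1 the 64-bit
 * vector types and row 2 the 128-bit vector types; the columns are
 * i1, i2, i4, i8, r4 and r8.
 */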
intrin_types [0][0] = i1_t = LLVMInt8Type ();
intrin_types [0][1] = i2_t = LLVMInt16Type ();
intrin_types [0][2] = i4_t = LLVMInt32Type ();
intrin_types [0][3] = i8_t = LLVMInt64Type ();
intrin_types [0][4] = r4_t = LLVMFloatType ();
intrin_types [0][5] = r8_t = LLVMDoubleType ();
intrin_types [1][0] = v64_i1_t = LLVMVectorType (LLVMInt8Type (), 8);
intrin_types [1][1] = v64_i2_t = LLVMVectorType (LLVMInt16Type (), 4);
intrin_types [1][2] = v64_i4_t = LLVMVectorType (LLVMInt32Type (), 2);
intrin_types [1][3] = v64_i8_t = LLVMVectorType (LLVMInt64Type (), 1);
intrin_types [1][4] = v64_r4_t = LLVMVectorType (LLVMFloatType (), 2);
intrin_types [1][5] = v64_r8_t = LLVMVectorType (LLVMDoubleType (), 1);
intrin_types [2][0] = v128_i1_t = sse_i1_t = type_to_sse_type (MONO_TYPE_I1);
intrin_types [2][1] = v128_i2_t = sse_i2_t = type_to_sse_type (MONO_TYPE_I2);
intrin_types [2][2] = v128_i4_t = sse_i4_t = type_to_sse_type (MONO_TYPE_I4);
intrin_types [2][3] = v128_i8_t = sse_i8_t = type_to_sse_type (MONO_TYPE_I8);
intrin_types [2][4] = v128_r4_t = sse_r4_t = type_to_sse_type (MONO_TYPE_R4);
intrin_types [2][5] = v128_r8_t = sse_r8_t = type_to_sse_type (MONO_TYPE_R8);
intrins_id_to_intrins = g_hash_table_new (NULL, NULL);
void_func_t = LLVMFunctionType0 (LLVMVoidType (), FALSE);
if (enable_jit)
mono_llvm_jit_init ();
}
void
mono_llvm_free_mem_manager (MonoJitMemoryManager *mem_manager)
{
MonoLLVMModule *module = (MonoLLVMModule*)mem_manager->llvm_module;
int i;
if (!module)
return;
g_hash_table_destroy (module->llvm_types);
mono_llvm_dispose_ee (module->mono_ee);
if (module->bb_names) {
for (i = 0; i < module->bb_names_len; ++i)
g_free (module->bb_names [i]);
g_free (module->bb_names);
}
//LLVMDisposeModule (module->module);
g_free (module);
mem_manager->llvm_module = NULL;
}
void
mono_llvm_create_aot_module (MonoAssembly *assembly, const char *global_prefix, int initial_got_size, LLVMModuleFlags flags)
{
MonoLLVMModule *module = &aot_module;
gboolean emit_dwarf = (flags & LLVM_MODULE_FLAG_DWARF) ? 1 : 0;
#ifdef TARGET_WIN32_MSVC
gboolean emit_codeview = (flags & LLVM_MODULE_FLAG_CODEVIEW) ? 1 : 0;
#endif
gboolean static_link = (flags & LLVM_MODULE_FLAG_STATIC) ? 1 : 0;
gboolean llvm_only = (flags & LLVM_MODULE_FLAG_LLVM_ONLY) ? 1 : 0;
gboolean interp = (flags & LLVM_MODULE_FLAG_INTERP) ? 1 : 0;
/* Delete previous module */
g_hash_table_destroy (module->plt_entries);
if (module->lmodule)
LLVMDisposeModule (module->lmodule);
memset (module, 0, sizeof (aot_module));
module->lmodule = LLVMModuleCreateWithName ("aot");
module->assembly = assembly;
module->global_prefix = g_strdup (global_prefix);
module->eh_frame_symbol = g_strdup_printf ("%s_eh_frame", global_prefix);
module->get_method_symbol = g_strdup_printf ("%s_get_method", global_prefix);
module->get_unbox_tramp_symbol = g_strdup_printf ("%s_get_unbox_tramp", global_prefix);
module->init_aotconst_symbol = g_strdup_printf ("%s_init_aotconst", global_prefix);
module->external_symbols = TRUE;
module->emit_dwarf = emit_dwarf;
module->static_link = static_link;
module->llvm_only = llvm_only;
module->interp = interp;
/* The first few entries are reserved */
module->max_got_offset = initial_got_size;
module->context = LLVMGetGlobalContext ();
module->cfgs = g_ptr_array_new ();
module->aotconst_vars = g_hash_table_new (NULL, NULL);
module->llvm_types = g_hash_table_new (NULL, NULL);
module->plt_entries = g_hash_table_new (g_str_hash, g_str_equal);
module->plt_entries_ji = g_hash_table_new (NULL, NULL);
module->direct_callables = g_hash_table_new (g_str_hash, g_str_equal);
module->idx_to_lmethod = g_hash_table_new (NULL, NULL);
module->method_to_lmethod = g_hash_table_new (NULL, NULL);
module->method_to_call_info = g_hash_table_new (NULL, NULL);
module->idx_to_unbox_tramp = g_hash_table_new (NULL, NULL);
module->no_method_table_lmethods = g_hash_table_new (NULL, NULL);
module->callsite_list = g_ptr_array_new ();
if (llvm_only)
/* clang ignores our debug info because it has an invalid version */
module->emit_dwarf = FALSE;
add_intrinsics (module->lmodule);
add_types (module);
#ifdef MONO_ARCH_LLVM_TARGET_LAYOUT
LLVMSetDataLayout (module->lmodule, MONO_ARCH_LLVM_TARGET_LAYOUT);
#else
g_assert_not_reached ();
#endif
#ifdef MONO_ARCH_LLVM_TARGET_TRIPLE
LLVMSetTarget (module->lmodule, MONO_ARCH_LLVM_TARGET_TRIPLE);
#endif
if (module->emit_dwarf) {
char *dir, *build_info, *s, *cu_name;
module->di_builder = mono_llvm_create_di_builder (module->lmodule);
// FIXME:
dir = g_strdup (".");
build_info = mono_get_runtime_build_info ();
s = g_strdup_printf ("Mono AOT Compiler %s (LLVM)", build_info);
cu_name = g_path_get_basename (assembly->image->name);
module->cu = mono_llvm_di_create_compile_unit (module->di_builder, cu_name, dir, s);
g_free (dir);
g_free (build_info);
g_free (s);
}
#ifdef TARGET_WIN32_MSVC
if (emit_codeview) {
LLVMValueRef codeview_option_args[3];
codeview_option_args[0] = LLVMConstInt (LLVMInt32Type (), 2, FALSE);
codeview_option_args[1] = LLVMMDString ("CodeView", 8);
codeview_option_args[2] = LLVMConstInt (LLVMInt32Type (), 1, FALSE);
LLVMAddNamedMetadataOperand (module->lmodule, "llvm.module.flags", LLVMMDNode (codeview_option_args, G_N_ELEMENTS (codeview_option_args)));
}
if (!static_link) {
const char linker_options[] = "Linker Options";
const char *default_dynamic_lib_names[] = { "/DEFAULTLIB:msvcrt",
"/DEFAULTLIB:ucrt.lib",
"/DEFAULTLIB:vcruntime.lib" };
LLVMValueRef default_lib_args[G_N_ELEMENTS (default_dynamic_lib_names)];
LLVMValueRef default_lib_nodes[G_N_ELEMENTS(default_dynamic_lib_names)];
for (int i = 0; i < G_N_ELEMENTS (default_dynamic_lib_names); ++i) {
const char *default_lib_name = default_dynamic_lib_names[i];
default_lib_args[i] = LLVMMDString (default_lib_name, strlen (default_lib_name));
default_lib_nodes[i] = LLVMMDNode (default_lib_args + i, 1);
}
LLVMAddNamedMetadataOperand (module->lmodule, "llvm.linker.options", LLVMMDNode (default_lib_args, G_N_ELEMENTS (default_lib_args)));
}
#endif
{
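/* Create a dummy GOT variable for the emitted code to reference; its uses
 * are replaced later with the real GOT, whose size is only known after
 * codegen. */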
LLVMTypeRef got_type = LLVMArrayType (module->ptr_type, 16);
module->dummy_got_var = LLVMAddGlobal (module->lmodule, got_type, "dummy_got");
module->got_idx_to_type = g_hash_table_new (NULL, NULL);
LLVMSetInitializer (module->dummy_got_var, LLVMConstNull (got_type));
LLVMSetVisibility (module->dummy_got_var, LLVMHiddenVisibility);
LLVMSetLinkage (module->dummy_got_var, LLVMInternalLinkage);
}
/* Add initialization array */
LLVMTypeRef inited_type = LLVMArrayType (LLVMInt8Type (), 0);
module->inited_var = LLVMAddGlobal (aot_module.lmodule, inited_type, "mono_inited_tmp");
LLVMSetInitializer (module->inited_var, LLVMConstNull (inited_type));
create_aot_info_var (module);
emit_gc_safepoint_poll (module, module->lmodule, NULL);
emit_llvm_code_start (module);
// Needs idx_to_lmethod
emit_init_funcs (module);
/* Add a dummy personality function */
if (!use_mono_personality_debug) {
LLVMValueRef personality = LLVMAddFunction (module->lmodule, default_personality_name, LLVMFunctionType (LLVMInt32Type (), NULL, 0, TRUE));
LLVMSetLinkage (personality, LLVMExternalLinkage);
// EMCC chokes if the personality function is referenced in the 'used' array
#ifndef TARGET_WASM
mark_as_used (module, personality);
#endif
}
/* Add a reference to the c++ exception we throw/catch */
{
LLVMTypeRef exc = LLVMPointerType (LLVMInt8Type (), 0);
module->sentinel_exception = LLVMAddGlobal (module->lmodule, exc, "_ZTIPi");
LLVMSetLinkage (module->sentinel_exception, LLVMExternalLinkage);
mono_llvm_set_is_constant (module->sentinel_exception);
}
}
void
mono_llvm_fixup_aot_module (void)
{
MonoLLVMModule *module = &aot_module;
MonoMethod *method;
/*
* Replace GOT entries for directly callable methods with the methods themselves.
* It would be easier to implement this by predefining all methods before compiling
* their bodies, but that couldn't handle the case when a method fails to compile
* with llvm.
*/
GHashTable *specializable = g_hash_table_new (NULL, NULL);
GHashTable *patches_to_null = g_hash_table_new (mono_patch_info_hash, mono_patch_info_equal);
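/*
 * specializable collects the lmethod -> method pairs eligible for nonnull
 * parameter propagation; patches_to_null records the patch infos of calls
 * which became direct, so they can be nullified below.
 */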
for (int sindex = 0; sindex < module->callsite_list->len; ++sindex) {
CallSite *site = (CallSite*)g_ptr_array_index (module->callsite_list, sindex);
method = site->method;
LLVMValueRef lmethod = (LLVMValueRef)g_hash_table_lookup (module->method_to_lmethod, method);
LLVMValueRef placeholder = (LLVMValueRef)site->load;
LLVMValueRef load;
gboolean can_direct_call = FALSE;
/* Replace sharable instances with their shared version */
if (!lmethod && method->is_inflated) {
if (mono_method_is_generic_sharable_full (method, FALSE, TRUE, FALSE)) {
ERROR_DECL (error);
MonoMethod *shared = mini_get_shared_method_full (method, SHARE_MODE_NONE, error);
if (is_ok (error)) {
lmethod = (LLVMValueRef)g_hash_table_lookup (module->method_to_lmethod, shared);
if (lmethod)
method = shared;
}
}
}
if (lmethod && !m_method_is_synchronized (method)) {
can_direct_call = TRUE;
} else if (m_method_is_wrapper (method) && !method->is_inflated) {
WrapperInfo *info = mono_marshal_get_wrapper_info (method);
/* This is a call from the synchronized wrapper to the real method */
if (info->subtype == WRAPPER_SUBTYPE_SYNCHRONIZED_INNER) {
method = info->d.synchronized.method;
lmethod = (LLVMValueRef)g_hash_table_lookup (module->method_to_lmethod, method);
if (lmethod)
can_direct_call = TRUE;
}
}
if (can_direct_call) {
mono_llvm_replace_uses_of (placeholder, lmethod);
if (mono_aot_can_specialize (method))
g_hash_table_insert (specializable, lmethod, method);
g_hash_table_insert (patches_to_null, site->ji, site->ji);
} else {
// FIXME:
LLVMBuilderRef builder = LLVMCreateBuilder ();
LLVMPositionBuilderBefore (builder, placeholder);
load = get_aotconst_module (module, builder, site->ji->type, site->ji->data.target, site->type, NULL, NULL);
LLVMReplaceAllUsesWith (placeholder, load);
}
g_free (site);
}
mono_llvm_propagate_nonnull_final (specializable, module);
g_hash_table_destroy (specializable);
for (int i = 0; i < module->cfgs->len; ++i) {
/*
* Nullify the patches pointing to direct calls. This is needed to
 * avoid allocating extra got slots, which is a perf problem and
 * makes module->max_got_offset invalid.
* It would be better to just store the patch_info in CallSite, but
* cfg->patch_info is copied in aot-compiler.c.
*/
MonoCompile *cfg = (MonoCompile *)g_ptr_array_index (module->cfgs, i);
for (MonoJumpInfo *patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
if (patch_info->type == MONO_PATCH_INFO_METHOD) {
if (g_hash_table_lookup (patches_to_null, patch_info)) {
patch_info->type = MONO_PATCH_INFO_NONE;
/* Nullify the call to init_method () if possible */
g_assert (cfg->got_access_count);
cfg->got_access_count --;
if (cfg->got_access_count == 0) {
LLVMValueRef br = (LLVMValueRef)cfg->llvmonly_init_cond;
if (br)
LLVMSetSuccessor (br, 0, LLVMGetSuccessor (br, 1));
}
}
}
}
}
g_hash_table_destroy (patches_to_null);
}
static LLVMValueRef
llvm_array_from_uints (LLVMTypeRef el_type, guint32 *values, int nvalues)
{
int i;
LLVMValueRef res, *vals;
vals = g_new0 (LLVMValueRef, nvalues);
for (i = 0; i < nvalues; ++i)
vals [i] = LLVMConstInt (LLVMInt32Type (), values [i], FALSE);
res = LLVMConstArray (LLVMInt32Type (), vals, nvalues);
g_free (vals);
return res;
}
static LLVMValueRef
llvm_array_from_bytes (guint8 *values, int nvalues)
{
int i;
LLVMValueRef res, *vals;
vals = g_new0 (LLVMValueRef, nvalues);
for (i = 0; i < nvalues; ++i)
vals [i] = LLVMConstInt (LLVMInt8Type (), values [i], FALSE);
res = LLVMConstArray (LLVMInt8Type (), vals, nvalues);
g_free (vals);
return res;
}
/*
* mono_llvm_emit_aot_file_info:
*
* Emit the MonoAotFileInfo structure.
* Same as emit_aot_file_info () in aot-compiler.c.
*/
void
mono_llvm_emit_aot_file_info (MonoAotFileInfo *info, gboolean has_jitted_code)
{
MonoLLVMModule *module = &aot_module;
/* Save these for later */
memcpy (&module->aot_info, info, sizeof (MonoAotFileInfo));
module->has_jitted_code = has_jitted_code;
}
/*
 * mono_llvm_emit_aot_data_aligned:
 *
 * Emit the binary data DATA pointed to by symbol SYMBOL, aligned to ALIGN.
* Return the LLVM variable for the data.
*/
gpointer
mono_llvm_emit_aot_data_aligned (const char *symbol, guint8 *data, int data_len, int align)
{
MonoLLVMModule *module = &aot_module;
LLVMTypeRef type;
LLVMValueRef d;
type = LLVMArrayType (LLVMInt8Type (), data_len);
d = LLVMAddGlobal (module->lmodule, type, symbol);
LLVMSetVisibility (d, LLVMHiddenVisibility);
LLVMSetLinkage (d, LLVMInternalLinkage);
LLVMSetInitializer (d, mono_llvm_create_constant_data_array (data, data_len));
if (align != 1)
LLVMSetAlignment (d, align);
mono_llvm_set_is_constant (d);
return d;
}
gpointer
mono_llvm_emit_aot_data (const char *symbol, guint8 *data, int data_len)
{
return mono_llvm_emit_aot_data_aligned (symbol, data, data_len, 8);
}
/* Add a reference to a global defined in JITted code */
static LLVMValueRef
AddJitGlobal (MonoLLVMModule *module, LLVMTypeRef type, const char *name)
{
char *s;
LLVMValueRef v;
s = g_strdup_printf ("%s%s", module->global_prefix, name);
v = LLVMAddGlobal (module->lmodule, LLVMInt8Type (), s);
LLVMSetVisibility (v, LLVMHiddenVisibility);
g_free (s);
return v;
}
#define FILE_INFO_NUM_HEADER_FIELDS 2
#define FILE_INFO_NUM_SCALAR_FIELDS 23
#define FILE_INFO_NUM_ARRAY_FIELDS 5
#define FILE_INFO_NUM_AOTID_FIELDS 1
#define FILE_INFO_NFIELDS (FILE_INFO_NUM_HEADER_FIELDS + MONO_AOT_FILE_INFO_NUM_SYMBOLS + FILE_INFO_NUM_SCALAR_FIELDS + FILE_INFO_NUM_ARRAY_FIELDS + FILE_INFO_NUM_AOTID_FIELDS)
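/* These counts describe the LLVM mirror of MonoAotFileInfo and must be
 * kept in sync with that struct whenever fields are added or removed. */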
static void
create_aot_info_var (MonoLLVMModule *module)
{
LLVMTypeRef file_info_type;
LLVMTypeRef *eltypes;
LLVMValueRef info_var;
int i, nfields, tindex;
LLVMModuleRef lmodule = module->lmodule;
/* Create an LLVM type to represent MonoAotFileInfo */
nfields = FILE_INFO_NFIELDS;
eltypes = g_new (LLVMTypeRef, nfields);
tindex = 0;
eltypes [tindex ++] = LLVMInt32Type ();
eltypes [tindex ++] = LLVMInt32Type ();
/* Symbols */
for (i = 0; i < MONO_AOT_FILE_INFO_NUM_SYMBOLS; ++i)
eltypes [tindex ++] = LLVMPointerType (LLVMInt8Type (), 0);
/* Scalars */
for (i = 0; i < FILE_INFO_NUM_SCALAR_FIELDS; ++i)
eltypes [tindex ++] = LLVMInt32Type ();
/* Arrays */
eltypes [tindex ++] = LLVMArrayType (LLVMInt32Type (), MONO_AOT_TABLE_NUM);
for (i = 0; i < FILE_INFO_NUM_ARRAY_FIELDS - 1; ++i)
eltypes [tindex ++] = LLVMArrayType (LLVMInt32Type (), MONO_AOT_TRAMP_NUM);
eltypes [tindex ++] = LLVMArrayType (LLVMInt8Type (), 16);
g_assert (tindex == nfields);
file_info_type = LLVMStructCreateNamed (module->context, "MonoAotFileInfo");
LLVMStructSetBody (file_info_type, eltypes, nfields, FALSE);
info_var = LLVMAddGlobal (lmodule, file_info_type, "mono_aot_file_info");
module->info_var = info_var;
module->info_var_eltypes = eltypes;
}
static void
emit_aot_file_info (MonoLLVMModule *module)
{
LLVMTypeRef *eltypes, eltype;
LLVMValueRef info_var;
LLVMValueRef *fields;
int i, nfields, tindex;
MonoAotFileInfo *info;
LLVMModuleRef lmodule = module->lmodule;
info = &module->aot_info;
info_var = module->info_var;
eltypes = module->info_var_eltypes;
nfields = FILE_INFO_NFIELDS;
if (module->static_link) {
LLVMSetVisibility (info_var, LLVMHiddenVisibility);
LLVMSetLinkage (info_var, LLVMInternalLinkage);
}
#ifdef TARGET_WIN32
if (!module->static_link) {
LLVMSetDLLStorageClass (info_var, LLVMDLLExportStorageClass);
}
#endif
fields = g_new (LLVMValueRef, nfields);
tindex = 0;
fields [tindex ++] = LLVMConstInt (LLVMInt32Type (), info->version, FALSE);
fields [tindex ++] = LLVMConstInt (LLVMInt32Type (), info->dummy, FALSE);
/* Symbols */
/*
 * We use LLVMGetNamedGlobal () for symbols which are defined in LLVM code, and LLVMAddGlobal ()
* for symbols defined in the .s file emitted by the aot compiler.
*/
eltype = eltypes [tindex];
if (module->llvm_only)
fields [tindex ++] = LLVMConstNull (eltype);
else
fields [tindex ++] = AddJitGlobal (module, eltype, "jit_got");
/* llc defines this directly */
if (!module->llvm_only) {
fields [tindex ++] = LLVMAddGlobal (lmodule, eltype, module->eh_frame_symbol);
fields [tindex ++] = LLVMConstNull (eltype);
fields [tindex ++] = LLVMConstNull (eltype);
} else {
fields [tindex ++] = LLVMConstNull (eltype);
fields [tindex ++] = module->get_method;
fields [tindex ++] = module->get_unbox_tramp ? module->get_unbox_tramp : LLVMConstNull (eltype);
}
fields [tindex ++] = module->init_aotconst_func;
if (module->has_jitted_code) {
fields [tindex ++] = AddJitGlobal (module, eltype, "jit_code_start");
fields [tindex ++] = AddJitGlobal (module, eltype, "jit_code_end");
} else {
fields [tindex ++] = LLVMConstNull (eltype);
fields [tindex ++] = LLVMConstNull (eltype);
}
if (!module->llvm_only)
fields [tindex ++] = AddJitGlobal (module, eltype, "method_addresses");
else
fields [tindex ++] = LLVMConstNull (eltype);
if (module->llvm_only && module->unbox_tramp_indexes) {
fields [tindex ++] = module->unbox_tramp_indexes;
fields [tindex ++] = module->unbox_trampolines;
} else {
fields [tindex ++] = LLVMConstNull (eltype);
fields [tindex ++] = LLVMConstNull (eltype);
}
if (info->flags & MONO_AOT_FILE_FLAG_SEPARATE_DATA) {
for (i = 0; i < MONO_AOT_TABLE_NUM; ++i)
fields [tindex ++] = LLVMConstNull (eltype);
} else {
fields [tindex ++] = LLVMGetNamedGlobal (lmodule, "blob");
fields [tindex ++] = LLVMGetNamedGlobal (lmodule, "class_name_table");
fields [tindex ++] = LLVMGetNamedGlobal (lmodule, "class_info_offsets");
fields [tindex ++] = LLVMGetNamedGlobal (lmodule, "method_info_offsets");
fields [tindex ++] = LLVMGetNamedGlobal (lmodule, "ex_info_offsets");
fields [tindex ++] = LLVMGetNamedGlobal (lmodule, "extra_method_info_offsets");
fields [tindex ++] = LLVMGetNamedGlobal (lmodule, "extra_method_table");
fields [tindex ++] = LLVMGetNamedGlobal (lmodule, "got_info_offsets");
fields [tindex ++] = LLVMGetNamedGlobal (lmodule, "llvm_got_info_offsets");
fields [tindex ++] = LLVMGetNamedGlobal (lmodule, "image_table");
fields [tindex ++] = LLVMGetNamedGlobal (lmodule, "weak_field_indexes");
fields [tindex ++] = LLVMGetNamedGlobal (lmodule, "method_flags_table");
}
/* Not needed (mem_end) */
fields [tindex ++] = LLVMConstNull (eltype);
fields [tindex ++] = LLVMGetNamedGlobal (lmodule, "assembly_guid");
fields [tindex ++] = LLVMGetNamedGlobal (lmodule, "runtime_version");
if (info->trampoline_size [0]) {
fields [tindex ++] = AddJitGlobal (module, eltype, "specific_trampolines");
fields [tindex ++] = AddJitGlobal (module, eltype, "static_rgctx_trampolines");
fields [tindex ++] = AddJitGlobal (module, eltype, "imt_trampolines");
fields [tindex ++] = AddJitGlobal (module, eltype, "gsharedvt_arg_trampolines");
fields [tindex ++] = AddJitGlobal (module, eltype, "ftnptr_arg_trampolines");
fields [tindex ++] = AddJitGlobal (module, eltype, "unbox_arbitrary_trampolines");
} else {
fields [tindex ++] = LLVMConstNull (eltype);
fields [tindex ++] = LLVMConstNull (eltype);
fields [tindex ++] = LLVMConstNull (eltype);
fields [tindex ++] = LLVMConstNull (eltype);
fields [tindex ++] = LLVMConstNull (eltype);
fields [tindex ++] = LLVMConstNull (eltype);
}
if (module->static_link && !module->llvm_only)
fields [tindex ++] = AddJitGlobal (module, eltype, "globals");
else
fields [tindex ++] = LLVMConstNull (eltype);
fields [tindex ++] = LLVMGetNamedGlobal (lmodule, "assembly_name");
if (!module->llvm_only) {
fields [tindex ++] = AddJitGlobal (module, eltype, "plt");
fields [tindex ++] = AddJitGlobal (module, eltype, "plt_end");
fields [tindex ++] = AddJitGlobal (module, eltype, "unwind_info");
fields [tindex ++] = AddJitGlobal (module, eltype, "unbox_trampolines");
fields [tindex ++] = AddJitGlobal (module, eltype, "unbox_trampolines_end");
fields [tindex ++] = AddJitGlobal (module, eltype, "unbox_trampoline_addresses");
} else {
fields [tindex ++] = LLVMConstNull (eltype);
fields [tindex ++] = LLVMConstNull (eltype);
fields [tindex ++] = LLVMConstNull (eltype);
fields [tindex ++] = LLVMConstNull (eltype);
fields [tindex ++] = LLVMConstNull (eltype);
fields [tindex ++] = LLVMConstNull (eltype);
}
for (i = 0; i < MONO_AOT_FILE_INFO_NUM_SYMBOLS; ++i) {
g_assert (fields [FILE_INFO_NUM_HEADER_FIELDS + i]);
fields [FILE_INFO_NUM_HEADER_FIELDS + i] = LLVMConstBitCast (fields [FILE_INFO_NUM_HEADER_FIELDS + i], eltype);
}
/* Scalars */
fields [tindex ++] = LLVMConstInt (LLVMInt32Type (), info->plt_got_offset_base, FALSE);
fields [tindex ++] = LLVMConstInt (LLVMInt32Type (), info->plt_got_info_offset_base, FALSE);
fields [tindex ++] = LLVMConstInt (LLVMInt32Type (), info->got_size, FALSE);
fields [tindex ++] = LLVMConstInt (LLVMInt32Type (), info->llvm_got_size, FALSE);
fields [tindex ++] = LLVMConstInt (LLVMInt32Type (), info->plt_size, FALSE);
fields [tindex ++] = LLVMConstInt (LLVMInt32Type (), info->nmethods, FALSE);
fields [tindex ++] = LLVMConstInt (LLVMInt32Type (), info->nextra_methods, FALSE);
fields [tindex ++] = LLVMConstInt (LLVMInt32Type (), info->flags, FALSE);
fields [tindex ++] = LLVMConstInt (LLVMInt32Type (), info->opts, FALSE);
fields [tindex ++] = LLVMConstInt (LLVMInt32Type (), info->simd_opts, FALSE);
fields [tindex ++] = LLVMConstInt (LLVMInt32Type (), info->gc_name_index, FALSE);
fields [tindex ++] = LLVMConstInt (LLVMInt32Type (), info->num_rgctx_fetch_trampolines, FALSE);
fields [tindex ++] = LLVMConstInt (LLVMInt32Type (), info->double_align, FALSE);
fields [tindex ++] = LLVMConstInt (LLVMInt32Type (), info->long_align, FALSE);
fields [tindex ++] = LLVMConstInt (LLVMInt32Type (), info->generic_tramp_num, FALSE);
fields [tindex ++] = LLVMConstInt (LLVMInt32Type (), info->card_table_shift_bits, FALSE);
fields [tindex ++] = LLVMConstInt (LLVMInt32Type (), info->card_table_mask, FALSE);
fields [tindex ++] = LLVMConstInt (LLVMInt32Type (), info->tramp_page_size, FALSE);
fields [tindex ++] = LLVMConstInt (LLVMInt32Type (), info->call_table_entry_size, FALSE);
fields [tindex ++] = LLVMConstInt (LLVMInt32Type (), info->nshared_got_entries, FALSE);
fields [tindex ++] = LLVMConstInt (LLVMInt32Type (), info->datafile_size, FALSE);
fields [tindex ++] = LLVMConstInt (LLVMInt32Type (), module->unbox_tramp_num, FALSE);
fields [tindex ++] = LLVMConstInt (LLVMInt32Type (), module->unbox_tramp_elemsize, FALSE);
/* Arrays */
fields [tindex ++] = llvm_array_from_uints (LLVMInt32Type (), info->table_offsets, MONO_AOT_TABLE_NUM);
fields [tindex ++] = llvm_array_from_uints (LLVMInt32Type (), info->num_trampolines, MONO_AOT_TRAMP_NUM);
fields [tindex ++] = llvm_array_from_uints (LLVMInt32Type (), info->trampoline_got_offset_base, MONO_AOT_TRAMP_NUM);
fields [tindex ++] = llvm_array_from_uints (LLVMInt32Type (), info->trampoline_size, MONO_AOT_TRAMP_NUM);
fields [tindex ++] = llvm_array_from_uints (LLVMInt32Type (), info->tramp_page_code_offsets, MONO_AOT_TRAMP_NUM);
fields [tindex ++] = llvm_array_from_bytes (info->aotid, 16);
g_assert (tindex == nfields);
LLVMSetInitializer (info_var, LLVMConstNamedStruct (LLVMGetElementType (LLVMTypeOf (info_var)), fields, nfields));
if (module->static_link) {
char *s, *p;
LLVMValueRef var;
s = g_strdup_printf ("mono_aot_module_%s_info", module->assembly->aname.name);
/* Get rid of characters which cannot occur in symbols */
p = s;
for (p = s; *p; ++p) {
if (!(isalnum (*p) || *p == '_'))
*p = '_';
}
var = LLVMAddGlobal (module->lmodule, LLVMPointerType (LLVMInt8Type (), 0), s);
g_free (s);
LLVMSetInitializer (var, LLVMConstBitCast (LLVMGetNamedGlobal (module->lmodule, "mono_aot_file_info"), LLVMPointerType (LLVMInt8Type (), 0)));
LLVMSetLinkage (var, LLVMExternalLinkage);
}
}
typedef struct {
LLVMValueRef lmethod;
int argument;
} NonnullPropWorkItem;
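/*
 * mono_llvm_nonnull_state_update:
 *
 * For a call to the specializable CALL_METHOD, mark the arguments which are
 * provably non-null at this call site, and count the possibly-null ones per
 * parameter in module->method_to_call_info for the propagation pass below.
 */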
static void
mono_llvm_nonnull_state_update (EmitContext *ctx, LLVMValueRef lcall, MonoMethod *call_method, LLVMValueRef *args, int num_params)
{
if (mono_aot_can_specialize (call_method)) {
int num_passed = LLVMGetNumArgOperands (lcall);
g_assert (num_params <= num_passed);
g_assert (ctx->module->method_to_call_info);
GArray *call_site_union = (GArray *) g_hash_table_lookup (ctx->module->method_to_call_info, call_method);
if (!call_site_union) {
call_site_union = g_array_sized_new (FALSE, TRUE, sizeof (gint32), num_params);
int zero = 0;
for (int i = 0; i < num_params; i++)
g_array_insert_val (call_site_union, i, zero);
}
for (int i = 0; i < num_params; i++) {
if (mono_llvm_is_nonnull (args [i])) {
g_assert (i < LLVMGetNumArgOperands (lcall));
mono_llvm_set_call_nonnull_arg (lcall, i);
} else {
gint32 *nullable_count = &g_array_index (call_site_union, gint32, i);
*nullable_count = *nullable_count + 1;
}
}
g_hash_table_insert (ctx->module->method_to_call_info, call_method, call_site_union);
}
}
static void
mono_llvm_propagate_nonnull_final (GHashTable *all_specializable, MonoLLVMModule *module)
{
// When we first traverse the mini IL, we mark the things that are
// nonnull (the roots). Then, for all of the methods that can be specialized, we
// see if their call sites have nonnull attributes.
// If so, we mark the function's param. This param has uses to propagate
// the attribute to. This propagation can trigger a need to mark more attributes
// non-null, and so on and so forth.
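// Concretely, call_site_union [i] counts the call sites which may pass a
// null value for parameter i; a count of zero means every observed caller
// passes a provably non-null value, so the parameter can be marked nonnull
// and the fact propagated to its uses.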
GSList *queue = NULL;
GHashTableIter iter;
LLVMValueRef lmethod;
MonoMethod *method;
g_hash_table_iter_init (&iter, all_specializable);
while (g_hash_table_iter_next (&iter, (void**)&lmethod, (void**)&method)) {
GArray *call_site_union = (GArray *) g_hash_table_lookup (module->method_to_call_info, method);
// Basic sanity checking
if (call_site_union)
g_assert (call_site_union->len == LLVMCountParams (lmethod));
// Add root to work queue
for (int i = 0; call_site_union && i < call_site_union->len; i++) {
if (g_array_index (call_site_union, gint32, i) == 0) {
NonnullPropWorkItem *item = g_malloc (sizeof (NonnullPropWorkItem));
item->lmethod = lmethod;
item->argument = i;
queue = g_slist_prepend (queue, item);
}
}
}
// This is essentially reference counting, and we are propagating
// the refcount decrement here. We have less work to do than we otherwise would
// because we are only working with a set of subgraphs of specializable functions.
//
// We rely on being able to see all of the references in the graph.
// This is ensured by the function mono_aot_can_specialize. Everything in
// all_specializable is a function that can be specialized, and is the resulting
// node in the graph after all of the substitutions are done.
//
// Anything disrupting the direct calls made with self-init will break this optimization.
while (queue) {
// Update the queue state.
// Our only other per-iteration responsibility is now to free current
NonnullPropWorkItem *current = (NonnullPropWorkItem *) queue->data;
queue = queue->next;
g_assert (current->argument < LLVMCountParams (current->lmethod));
// Does the actual leaf-node work here
// Mark the function argument as nonnull for LLVM
mono_llvm_set_func_nonnull_arg (current->lmethod, current->argument);
// The rest of this is for propagating the newly established non-null
// fact forward to the calls which use the argument.
// Get the actual LLVM value of the argument, so we can see which call instructions
// used that argument
LLVMValueRef caller_argument = LLVMGetParam (current->lmethod, current->argument);
// Iterate over the calls using the newly-non-nullable argument
GSList *calls = mono_llvm_calls_using (caller_argument);
for (GSList *cursor = calls; cursor != NULL; cursor = cursor->next) {
LLVMValueRef lcall = (LLVMValueRef) cursor->data;
LLVMValueRef callee_lmethod = LLVMGetCalledValue (lcall);
// If this wasn't a direct call for which mono_aot_can_specialize is true,
// this lookup won't find a MonoMethod.
MonoMethod *callee_method = (MonoMethod *) g_hash_table_lookup (all_specializable, callee_lmethod);
if (!callee_method)
continue;
// Decrement number of nullable refs at that func's arg offset
GArray *call_site_union = (GArray *) g_hash_table_lookup (module->method_to_call_info, callee_method);
// It has module-local callers and is specializable, so we should have seen
// this call site and initialized this
g_assert (call_site_union);
// The function *definition* parameter arity should always be consistent
int max_params = LLVMCountParams (callee_lmethod);
if (call_site_union->len != max_params) {
mono_llvm_dump_value (callee_lmethod);
g_assert_not_reached ();
}
// Get the values that correspond to the parameters passed to the call
// that used our argument
LLVMValueRef *operands = mono_llvm_call_args (lcall);
for (int call_argument = 0; call_argument < max_params; call_argument++) {
// Every time the newly-non-null argument was passed, decrement the
// nullable refcount for the corresponding callee parameter.
if (caller_argument == operands [call_argument]) {
gint32 *nullable_count = &g_array_index (call_site_union, gint32, call_argument);
g_assert (*nullable_count > 0);
*nullable_count = *nullable_count - 1;
// If we caused that callee's parameter to become newly non-null, add it to the work queue
if (*nullable_count == 0) {
NonnullPropWorkItem *item = g_malloc (sizeof (NonnullPropWorkItem));
item->lmethod = callee_lmethod;
item->argument = call_argument;
queue = g_slist_prepend (queue, item);
}
}
}
g_free (operands);
// Update nullability refcount information for the callee now
g_hash_table_insert (module->method_to_call_info, callee_method, call_site_union);
}
g_slist_free (calls);
g_free (current);
}
}
/*
* Emit the aot module into the LLVM bitcode file FILENAME.
*/
void
mono_llvm_emit_aot_module (const char *filename, const char *cu_name)
{
LLVMTypeRef inited_type;
LLVMValueRef real_inited;
MonoLLVMModule *module = &aot_module;
emit_llvm_code_end (module);
/*
* Create the real init_var and replace all uses of the dummy variable with
* the real one.
*/
inited_type = LLVMArrayType (LLVMInt8Type (), module->max_inited_idx + 1);
real_inited = LLVMAddGlobal (module->lmodule, inited_type, "mono_inited");
LLVMSetInitializer (real_inited, LLVMConstNull (inited_type));
LLVMSetLinkage (real_inited, LLVMInternalLinkage);
mono_llvm_replace_uses_of (module->inited_var, real_inited);
LLVMDeleteGlobal (module->inited_var);
/* Replace the dummy info variables with the real ones */
for (int i = 0; i < module->cfgs->len; ++i) {
MonoCompile *cfg = (MonoCompile *)g_ptr_array_index (module->cfgs, i);
// FIXME: Eliminate unused vars
// FIXME: Speed this up
if (cfg->llvm_dummy_info_var) {
if (cfg->llvm_info_var) {
mono_llvm_replace_uses_of (cfg->llvm_dummy_info_var, cfg->llvm_info_var);
LLVMDeleteGlobal (cfg->llvm_dummy_info_var);
} else {
// FIXME: How can this happen ?
LLVMSetInitializer (cfg->llvm_dummy_info_var, mono_llvm_create_constant_data_array (NULL, 0));
}
}
}
if (module->llvm_only) {
emit_get_method (&aot_module);
emit_get_unbox_tramp (&aot_module);
}
emit_init_aotconst (module);
emit_llvm_used (&aot_module);
emit_dbg_info (&aot_module, filename, cu_name);
emit_aot_file_info (&aot_module);
/* Replace PLT entries for directly callable methods with the methods themselves */
{
GHashTableIter iter;
MonoJumpInfo *ji;
LLVMValueRef callee;
GHashTable *specializable = g_hash_table_new (NULL, NULL);
g_hash_table_iter_init (&iter, module->plt_entries_ji);
while (g_hash_table_iter_next (&iter, (void**)&ji, (void**)&callee)) {
if (mono_aot_is_direct_callable (ji)) {
LLVMValueRef lmethod;
lmethod = (LLVMValueRef)g_hash_table_lookup (module->method_to_lmethod, ji->data.method);
/* The types might not match because the caller might pass an rgctx */
if (lmethod && LLVMTypeOf (callee) == LLVMTypeOf (lmethod)) {
mono_llvm_replace_uses_of (callee, lmethod);
if (mono_aot_can_specialize (ji->data.method))
g_hash_table_insert (specializable, lmethod, ji->data.method);
mono_aot_mark_unused_llvm_plt_entry (ji);
}
}
}
mono_llvm_propagate_nonnull_final (specializable, module);
g_hash_table_destroy (specializable);
}
#if 0
{
char *verifier_err;
if (LLVMVerifyModule (module->lmodule, LLVMReturnStatusAction, &verifier_err)) {
printf ("%s\n", verifier_err);
g_assert_not_reached ();
}
}
#endif
/* Note: you can still dump an invalid bitcode file by running `llvm-dis`
 * under a debugger, setting a breakpoint on `LLVMVerifyModule` and faking
 * its result to 0 (indicating success). */
LLVMWriteBitcodeToFile (module->lmodule, filename);
}
static LLVMValueRef
md_string (const char *s)
{
return LLVMMDString (s, strlen (s));
}
/* Debugging support */
static void
emit_dbg_info (MonoLLVMModule *module, const char *filename, const char *cu_name)
{
LLVMModuleRef lmodule = module->lmodule;
LLVMValueRef args [16], ver;
/*
* This can only be enabled when LLVM code is emitted into a separate object
* file, since the AOT compiler also emits dwarf info,
* and the abbrev indexes will not be correct since llvm has added its own
* abbrevs.
*/
if (!module->emit_dwarf)
return;
mono_llvm_di_builder_finalize (module->di_builder);
args [0] = LLVMConstInt (LLVMInt32Type (), 2, FALSE);
args [1] = LLVMMDString ("Dwarf Version", strlen ("Dwarf Version"));
args [2] = LLVMConstInt (LLVMInt32Type (), 2, FALSE);
ver = LLVMMDNode (args, 3);
LLVMAddNamedMetadataOperand (lmodule, "llvm.module.flags", ver);
args [0] = LLVMConstInt (LLVMInt32Type (), 2, FALSE);
args [1] = LLVMMDString ("Debug Info Version", strlen ("Debug Info Version"));
args [2] = LLVMConstInt (LLVMInt64Type (), 3, FALSE);
ver = LLVMMDNode (args, 3);
LLVMAddNamedMetadataOperand (lmodule, "llvm.module.flags", ver);
}
static LLVMValueRef
emit_dbg_subprogram (EmitContext *ctx, MonoCompile *cfg, LLVMValueRef method, const char *name)
{
MonoLLVMModule *module = ctx->module;
MonoDebugMethodInfo *minfo = ctx->minfo;
char *source_file, *dir, *filename;
MonoSymSeqPoint *sym_seq_points;
int n_seq_points;
if (!minfo)
return NULL;
mono_debug_get_seq_points (minfo, &source_file, NULL, NULL, &sym_seq_points, &n_seq_points);
if (!source_file)
source_file = g_strdup ("<unknown>");
dir = g_path_get_dirname (source_file);
filename = g_path_get_basename (source_file);
g_free (source_file);
return (LLVMValueRef)mono_llvm_di_create_function (module->di_builder, module->cu, method, cfg->method->name, name, dir, filename, n_seq_points ? sym_seq_points [0].line : 1);
}
static void
emit_dbg_loc (EmitContext *ctx, LLVMBuilderRef builder, const unsigned char *cil_code)
{
MonoCompile *cfg = ctx->cfg;
if (ctx->minfo && cil_code && cil_code >= cfg->header->code && cil_code < cfg->header->code + cfg->header->code_size) {
MonoDebugSourceLocation *loc;
LLVMValueRef loc_md;
loc = mono_debug_method_lookup_location (ctx->minfo, cil_code - cfg->header->code);
if (loc) {
loc_md = (LLVMValueRef)mono_llvm_di_create_location (ctx->module->di_builder, ctx->dbg_md, loc->row, loc->column);
mono_llvm_di_set_location (builder, loc_md);
mono_debug_free_source_location (loc);
}
}
}
static void
emit_default_dbg_loc (EmitContext *ctx, LLVMBuilderRef builder)
{
if (ctx->minfo) {
LLVMValueRef loc_md;
loc_md = (LLVMValueRef)mono_llvm_di_create_location (ctx->module->di_builder, ctx->dbg_md, 0, 0);
mono_llvm_di_set_location (builder, loc_md);
}
}
/*
DESIGN:
- Emit LLVM IR from the mono IR using the LLVM C API.
- The original arch specific code remains, so we can fall back to it if we run
into something we can't handle.
*/
/*
A partial list of issues:
- Handling of opcodes which can throw exceptions.
In the mono JIT, these are implemented using code like this:
method:
<compare>
throw_pos:
b<cond> ex_label
<rest of code>
ex_label:
push throw_pos - method
call <exception trampoline>
The problematic part is push throw_pos - method, which cannot be represented
in the LLVM IR, since it does not support label values.
-> this can be implemented in AOT mode using inline asm + labels, but cannot
be implemented in JIT mode ?
-> a possible but slower implementation would use the normal exception
throwing code but it would need to control the placement of the throw code
(it needs to be exactly after the compare+branch).
-> perhaps add a PC offset intrinsics ?
- efficient implementation of .ovf opcodes.
These are currently implemented as:
<ins which sets the condition codes>
b<cond> ex_label
Some overflow opcodes are now supported by LLVM SVN.
- exception handling, unwinding.
- SSA is disabled for methods with exception handlers
- How to obtain unwind info for LLVM compiled methods ?
-> this is now solved by converting the unwind info generated by LLVM
into our format.
- LLVM uses the c++ exception handling framework, while we use our home-grown
code, and couldn't use the c++ one:
- it's not supported under VC++ and other exotic platforms.
- it might be impossible to support filter clauses with it.
- trampolines.
The trampolines need a predictable call sequence, since they need to disasm
the calling code to obtain register numbers / offsets.
LLVM currently generates this code in non-JIT mode:
mov -0x98(%rax),%eax
callq *%rax
Here, the vtable pointer is lost.
-> solution: use one vtable trampoline per class.
- passing/receiving the IMT pointer/RGCTX.
-> solution: pass them as normal arguments ?
- argument passing.
LLVM does not allow the specification of argument registers etc. This means
that all calls are made according to the platform ABI.
- passing/receiving vtypes.
Vtypes passed/received in registers are handled by the front end by using
a signature with scalar arguments, and loading the parts of the vtype into those
arguments.
Vtypes passed on the stack are handled using the 'byval' attribute.
- ldaddr.
Supported through alloca; we need to emit the load/store code.
- types.
The mono JIT uses pointer-sized iregs/double fregs, while LLVM uses precisely
typed registers, so we have to keep track of the precise LLVM type of each vreg.
This is made easier because the IR is already in SSA form.
An additional problem is that our IR is not consistent with types, i.e. i32/i64
types are frequently used incorrectly.
*/
/*
AOT SUPPORT:
Emit LLVM bytecode into a .bc file, compile it using llc into a .s file, then link
it with the file containing the methods emitted by the JIT and the AOT data
structures.
*/
/* FIXME: Normalize some aspects of the mono IR to allow easier translation, like:
* - each bblock should end with a branch
* - setting the return value, making cfg->ret non-volatile
* - avoid some transformations in the JIT which make it harder for us to generate
* code.
* - use pointer types to help optimizations.
*/
#else /* DISABLE_JIT */
void
mono_llvm_cleanup (void)
{
}
void
mono_llvm_free_mem_manager (MonoJitMemoryManager *mem_manager)
{
}
void
mono_llvm_init (gboolean enable_jit)
{
}
#endif /* DISABLE_JIT */
#if !defined(DISABLE_JIT) && !defined(MONO_CROSS_COMPILE)
/* LLVM JIT support */
/*
* decode_llvm_eh_info:
*
* Decode the EH table emitted by llvm in jit mode, and store
* the result into cfg.
*/
static void
decode_llvm_eh_info (EmitContext *ctx, gpointer eh_frame)
{
MonoCompile *cfg = ctx->cfg;
guint8 *cie, *fde;
int fde_len;
MonoLLVMFDEInfo info;
MonoJitExceptionInfo *ei;
guint8 *p = (guint8*)eh_frame;
int version, fde_count, fde_offset;
guint32 ei_len, i, nested_len;
gpointer *type_info;
gint32 *table;
guint8 *unw_info;
/*
 * Decode the one-element EH table emitted by the MonoException class
* in llvm.
*/
/* Similar to decode_llvm_mono_eh_frame () in aot-runtime.c */
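/*
 * The format is roughly: a version byte (3), a padding byte, alignment to a
 * 4 byte boundary, the FDE count, FDE_COUNT (index, offset) pairs plus one
 * extra (code length, end offset) entry, followed by the CIE/FDE data.
 */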
version = *p;
g_assert (version == 3);
p ++;
p ++;
p = (guint8 *)ALIGN_PTR_TO (p, 4);
fde_count = *(guint32*)p;
p += 4;
table = (gint32*)p;
g_assert (fde_count <= 2);
/* The first entry is the real method */
g_assert (table [0] == 1);
fde_offset = table [1];
table += fde_count * 2;
/* Extra entry */
cfg->code_len = table [0];
fde_len = table [1] - fde_offset;
table += 2;
fde = (guint8*)eh_frame + fde_offset;
cie = (guint8*)table;
/* Compute lengths */
mono_unwind_decode_llvm_mono_fde (fde, fde_len, cie, cfg->native_code, &info, NULL, NULL, NULL);
ei = (MonoJitExceptionInfo *)g_malloc0 (info.ex_info_len * sizeof (MonoJitExceptionInfo));
type_info = (gpointer *)g_malloc0 (info.ex_info_len * sizeof (gpointer));
unw_info = (guint8*)g_malloc0 (info.unw_info_len);
mono_unwind_decode_llvm_mono_fde (fde, fde_len, cie, cfg->native_code, &info, ei, type_info, unw_info);
cfg->encoded_unwind_ops = unw_info;
cfg->encoded_unwind_ops_len = info.unw_info_len;
if (cfg->verbose_level > 1)
mono_print_unwind_info (cfg->encoded_unwind_ops, cfg->encoded_unwind_ops_len);
if (info.this_reg != -1) {
cfg->llvm_this_reg = info.this_reg;
cfg->llvm_this_offset = info.this_offset;
}
ei_len = info.ex_info_len;
// Nested clauses are currently disabled
nested_len = 0;
cfg->llvm_ex_info = (MonoJitExceptionInfo*)mono_mempool_alloc0 (cfg->mempool, (ei_len + nested_len) * sizeof (MonoJitExceptionInfo));
cfg->llvm_ex_info_len = ei_len + nested_len;
memcpy (cfg->llvm_ex_info, ei, ei_len * sizeof (MonoJitExceptionInfo));
/* Fill the rest of the information from the type info */
for (i = 0; i < ei_len; ++i) {
gint32 clause_index = *(gint32*)type_info [i];
MonoExceptionClause *clause = &cfg->header->clauses [clause_index];
cfg->llvm_ex_info [i].flags = clause->flags;
cfg->llvm_ex_info [i].data.catch_class = clause->data.catch_class;
cfg->llvm_ex_info [i].clause_index = clause_index;
}
}
static MonoLLVMModule*
init_jit_module (void)
{
MonoJitMemoryManager *jit_mm;
MonoLLVMModule *module;
// FIXME:
jit_mm = get_default_jit_mm ();
if (jit_mm->llvm_module)
return (MonoLLVMModule*)jit_mm->llvm_module;
mono_loader_lock ();
if (jit_mm->llvm_module) {
mono_loader_unlock ();
return (MonoLLVMModule*)jit_mm->llvm_module;
}
module = g_new0 (MonoLLVMModule, 1);
module->context = LLVMGetGlobalContext ();
module->mono_ee = (MonoEERef*)mono_llvm_create_ee (&module->ee);
// This contains just the intrinsics
module->lmodule = LLVMModuleCreateWithName ("jit-global-module");
add_intrinsics (module->lmodule);
add_types (module);
module->llvm_types = g_hash_table_new (NULL, NULL);
mono_memory_barrier ();
jit_mm->llvm_module = module;
mono_loader_unlock ();
return (MonoLLVMModule*)jit_mm->llvm_module;
}
static void
llvm_jit_finalize_method (EmitContext *ctx)
{
MonoCompile *cfg = ctx->cfg;
int nvars = g_hash_table_size (ctx->jit_callees);
LLVMValueRef *callee_vars = g_new0 (LLVMValueRef, nvars);
gpointer *callee_addrs = g_new0 (gpointer, nvars);
GHashTableIter iter;
LLVMValueRef var;
MonoMethod *callee;
gpointer eh_frame;
int i;
/*
* Compute the addresses of the LLVM globals pointing to the
* methods called by the current method. Pass them to the trampoline
* code so it can update them after their corresponding methods are
* compiled.
*/
g_hash_table_iter_init (&iter, ctx->jit_callees);
i = 0;
while (g_hash_table_iter_next (&iter, NULL, (void**)&var))
callee_vars [i ++] = var;
mono_llvm_optimize_method (ctx->lmethod);
if (cfg->verbose_level > 1) {
g_print ("\n*** Optimized LLVM IR for %s ***\n", mono_method_full_name (cfg->method, TRUE));
if (cfg->compile_aot) {
mono_llvm_dump_value (ctx->lmethod);
} else {
mono_llvm_dump_module (ctx->lmodule);
}
g_print ("***\n\n");
}
mono_codeman_enable_write ();
cfg->native_code = (guint8*)mono_llvm_compile_method (ctx->module->mono_ee, cfg, ctx->lmethod, nvars, callee_vars, callee_addrs, &eh_frame);
mono_llvm_remove_gc_safepoint_poll (ctx->lmodule);
mono_codeman_disable_write ();
decode_llvm_eh_info (ctx, eh_frame);
// FIXME:
MonoJitMemoryManager *jit_mm = get_default_jit_mm ();
jit_mm_lock (jit_mm);
if (!jit_mm->llvm_jit_callees)
jit_mm->llvm_jit_callees = g_hash_table_new (NULL, NULL);
g_hash_table_iter_init (&iter, ctx->jit_callees);
i = 0;
while (g_hash_table_iter_next (&iter, (void**)&callee, (void**)&var)) {
GSList *addrs = (GSList*)g_hash_table_lookup (jit_mm->llvm_jit_callees, callee);
addrs = g_slist_prepend (addrs, callee_addrs [i]);
g_hash_table_insert (jit_mm->llvm_jit_callees, callee, addrs);
i ++;
}
jit_mm_unlock (jit_mm);
}
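/*
 * To summarize the flow above (a sketch): each direct callee referenced
 * through an LLVM global has its address slot recorded in
 * jit_mm->llvm_jit_callees, keyed by the callee MonoMethod, so that when the
 * callee is eventually compiled its real address can be patched into every
 * recorded slot.
 */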
#else
static MonoLLVMModule*
init_jit_module (void)
{
g_assert_not_reached ();
}
static void
llvm_jit_finalize_method (EmitContext *ctx)
{
g_assert_not_reached ();
}
#endif
static MonoCPUFeatures cpu_features;
MonoCPUFeatures mono_llvm_get_cpu_features (void)
{
static const CpuFeatureAliasFlag flags_map [] = {
#if defined(TARGET_X86) || defined(TARGET_AMD64)
{ "sse", MONO_CPU_X86_SSE },
{ "sse2", MONO_CPU_X86_SSE2 },
{ "pclmul", MONO_CPU_X86_PCLMUL },
{ "aes", MONO_CPU_X86_AES },
{ "sse2", MONO_CPU_X86_SSE2 },
{ "sse3", MONO_CPU_X86_SSE3 },
{ "ssse3", MONO_CPU_X86_SSSE3 },
{ "sse4.1", MONO_CPU_X86_SSE41 },
{ "sse4.2", MONO_CPU_X86_SSE42 },
{ "popcnt", MONO_CPU_X86_POPCNT },
{ "avx", MONO_CPU_X86_AVX },
{ "avx2", MONO_CPU_X86_AVX2 },
{ "fma", MONO_CPU_X86_FMA },
{ "lzcnt", MONO_CPU_X86_LZCNT },
{ "bmi", MONO_CPU_X86_BMI1 },
{ "bmi2", MONO_CPU_X86_BMI2 },
#endif
#if defined(TARGET_ARM64)
{ "crc", MONO_CPU_ARM64_CRC },
{ "crypto", MONO_CPU_ARM64_CRYPTO },
{ "neon", MONO_CPU_ARM64_NEON },
{ "rdm", MONO_CPU_ARM64_RDM },
{ "dotprod", MONO_CPU_ARM64_DP },
#endif
#if defined(TARGET_WASM)
{ "simd", MONO_CPU_WASM_SIMD },
#endif
// flags_map cannot be zero-length in MSVC, so add a dummy entry for arm32
#if defined(TARGET_ARM) && defined(HOST_WIN32)
{ "inited", MONO_CPU_INITED},
#endif
};
if (!cpu_features)
cpu_features = MONO_CPU_INITED | (MonoCPUFeatures)mono_llvm_check_cpu_features (flags_map, G_N_ELEMENTS (flags_map));
return cpu_features;
}
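/*
 * Illustrative usage: callers test individual bits of the returned flags,
 * e.g.
 *
 *   if (mono_llvm_get_cpu_features () & MONO_CPU_X86_SSE41)
 *       ... emit SSE4.1 code paths ...
 *
 * MONO_CPU_INITED is always set once the flags have been computed, so a
 * cached value of 0 doubles as the "not yet initialized" state.
 */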
| /**
* \file
* llvm "Backend" for the mono JIT
*
* Copyright 2009-2011 Novell Inc (http://www.novell.com)
* Copyright 2011 Xamarin Inc (http://www.xamarin.com)
* Licensed under the MIT license. See LICENSE file in the project root for full license information.
*/
#include "config.h"
#include <mono/metadata/debug-helpers.h>
#include <mono/metadata/debug-internals.h>
#include <mono/metadata/mempool-internals.h>
#include <mono/metadata/environment.h>
#include <mono/metadata/object-internals.h>
#include <mono/metadata/abi-details.h>
#include <mono/metadata/tokentype.h>
#include <mono/utils/mono-tls.h>
#include <mono/utils/mono-dl.h>
#include <mono/utils/mono-time.h>
#include <mono/utils/freebsd-dwarf.h>
#ifndef __STDC_LIMIT_MACROS
#define __STDC_LIMIT_MACROS
#endif
#ifndef __STDC_CONSTANT_MACROS
#define __STDC_CONSTANT_MACROS
#endif
#include "llvm-c/BitWriter.h"
#include "llvm-c/Analysis.h"
#include "mini-llvm-cpp.h"
#include "llvm-jit.h"
#include "aot-compiler.h"
#include "mini-llvm.h"
#include "mini-runtime.h"
#include <mono/utils/mono-math.h>
#ifndef DISABLE_JIT
#if defined(TARGET_AMD64) && defined(TARGET_WIN32) && defined(HOST_WIN32) && defined(_MSC_VER)
#define TARGET_X86_64_WIN32_MSVC
#endif
#if defined(TARGET_X86_64_WIN32_MSVC)
#define TARGET_WIN32_MSVC
#endif
#if LLVM_API_VERSION < 900
#error "The version of the mono llvm repository is too old."
#endif
/*
* Information associated by mono with LLVM modules.
*/
typedef struct {
LLVMModuleRef lmodule;
LLVMValueRef throw_icall, rethrow, throw_corlib_exception;
GHashTable *llvm_types;
LLVMValueRef dummy_got_var;
const char *get_method_symbol;
const char *get_unbox_tramp_symbol;
const char *init_aotconst_symbol;
GHashTable *plt_entries;
GHashTable *plt_entries_ji;
GHashTable *method_to_lmethod;
GHashTable *method_to_call_info;
GHashTable *lvalue_to_lcalls;
GHashTable *direct_callables;
/* Maps got slot index -> LLVMValueRef */
GHashTable *aotconst_vars;
char **bb_names;
int bb_names_len;
GPtrArray *used;
LLVMTypeRef ptr_type;
GPtrArray *subprogram_mds;
MonoEERef *mono_ee;
LLVMExecutionEngineRef ee;
gboolean external_symbols;
gboolean emit_dwarf;
int max_got_offset;
LLVMValueRef personality;
gpointer gc_poll_cold_wrapper_compiled;
/* For AOT */
MonoAssembly *assembly;
char *global_prefix;
MonoAotFileInfo aot_info;
const char *eh_frame_symbol;
LLVMValueRef get_method, get_unbox_tramp, init_aotconst_func;
LLVMValueRef init_methods [AOT_INIT_METHOD_NUM];
LLVMValueRef code_start, code_end;
LLVMValueRef inited_var;
LLVMValueRef unbox_tramp_indexes;
LLVMValueRef unbox_trampolines;
LLVMValueRef gc_poll_cold_wrapper;
LLVMValueRef info_var;
LLVMTypeRef *info_var_eltypes;
int max_inited_idx, max_method_idx;
gboolean has_jitted_code;
gboolean static_link;
gboolean llvm_only;
gboolean interp;
GHashTable *idx_to_lmethod;
GHashTable *idx_to_unbox_tramp;
GPtrArray *callsite_list;
LLVMContextRef context;
LLVMValueRef sentinel_exception;
LLVMValueRef gc_safe_point_flag_var;
LLVMValueRef interrupt_flag_var;
void *di_builder, *cu;
GHashTable *objc_selector_to_var;
GPtrArray *cfgs;
int unbox_tramp_num, unbox_tramp_elemsize;
GHashTable *got_idx_to_type;
GHashTable *no_method_table_lmethods;
} MonoLLVMModule;
/*
* Information associated by the backend with mono basic blocks.
*/
typedef struct {
LLVMBasicBlockRef bblock, end_bblock;
LLVMValueRef finally_ind;
gboolean added, invoke_target;
/*
* If this bblock is the start of a finally clause, this is a list of bblocks it
* needs to branch to in ENDFINALLY.
*/
GSList *call_handler_return_bbs;
/*
* If this bblock is the start of a finally clause, this is the bblock that
* CALL_HANDLER needs to branch to.
*/
LLVMBasicBlockRef call_handler_target_bb;
/* The list of switch statements generated by ENDFINALLY instructions */
GSList *endfinally_switch_ins_list;
GSList *phi_nodes;
} BBInfo;
/*
* Structure containing emit state
*/
typedef struct {
MonoMemPool *mempool;
/* Maps method names to the corresponding LLVMValueRef */
GHashTable *emitted_method_decls;
MonoCompile *cfg;
LLVMValueRef lmethod;
MonoLLVMModule *module;
LLVMModuleRef lmodule;
BBInfo *bblocks;
int sindex, default_index, ex_index;
LLVMBuilderRef builder;
LLVMValueRef *values, *addresses;
MonoType **vreg_cli_types;
LLVMCallInfo *linfo;
MonoMethodSignature *sig;
GSList *builders;
GHashTable *region_to_handler;
GHashTable *clause_to_handler;
LLVMBuilderRef alloca_builder;
LLVMValueRef last_alloca;
LLVMValueRef rgctx_arg;
LLVMValueRef this_arg;
LLVMTypeRef *vreg_types;
gboolean *is_vphi;
LLVMTypeRef method_type;
LLVMBasicBlockRef init_bb, inited_bb;
gboolean *is_dead;
gboolean *unreachable;
gboolean llvm_only;
gboolean has_got_access;
gboolean is_linkonce;
gboolean emit_dummy_arg;
gboolean has_safepoints;
gboolean has_catch;
int this_arg_pindex, rgctx_arg_pindex;
LLVMValueRef imt_rgctx_loc;
GHashTable *llvm_types;
LLVMValueRef dbg_md;
MonoDebugMethodInfo *minfo;
/* For every clause, the clauses it is nested in */
GSList **nested_in;
LLVMValueRef ex_var;
GHashTable *exc_meta;
GPtrArray *callsite_list;
GPtrArray *phi_values;
GPtrArray *bblock_list;
char *method_name;
GHashTable *jit_callees;
LLVMValueRef long_bb_break_var;
int *gc_var_indexes;
LLVMValueRef gc_pin_area;
LLVMValueRef il_state;
LLVMValueRef il_state_ret;
} EmitContext;
typedef struct {
MonoBasicBlock *bb;
MonoInst *phi;
MonoBasicBlock *in_bb;
int sreg;
} PhiNode;
/*
* Instruction metadata
* This is the same as ins_info, but LREG != IREG.
*/
#ifdef MINI_OP
#undef MINI_OP
#endif
#ifdef MINI_OP3
#undef MINI_OP3
#endif
#define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
#define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
#define NONE ' '
#define IREG 'i'
#define FREG 'f'
#define VREG 'v'
#define XREG 'x'
#define LREG 'l'
/* keep in sync with the enum in mini.h */
const char
mini_llvm_ins_info[] = {
#include "mini-ops.h"
};
#undef MINI_OP
#undef MINI_OP3
#if TARGET_SIZEOF_VOID_P == 4
#define GET_LONG_IMM(ins) ((ins)->inst_l)
#else
#define GET_LONG_IMM(ins) ((ins)->inst_imm)
#endif
#define LLVM_INS_INFO(opcode) (&mini_llvm_ins_info [((opcode) - OP_START - 1) * 4])
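/*
 * Example (a sketch): the info table stores 4 characters per opcode - the
 * dest, src1, src2 and src3 regtypes - so for an instruction 'ins':
 *
 *   const char *spec = LLVM_INS_INFO (ins->opcode);
 *   char dest_type = spec [0];   // one of NONE/IREG/FREG/VREG/XREG/LREG
 *   char src1_type = spec [1];
 *
 * MINI_OP entries pad the missing src3 slot with ' '.
 */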
#if 0
#define TRACE_FAILURE(msg) do { printf ("%s\n", msg); } while (0)
#else
#define TRACE_FAILURE(msg)
#endif
#ifdef TARGET_X86
#define IS_TARGET_X86 1
#else
#define IS_TARGET_X86 0
#endif
#ifdef TARGET_AMD64
#define IS_TARGET_AMD64 1
#else
#define IS_TARGET_AMD64 0
#endif
#define ctx_ok(ctx) (!(ctx)->cfg->disable_llvm)
enum {
MAX_VECTOR_ELEMS = 32, // 2 vectors * 128 bits per vector / 8 bits per element
ARM64_MAX_VECTOR_ELEMS = 16,
};
const int mask_0_incr_1 [] = {
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
};
static LLVMIntPredicate cond_to_llvm_cond [] = {
LLVMIntEQ,
LLVMIntNE,
LLVMIntSLE,
LLVMIntSGE,
LLVMIntSLT,
LLVMIntSGT,
LLVMIntULE,
LLVMIntUGE,
LLVMIntULT,
LLVMIntUGT,
};
static LLVMRealPredicate fpcond_to_llvm_cond [] = {
LLVMRealOEQ,
LLVMRealUNE,
LLVMRealOLE,
LLVMRealOGE,
LLVMRealOLT,
LLVMRealOGT,
LLVMRealULE,
LLVMRealUGE,
LLVMRealULT,
LLVMRealUGT,
LLVMRealORD,
LLVMRealUNO
};
/* See Table 3-1 ("Comparison Predicate for CMPPD and CMPPS Instructions") in
* Vol. 2A of the Intel SDM.
*/
enum {
SSE_eq_ord_nosignal = 0,
SSE_lt_ord_signal = 1,
SSE_le_ord_signal = 2,
SSE_unord_nosignal = 3,
SSE_neq_unord_nosignal = 4,
SSE_nlt_unord_signal = 5,
SSE_nle_unord_signal = 6,
SSE_ord_nosignal = 7,
};
static MonoLLVMModule aot_module;
static GHashTable *intrins_id_to_intrins;
static LLVMTypeRef i1_t, i2_t, i4_t, i8_t, r4_t, r8_t;
static LLVMTypeRef sse_i1_t, sse_i2_t, sse_i4_t, sse_i8_t, sse_r4_t, sse_r8_t;
static LLVMTypeRef v64_i1_t, v64_i2_t, v64_i4_t, v64_i8_t, v64_r4_t, v64_r8_t;
static LLVMTypeRef v128_i1_t, v128_i2_t, v128_i4_t, v128_i8_t, v128_r4_t, v128_r8_t;
static LLVMTypeRef void_func_t;
static MonoLLVMModule *init_jit_module (void);
static void emit_dbg_loc (EmitContext *ctx, LLVMBuilderRef builder, const unsigned char *cil_code);
static void emit_default_dbg_loc (EmitContext *ctx, LLVMBuilderRef builder);
static LLVMValueRef emit_dbg_subprogram (EmitContext *ctx, MonoCompile *cfg, LLVMValueRef method, const char *name);
static void emit_dbg_info (MonoLLVMModule *module, const char *filename, const char *cu_name);
static void emit_cond_system_exception (EmitContext *ctx, MonoBasicBlock *bb, const char *exc_type, LLVMValueRef cmp, gboolean force_explicit);
static LLVMValueRef get_intrins (EmitContext *ctx, int id);
static LLVMValueRef get_intrins_from_module (LLVMModuleRef lmodule, int id);
static void llvm_jit_finalize_method (EmitContext *ctx);
static void mono_llvm_nonnull_state_update (EmitContext *ctx, LLVMValueRef lcall, MonoMethod *call_method, LLVMValueRef *args, int num_params);
static void mono_llvm_propagate_nonnull_final (GHashTable *all_specializable, MonoLLVMModule *module);
static void create_aot_info_var (MonoLLVMModule *module);
static void set_invariant_load_flag (LLVMValueRef v);
static void set_nonnull_load_flag (LLVMValueRef v);
enum {
INTRIN_scalar = 1 << 0,
INTRIN_vector64 = 1 << 1,
INTRIN_vector128 = 1 << 2,
INTRIN_vectorwidths = 3,
INTRIN_vectormask = 0x7,
INTRIN_int8 = 1 << 3,
INTRIN_int16 = 1 << 4,
INTRIN_int32 = 1 << 5,
INTRIN_int64 = 1 << 6,
INTRIN_float32 = 1 << 7,
INTRIN_float64 = 1 << 8,
INTRIN_elementwidths = 6,
};
typedef uint16_t llvm_ovr_tag_t;
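/*
 * A worked example of the tag encoding (illustrative): a 128-bit vector of
 * i32 elements is INTRIN_vector128 | INTRIN_int32. The helpers below operate
 * directly on this bit layout: ovr_tag_smaller_vector () halves the vector
 * width (vector128 -> vector64), ovr_tag_smaller_elements () halves the
 * element width (int32 -> int16), ovr_tag_corresponding_integer () maps a
 * float tag to the same-width integer tag (float32 -> int32), and
 * ovr_tag_to_llvm_type () maps a tag to the matching entry in intrin_types.
 */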
static LLVMTypeRef intrin_types [INTRIN_vectorwidths][INTRIN_elementwidths];
static const llvm_ovr_tag_t intrin_arm64_ovr [] = {
#define INTRINS(sym, ...) 0,
#define INTRINS_OVR(sym, ...) 0,
#define INTRINS_OVR_2_ARG(sym, ...) 0,
#define INTRINS_OVR_3_ARG(sym, ...) 0,
#define INTRINS_OVR_TAG(sym, _, arch, spec) spec,
#define INTRINS_OVR_TAG_KIND(sym, _, kind, arch, spec) spec,
#include "llvm-intrinsics.h"
};
enum {
INTRIN_kind_ftoi = 1,
INTRIN_kind_widen,
INTRIN_kind_widen_across,
INTRIN_kind_across,
INTRIN_kind_arm64_dot_prod,
};
static const uint8_t intrin_kind [] = {
#define INTRINS(sym, ...) 0,
#define INTRINS_OVR(sym, ...) 0,
#define INTRINS_OVR_2_ARG(sym, ...) 0,
#define INTRINS_OVR_3_ARG(sym, ...) 0,
#define INTRINS_OVR_TAG(sym, _, arch, spec) 0,
#define INTRINS_OVR_TAG_KIND(sym, _, arch, kind, spec) kind,
#include "llvm-intrinsics.h"
};
static inline llvm_ovr_tag_t
ovr_tag_force_scalar (llvm_ovr_tag_t tag)
{
return (tag & ~INTRIN_vectormask) | INTRIN_scalar;
}
static inline llvm_ovr_tag_t
ovr_tag_smaller_vector (llvm_ovr_tag_t tag)
{
return (tag & ~INTRIN_vectormask) | ((tag & INTRIN_vectormask) >> 1);
}
static inline llvm_ovr_tag_t
ovr_tag_smaller_elements (llvm_ovr_tag_t tag)
{
return ((tag & ~INTRIN_vectormask) >> 1) | (tag & INTRIN_vectormask);
}
static inline llvm_ovr_tag_t
ovr_tag_corresponding_integer (llvm_ovr_tag_t tag)
{
return ((tag & ~INTRIN_vectormask) >> 2) | (tag & INTRIN_vectormask);
}
static LLVMTypeRef
ovr_tag_to_llvm_type (llvm_ovr_tag_t tag)
{
int vw = 0;
int ew = 0;
if (tag & INTRIN_vector64) vw = 1;
else if (tag & INTRIN_vector128) vw = 2;
if (tag & INTRIN_int16) ew = 1;
else if (tag & INTRIN_int32) ew = 2;
else if (tag & INTRIN_int64) ew = 3;
else if (tag & INTRIN_float32) ew = 4;
else if (tag & INTRIN_float64) ew = 5;
return intrin_types [vw][ew];
}
static int
key_from_id_and_tag (int id, llvm_ovr_tag_t ovr_tag)
{
return (((int) ovr_tag) << 23) | id;
}
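/*
 * E.g. (sketch): key_from_id_and_tag (id, tag) packs the overload tag into
 * the bits above bit 23, which implicitly assumes intrinsic ids fit in 23
 * bits; the combined value can then serve as a single hash key (e.g. with
 * intrins_id_to_intrins) for overloaded intrinsic instantiations.
 */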
static llvm_ovr_tag_t
ovr_tag_from_mono_vector_class (MonoClass *klass) {
int size = mono_class_value_size (klass, NULL);
llvm_ovr_tag_t ret = 0;
switch (size) {
case 8: ret |= INTRIN_vector64; break;
case 16: ret |= INTRIN_vector128; break;
}
MonoType *etype = mono_class_get_context (klass)->class_inst->type_argv [0];
switch (etype->type) {
case MONO_TYPE_I1: case MONO_TYPE_U1: ret |= INTRIN_int8; break;
case MONO_TYPE_I2: case MONO_TYPE_U2: ret |= INTRIN_int16; break;
case MONO_TYPE_I4: case MONO_TYPE_U4: ret |= INTRIN_int32; break;
case MONO_TYPE_I8: case MONO_TYPE_U8: ret |= INTRIN_int64; break;
case MONO_TYPE_R4: ret |= INTRIN_float32; break;
case MONO_TYPE_R8: ret |= INTRIN_float64; break;
}
return ret;
}
static llvm_ovr_tag_t
ovr_tag_from_llvm_type (LLVMTypeRef type)
{
llvm_ovr_tag_t ret = 0;
LLVMTypeKind kind = LLVMGetTypeKind (type);
LLVMTypeRef elem_t = NULL;
switch (kind) {
case LLVMVectorTypeKind: {
elem_t = LLVMGetElementType (type);
unsigned int bits = mono_llvm_get_prim_size_bits (type);
switch (bits) {
case 64: ret |= INTRIN_vector64; break;
case 128: ret |= INTRIN_vector128; break;
default: g_assert_not_reached ();
}
break;
}
default:
g_assert_not_reached ();
}
if (elem_t == i1_t) ret |= INTRIN_int8;
if (elem_t == i2_t) ret |= INTRIN_int16;
if (elem_t == i4_t) ret |= INTRIN_int32;
if (elem_t == i8_t) ret |= INTRIN_int64;
if (elem_t == r4_t) ret |= INTRIN_float32;
if (elem_t == r8_t) ret |= INTRIN_float64;
return ret;
}
static inline void
set_failure (EmitContext *ctx, const char *message)
{
TRACE_FAILURE (message);
ctx->cfg->exception_message = g_strdup (message);
ctx->cfg->disable_llvm = TRUE;
}
static LLVMValueRef
const_int1 (int v)
{
return LLVMConstInt (LLVMInt1Type (), v ? 1 : 0, FALSE);
}
static LLVMValueRef
const_int8 (int v)
{
return LLVMConstInt (LLVMInt8Type (), v, FALSE);
}
static LLVMValueRef
const_int32 (int v)
{
return LLVMConstInt (LLVMInt32Type (), v, FALSE);
}
static LLVMValueRef
const_int64 (int64_t v)
{
return LLVMConstInt (LLVMInt64Type (), v, FALSE);
}
/*
* IntPtrType:
*
* The LLVM type with width == TARGET_SIZEOF_VOID_P
*/
static LLVMTypeRef
IntPtrType (void)
{
return TARGET_SIZEOF_VOID_P == 8 ? LLVMInt64Type () : LLVMInt32Type ();
}
static LLVMTypeRef
ObjRefType (void)
{
return TARGET_SIZEOF_VOID_P == 8 ? LLVMPointerType (LLVMInt64Type (), 0) : LLVMPointerType (LLVMInt32Type (), 0);
}
static LLVMTypeRef
ThisType (void)
{
return TARGET_SIZEOF_VOID_P == 8 ? LLVMPointerType (LLVMInt64Type (), 0) : LLVMPointerType (LLVMInt32Type (), 0);
}
typedef struct {
int32_t size;
uint32_t align;
} MonoSizeAlign;
/*
* get_vtype_size_align:
*
* Return the size and alignment of the LLVM representation of the vtype T.
*/
static MonoSizeAlign
get_vtype_size_align (MonoType *t)
{
uint32_t align = 0;
int32_t size = mono_class_value_size (mono_class_from_mono_type_internal (t), &align);
/* LLVMArgAsIArgs depends on this since it stores whole words */
while (size < 2 * TARGET_SIZEOF_VOID_P && mono_is_power_of_two (size) == -1)
size ++;
MonoSizeAlign ret = { size, align };
return ret;
}
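/*
 * Worked example (64-bit target): a vtype of size 3 is rounded up to 4 and
 * one of size 12 is rounded up to 16, since the loop above bumps any
 * non-power-of-two size below 2 * TARGET_SIZEOF_VOID_P (16 bytes here) up to
 * the next power of two. Sizes of 16 bytes or more are left unchanged.
 */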
/*
* simd_class_to_llvm_type:
*
* Return the LLVM type corresponding to the Mono.SIMD class KLASS
*/
static LLVMTypeRef
simd_class_to_llvm_type (EmitContext *ctx, MonoClass *klass)
{
const char *klass_name = m_class_get_name (klass);
if (!strcmp (klass_name, "Vector2d")) {
return LLVMVectorType (LLVMDoubleType (), 2);
} else if (!strcmp (klass_name, "Vector2l")) {
return LLVMVectorType (LLVMInt64Type (), 2);
} else if (!strcmp (klass_name, "Vector2ul")) {
return LLVMVectorType (LLVMInt64Type (), 2);
} else if (!strcmp (klass_name, "Vector4i")) {
return LLVMVectorType (LLVMInt32Type (), 4);
} else if (!strcmp (klass_name, "Vector4ui")) {
return LLVMVectorType (LLVMInt32Type (), 4);
} else if (!strcmp (klass_name, "Vector4f")) {
return LLVMVectorType (LLVMFloatType (), 4);
} else if (!strcmp (klass_name, "Vector8s")) {
return LLVMVectorType (LLVMInt16Type (), 8);
} else if (!strcmp (klass_name, "Vector8us")) {
return LLVMVectorType (LLVMInt16Type (), 8);
} else if (!strcmp (klass_name, "Vector16sb")) {
return LLVMVectorType (LLVMInt8Type (), 16);
} else if (!strcmp (klass_name, "Vector16b")) {
return LLVMVectorType (LLVMInt8Type (), 16);
} else if (!strcmp (klass_name, "Vector2")) {
/* System.Numerics */
return LLVMVectorType (LLVMFloatType (), 4);
} else if (!strcmp (klass_name, "Vector3")) {
return LLVMVectorType (LLVMFloatType (), 4);
} else if (!strcmp (klass_name, "Vector4")) {
return LLVMVectorType (LLVMFloatType (), 4);
} else if (!strcmp (klass_name, "Vector`1") || !strcmp (klass_name, "Vector64`1") || !strcmp (klass_name, "Vector128`1") || !strcmp (klass_name, "Vector256`1")) {
MonoType *etype = mono_class_get_generic_class (klass)->context.class_inst->type_argv [0];
int size = mono_class_value_size (klass, NULL);
switch (etype->type) {
case MONO_TYPE_I1:
case MONO_TYPE_U1:
return LLVMVectorType (LLVMInt8Type (), size);
case MONO_TYPE_I2:
case MONO_TYPE_U2:
return LLVMVectorType (LLVMInt16Type (), size / 2);
case MONO_TYPE_I4:
case MONO_TYPE_U4:
return LLVMVectorType (LLVMInt32Type (), size / 4);
case MONO_TYPE_I8:
case MONO_TYPE_U8:
return LLVMVectorType (LLVMInt64Type (), size / 8);
case MONO_TYPE_I:
case MONO_TYPE_U:
#if TARGET_SIZEOF_VOID_P == 8
return LLVMVectorType (LLVMInt64Type (), size / 8);
#else
return LLVMVectorType (LLVMInt32Type (), size / 4);
#endif
case MONO_TYPE_R4:
return LLVMVectorType (LLVMFloatType (), size / 4);
case MONO_TYPE_R8:
return LLVMVectorType (LLVMDoubleType (), size / 8);
default:
g_assert_not_reached ();
return NULL;
}
} else {
printf ("%s\n", klass_name);
NOT_IMPLEMENTED;
return NULL;
}
}
static LLVMTypeRef
simd_valuetuple_to_llvm_type (EmitContext *ctx, MonoClass *klass)
{
const char *klass_name = m_class_get_name (klass);
if (!strcmp (klass_name, "ValueTuple`2")) {
MonoType *etype = mono_class_get_generic_class (klass)->context.class_inst->type_argv [0];
if (etype->type != MONO_TYPE_GENERICINST)
g_assert_not_reached ();
MonoClass *eklass = etype->data.generic_class->cached_class;
LLVMTypeRef ltype = simd_class_to_llvm_type (ctx, eklass);
return LLVMArrayType (ltype, 2);
}
g_assert_not_reached ();
}
/* Return the 128 bit SIMD type corresponding to the mono type TYPE */
static inline G_GNUC_UNUSED LLVMTypeRef
type_to_sse_type (int type)
{
switch (type) {
case MONO_TYPE_I1:
case MONO_TYPE_U1:
return LLVMVectorType (LLVMInt8Type (), 16);
case MONO_TYPE_U2:
case MONO_TYPE_I2:
return LLVMVectorType (LLVMInt16Type (), 8);
case MONO_TYPE_U4:
case MONO_TYPE_I4:
return LLVMVectorType (LLVMInt32Type (), 4);
case MONO_TYPE_U8:
case MONO_TYPE_I8:
return LLVMVectorType (LLVMInt64Type (), 2);
case MONO_TYPE_I:
case MONO_TYPE_U:
#if TARGET_SIZEOF_VOID_P == 8
return LLVMVectorType (LLVMInt64Type (), 2);
#else
return LLVMVectorType (LLVMInt32Type (), 4);
#endif
case MONO_TYPE_R8:
return LLVMVectorType (LLVMDoubleType (), 2);
case MONO_TYPE_R4:
return LLVMVectorType (LLVMFloatType (), 4);
default:
g_assert_not_reached ();
return NULL;
}
}
static LLVMTypeRef
create_llvm_type_for_type (MonoLLVMModule *module, MonoClass *klass)
{
int i, size, nfields, esize;
LLVMTypeRef *eltypes;
char *name;
MonoType *t;
LLVMTypeRef ltype;
t = m_class_get_byval_arg (klass);
if (mini_type_is_hfa (t, &nfields, &esize)) {
/*
* This is needed on arm64 where HFAs are returned in
* registers.
*/
/* SIMD types have size 16 in mono_class_value_size () */
if (m_class_is_simd_type (klass))
nfields = 16 / esize;
size = nfields;
eltypes = g_new (LLVMTypeRef, size);
for (i = 0; i < size; ++i)
eltypes [i] = esize == 4 ? LLVMFloatType () : LLVMDoubleType ();
} else {
MonoSizeAlign size_align = get_vtype_size_align (t);
eltypes = g_new (LLVMTypeRef, size_align.size);
size = 0;
uint32_t bytes = 0;
uint32_t chunk = size_align.align < TARGET_SIZEOF_VOID_P ? size_align.align : TARGET_SIZEOF_VOID_P;
for (; chunk > 0; chunk = chunk >> 1) {
for (; (bytes + chunk) <= size_align.size; bytes += chunk) {
eltypes [size] = LLVMIntType (chunk * 8);
++size;
}
}
}
name = mono_type_full_name (m_class_get_byval_arg (klass));
ltype = LLVMStructCreateNamed (module->context, name);
LLVMStructSetBody (ltype, eltypes, size, FALSE);
g_free (eltypes);
g_free (name);
return ltype;
}
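/*
 * Example of the non-HFA path above (a sketch, 64-bit target): a vtype with
 * size 16 and align 4 becomes the named struct { i32, i32, i32, i32 }, while
 * size 24 and align 8 becomes { i64, i64, i64 }: the loop greedily emits the
 * largest chunks (capped at both the alignment and the pointer size) that
 * still fit in the remaining size.
 */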
static LLVMTypeRef
primitive_type_to_llvm_type (MonoTypeEnum type)
{
switch (type) {
case MONO_TYPE_I1:
case MONO_TYPE_U1:
return LLVMInt8Type ();
case MONO_TYPE_I2:
case MONO_TYPE_U2:
return LLVMInt16Type ();
case MONO_TYPE_I4:
case MONO_TYPE_U4:
return LLVMInt32Type ();
case MONO_TYPE_I8:
case MONO_TYPE_U8:
return LLVMInt64Type ();
case MONO_TYPE_R4:
return LLVMFloatType ();
case MONO_TYPE_R8:
return LLVMDoubleType ();
case MONO_TYPE_I:
case MONO_TYPE_U:
return IntPtrType ();
default:
return NULL;
}
}
static MonoTypeEnum
inst_c1_type (const MonoInst *ins)
{
return (MonoTypeEnum)ins->inst_c1;
}
/*
* type_to_llvm_type:
*
* Return the LLVM type corresponding to T.
*/
static LLVMTypeRef
type_to_llvm_type (EmitContext *ctx, MonoType *t)
{
if (m_type_is_byref (t))
return ThisType ();
t = mini_get_underlying_type (t);
LLVMTypeRef prim_llvm_type = primitive_type_to_llvm_type (t->type);
if (prim_llvm_type != NULL)
return prim_llvm_type;
switch (t->type) {
case MONO_TYPE_VOID:
return LLVMVoidType ();
case MONO_TYPE_OBJECT:
return ObjRefType ();
case MONO_TYPE_PTR:
case MONO_TYPE_FNPTR: {
MonoClass *klass = mono_class_from_mono_type_internal (t);
MonoClass *ptr_klass = m_class_get_element_class (klass);
MonoType *ptr_type = m_class_get_byval_arg (ptr_klass);
/* Handle primitive pointers */
switch (ptr_type->type) {
case MONO_TYPE_I1:
case MONO_TYPE_I2:
case MONO_TYPE_I4:
case MONO_TYPE_U1:
case MONO_TYPE_U2:
case MONO_TYPE_U4:
return LLVMPointerType (type_to_llvm_type (ctx, ptr_type), 0);
}
return ObjRefType ();
}
case MONO_TYPE_VAR:
case MONO_TYPE_MVAR:
/* Because of generic sharing */
return ObjRefType ();
case MONO_TYPE_GENERICINST:
if (!mono_type_generic_inst_is_valuetype (t))
return ObjRefType ();
/* Fall through */
case MONO_TYPE_VALUETYPE:
case MONO_TYPE_TYPEDBYREF: {
MonoClass *klass;
LLVMTypeRef ltype;
klass = mono_class_from_mono_type_internal (t);
if (MONO_CLASS_IS_SIMD (ctx->cfg, klass))
return simd_class_to_llvm_type (ctx, klass);
if (m_class_is_enumtype (klass))
return type_to_llvm_type (ctx, mono_class_enum_basetype_internal (klass));
ltype = (LLVMTypeRef)g_hash_table_lookup (ctx->module->llvm_types, klass);
if (!ltype) {
ltype = create_llvm_type_for_type (ctx->module, klass);
g_hash_table_insert (ctx->module->llvm_types, klass, ltype);
}
return ltype;
}
default:
printf ("X: %d\n", t->type);
ctx->cfg->exception_message = g_strdup_printf ("type %s", mono_type_full_name (t));
ctx->cfg->disable_llvm = TRUE;
return NULL;
}
}
static gboolean
primitive_type_is_unsigned (MonoTypeEnum t)
{
switch (t) {
case MONO_TYPE_U1:
case MONO_TYPE_U2:
case MONO_TYPE_CHAR:
case MONO_TYPE_U4:
case MONO_TYPE_U8:
case MONO_TYPE_U:
return TRUE;
default:
return FALSE;
}
}
/*
* type_is_unsigned:
*
* Return whether T is an unsigned int type.
*/
static gboolean
type_is_unsigned (EmitContext *ctx, MonoType *t)
{
t = mini_get_underlying_type (t);
if (m_type_is_byref (t))
return FALSE;
return primitive_type_is_unsigned (t->type);
}
/*
* type_to_llvm_arg_type:
*
* Same as type_to_llvm_type, but treat i8/i16 as i32.
*/
static LLVMTypeRef
type_to_llvm_arg_type (EmitContext *ctx, MonoType *t)
{
LLVMTypeRef ptype = type_to_llvm_type (ctx, t);
if (ctx->cfg->llvm_only)
return ptype;
/*
* This works on all ABIs except arm64/iOS, which passes multiple
* arguments in one stack slot.
*/
#ifndef TARGET_ARM64
if (ptype == LLVMInt8Type () || ptype == LLVMInt16Type ()) {
/*
* LLVM generates code which only sets the lower bits, while JITted
* code expects all the bits to be set.
*/
ptype = LLVMInt32Type ();
}
#endif
return ptype;
}
/*
* llvm_type_to_stack_type:
*
* Return the LLVM type which needs to be used when a value of type TYPE is pushed
* on the IL stack.
*/
static G_GNUC_UNUSED LLVMTypeRef
llvm_type_to_stack_type (MonoCompile *cfg, LLVMTypeRef type)
{
if (type == NULL)
return NULL;
if (type == LLVMInt8Type ())
return LLVMInt32Type ();
else if (type == LLVMInt16Type ())
return LLVMInt32Type ();
else if (!cfg->r4fp && type == LLVMFloatType ())
return LLVMDoubleType ();
else
return type;
}
/*
* regtype_to_llvm_type:
*
* Return the LLVM type corresponding to the regtype C used in instruction
* descriptions.
*/
static LLVMTypeRef
regtype_to_llvm_type (char c)
{
switch (c) {
case 'i':
return LLVMInt32Type ();
case 'l':
return LLVMInt64Type ();
case 'f':
return LLVMDoubleType ();
default:
return NULL;
}
}
/*
* op_to_llvm_type:
*
* Return the LLVM type corresponding to the unary/binary opcode OPCODE.
*/
static LLVMTypeRef
op_to_llvm_type (int opcode)
{
switch (opcode) {
case OP_ICONV_TO_I1:
case OP_LCONV_TO_I1:
return LLVMInt8Type ();
case OP_ICONV_TO_U1:
case OP_LCONV_TO_U1:
return LLVMInt8Type ();
case OP_ICONV_TO_I2:
case OP_LCONV_TO_I2:
return LLVMInt16Type ();
case OP_ICONV_TO_U2:
case OP_LCONV_TO_U2:
return LLVMInt16Type ();
case OP_ICONV_TO_I4:
case OP_LCONV_TO_I4:
return LLVMInt32Type ();
case OP_ICONV_TO_U4:
case OP_LCONV_TO_U4:
return LLVMInt32Type ();
case OP_ICONV_TO_I8:
return LLVMInt64Type ();
case OP_ICONV_TO_R4:
return LLVMFloatType ();
case OP_ICONV_TO_R8:
return LLVMDoubleType ();
case OP_ICONV_TO_U8:
return LLVMInt64Type ();
case OP_FCONV_TO_I4:
return LLVMInt32Type ();
case OP_FCONV_TO_I8:
return LLVMInt64Type ();
case OP_FCONV_TO_I1:
case OP_FCONV_TO_U1:
case OP_RCONV_TO_I1:
case OP_RCONV_TO_U1:
return LLVMInt8Type ();
case OP_FCONV_TO_I2:
case OP_FCONV_TO_U2:
case OP_RCONV_TO_I2:
case OP_RCONV_TO_U2:
return LLVMInt16Type ();
case OP_FCONV_TO_U4:
case OP_RCONV_TO_U4:
return LLVMInt32Type ();
case OP_FCONV_TO_U8:
case OP_RCONV_TO_U8:
return LLVMInt64Type ();
case OP_FCONV_TO_I:
case OP_RCONV_TO_I:
return TARGET_SIZEOF_VOID_P == 8 ? LLVMInt64Type () : LLVMInt32Type ();
case OP_IADD_OVF:
case OP_IADD_OVF_UN:
case OP_ISUB_OVF:
case OP_ISUB_OVF_UN:
case OP_IMUL_OVF:
case OP_IMUL_OVF_UN:
return LLVMInt32Type ();
case OP_LADD_OVF:
case OP_LADD_OVF_UN:
case OP_LSUB_OVF:
case OP_LSUB_OVF_UN:
case OP_LMUL_OVF:
case OP_LMUL_OVF_UN:
return LLVMInt64Type ();
default:
printf ("%s\n", mono_inst_name (opcode));
g_assert_not_reached ();
return NULL;
}
}
#define CLAUSE_START(clause) ((clause)->try_offset)
#define CLAUSE_END(clause) (((clause))->try_offset + ((clause))->try_len)
/*
* load_store_to_llvm_type:
*
* Return the size/sign/zero extension corresponding to the load/store opcode
* OPCODE.
*/
static LLVMTypeRef
load_store_to_llvm_type (int opcode, int *size, gboolean *sext, gboolean *zext)
{
*sext = FALSE;
*zext = FALSE;
switch (opcode) {
case OP_LOADI1_MEMBASE:
case OP_STOREI1_MEMBASE_REG:
case OP_STOREI1_MEMBASE_IMM:
case OP_ATOMIC_LOAD_I1:
case OP_ATOMIC_STORE_I1:
*size = 1;
*sext = TRUE;
return LLVMInt8Type ();
case OP_LOADU1_MEMBASE:
case OP_LOADU1_MEM:
case OP_ATOMIC_LOAD_U1:
case OP_ATOMIC_STORE_U1:
*size = 1;
*zext = TRUE;
return LLVMInt8Type ();
case OP_LOADI2_MEMBASE:
case OP_STOREI2_MEMBASE_REG:
case OP_STOREI2_MEMBASE_IMM:
case OP_ATOMIC_LOAD_I2:
case OP_ATOMIC_STORE_I2:
*size = 2;
*sext = TRUE;
return LLVMInt16Type ();
case OP_LOADU2_MEMBASE:
case OP_LOADU2_MEM:
case OP_ATOMIC_LOAD_U2:
case OP_ATOMIC_STORE_U2:
*size = 2;
*zext = TRUE;
return LLVMInt16Type ();
case OP_LOADI4_MEMBASE:
case OP_LOADU4_MEMBASE:
case OP_LOADI4_MEM:
case OP_LOADU4_MEM:
case OP_STOREI4_MEMBASE_REG:
case OP_STOREI4_MEMBASE_IMM:
case OP_ATOMIC_LOAD_I4:
case OP_ATOMIC_STORE_I4:
case OP_ATOMIC_LOAD_U4:
case OP_ATOMIC_STORE_U4:
*size = 4;
return LLVMInt32Type ();
case OP_LOADI8_MEMBASE:
case OP_LOADI8_MEM:
case OP_STOREI8_MEMBASE_REG:
case OP_STOREI8_MEMBASE_IMM:
case OP_ATOMIC_LOAD_I8:
case OP_ATOMIC_STORE_I8:
case OP_ATOMIC_LOAD_U8:
case OP_ATOMIC_STORE_U8:
*size = 8;
return LLVMInt64Type ();
case OP_LOADR4_MEMBASE:
case OP_STORER4_MEMBASE_REG:
case OP_ATOMIC_LOAD_R4:
case OP_ATOMIC_STORE_R4:
*size = 4;
return LLVMFloatType ();
case OP_LOADR8_MEMBASE:
case OP_STORER8_MEMBASE_REG:
case OP_ATOMIC_LOAD_R8:
case OP_ATOMIC_STORE_R8:
*size = 8;
return LLVMDoubleType ();
case OP_LOAD_MEMBASE:
case OP_LOAD_MEM:
case OP_STORE_MEMBASE_REG:
case OP_STORE_MEMBASE_IMM:
*size = TARGET_SIZEOF_VOID_P;
return IntPtrType ();
default:
g_assert_not_reached ();
return NULL;
}
}
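/*
 * E.g. OP_LOADI1_MEMBASE yields LLVMInt8Type () with *size = 1 and
 * *sext = TRUE, telling the caller to sign-extend the loaded i8 up to the
 * stack type; OP_LOADU1_MEMBASE is the same except *zext = TRUE.
 */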
/*
* ovf_op_to_intrins:
*
* Return the LLVM intrinsics corresponding to the overflow opcode OPCODE.
*/
static IntrinsicId
ovf_op_to_intrins (int opcode)
{
switch (opcode) {
case OP_IADD_OVF:
return INTRINS_SADD_OVF_I32;
case OP_IADD_OVF_UN:
return INTRINS_UADD_OVF_I32;
case OP_ISUB_OVF:
return INTRINS_SSUB_OVF_I32;
case OP_ISUB_OVF_UN:
return INTRINS_USUB_OVF_I32;
case OP_IMUL_OVF:
return INTRINS_SMUL_OVF_I32;
case OP_IMUL_OVF_UN:
return INTRINS_UMUL_OVF_I32;
case OP_LADD_OVF:
return INTRINS_SADD_OVF_I64;
case OP_LADD_OVF_UN:
return INTRINS_UADD_OVF_I64;
case OP_LSUB_OVF:
return INTRINS_SSUB_OVF_I64;
case OP_LSUB_OVF_UN:
return INTRINS_USUB_OVF_I64;
case OP_LMUL_OVF:
return INTRINS_SMUL_OVF_I64;
case OP_LMUL_OVF_UN:
return INTRINS_UMUL_OVF_I64;
default:
g_assert_not_reached ();
return (IntrinsicId)0;
}
}
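/*
 * E.g. OP_IADD_OVF maps to INTRINS_SADD_OVF_I32, i.e. llvm.sadd.with.overflow.i32;
 * these intrinsics return the arithmetic result together with an overflow
 * bit, which the emitter then checks in order to throw the managed
 * OverflowException.
 */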
static IntrinsicId
simd_ins_to_intrins (int opcode)
{
switch (opcode) {
#if defined(TARGET_X86) || defined(TARGET_AMD64)
case OP_CVTPD2DQ:
return INTRINS_SSE_CVTPD2DQ;
case OP_CVTPS2DQ:
return INTRINS_SSE_CVTPS2DQ;
case OP_CVTPD2PS:
return INTRINS_SSE_CVTPD2PS;
case OP_CVTTPD2DQ:
return INTRINS_SSE_CVTTPD2DQ;
case OP_CVTTPS2DQ:
return INTRINS_SSE_CVTTPS2DQ;
case OP_SSE_SQRTSS:
return INTRINS_SSE_SQRT_SS;
case OP_SSE2_SQRTSD:
return INTRINS_SSE_SQRT_SD;
#endif
default:
g_assert_not_reached ();
return (IntrinsicId)0;
}
}
static LLVMTypeRef
simd_op_to_llvm_type (int opcode)
{
#if defined(TARGET_X86) || defined(TARGET_AMD64)
switch (opcode) {
case OP_EXTRACT_R8:
case OP_EXPAND_R8:
return sse_r8_t;
case OP_EXTRACT_I8:
case OP_EXPAND_I8:
return sse_i8_t;
case OP_EXTRACT_I4:
case OP_EXPAND_I4:
return sse_i4_t;
case OP_EXTRACT_I2:
case OP_EXTRACTX_U2:
case OP_EXPAND_I2:
return sse_i2_t;
case OP_EXTRACT_I1:
case OP_EXPAND_I1:
return sse_i1_t;
case OP_EXTRACT_R4:
case OP_EXPAND_R4:
return sse_r4_t;
case OP_CVTPD2DQ:
case OP_CVTPD2PS:
case OP_CVTTPD2DQ:
return sse_r8_t;
case OP_CVTPS2DQ:
case OP_CVTTPS2DQ:
return sse_r4_t;
case OP_SQRTPS:
case OP_RSQRTPS:
case OP_DUPPS_LOW:
case OP_DUPPS_HIGH:
return sse_r4_t;
case OP_SQRTPD:
case OP_DUPPD:
return sse_r8_t;
default:
g_assert_not_reached ();
return NULL;
}
#else
return NULL;
#endif
}
static void
set_cold_cconv (LLVMValueRef func)
{
/*
* xcode10 (watchOS) and ARM/ARM64 don't seem to support preserveall; it fails with:
* fatal error: error in backend: Unsupported calling convention
*/
#if !defined(TARGET_WATCHOS) && !defined(TARGET_ARM) && !defined(TARGET_ARM64)
LLVMSetFunctionCallConv (func, LLVMColdCallConv);
#endif
}
static void
set_call_cold_cconv (LLVMValueRef func)
{
#if !defined(TARGET_WATCHOS) && !defined(TARGET_ARM) && !defined(TARGET_ARM64)
LLVMSetInstructionCallConv (func, LLVMColdCallConv);
#endif
}
/*
* get_bb:
*
* Return the LLVM basic block corresponding to BB.
*/
static LLVMBasicBlockRef
get_bb (EmitContext *ctx, MonoBasicBlock *bb)
{
char bb_name_buf [128];
char *bb_name;
if (ctx->bblocks [bb->block_num].bblock == NULL) {
if (bb->flags & BB_EXCEPTION_HANDLER) {
int clause_index = (mono_get_block_region_notry (ctx->cfg, bb->region) >> 8) - 1;
sprintf (bb_name_buf, "EH_CLAUSE%d_BB%d", clause_index, bb->block_num);
bb_name = bb_name_buf;
} else if (bb->block_num < 256) {
if (!ctx->module->bb_names) {
ctx->module->bb_names_len = 256;
ctx->module->bb_names = g_new0 (char*, ctx->module->bb_names_len);
}
if (!ctx->module->bb_names [bb->block_num]) {
char *n;
n = g_strdup_printf ("BB%d", bb->block_num);
mono_memory_barrier ();
ctx->module->bb_names [bb->block_num] = n;
}
bb_name = ctx->module->bb_names [bb->block_num];
} else {
sprintf (bb_name_buf, "BB%d", bb->block_num);
bb_name = bb_name_buf;
}
ctx->bblocks [bb->block_num].bblock = LLVMAppendBasicBlock (ctx->lmethod, bb_name);
ctx->bblocks [bb->block_num].end_bblock = ctx->bblocks [bb->block_num].bblock;
}
return ctx->bblocks [bb->block_num].bblock;
}
/*
* get_end_bb:
*
* Return the last LLVM bblock corresponding to BB.
* This might not be equal to the bb returned by get_bb () since we need to generate
* multiple LLVM bblocks for a mono bblock to handle throwing exceptions.
*/
static LLVMBasicBlockRef
get_end_bb (EmitContext *ctx, MonoBasicBlock *bb)
{
get_bb (ctx, bb);
return ctx->bblocks [bb->block_num].end_bblock;
}
static LLVMBasicBlockRef
gen_bb (EmitContext *ctx, const char *prefix)
{
char bb_name [128];
sprintf (bb_name, "%s%d", prefix, ++ ctx->ex_index);
return LLVMAppendBasicBlock (ctx->lmethod, bb_name);
}
/*
* resolve_patch:
*
* Return the target of the patch identified by TYPE and TARGET.
*/
static gpointer
resolve_patch (MonoCompile *cfg, MonoJumpInfoType type, gconstpointer target)
{
MonoJumpInfo ji;
ERROR_DECL (error);
gpointer res;
memset (&ji, 0, sizeof (ji));
ji.type = type;
ji.data.target = target;
res = mono_resolve_patch_target (cfg->method, NULL, &ji, FALSE, error);
mono_error_assert_ok (error);
return res;
}
/*
* convert_full:
*
* Emit code to convert the LLVM value V to DTYPE.
*/
static LLVMValueRef
convert_full (EmitContext *ctx, LLVMValueRef v, LLVMTypeRef dtype, gboolean is_unsigned)
{
LLVMTypeRef stype = LLVMTypeOf (v);
if (stype != dtype) {
gboolean ext = FALSE;
/* Extend */
if (dtype == LLVMInt64Type () && (stype == LLVMInt32Type () || stype == LLVMInt16Type () || stype == LLVMInt8Type ()))
ext = TRUE;
else if (dtype == LLVMInt32Type () && (stype == LLVMInt16Type () || stype == LLVMInt8Type ()))
ext = TRUE;
else if (dtype == LLVMInt16Type () && (stype == LLVMInt8Type ()))
ext = TRUE;
if (ext)
return is_unsigned ? LLVMBuildZExt (ctx->builder, v, dtype, "") : LLVMBuildSExt (ctx->builder, v, dtype, "");
if (dtype == LLVMDoubleType () && stype == LLVMFloatType ())
return LLVMBuildFPExt (ctx->builder, v, dtype, "");
/* Trunc */
if (stype == LLVMInt64Type () && (dtype == LLVMInt32Type () || dtype == LLVMInt16Type () || dtype == LLVMInt8Type ()))
return LLVMBuildTrunc (ctx->builder, v, dtype, "");
if (stype == LLVMInt32Type () && (dtype == LLVMInt16Type () || dtype == LLVMInt8Type ()))
return LLVMBuildTrunc (ctx->builder, v, dtype, "");
if (stype == LLVMInt16Type () && dtype == LLVMInt8Type ())
return LLVMBuildTrunc (ctx->builder, v, dtype, "");
if (stype == LLVMDoubleType () && dtype == LLVMFloatType ())
return LLVMBuildFPTrunc (ctx->builder, v, dtype, "");
if (LLVMGetTypeKind (stype) == LLVMPointerTypeKind && LLVMGetTypeKind (dtype) == LLVMPointerTypeKind)
return LLVMBuildBitCast (ctx->builder, v, dtype, "");
if (LLVMGetTypeKind (dtype) == LLVMPointerTypeKind)
return LLVMBuildIntToPtr (ctx->builder, v, dtype, "");
if (LLVMGetTypeKind (stype) == LLVMPointerTypeKind)
return LLVMBuildPtrToInt (ctx->builder, v, dtype, "");
if (mono_arch_is_soft_float ()) {
if (stype == LLVMInt32Type () && dtype == LLVMFloatType ())
return LLVMBuildBitCast (ctx->builder, v, dtype, "");
if (stype == LLVMInt32Type () && dtype == LLVMDoubleType ())
return LLVMBuildBitCast (ctx->builder, LLVMBuildZExt (ctx->builder, v, LLVMInt64Type (), ""), dtype, "");
}
if (LLVMGetTypeKind (stype) == LLVMVectorTypeKind && LLVMGetTypeKind (dtype) == LLVMVectorTypeKind) {
if (mono_llvm_get_prim_size_bits (stype) == mono_llvm_get_prim_size_bits (dtype))
return LLVMBuildBitCast (ctx->builder, v, dtype, "");
}
mono_llvm_dump_value (v);
mono_llvm_dump_type (dtype);
printf ("\n");
g_assert_not_reached ();
return NULL;
} else {
return v;
}
}
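/*
 * Examples (illustrative): converting an i8 value to i64 emits a sext (or a
 * zext when is_unsigned is set), i64 to i32 emits a trunc, and a pointer to
 * a differently typed pointer emits a bitcast. Any conversion not handled by
 * the chains above is a bug, hence the g_assert_not_reached ().
 */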
static LLVMValueRef
convert (EmitContext *ctx, LLVMValueRef v, LLVMTypeRef dtype)
{
return convert_full (ctx, v, dtype, FALSE);
}
static void
emit_memset (EmitContext *ctx, LLVMBuilderRef builder, LLVMValueRef v, LLVMValueRef size, int alignment)
{
LLVMValueRef args [5];
int aindex = 0;
args [aindex ++] = v;
args [aindex ++] = LLVMConstInt (LLVMInt8Type (), 0, FALSE);
args [aindex ++] = size;
args [aindex ++] = LLVMConstInt (LLVMInt1Type (), 0, FALSE);
LLVMBuildCall (builder, get_intrins (ctx, INTRINS_MEMSET), args, aindex, "");
}
/*
* emit_volatile_load:
*
* If vreg is volatile, emit a load from its address.
*/
static LLVMValueRef
emit_volatile_load (EmitContext *ctx, int vreg)
{
MonoType *t;
LLVMValueRef v;
// On arm64, we pass the rgctx in a callee-saved register (x15), and llvm
// might keep the value in that register even though the register is marked
// as 'reserved' inside llvm.
v = mono_llvm_build_load (ctx->builder, ctx->addresses [vreg], "", TRUE);
t = ctx->vreg_cli_types [vreg];
if (t && !m_type_is_byref (t)) {
/*
* Might have to zero extend since llvm doesn't have
* unsigned types.
*/
if (t->type == MONO_TYPE_U1 || t->type == MONO_TYPE_U2 || t->type == MONO_TYPE_CHAR || t->type == MONO_TYPE_BOOLEAN)
v = LLVMBuildZExt (ctx->builder, v, LLVMInt32Type (), "");
else if (t->type == MONO_TYPE_I1 || t->type == MONO_TYPE_I2)
v = LLVMBuildSExt (ctx->builder, v, LLVMInt32Type (), "");
else if (t->type == MONO_TYPE_U8)
v = LLVMBuildZExt (ctx->builder, v, LLVMInt64Type (), "");
}
return v;
}
/*
* emit_volatile_store:
*
* If VREG is volatile, emit a store from its value to its address.
*/
static void
emit_volatile_store (EmitContext *ctx, int vreg)
{
MonoInst *var = get_vreg_to_inst (ctx->cfg, vreg);
if (var && var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) {
g_assert (ctx->addresses [vreg]);
#ifdef TARGET_WASM
/* Need volatile stores otherwise the compiler might move them */
mono_llvm_build_store (ctx->builder, convert (ctx, ctx->values [vreg], type_to_llvm_type (ctx, var->inst_vtype)), ctx->addresses [vreg], TRUE, LLVM_BARRIER_NONE);
#else
LLVMBuildStore (ctx->builder, convert (ctx, ctx->values [vreg], type_to_llvm_type (ctx, var->inst_vtype)), ctx->addresses [vreg]);
#endif
}
}
static LLVMTypeRef
sig_to_llvm_sig_no_cinfo (EmitContext *ctx, MonoMethodSignature *sig)
{
LLVMTypeRef ret_type;
LLVMTypeRef *param_types = NULL;
LLVMTypeRef res;
int i, pindex;
ret_type = type_to_llvm_type (ctx, sig->ret);
if (!ctx_ok (ctx))
return NULL;
param_types = g_new0 (LLVMTypeRef, (sig->param_count * 8) + 3);
pindex = 0;
if (sig->hasthis)
param_types [pindex ++] = ThisType ();
for (i = 0; i < sig->param_count; ++i)
param_types [pindex ++] = type_to_llvm_arg_type (ctx, sig->params [i]);
if (!ctx_ok (ctx)) {
g_free (param_types);
return NULL;
}
res = LLVMFunctionType (ret_type, param_types, pindex, FALSE);
g_free (param_types);
return res;
}
/*
* sig_to_llvm_sig_full:
*
* Return the LLVM signature corresponding to the mono signature SIG using the
* calling convention information in CINFO. Fill out the parameter mapping information in CINFO.
*/
static LLVMTypeRef
sig_to_llvm_sig_full (EmitContext *ctx, MonoMethodSignature *sig, LLVMCallInfo *cinfo)
{
LLVMTypeRef ret_type;
LLVMTypeRef *param_types = NULL;
LLVMTypeRef res;
int i, j, pindex, vret_arg_pindex = 0;
gboolean vretaddr = FALSE;
MonoType *rtype;
if (!cinfo)
return sig_to_llvm_sig_no_cinfo (ctx, sig);
ret_type = type_to_llvm_type (ctx, sig->ret);
if (!ctx_ok (ctx))
return NULL;
rtype = mini_get_underlying_type (sig->ret);
switch (cinfo->ret.storage) {
case LLVMArgVtypeInReg:
/* LLVM models this by returning an aggregate value */
if (cinfo->ret.pair_storage [0] == LLVMArgInIReg && cinfo->ret.pair_storage [1] == LLVMArgNone) {
LLVMTypeRef members [2];
members [0] = IntPtrType ();
ret_type = LLVMStructType (members, 1, FALSE);
} else if (cinfo->ret.pair_storage [0] == LLVMArgNone && cinfo->ret.pair_storage [1] == LLVMArgNone) {
/* Empty struct */
ret_type = LLVMVoidType ();
} else if (cinfo->ret.pair_storage [0] == LLVMArgInIReg && cinfo->ret.pair_storage [1] == LLVMArgInIReg) {
LLVMTypeRef members [2];
members [0] = IntPtrType ();
members [1] = IntPtrType ();
ret_type = LLVMStructType (members, 2, FALSE);
} else {
g_assert_not_reached ();
}
break;
case LLVMArgVtypeByVal:
/* Vtype returned normally by val */
break;
case LLVMArgVtypeAsScalar: {
int size = mono_class_value_size (mono_class_from_mono_type_internal (rtype), NULL);
/* LLVM models this by returning an int */
if (size < TARGET_SIZEOF_VOID_P) {
g_assert (cinfo->ret.nslots == 1);
ret_type = LLVMIntType (size * 8);
} else {
g_assert (cinfo->ret.nslots == 1 || cinfo->ret.nslots == 2);
ret_type = LLVMIntType (cinfo->ret.nslots * sizeof (target_mgreg_t) * 8);
}
break;
}
case LLVMArgAsIArgs:
ret_type = LLVMArrayType (IntPtrType (), cinfo->ret.nslots);
break;
case LLVMArgFpStruct: {
/* Vtype returned as a fp struct */
LLVMTypeRef members [16];
/* Have to create our own structure since we don't map fp structures to LLVM fp structures yet */
for (i = 0; i < cinfo->ret.nslots; ++i)
members [i] = cinfo->ret.esize == 8 ? LLVMDoubleType () : LLVMFloatType ();
ret_type = LLVMStructType (members, cinfo->ret.nslots, FALSE);
break;
}
case LLVMArgVtypeByRef:
/* Vtype returned using a hidden argument */
ret_type = LLVMVoidType ();
break;
case LLVMArgVtypeRetAddr:
case LLVMArgGsharedvtFixed:
case LLVMArgGsharedvtFixedVtype:
case LLVMArgGsharedvtVariable:
vretaddr = TRUE;
ret_type = LLVMVoidType ();
break;
case LLVMArgWasmVtypeAsScalar:
g_assert (cinfo->ret.esize);
ret_type = LLVMIntType (cinfo->ret.esize * 8);
break;
default:
break;
}
param_types = g_new0 (LLVMTypeRef, (sig->param_count * 8) + 3);
pindex = 0;
if (cinfo->ret.storage == LLVMArgVtypeByRef) {
/*
* Has to be the first argument because of the sret argument attribute.
* FIXME: This might conflict with passing 'this' as the first argument, but
* this is only used on arm64 which has a dedicated struct return register.
*/
cinfo->vret_arg_pindex = pindex;
param_types [pindex] = type_to_llvm_arg_type (ctx, sig->ret);
if (!ctx_ok (ctx)) {
g_free (param_types);
return NULL;
}
param_types [pindex] = LLVMPointerType (param_types [pindex], 0);
pindex ++;
}
if (!ctx->llvm_only && cinfo->rgctx_arg) {
cinfo->rgctx_arg_pindex = pindex;
param_types [pindex] = ctx->module->ptr_type;
pindex ++;
}
if (cinfo->imt_arg) {
cinfo->imt_arg_pindex = pindex;
param_types [pindex] = ctx->module->ptr_type;
pindex ++;
}
if (vretaddr) {
/* Compute the index in the LLVM signature where the vret arg needs to be passed */
vret_arg_pindex = pindex;
if (cinfo->vret_arg_index == 1) {
/* Add the slots consumed by the first argument */
LLVMArgInfo *ainfo = &cinfo->args [0];
switch (ainfo->storage) {
case LLVMArgVtypeInReg:
for (j = 0; j < 2; ++j) {
if (ainfo->pair_storage [j] == LLVMArgInIReg)
vret_arg_pindex ++;
}
break;
default:
vret_arg_pindex ++;
}
}
cinfo->vret_arg_pindex = vret_arg_pindex;
}
if (vretaddr && vret_arg_pindex == pindex)
param_types [pindex ++] = IntPtrType ();
if (sig->hasthis) {
cinfo->this_arg_pindex = pindex;
param_types [pindex ++] = ThisType ();
cinfo->args [0].pindex = cinfo->this_arg_pindex;
}
if (vretaddr && vret_arg_pindex == pindex)
param_types [pindex ++] = IntPtrType ();
for (i = 0; i < sig->param_count; ++i) {
LLVMArgInfo *ainfo = &cinfo->args [i + sig->hasthis];
if (vretaddr && vret_arg_pindex == pindex)
param_types [pindex ++] = IntPtrType ();
ainfo->pindex = pindex;
switch (ainfo->storage) {
case LLVMArgVtypeInReg:
for (j = 0; j < 2; ++j) {
switch (ainfo->pair_storage [j]) {
case LLVMArgInIReg:
param_types [pindex ++] = LLVMIntType (TARGET_SIZEOF_VOID_P * 8);
break;
case LLVMArgNone:
break;
default:
g_assert_not_reached ();
}
}
break;
case LLVMArgVtypeByVal:
param_types [pindex] = type_to_llvm_arg_type (ctx, ainfo->type);
if (!ctx_ok (ctx))
break;
param_types [pindex] = LLVMPointerType (param_types [pindex], 0);
pindex ++;
break;
case LLVMArgAsIArgs:
if (ainfo->esize == 8)
param_types [pindex] = LLVMArrayType (LLVMInt64Type (), ainfo->nslots);
else
param_types [pindex] = LLVMArrayType (IntPtrType (), ainfo->nslots);
pindex ++;
break;
case LLVMArgVtypeAddr:
case LLVMArgVtypeByRef:
param_types [pindex] = type_to_llvm_arg_type (ctx, ainfo->type);
if (!ctx_ok (ctx))
break;
param_types [pindex] = LLVMPointerType (param_types [pindex], 0);
pindex ++;
break;
case LLVMArgAsFpArgs: {
int j;
/* Emit dummy fp arguments if needed so the rest is passed on the stack */
for (j = 0; j < ainfo->ndummy_fpargs; ++j)
param_types [pindex ++] = LLVMDoubleType ();
for (j = 0; j < ainfo->nslots; ++j)
param_types [pindex ++] = ainfo->esize == 8 ? LLVMDoubleType () : LLVMFloatType ();
break;
}
case LLVMArgVtypeAsScalar:
g_assert_not_reached ();
break;
case LLVMArgWasmVtypeAsScalar:
g_assert (ainfo->esize);
param_types [pindex ++] = LLVMIntType (ainfo->esize * 8);
break;
case LLVMArgGsharedvtFixed:
case LLVMArgGsharedvtFixedVtype:
param_types [pindex ++] = LLVMPointerType (type_to_llvm_arg_type (ctx, ainfo->type), 0);
break;
case LLVMArgGsharedvtVariable:
param_types [pindex ++] = LLVMPointerType (IntPtrType (), 0);
break;
default:
param_types [pindex ++] = type_to_llvm_arg_type (ctx, ainfo->type);
break;
}
}
if (!ctx_ok (ctx)) {
g_free (param_types);
return NULL;
}
if (vretaddr && vret_arg_pindex == pindex)
param_types [pindex ++] = IntPtrType ();
if (ctx->llvm_only && cinfo->rgctx_arg) {
/* Pass the rgctx as the last argument */
cinfo->rgctx_arg_pindex = pindex;
param_types [pindex] = ctx->module->ptr_type;
pindex ++;
} else if (ctx->llvm_only && cinfo->dummy_arg) {
/* Pass a dummy arg last */
cinfo->dummy_arg_pindex = pindex;
param_types [pindex] = ctx->module->ptr_type;
pindex ++;
}
res = LLVMFunctionType (ret_type, param_types, pindex, FALSE);
g_free (param_types);
return res;
}
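/*
 * Example of the vretaddr placement above (a sketch): with
 * cinfo->vret_arg_index == 1 and a 'this' argument occupying one slot, the
 * vret address is inserted right after 'this', i.e. the LLVM signature
 * becomes (this, vret, args...) rather than (vret, this, args...).
 */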
static LLVMTypeRef
sig_to_llvm_sig (EmitContext *ctx, MonoMethodSignature *sig)
{
return sig_to_llvm_sig_full (ctx, sig, NULL);
}
/*
* LLVMFunctionType0:
*
* Create an LLVM function type from the arguments.
*/
static G_GNUC_UNUSED LLVMTypeRef
LLVMFunctionType0 (LLVMTypeRef ReturnType,
int IsVarArg)
{
return LLVMFunctionType (ReturnType, NULL, 0, IsVarArg);
}
/*
* LLVMFunctionType1:
*
* Create an LLVM function type from the arguments.
*/
static G_GNUC_UNUSED LLVMTypeRef
LLVMFunctionType1 (LLVMTypeRef ReturnType,
LLVMTypeRef ParamType1,
int IsVarArg)
{
LLVMTypeRef param_types [1];
param_types [0] = ParamType1;
return LLVMFunctionType (ReturnType, param_types, 1, IsVarArg);
}
/*
* LLVMFunctionType2:
*
* Create an LLVM function type from the arguments.
*/
static G_GNUC_UNUSED LLVMTypeRef
LLVMFunctionType2 (LLVMTypeRef ReturnType,
LLVMTypeRef ParamType1,
LLVMTypeRef ParamType2,
int IsVarArg)
{
LLVMTypeRef param_types [2];
param_types [0] = ParamType1;
param_types [1] = ParamType2;
return LLVMFunctionType (ReturnType, param_types, 2, IsVarArg);
}
/*
* LLVMFunctionType3:
*
* Create an LLVM function type from the arguments.
*/
static G_GNUC_UNUSED LLVMTypeRef
LLVMFunctionType3 (LLVMTypeRef ReturnType,
LLVMTypeRef ParamType1,
LLVMTypeRef ParamType2,
LLVMTypeRef ParamType3,
int IsVarArg)
{
LLVMTypeRef param_types [3];
param_types [0] = ParamType1;
param_types [1] = ParamType2;
param_types [2] = ParamType3;
return LLVMFunctionType (ReturnType, param_types, 3, IsVarArg);
}
static G_GNUC_UNUSED LLVMTypeRef
LLVMFunctionType4 (LLVMTypeRef ReturnType,
LLVMTypeRef ParamType1,
LLVMTypeRef ParamType2,
LLVMTypeRef ParamType3,
LLVMTypeRef ParamType4,
int IsVarArg)
{
LLVMTypeRef param_types [4];
param_types [0] = ParamType1;
param_types [1] = ParamType2;
param_types [2] = ParamType3;
param_types [3] = ParamType4;
return LLVMFunctionType (ReturnType, param_types, 4, IsVarArg);
}
static G_GNUC_UNUSED LLVMTypeRef
LLVMFunctionType5 (LLVMTypeRef ReturnType,
LLVMTypeRef ParamType1,
LLVMTypeRef ParamType2,
LLVMTypeRef ParamType3,
LLVMTypeRef ParamType4,
LLVMTypeRef ParamType5,
int IsVarArg)
{
LLVMTypeRef param_types [5];
param_types [0] = ParamType1;
param_types [1] = ParamType2;
param_types [2] = ParamType3;
param_types [3] = ParamType4;
param_types [4] = ParamType5;
return LLVMFunctionType (ReturnType, param_types, 5, IsVarArg);
}
/*
* create_builder:
*
* Create an LLVM builder and remember it so it can be freed later.
*/
static LLVMBuilderRef
create_builder (EmitContext *ctx)
{
LLVMBuilderRef builder = LLVMCreateBuilder ();
if (mono_use_fast_math)
mono_llvm_set_fast_math (builder);
ctx->builders = g_slist_prepend_mempool (ctx->cfg->mempool, ctx->builders, builder);
emit_default_dbg_loc (ctx, builder);
return builder;
}
static char*
get_aotconst_name (MonoJumpInfoType type, gconstpointer data, int got_offset)
{
char *name;
int len;
switch (type) {
case MONO_PATCH_INFO_JIT_ICALL_ID:
name = g_strdup_printf ("jit_icall_%s", mono_find_jit_icall_info ((MonoJitICallId)(gsize)data)->name);
break;
case MONO_PATCH_INFO_JIT_ICALL_ADDR_NOCALL:
name = g_strdup_printf ("jit_icall_addr_nocall_%s", mono_find_jit_icall_info ((MonoJitICallId)(gsize)data)->name);
break;
case MONO_PATCH_INFO_RGCTX_SLOT_INDEX: {
MonoJumpInfoRgctxEntry *entry = (MonoJumpInfoRgctxEntry*)data;
name = g_strdup_printf ("rgctx_slot_index_%s", mono_rgctx_info_type_to_str (entry->info_type));
break;
}
case MONO_PATCH_INFO_AOT_MODULE:
case MONO_PATCH_INFO_GC_SAFE_POINT_FLAG:
case MONO_PATCH_INFO_GC_CARD_TABLE_ADDR:
case MONO_PATCH_INFO_GC_NURSERY_START:
case MONO_PATCH_INFO_GC_NURSERY_BITS:
case MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG:
name = g_strdup_printf ("%s", mono_ji_type_to_string (type));
len = strlen (name);
for (int i = 0; i < len; ++i)
name [i] = tolower (name [i]);
break;
default:
name = g_strdup_printf ("%s_%d", mono_ji_type_to_string (type), got_offset);
len = strlen (name);
for (int i = 0; i < len; ++i)
name [i] = tolower (name [i]);
break;
}
return name;
}
static int
compute_aot_got_offset (MonoLLVMModule *module, MonoJumpInfo *ji, LLVMTypeRef llvm_type)
{
guint32 got_offset = mono_aot_get_got_offset (ji);
LLVMTypeRef lookup_type = (LLVMTypeRef) g_hash_table_lookup (module->got_idx_to_type, GINT_TO_POINTER (got_offset));
if (!lookup_type) {
lookup_type = llvm_type;
} else if (llvm_type != lookup_type) {
lookup_type = module->ptr_type;
} else {
return got_offset;
}
g_hash_table_insert (module->got_idx_to_type, GINT_TO_POINTER (got_offset), lookup_type);
return got_offset;
}
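/*
 * E.g. (sketch): if one caller records a got slot with an i8* type and a
 * later caller requests the same slot with a function pointer type, the
 * recorded type degrades to the generic module->ptr_type, so a single
 * emitted global remains usable by both.
 */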
/* Allocate a GOT slot for TYPE/DATA, and emit IR to load it */
static LLVMValueRef
get_aotconst_module (MonoLLVMModule *module, LLVMBuilderRef builder, MonoJumpInfoType type, gconstpointer data, LLVMTypeRef llvm_type,
guint32 *out_got_offset, MonoJumpInfo **out_ji)
{
guint32 got_offset;
LLVMValueRef load;
MonoJumpInfo tmp_ji;
tmp_ji.type = type;
tmp_ji.data.target = data;
MonoJumpInfo *ji = mono_aot_patch_info_dup (&tmp_ji);
if (out_ji)
*out_ji = ji;
got_offset = compute_aot_got_offset (module, ji, llvm_type);
module->max_got_offset = MAX (module->max_got_offset, got_offset);
if (out_got_offset)
*out_got_offset = got_offset;
if (module->static_link && type == MONO_PATCH_INFO_GC_SAFE_POINT_FLAG) {
if (!module->gc_safe_point_flag_var) {
const char *symbol = "mono_polling_required";
module->gc_safe_point_flag_var = LLVMAddGlobal (module->lmodule, llvm_type, symbol);
LLVMSetLinkage (module->gc_safe_point_flag_var, LLVMExternalLinkage);
}
return module->gc_safe_point_flag_var;
}
if (module->static_link && type == MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG) {
if (!module->interrupt_flag_var) {
const char *symbol = "mono_thread_interruption_request_flag";
module->interrupt_flag_var = LLVMAddGlobal (module->lmodule, llvm_type, symbol);
LLVMSetLinkage (module->interrupt_flag_var, LLVMExternalLinkage);
}
return module->interrupt_flag_var;
}
LLVMValueRef const_var = g_hash_table_lookup (module->aotconst_vars, GINT_TO_POINTER (got_offset));
if (!const_var) {
LLVMTypeRef type = llvm_type;
// FIXME:
char *name = get_aotconst_name (ji->type, ji->data.target, got_offset);
char *symbol = g_strdup_printf ("aotconst_%s", name);
g_free (name);
LLVMValueRef v = LLVMAddGlobal (module->lmodule, type, symbol);
LLVMSetVisibility (v, LLVMHiddenVisibility);
LLVMSetLinkage (v, LLVMInternalLinkage);
LLVMSetInitializer (v, LLVMConstNull (type));
// FIXME:
LLVMSetAlignment (v, 8);
g_hash_table_insert (module->aotconst_vars, GINT_TO_POINTER (got_offset), v);
const_var = v;
}
load = LLVMBuildLoad (builder, const_var, "");
if (mono_aot_is_shared_got_offset (got_offset))
set_invariant_load_flag (load);
if (type == MONO_PATCH_INFO_LDSTR)
set_nonnull_load_flag (load);
load = LLVMBuildBitCast (builder, load, llvm_type, "");
return load;
}
static LLVMValueRef
get_aotconst (EmitContext *ctx, MonoJumpInfoType type, gconstpointer data, LLVMTypeRef llvm_type)
{
MonoCompile *cfg;
guint32 got_offset;
MonoJumpInfo *ji;
LLVMValueRef load;
cfg = ctx->cfg;
load = get_aotconst_module (ctx->module, ctx->builder, type, data, llvm_type, &got_offset, &ji);
ji->next = cfg->patch_info;
cfg->patch_info = ji;
/*
 * If the got slot is shared, it means it's initialized when the aot image is loaded, so we don't need to
* explicitly initialize it.
*/
if (!mono_aot_is_shared_got_offset (got_offset)) {
//mono_print_ji (ji);
//printf ("\n");
ctx->cfg->got_access_count ++;
}
return load;
}
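/*
 * get_dummy_aotconst:
 *
 * Emit a placeholder load from the dummy GOT variable; callers record the load
 * so it can be replaced later in mono_llvm_fixup_aot_module ().
 */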
static LLVMValueRef
get_dummy_aotconst (EmitContext *ctx, LLVMTypeRef llvm_type)
{
LLVMValueRef indexes [2];
LLVMValueRef got_entry_addr, load;
LLVMBuilderRef builder = ctx->builder;
indexes [0] = LLVMConstInt (LLVMInt32Type (), 0, FALSE);
indexes [1] = LLVMConstInt (LLVMInt32Type (), 0, FALSE);
got_entry_addr = LLVMBuildGEP (builder, ctx->module->dummy_got_var, indexes, 2, "");
load = LLVMBuildLoad (builder, got_entry_addr, "");
load = convert (ctx, load, llvm_type);
return load;
}
typedef struct {
MonoJumpInfo *ji;
MonoMethod *method;
LLVMValueRef load;
LLVMTypeRef type;
} CallSite;
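/*
 * get_callee_llvmonly:
 *
 * Return an llvm value representing the callee in llvm-only mode, either as a
 * direct reference or as a load from a GOT slot.
 */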
static LLVMValueRef
get_callee_llvmonly (EmitContext *ctx, LLVMTypeRef llvm_sig, MonoJumpInfoType type, gconstpointer data)
{
LLVMValueRef callee;
char *callee_name = NULL;
if (ctx->module->static_link && ctx->module->assembly->image != mono_get_corlib ()) {
if (type == MONO_PATCH_INFO_JIT_ICALL_ID) {
MonoJitICallInfo * const info = mono_find_jit_icall_info ((MonoJitICallId)(gsize)data);
g_assert (info);
if (info->func != info->wrapper) {
type = MONO_PATCH_INFO_METHOD;
data = mono_icall_get_wrapper_method (info);
callee_name = mono_aot_get_mangled_method_name ((MonoMethod*)data);
}
} else if (type == MONO_PATCH_INFO_METHOD) {
MonoMethod *method = (MonoMethod*)data;
if (m_class_get_image (method->klass) != ctx->module->assembly->image && mono_aot_is_externally_callable (method))
callee_name = mono_aot_get_mangled_method_name (method);
}
}
if (!callee_name)
callee_name = mono_aot_get_direct_call_symbol (type, data);
if (callee_name) {
/* Directly callable */
// FIXME: Locking
callee = (LLVMValueRef)g_hash_table_lookup (ctx->module->direct_callables, callee_name);
if (!callee) {
callee = LLVMAddFunction (ctx->lmodule, callee_name, llvm_sig);
LLVMSetVisibility (callee, LLVMHiddenVisibility);
g_hash_table_insert (ctx->module->direct_callables, (char*)callee_name, callee);
} else {
/* LLVMTypeRef's are uniqued */
if (LLVMGetElementType (LLVMTypeOf (callee)) != llvm_sig)
return LLVMConstBitCast (callee, LLVMPointerType (llvm_sig, 0));
g_free (callee_name);
}
return callee;
}
/*
* Change references to icalls/pinvokes/jit icalls to their wrappers when in corlib, so
* they can be called directly.
*/
if (ctx->module->assembly->image == mono_get_corlib () && type == MONO_PATCH_INFO_JIT_ICALL_ID) {
MonoJitICallInfo * const info = mono_find_jit_icall_info ((MonoJitICallId)(gsize)data);
if (info->func != info->wrapper) {
type = MONO_PATCH_INFO_METHOD;
data = mono_icall_get_wrapper_method (info);
}
}
if (ctx->module->assembly->image == mono_get_corlib () && type == MONO_PATCH_INFO_METHOD) {
MonoMethod *method = (MonoMethod*)data;
if (m_method_is_icall (method) || m_method_is_pinvoke (method))
data = mono_marshal_get_native_wrapper (method, TRUE, TRUE);
}
/*
* Instead of emitting an indirect call through a got slot, emit a placeholder, and
* replace it with a direct call or an indirect call in mono_llvm_fixup_aot_module ()
* after all methods have been emitted.
*/
if (type == MONO_PATCH_INFO_METHOD) {
MonoMethod *method = (MonoMethod*)data;
if (m_class_get_image (method->klass)->assembly == ctx->module->assembly) {
MonoJumpInfo tmp_ji;
tmp_ji.type = type;
tmp_ji.data.target = method;
MonoJumpInfo *ji = mono_aot_patch_info_dup (&tmp_ji);
ji->next = ctx->cfg->patch_info;
ctx->cfg->patch_info = ji;
LLVMTypeRef llvm_type = LLVMPointerType (llvm_sig, 0);
ctx->cfg->got_access_count ++;
CallSite *info = g_new0 (CallSite, 1);
info->method = method;
info->ji = ji;
info->type = llvm_type;
/*
 * Emit a dummy load to represent the callee, and later replace it either with
 * a reference to the llvm method for the callee, or with a load from the
 * GOT.
*/
LLVMValueRef load = get_dummy_aotconst (ctx, llvm_type);
info->load = load;
g_ptr_array_add (ctx->callsite_list, info);
return load;
}
}
/*
* All other calls are made through the GOT.
*/
callee = get_aotconst (ctx, type, data, LLVMPointerType (llvm_sig, 0));
return callee;
}
/*
* get_callee:
*
* Return an llvm value representing the callee given by the arguments.
*/
static LLVMValueRef
get_callee (EmitContext *ctx, LLVMTypeRef llvm_sig, MonoJumpInfoType type, gconstpointer data)
{
LLVMValueRef callee;
char *callee_name;
MonoJumpInfo *ji = NULL;
if (ctx->llvm_only)
return get_callee_llvmonly (ctx, llvm_sig, type, data);
callee_name = NULL;
/* Cross-assembly direct calls */
if (type == MONO_PATCH_INFO_METHOD) {
MonoMethod *cmethod = (MonoMethod*)data;
if (m_class_get_image (cmethod->klass) != ctx->module->assembly->image) {
MonoJumpInfo tmp_ji;
memset (&tmp_ji, 0, sizeof (MonoJumpInfo));
tmp_ji.type = type;
tmp_ji.data.target = data;
if (mono_aot_is_direct_callable (&tmp_ji)) {
/*
* This will add a reference to cmethod's image so it will
* be loaded when the current AOT image is loaded, so
* the GOT slots used by the init method code are initialized.
*/
tmp_ji.type = MONO_PATCH_INFO_IMAGE;
tmp_ji.data.image = m_class_get_image (cmethod->klass);
ji = mono_aot_patch_info_dup (&tmp_ji);
mono_aot_get_got_offset (ji);
callee_name = mono_aot_get_mangled_method_name (cmethod);
callee = (LLVMValueRef)g_hash_table_lookup (ctx->module->direct_callables, callee_name);
if (!callee) {
callee = LLVMAddFunction (ctx->lmodule, callee_name, llvm_sig);
LLVMSetLinkage (callee, LLVMExternalLinkage);
g_hash_table_insert (ctx->module->direct_callables, callee_name, callee);
} else {
/* LLVMTypeRef's are uniqued */
if (LLVMGetElementType (LLVMTypeOf (callee)) != llvm_sig)
callee = LLVMConstBitCast (callee, LLVMPointerType (llvm_sig, 0));
g_free (callee_name);
}
return callee;
}
}
}
callee_name = mono_aot_get_plt_symbol (type, data);
if (!callee_name)
return NULL;
if (ctx->cfg->compile_aot)
/* Add a patch so referenced wrappers can be compiled in full aot mode */
mono_add_patch_info (ctx->cfg, 0, type, data);
// FIXME: Locking
callee = (LLVMValueRef)g_hash_table_lookup (ctx->module->plt_entries, callee_name);
if (!callee) {
callee = LLVMAddFunction (ctx->lmodule, callee_name, llvm_sig);
LLVMSetVisibility (callee, LLVMHiddenVisibility);
g_hash_table_insert (ctx->module->plt_entries, (char*)callee_name, callee);
}
if (ctx->cfg->compile_aot) {
ji = g_new0 (MonoJumpInfo, 1);
ji->type = type;
ji->data.target = data;
g_hash_table_insert (ctx->module->plt_entries_ji, ji, callee);
}
return callee;
}
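/*
 * get_jit_callee:
 *
 * JIT mode version of get_callee (): resolve the target eagerly and load its
 * address from a global variable, since the call site won't be patched later.
 */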
static LLVMValueRef
get_jit_callee (EmitContext *ctx, const char *name, LLVMTypeRef llvm_sig, MonoJumpInfoType type, gconstpointer data)
{
gpointer target;
// This won't be patched so compile the wrapper immediately
if (type == MONO_PATCH_INFO_JIT_ICALL_ID) {
MonoJitICallInfo * const info = mono_find_jit_icall_info ((MonoJitICallId)(gsize)data);
target = (gpointer)mono_icall_get_wrapper_full (info, TRUE);
} else {
target = resolve_patch (ctx->cfg, type, data);
}
LLVMValueRef tramp_var = LLVMAddGlobal (ctx->lmodule, LLVMPointerType (llvm_sig, 0), name);
LLVMSetInitializer (tramp_var, LLVMConstIntToPtr (LLVMConstInt (LLVMInt64Type (), (guint64)(size_t)target, FALSE), LLVMPointerType (llvm_sig, 0)));
LLVMSetLinkage (tramp_var, LLVMExternalLinkage);
LLVMValueRef callee = LLVMBuildLoad (ctx->builder, tramp_var, "");
return callee;
}
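/*
 * get_handler_clause:
 *
 * Return the index of the exception clause containing BB, or -1 if there is none.
 */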
static int
get_handler_clause (MonoCompile *cfg, MonoBasicBlock *bb)
{
MonoMethodHeader *header = cfg->header;
MonoExceptionClause *clause;
int i;
/* Directly */
if (bb->region != -1 && MONO_BBLOCK_IS_IN_REGION (bb, MONO_REGION_TRY))
return (bb->region >> 8) - 1;
/* Indirectly */
for (i = 0; i < header->num_clauses; ++i) {
clause = &header->clauses [i];
if (MONO_OFFSET_IN_CLAUSE (clause, bb->real_offset) && clause->flags == MONO_EXCEPTION_CLAUSE_NONE)
return i;
}
return -1;
}
static MonoExceptionClause *
get_most_deep_clause (MonoCompile *cfg, EmitContext *ctx, MonoBasicBlock *bb)
{
if (bb == cfg->bb_init)
return NULL;
// Since they're sorted by nesting, we just need
// the first one that the bb is a member of
for (int i = 0; i < cfg->header->num_clauses; i++) {
MonoExceptionClause *curr = &cfg->header->clauses [i];
if (MONO_OFFSET_IN_CLAUSE (curr, bb->real_offset))
return curr;
}
return NULL;
}
static void
set_metadata_flag (LLVMValueRef v, const char *flag_name)
{
LLVMValueRef md_arg;
int md_kind;
md_kind = LLVMGetMDKindID (flag_name, strlen (flag_name));
md_arg = LLVMMDString ("mono", 4);
LLVMSetMetadata (v, md_kind, LLVMMDNode (&md_arg, 1));
}
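/* Mark a load as producing a non-null value by attaching '!nonnull' metadata to it. */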
static void
set_nonnull_load_flag (LLVMValueRef v)
{
LLVMValueRef md_arg;
int md_kind;
const char *flag_name;
flag_name = "nonnull";
md_kind = LLVMGetMDKindID (flag_name, strlen (flag_name));
md_arg = LLVMMDString ("<index>", strlen ("<index>"));
LLVMSetMetadata (v, md_kind, LLVMMDNode (&md_arg, 1));
}
static void
set_nontemporal_flag (LLVMValueRef v)
{
LLVMValueRef md_arg;
int md_kind;
const char *flag_name;
// FIXME: Cache this
flag_name = "nontemporal";
md_kind = LLVMGetMDKindID (flag_name, strlen (flag_name));
md_arg = const_int32 (1);
LLVMSetMetadata (v, md_kind, LLVMMDNode (&md_arg, 1));
}
static void
set_invariant_load_flag (LLVMValueRef v)
{
LLVMValueRef md_arg;
int md_kind;
const char *flag_name;
// FIXME: Cache this
flag_name = "invariant.load";
md_kind = LLVMGetMDKindID (flag_name, strlen (flag_name));
md_arg = LLVMMDString ("<index>", strlen ("<index>"));
LLVMSetMetadata (v, md_kind, LLVMMDNode (&md_arg, 1));
}
/*
* emit_call:
*
 * Emit an LLVM call or invoke instruction depending on whether the call is inside
* a try region.
*/
static LLVMValueRef
emit_call (EmitContext *ctx, MonoBasicBlock *bb, LLVMBuilderRef *builder_ref, LLVMValueRef callee, LLVMValueRef *args, int pindex)
{
MonoCompile *cfg = ctx->cfg;
LLVMValueRef lcall = NULL;
LLVMBuilderRef builder = *builder_ref;
MonoExceptionClause *clause;
if (ctx->llvm_only) {
clause = bb ? get_most_deep_clause (cfg, ctx, bb) : NULL;
// FIXME: Use an invoke only for calls inside try-catch blocks
if (clause && (!cfg->deopt || ctx->has_catch)) {
/*
* Have to use an invoke instead of a call, branching to the
* handler bblock of the clause containing this bblock.
*/
intptr_t key = CLAUSE_END (clause);
LLVMBasicBlockRef lpad_bb = (LLVMBasicBlockRef)g_hash_table_lookup (ctx->exc_meta, (gconstpointer)key);
// FIXME: Find the one that has the lowest end bound for the right start address
// FIXME: Finally + nesting
if (lpad_bb) {
LLVMBasicBlockRef noex_bb = gen_bb (ctx, "CALL_NOEX_BB");
/* Use an invoke */
lcall = LLVMBuildInvoke (builder, callee, args, pindex, noex_bb, lpad_bb, "");
builder = ctx->builder = create_builder (ctx);
LLVMPositionBuilderAtEnd (ctx->builder, noex_bb);
ctx->bblocks [bb->block_num].end_bblock = noex_bb;
}
}
} else {
int clause_index = get_handler_clause (cfg, bb);
if (clause_index != -1) {
MonoMethodHeader *header = cfg->header;
MonoExceptionClause *ec = &header->clauses [clause_index];
MonoBasicBlock *tblock;
LLVMBasicBlockRef ex_bb, noex_bb;
/*
* Have to use an invoke instead of a call, branching to the
* handler bblock of the clause containing this bblock.
*/
g_assert (ec->flags == MONO_EXCEPTION_CLAUSE_NONE || ec->flags == MONO_EXCEPTION_CLAUSE_FINALLY || ec->flags == MONO_EXCEPTION_CLAUSE_FAULT);
tblock = cfg->cil_offset_to_bb [ec->handler_offset];
g_assert (tblock);
ctx->bblocks [tblock->block_num].invoke_target = TRUE;
ex_bb = get_bb (ctx, tblock);
noex_bb = gen_bb (ctx, "NOEX_BB");
/* Use an invoke */
lcall = LLVMBuildInvoke (builder, callee, args, pindex, noex_bb, ex_bb, "");
builder = ctx->builder = create_builder (ctx);
LLVMPositionBuilderAtEnd (ctx->builder, noex_bb);
ctx->bblocks [bb->block_num].end_bblock = noex_bb;
}
}
if (!lcall) {
lcall = LLVMBuildCall (builder, callee, args, pindex, "");
ctx->builder = builder;
}
if (builder_ref)
*builder_ref = ctx->builder;
return lcall;
}
static LLVMValueRef
emit_load (EmitContext *ctx, MonoBasicBlock *bb, LLVMBuilderRef *builder_ref, int size, LLVMValueRef addr, LLVMValueRef base, const char *name, gboolean is_faulting, gboolean is_volatile, BarrierKind barrier)
{
LLVMValueRef res;
/*
* We emit volatile loads for loads which can fault, because otherwise
* LLVM will generate invalid code when encountering a load from a
* NULL address.
*/
if (barrier != LLVM_BARRIER_NONE)
res = mono_llvm_build_atomic_load (*builder_ref, addr, name, is_volatile, size, barrier);
else
res = mono_llvm_build_load (*builder_ref, addr, name, is_volatile);
return res;
}
static void
emit_store_general (EmitContext *ctx, MonoBasicBlock *bb, LLVMBuilderRef *builder_ref, int size, LLVMValueRef value, LLVMValueRef addr, LLVMValueRef base, gboolean is_faulting, gboolean is_volatile, BarrierKind barrier)
{
if (barrier != LLVM_BARRIER_NONE)
mono_llvm_build_aligned_store (*builder_ref, value, addr, barrier, size);
else
mono_llvm_build_store (*builder_ref, value, addr, is_volatile, barrier);
}
static void
emit_store (EmitContext *ctx, MonoBasicBlock *bb, LLVMBuilderRef *builder_ref, int size, LLVMValueRef value, LLVMValueRef addr, LLVMValueRef base, gboolean is_faulting, gboolean is_volatile)
{
emit_store_general (ctx, bb, builder_ref, size, value, addr, base, is_faulting, is_volatile, LLVM_BARRIER_NONE);
}
/*
* emit_cond_system_exception:
*
 * Emit code to throw the exception EXC_TYPE if the condition CMP is true.
* Might set the ctx exception.
*/
static void
emit_cond_system_exception (EmitContext *ctx, MonoBasicBlock *bb, const char *exc_type, LLVMValueRef cmp, gboolean force_explicit)
{
LLVMBasicBlockRef ex_bb, ex2_bb = NULL, noex_bb;
LLVMBuilderRef builder;
MonoClass *exc_class;
LLVMValueRef args [2];
LLVMValueRef callee;
gboolean no_pc = FALSE;
static MonoClass *exc_classes [MONO_EXC_INTRINS_NUM];
if (IS_TARGET_AMD64)
/* Some platforms don't require the pc argument */
no_pc = TRUE;
int exc_id = mini_exception_id_by_name (exc_type);
if (!exc_classes [exc_id])
exc_classes [exc_id] = mono_class_load_from_name (mono_get_corlib (), "System", exc_type);
exc_class = exc_classes [exc_id];
ex_bb = gen_bb (ctx, "EX_BB");
if (ctx->llvm_only)
ex2_bb = gen_bb (ctx, "EX2_BB");
noex_bb = gen_bb (ctx, "NOEX_BB");
LLVMValueRef branch = LLVMBuildCondBr (ctx->builder, cmp, ex_bb, noex_bb);
if (exc_id == MONO_EXC_NULL_REF && !ctx->cfg->disable_llvm_implicit_null_checks && !force_explicit) {
mono_llvm_set_implicit_branch (ctx->builder, branch);
}
/* Emit exception throwing code */
ctx->builder = builder = create_builder (ctx);
LLVMPositionBuilderAtEnd (builder, ex_bb);
if (ctx->cfg->llvm_only) {
LLVMBuildBr (builder, ex2_bb);
ctx->builder = builder = create_builder (ctx);
LLVMPositionBuilderAtEnd (ctx->builder, ex2_bb);
if (exc_id == MONO_EXC_NULL_REF) {
static LLVMTypeRef sig;
if (!sig)
sig = LLVMFunctionType0 (LLVMVoidType (), FALSE);
/* Can't cache this */
callee = get_callee (ctx, sig, MONO_PATCH_INFO_JIT_ICALL_ADDR, GUINT_TO_POINTER (MONO_JIT_ICALL_mini_llvmonly_throw_nullref_exception));
emit_call (ctx, bb, &builder, callee, NULL, 0);
} else {
static LLVMTypeRef sig;
if (!sig)
sig = LLVMFunctionType1 (LLVMVoidType (), LLVMInt32Type (), FALSE);
callee = get_callee (ctx, sig, MONO_PATCH_INFO_JIT_ICALL_ADDR, GUINT_TO_POINTER (MONO_JIT_ICALL_mini_llvmonly_throw_corlib_exception));
args [0] = LLVMConstInt (LLVMInt32Type (), m_class_get_type_token (exc_class) - MONO_TOKEN_TYPE_DEF, FALSE);
emit_call (ctx, bb, &builder, callee, args, 1);
}
LLVMBuildUnreachable (builder);
ctx->builder = builder = create_builder (ctx);
LLVMPositionBuilderAtEnd (ctx->builder, noex_bb);
ctx->bblocks [bb->block_num].end_bblock = noex_bb;
ctx->ex_index ++;
return;
}
callee = ctx->module->throw_corlib_exception;
if (!callee) {
LLVMTypeRef sig;
if (no_pc)
sig = LLVMFunctionType1 (LLVMVoidType (), LLVMInt32Type (), FALSE);
else
sig = LLVMFunctionType2 (LLVMVoidType (), LLVMInt32Type (), LLVMPointerType (LLVMInt8Type (), 0), FALSE);
const MonoJitICallId icall_id = MONO_JIT_ICALL_mono_llvm_throw_corlib_exception_abs_trampoline;
if (ctx->cfg->compile_aot) {
callee = get_callee (ctx, sig, MONO_PATCH_INFO_JIT_ICALL_ID, GUINT_TO_POINTER (icall_id));
} else {
/*
* Differences between the LLVM/non-LLVM throw corlib exception trampoline:
* - On x86, LLVM generated code doesn't push the arguments
 * - The trampoline takes the throw address as an argument, not a pc offset.
*/
callee = get_jit_callee (ctx, "llvm_throw_corlib_exception_trampoline", sig, MONO_PATCH_INFO_JIT_ICALL_ID, GUINT_TO_POINTER (icall_id));
/*
* Make sure that ex_bb starts with the invoke, so the block address points to it, and not to the load
* added by get_jit_callee ().
*/
ex2_bb = gen_bb (ctx, "EX2_BB");
LLVMBuildBr (builder, ex2_bb);
ex_bb = ex2_bb;
ctx->builder = builder = create_builder (ctx);
LLVMPositionBuilderAtEnd (ctx->builder, ex2_bb);
}
}
args [0] = LLVMConstInt (LLVMInt32Type (), m_class_get_type_token (exc_class) - MONO_TOKEN_TYPE_DEF, FALSE);
/*
* The LLVM mono branch contains changes so a block address can be passed as an
* argument to a call.
*/
if (no_pc) {
emit_call (ctx, bb, &builder, callee, args, 1);
} else {
args [1] = LLVMBlockAddress (ctx->lmethod, ex_bb);
emit_call (ctx, bb, &builder, callee, args, 2);
}
LLVMBuildUnreachable (builder);
ctx->builder = builder = create_builder (ctx);
LLVMPositionBuilderAtEnd (ctx->builder, noex_bb);
ctx->bblocks [bb->block_num].end_bblock = noex_bb;
ctx->ex_index ++;
return;
}
/*
* emit_args_to_vtype:
*
 * Emit code to store the vtype contained in the arguments ARGS to the address ADDRESS.
*/
static void
emit_args_to_vtype (EmitContext *ctx, LLVMBuilderRef builder, MonoType *t, LLVMValueRef address, LLVMArgInfo *ainfo, LLVMValueRef *args)
{
int j, size, nslots;
MonoClass *klass;
t = mini_get_underlying_type (t);
klass = mono_class_from_mono_type_internal (t);
size = mono_class_value_size (klass, NULL);
if (MONO_CLASS_IS_SIMD (ctx->cfg, klass))
address = LLVMBuildBitCast (ctx->builder, address, LLVMPointerType (LLVMInt8Type (), 0), "");
if (ainfo->storage == LLVMArgAsFpArgs)
nslots = ainfo->nslots;
else
nslots = 2;
for (j = 0; j < nslots; ++j) {
LLVMValueRef index [2], addr, daddr;
int part_size = size > TARGET_SIZEOF_VOID_P ? TARGET_SIZEOF_VOID_P : size;
LLVMTypeRef part_type;
while (part_size != 1 && part_size != 2 && part_size != 4 && part_size < 8)
part_size ++;
if (ainfo->pair_storage [j] == LLVMArgNone)
continue;
switch (ainfo->pair_storage [j]) {
case LLVMArgInIReg: {
part_type = LLVMIntType (part_size * 8);
if (MONO_CLASS_IS_SIMD (ctx->cfg, klass)) {
index [0] = LLVMConstInt (LLVMInt32Type (), j * TARGET_SIZEOF_VOID_P, FALSE);
addr = LLVMBuildGEP (builder, address, index, 1, "");
} else {
daddr = LLVMBuildBitCast (ctx->builder, address, LLVMPointerType (IntPtrType (), 0), "");
index [0] = LLVMConstInt (LLVMInt32Type (), j, FALSE);
addr = LLVMBuildGEP (builder, daddr, index, 1, "");
}
LLVMBuildStore (builder, convert (ctx, args [j], part_type), LLVMBuildBitCast (ctx->builder, addr, LLVMPointerType (part_type, 0), ""));
break;
}
case LLVMArgInFPReg: {
LLVMTypeRef arg_type;
if (ainfo->esize == 8)
arg_type = LLVMDoubleType ();
else
arg_type = LLVMFloatType ();
index [0] = LLVMConstInt (LLVMInt32Type (), j, FALSE);
daddr = LLVMBuildBitCast (ctx->builder, address, LLVMPointerType (arg_type, 0), "");
addr = LLVMBuildGEP (builder, daddr, index, 1, "");
LLVMBuildStore (builder, args [j], addr);
break;
}
case LLVMArgNone:
break;
default:
g_assert_not_reached ();
}
size -= TARGET_SIZEOF_VOID_P;
}
}
/*
* emit_vtype_to_args:
*
* Emit code to load a vtype at address ADDRESS into scalar arguments. Store the arguments
* into ARGS, and the number of arguments into NARGS.
*/
static void
emit_vtype_to_args (EmitContext *ctx, LLVMBuilderRef builder, MonoType *t, LLVMValueRef address, LLVMArgInfo *ainfo, LLVMValueRef *args, guint32 *nargs)
{
int pindex = 0;
int j, nslots;
LLVMTypeRef arg_type;
t = mini_get_underlying_type (t);
int32_t size = get_vtype_size_align (t).size;
if (MONO_CLASS_IS_SIMD (ctx->cfg, mono_class_from_mono_type_internal (t)))
address = LLVMBuildBitCast (ctx->builder, address, LLVMPointerType (LLVMInt8Type (), 0), "");
if (ainfo->storage == LLVMArgAsFpArgs)
nslots = ainfo->nslots;
else
nslots = 2;
for (j = 0; j < nslots; ++j) {
LLVMValueRef index [2], addr, daddr;
int partsize = size > TARGET_SIZEOF_VOID_P ? TARGET_SIZEOF_VOID_P : size;
if (ainfo->pair_storage [j] == LLVMArgNone)
continue;
switch (ainfo->pair_storage [j]) {
case LLVMArgInIReg:
if (MONO_CLASS_IS_SIMD (ctx->cfg, mono_class_from_mono_type_internal (t))) {
index [0] = LLVMConstInt (LLVMInt32Type (), j * TARGET_SIZEOF_VOID_P, FALSE);
addr = LLVMBuildGEP (builder, address, index, 1, "");
} else {
daddr = LLVMBuildBitCast (ctx->builder, address, LLVMPointerType (IntPtrType (), 0), "");
index [0] = LLVMConstInt (LLVMInt32Type (), j, FALSE);
addr = LLVMBuildGEP (builder, daddr, index, 1, "");
}
args [pindex ++] = convert (ctx, LLVMBuildLoad (builder, LLVMBuildBitCast (ctx->builder, addr, LLVMPointerType (LLVMIntType (partsize * 8), 0), ""), ""), IntPtrType ());
break;
case LLVMArgInFPReg:
if (ainfo->esize == 8)
arg_type = LLVMDoubleType ();
else
arg_type = LLVMFloatType ();
daddr = LLVMBuildBitCast (ctx->builder, address, LLVMPointerType (arg_type, 0), "");
index [0] = LLVMConstInt (LLVMInt32Type (), j, FALSE);
addr = LLVMBuildGEP (builder, daddr, index, 1, "");
args [pindex ++] = LLVMBuildLoad (builder, addr, "");
break;
case LLVMArgNone:
break;
default:
g_assert_not_reached ();
}
size -= TARGET_SIZEOF_VOID_P;
}
*nargs = pindex;
}
static LLVMValueRef
build_alloca_llvm_type_name (EmitContext *ctx, LLVMTypeRef t, int align, const char *name)
{
/*
 * Have to place all allocas at the end of the entry bb, since otherwise they would
* get executed every time control reaches them.
*/
LLVMPositionBuilder (ctx->alloca_builder, get_bb (ctx, ctx->cfg->bb_entry), ctx->last_alloca);
ctx->last_alloca = mono_llvm_build_alloca (ctx->alloca_builder, t, NULL, align, name);
return ctx->last_alloca;
}
static LLVMValueRef
build_alloca_llvm_type (EmitContext *ctx, LLVMTypeRef t, int align)
{
return build_alloca_llvm_type_name (ctx, t, align, "");
}
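/*
 * build_named_alloca:
 *
 * Allocate stack space for the (non-gsharedvt) type T, rounding its alignment up
 * to a power of two if needed.
 */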
static LLVMValueRef
build_named_alloca (EmitContext *ctx, MonoType *t, char const *name)
{
MonoClass *k = mono_class_from_mono_type_internal (t);
int align;
g_assert (!mini_is_gsharedvt_variable_type (t));
if (MONO_CLASS_IS_SIMD (ctx->cfg, k))
align = mono_class_value_size (k, NULL);
else
align = mono_class_min_align (k);
/* Sometimes align is not a power of 2 */
while (mono_is_power_of_two (align) == -1)
align ++;
return build_alloca_llvm_type_name (ctx, type_to_llvm_type (ctx, t), align, name);
}
static LLVMValueRef
build_alloca (EmitContext *ctx, MonoType *t)
{
return build_named_alloca (ctx, t, "");
}
static LLVMValueRef
emit_gsharedvt_ldaddr (EmitContext *ctx, int vreg)
{
/*
* gsharedvt local.
* Compute the address of the local as gsharedvt_locals_var + gsharedvt_info_var->locals_offsets [idx].
*/
MonoCompile *cfg = ctx->cfg;
LLVMBuilderRef builder = ctx->builder;
LLVMValueRef offset, offset_var;
LLVMValueRef info_var = ctx->values [cfg->gsharedvt_info_var->dreg];
LLVMValueRef locals_var = ctx->values [cfg->gsharedvt_locals_var->dreg];
LLVMValueRef ptr;
char *name;
g_assert (info_var);
g_assert (locals_var);
int idx = cfg->gsharedvt_vreg_to_idx [vreg] - 1;
offset = LLVMConstInt (LLVMInt32Type (), MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * TARGET_SIZEOF_VOID_P), FALSE);
ptr = LLVMBuildAdd (builder, convert (ctx, info_var, IntPtrType ()), convert (ctx, offset, IntPtrType ()), "");
name = g_strdup_printf ("gsharedvt_local_%d_offset", vreg);
offset_var = LLVMBuildLoad (builder, convert (ctx, ptr, LLVMPointerType (LLVMInt32Type (), 0)), name);
return LLVMBuildAdd (builder, convert (ctx, locals_var, IntPtrType ()), convert (ctx, offset_var, IntPtrType ()), "");
}
/*
* Put the global into the 'llvm.used' array to prevent it from being optimized away.
*/
static void
mark_as_used (MonoLLVMModule *module, LLVMValueRef global)
{
if (!module->used)
module->used = g_ptr_array_sized_new (16);
g_ptr_array_add (module->used, global);
}
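/*
 * emit_llvm_used:
 *
 * Emit the 'llvm.used' array referencing every global registered with
 * mark_as_used (), so they are not optimized away.
 */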
static void
emit_llvm_used (MonoLLVMModule *module)
{
LLVMModuleRef lmodule = module->lmodule;
LLVMTypeRef used_type;
LLVMValueRef used, *used_elem;
int i;
if (!module->used)
return;
used_type = LLVMArrayType (LLVMPointerType (LLVMInt8Type (), 0), module->used->len);
used = LLVMAddGlobal (lmodule, used_type, "llvm.used");
used_elem = g_new0 (LLVMValueRef, module->used->len);
for (i = 0; i < module->used->len; ++i)
used_elem [i] = LLVMConstBitCast ((LLVMValueRef)g_ptr_array_index (module->used, i), LLVMPointerType (LLVMInt8Type (), 0));
LLVMSetInitializer (used, LLVMConstArray (LLVMPointerType (LLVMInt8Type (), 0), used_elem, module->used->len));
LLVMSetLinkage (used, LLVMAppendingLinkage);
LLVMSetSection (used, "llvm.metadata");
}
/*
* emit_get_method:
*
* Emit a function mapping method indexes to their code
*/
static void
emit_get_method (MonoLLVMModule *module)
{
LLVMModuleRef lmodule = module->lmodule;
LLVMValueRef func, switch_ins, m;
LLVMBasicBlockRef entry_bb, fail_bb, bb, code_start_bb, code_end_bb, main_bb;
LLVMBasicBlockRef *bbs = NULL;
LLVMTypeRef rtype;
LLVMBuilderRef builder = LLVMCreateBuilder ();
LLVMValueRef table = NULL;
char *name;
int i;
gboolean emit_table = FALSE;
#ifdef TARGET_WASM
/*
 * Emit a table of functions instead of a switch statement;
 * it's very efficient on wasm. This might be usable on
* other platforms too.
*/
emit_table = TRUE;
#endif
rtype = LLVMPointerType (LLVMInt8Type (), 0);
int table_len = module->max_method_idx + 1;
if (emit_table) {
LLVMTypeRef table_type;
LLVMValueRef *table_elems;
char *table_name;
table_type = LLVMArrayType (rtype, table_len);
table_name = g_strdup_printf ("%s_method_table", module->global_prefix);
table = LLVMAddGlobal (lmodule, table_type, table_name);
table_elems = g_new0 (LLVMValueRef, table_len);
for (i = 0; i < table_len; ++i) {
m = (LLVMValueRef)g_hash_table_lookup (module->idx_to_lmethod, GINT_TO_POINTER (i));
if (m && !g_hash_table_lookup (module->no_method_table_lmethods, m))
table_elems [i] = LLVMBuildBitCast (builder, m, rtype, "");
else
table_elems [i] = LLVMConstNull (rtype);
}
LLVMSetInitializer (table, LLVMConstArray (LLVMPointerType (LLVMInt8Type (), 0), table_elems, table_len));
}
/*
* Emit a switch statement. Emitting a table of function addresses is smaller/faster,
* but generating code seems safer.
*/
func = LLVMAddFunction (lmodule, module->get_method_symbol, LLVMFunctionType1 (rtype, LLVMInt32Type (), FALSE));
LLVMSetLinkage (func, LLVMExternalLinkage);
LLVMSetVisibility (func, LLVMHiddenVisibility);
mono_llvm_add_func_attr (func, LLVM_ATTR_NO_UNWIND);
module->get_method = func;
entry_bb = LLVMAppendBasicBlock (func, "ENTRY");
/*
* Return llvm_code_start/llvm_code_end when called with -1/-2.
* Hopefully, the toolchain doesn't reorder these functions. If it does,
* then we will have to find another solution.
*/
name = g_strdup_printf ("BB_CODE_START");
code_start_bb = LLVMAppendBasicBlock (func, name);
g_free (name);
LLVMPositionBuilderAtEnd (builder, code_start_bb);
LLVMBuildRet (builder, LLVMBuildBitCast (builder, module->code_start, rtype, ""));
name = g_strdup_printf ("BB_CODE_END");
code_end_bb = LLVMAppendBasicBlock (func, name);
g_free (name);
LLVMPositionBuilderAtEnd (builder, code_end_bb);
LLVMBuildRet (builder, LLVMBuildBitCast (builder, module->code_end, rtype, ""));
if (emit_table) {
/*
 * Because table_len is computed from the method indexes available to us, it
 * might not cover methods which are not compiled because of AOT profiles.
* So table_len can be smaller than info->nmethods. Add a bounds check because
* of that.
* switch (index) {
* case -1: return code_start;
* case -2: return code_end;
 * default: return index < table_len ? method_table [index] : 0;
 * }
 */
fail_bb = LLVMAppendBasicBlock (func, "FAIL");
LLVMPositionBuilderAtEnd (builder, fail_bb);
LLVMBuildRet (builder, LLVMBuildIntToPtr (builder, LLVMConstInt (LLVMInt32Type (), 0, FALSE), rtype, ""));
main_bb = LLVMAppendBasicBlock (func, "MAIN");
LLVMPositionBuilderAtEnd (builder, main_bb);
LLVMValueRef base = table;
LLVMValueRef indexes [2];
indexes [0] = LLVMConstInt (LLVMInt32Type (), 0, FALSE);
indexes [1] = LLVMGetParam (func, 0);
LLVMValueRef addr = LLVMBuildGEP (builder, base, indexes, 2, "");
LLVMValueRef res = mono_llvm_build_load (builder, addr, "", FALSE);
LLVMBuildRet (builder, res);
LLVMBasicBlockRef default_bb = LLVMAppendBasicBlock (func, "DEFAULT");
LLVMPositionBuilderAtEnd (builder, default_bb);
LLVMValueRef cmp = LLVMBuildICmp (builder, LLVMIntSGE, LLVMGetParam (func, 0), LLVMConstInt (LLVMInt32Type (), table_len, FALSE), "");
LLVMBuildCondBr (builder, cmp, fail_bb, main_bb);
LLVMPositionBuilderAtEnd (builder, entry_bb);
switch_ins = LLVMBuildSwitch (builder, LLVMGetParam (func, 0), default_bb, 0);
LLVMAddCase (switch_ins, LLVMConstInt (LLVMInt32Type (), -1, FALSE), code_start_bb);
LLVMAddCase (switch_ins, LLVMConstInt (LLVMInt32Type (), -2, FALSE), code_end_bb);
} else {
bbs = g_new0 (LLVMBasicBlockRef, module->max_method_idx + 1);
for (i = 0; i < module->max_method_idx + 1; ++i) {
name = g_strdup_printf ("BB_%d", i);
bb = LLVMAppendBasicBlock (func, name);
g_free (name);
bbs [i] = bb;
LLVMPositionBuilderAtEnd (builder, bb);
m = (LLVMValueRef)g_hash_table_lookup (module->idx_to_lmethod, GINT_TO_POINTER (i));
if (m && !g_hash_table_lookup (module->no_method_table_lmethods, m))
LLVMBuildRet (builder, LLVMBuildBitCast (builder, m, rtype, ""));
else
LLVMBuildRet (builder, LLVMConstNull (rtype));
}
fail_bb = LLVMAppendBasicBlock (func, "FAIL");
LLVMPositionBuilderAtEnd (builder, fail_bb);
LLVMBuildRet (builder, LLVMConstNull (rtype));
LLVMPositionBuilderAtEnd (builder, entry_bb);
switch_ins = LLVMBuildSwitch (builder, LLVMGetParam (func, 0), fail_bb, 0);
LLVMAddCase (switch_ins, LLVMConstInt (LLVMInt32Type (), -1, FALSE), code_start_bb);
LLVMAddCase (switch_ins, LLVMConstInt (LLVMInt32Type (), -2, FALSE), code_end_bb);
for (i = 0; i < module->max_method_idx + 1; ++i) {
LLVMAddCase (switch_ins, LLVMConstInt (LLVMInt32Type (), i, FALSE), bbs [i]);
}
}
mark_as_used (module, func);
LLVMDisposeBuilder (builder);
}
/*
* emit_get_unbox_tramp:
*
* Emit a function mapping method indexes to their unbox trampoline
*/
static void
emit_get_unbox_tramp (MonoLLVMModule *module)
{
LLVMModuleRef lmodule = module->lmodule;
LLVMValueRef func, switch_ins, m;
LLVMBasicBlockRef entry_bb, fail_bb, bb;
LLVMBasicBlockRef *bbs;
LLVMTypeRef rtype;
LLVMBuilderRef builder = LLVMCreateBuilder ();
char *name;
int i;
gboolean emit_table = FALSE;
/* Similar to emit_get_method () */
#ifndef TARGET_WATCHOS
emit_table = TRUE;
#endif
rtype = LLVMPointerType (LLVMInt8Type (), 0);
if (emit_table) {
// About 10% of methods have an unbox tramp, so emit a table of indexes for them
// that the runtime can search using a binary search
int len = 0;
for (i = 0; i < module->max_method_idx + 1; ++i) {
m = (LLVMValueRef)g_hash_table_lookup (module->idx_to_unbox_tramp, GINT_TO_POINTER (i));
if (m)
len ++;
}
LLVMTypeRef table_type, elemtype;
LLVMValueRef *table_elems;
LLVMValueRef table;
char *table_name;
int table_len;
int elemsize;
table_len = len;
elemsize = module->max_method_idx < 65000 ? 2 : 4;
// The index table
elemtype = elemsize == 2 ? LLVMInt16Type () : LLVMInt32Type ();
table_type = LLVMArrayType (elemtype, table_len);
table_name = g_strdup_printf ("%s_unbox_tramp_indexes", module->global_prefix);
table = LLVMAddGlobal (lmodule, table_type, table_name);
table_elems = g_new0 (LLVMValueRef, table_len);
int idx = 0;
for (i = 0; i < module->max_method_idx + 1; ++i) {
m = (LLVMValueRef)g_hash_table_lookup (module->idx_to_unbox_tramp, GINT_TO_POINTER (i));
if (m)
table_elems [idx ++] = LLVMConstInt (elemtype, i, FALSE);
}
LLVMSetInitializer (table, LLVMConstArray (elemtype, table_elems, table_len));
module->unbox_tramp_indexes = table;
// The trampoline table
elemtype = rtype;
table_type = LLVMArrayType (elemtype, table_len);
table_name = g_strdup_printf ("%s_unbox_trampolines", module->global_prefix);
table = LLVMAddGlobal (lmodule, table_type, table_name);
table_elems = g_new0 (LLVMValueRef, table_len);
idx = 0;
for (i = 0; i < module->max_method_idx + 1; ++i) {
m = (LLVMValueRef)g_hash_table_lookup (module->idx_to_unbox_tramp, GINT_TO_POINTER (i));
if (m)
table_elems [idx ++] = LLVMBuildBitCast (builder, m, rtype, "");
}
LLVMSetInitializer (table, LLVMConstArray (elemtype, table_elems, table_len));
module->unbox_trampolines = table;
module->unbox_tramp_num = table_len;
module->unbox_tramp_elemsize = elemsize;
return;
}
func = LLVMAddFunction (lmodule, module->get_unbox_tramp_symbol, LLVMFunctionType1 (rtype, LLVMInt32Type (), FALSE));
LLVMSetLinkage (func, LLVMExternalLinkage);
LLVMSetVisibility (func, LLVMHiddenVisibility);
mono_llvm_add_func_attr (func, LLVM_ATTR_NO_UNWIND);
module->get_unbox_tramp = func;
entry_bb = LLVMAppendBasicBlock (func, "ENTRY");
bbs = g_new0 (LLVMBasicBlockRef, module->max_method_idx + 1);
for (i = 0; i < module->max_method_idx + 1; ++i) {
m = (LLVMValueRef)g_hash_table_lookup (module->idx_to_unbox_tramp, GINT_TO_POINTER (i));
if (!m)
continue;
name = g_strdup_printf ("BB_%d", i);
bb = LLVMAppendBasicBlock (func, name);
g_free (name);
bbs [i] = bb;
LLVMPositionBuilderAtEnd (builder, bb);
LLVMBuildRet (builder, LLVMBuildBitCast (builder, m, rtype, ""));
}
fail_bb = LLVMAppendBasicBlock (func, "FAIL");
LLVMPositionBuilderAtEnd (builder, fail_bb);
LLVMBuildRet (builder, LLVMConstNull (rtype));
LLVMPositionBuilderAtEnd (builder, entry_bb);
switch_ins = LLVMBuildSwitch (builder, LLVMGetParam (func, 0), fail_bb, 0);
for (i = 0; i < module->max_method_idx + 1; ++i) {
m = (LLVMValueRef)g_hash_table_lookup (module->idx_to_unbox_tramp, GINT_TO_POINTER (i));
if (!m)
continue;
LLVMAddCase (switch_ins, LLVMConstInt (LLVMInt32Type (), i, FALSE), bbs [i]);
}
mark_as_used (module, func);
LLVMDisposeBuilder (builder);
}
/*
* emit_init_aotconst:
*
* Emit a function to initialize the aotconst_ variables. Called by the runtime.
*/
static void
emit_init_aotconst (MonoLLVMModule *module)
{
LLVMModuleRef lmodule = module->lmodule;
LLVMValueRef func;
LLVMBasicBlockRef entry_bb;
LLVMBuilderRef builder = LLVMCreateBuilder ();
func = LLVMAddFunction (lmodule, module->init_aotconst_symbol, LLVMFunctionType2 (LLVMVoidType (), LLVMInt32Type (), IntPtrType (), FALSE));
LLVMSetLinkage (func, LLVMExternalLinkage);
LLVMSetVisibility (func, LLVMHiddenVisibility);
mono_llvm_add_func_attr (func, LLVM_ATTR_NO_UNWIND);
module->init_aotconst_func = func;
entry_bb = LLVMAppendBasicBlock (func, "ENTRY");
LLVMPositionBuilderAtEnd (builder, entry_bb);
#ifdef TARGET_WASM
/* Emit a table of aotconst addresses instead of a switch statement to save space */
LLVMValueRef aotconsts;
LLVMTypeRef aotconst_addr_type = LLVMPointerType (module->ptr_type, 0);
int table_size = module->max_got_offset + 1;
LLVMTypeRef aotconst_arr_type = LLVMArrayType (aotconst_addr_type, table_size);
LLVMValueRef aotconst_dummy = LLVMAddGlobal (module->lmodule, module->ptr_type, "aotconst_dummy");
LLVMSetInitializer (aotconst_dummy, LLVMConstNull (module->ptr_type));
LLVMSetVisibility (aotconst_dummy, LLVMHiddenVisibility);
LLVMSetLinkage (aotconst_dummy, LLVMInternalLinkage);
aotconsts = LLVMAddGlobal (module->lmodule, aotconst_arr_type, "aotconsts");
LLVMValueRef *aotconst_init = g_new0 (LLVMValueRef, table_size);
for (int i = 0; i < table_size; ++i) {
LLVMValueRef aotconst = (LLVMValueRef)g_hash_table_lookup (module->aotconst_vars, GINT_TO_POINTER (i));
if (aotconst)
aotconst_init [i] = LLVMConstBitCast (aotconst, aotconst_addr_type);
else
aotconst_init [i] = LLVMConstBitCast (aotconst_dummy, aotconst_addr_type);
}
LLVMSetInitializer (aotconsts, LLVMConstArray (aotconst_addr_type, aotconst_init, table_size));
LLVMSetVisibility (aotconsts, LLVMHiddenVisibility);
LLVMSetLinkage (aotconsts, LLVMInternalLinkage);
LLVMBasicBlockRef exit_bb = LLVMAppendBasicBlock (func, "EXIT_BB");
LLVMBasicBlockRef main_bb = LLVMAppendBasicBlock (func, "BB");
LLVMValueRef cmp = LLVMBuildICmp (builder, LLVMIntSGE, LLVMGetParam (func, 0), LLVMConstInt (LLVMInt32Type (), table_size, FALSE), "");
LLVMBuildCondBr (builder, cmp, exit_bb, main_bb);
LLVMPositionBuilderAtEnd (builder, main_bb);
LLVMValueRef indexes [2];
indexes [0] = LLVMConstInt (LLVMInt32Type (), 0, FALSE);
indexes [1] = LLVMGetParam (func, 0);
LLVMValueRef aotconst_addr = LLVMBuildLoad (builder, LLVMBuildGEP (builder, aotconsts, indexes, 2, ""), "");
LLVMBuildStore (builder, LLVMBuildIntToPtr (builder, LLVMGetParam (func, 1), module->ptr_type, ""), aotconst_addr);
LLVMBuildBr (builder, exit_bb);
LLVMPositionBuilderAtEnd (builder, exit_bb);
LLVMBuildRetVoid (builder);
#else
LLVMValueRef switch_ins;
LLVMBasicBlockRef fail_bb, bb;
LLVMBasicBlockRef *bbs = NULL;
char *name;
bbs = g_new0 (LLVMBasicBlockRef, module->max_got_offset + 1);
for (int i = 0; i < module->max_got_offset + 1; ++i) {
name = g_strdup_printf ("BB_%d", i);
bb = LLVMAppendBasicBlock (func, name);
g_free (name);
bbs [i] = bb;
LLVMPositionBuilderAtEnd (builder, bb);
LLVMValueRef var = g_hash_table_lookup (module->aotconst_vars, GINT_TO_POINTER (i));
if (var) {
LLVMValueRef addr = LLVMBuildBitCast (builder, var, LLVMPointerType (IntPtrType (), 0), "");
LLVMBuildStore (builder, LLVMGetParam (func, 1), addr);
}
LLVMBuildRetVoid (builder);
}
fail_bb = LLVMAppendBasicBlock (func, "FAIL");
LLVMPositionBuilderAtEnd (builder, fail_bb);
LLVMBuildRetVoid (builder);
LLVMPositionBuilderAtEnd (builder, entry_bb);
switch_ins = LLVMBuildSwitch (builder, LLVMGetParam (func, 0), fail_bb, 0);
for (int i = 0; i < module->max_got_offset + 1; ++i)
LLVMAddCase (switch_ins, LLVMConstInt (LLVMInt32Type (), i, FALSE), bbs [i]);
#endif
LLVMDisposeBuilder (builder);
}
/* Add a function to mark the beginning of LLVM code */
static void
emit_llvm_code_start (MonoLLVMModule *module)
{
LLVMModuleRef lmodule = module->lmodule;
LLVMValueRef func;
LLVMBasicBlockRef entry_bb;
LLVMBuilderRef builder;
func = LLVMAddFunction (lmodule, "llvm_code_start", LLVMFunctionType (LLVMVoidType (), NULL, 0, FALSE));
LLVMSetLinkage (func, LLVMInternalLinkage);
mono_llvm_add_func_attr (func, LLVM_ATTR_NO_UNWIND);
module->code_start = func;
entry_bb = LLVMAppendBasicBlock (func, "ENTRY");
builder = LLVMCreateBuilder ();
LLVMPositionBuilderAtEnd (builder, entry_bb);
LLVMBuildRetVoid (builder);
LLVMDisposeBuilder (builder);
}
/*
* emit_init_func:
*
* Emit functions to initialize LLVM methods.
* These are wrappers around the mini_llvm_init_method () JIT icall.
 * The wrappers handle adding the 'amodule' argument and loading the vtable from different locations, and they have
* a cold calling convention.
*/
static LLVMValueRef
emit_init_func (MonoLLVMModule *module, MonoAotInitSubtype subtype)
{
LLVMModuleRef lmodule = module->lmodule;
LLVMValueRef func, indexes [2], args [16], callee, info_var, index_var, inited_var, cmp;
LLVMBasicBlockRef entry_bb, inited_bb, notinited_bb;
LLVMBuilderRef builder;
LLVMTypeRef icall_sig;
const char *wrapper_name = mono_marshal_get_aot_init_wrapper_name (subtype);
LLVMTypeRef func_type = NULL;
LLVMTypeRef arg_type = module->ptr_type;
char *name = g_strdup_printf ("%s_%s", module->global_prefix, wrapper_name);
switch (subtype) {
case AOT_INIT_METHOD:
func_type = LLVMFunctionType1 (LLVMVoidType (), arg_type, FALSE);
break;
case AOT_INIT_METHOD_GSHARED_MRGCTX:
case AOT_INIT_METHOD_GSHARED_VTABLE:
func_type = LLVMFunctionType2 (LLVMVoidType (), arg_type, IntPtrType (), FALSE);
break;
case AOT_INIT_METHOD_GSHARED_THIS:
func_type = LLVMFunctionType2 (LLVMVoidType (), arg_type, ObjRefType (), FALSE);
break;
default:
g_assert_not_reached ();
}
func = LLVMAddFunction (lmodule, name, func_type);
info_var = LLVMGetParam (func, 0);
LLVMSetLinkage (func, LLVMInternalLinkage);
mono_llvm_add_func_attr (func, LLVM_ATTR_NO_INLINE);
set_cold_cconv (func);
entry_bb = LLVMAppendBasicBlock (func, "ENTRY");
builder = LLVMCreateBuilder ();
LLVMPositionBuilderAtEnd (builder, entry_bb);
/* Load method_index which is emitted at the start of the method info */
indexes [0] = const_int32 (0);
indexes [1] = const_int32 (0);
// FIXME: Make sure it's aligned
index_var = LLVMBuildLoad (builder, LLVMBuildGEP (builder, LLVMBuildBitCast (builder, info_var, LLVMPointerType (LLVMInt32Type (), 0), ""), indexes, 1, ""), "method_index");
/* Check for is_inited here as well, since this can be called from JITted code which might not check it */
indexes [0] = const_int32 (0);
indexes [1] = index_var;
inited_var = LLVMBuildLoad (builder, LLVMBuildGEP (builder, module->inited_var, indexes, 2, ""), "is_inited");
cmp = LLVMBuildICmp (builder, LLVMIntEQ, inited_var, LLVMConstInt (LLVMTypeOf (inited_var), 0, FALSE), "");
inited_bb = LLVMAppendBasicBlock (func, "INITED");
notinited_bb = LLVMAppendBasicBlock (func, "NOT_INITED");
LLVMBuildCondBr (builder, cmp, notinited_bb, inited_bb);
LLVMPositionBuilderAtEnd (builder, notinited_bb);
LLVMValueRef amodule_var = get_aotconst_module (module, builder, MONO_PATCH_INFO_AOT_MODULE, NULL, LLVMPointerType (IntPtrType (), 0), NULL, NULL);
args [0] = LLVMBuildPtrToInt (builder, module->info_var, IntPtrType (), "");
args [1] = LLVMBuildPtrToInt (builder, amodule_var, IntPtrType (), "");
args [2] = info_var;
switch (subtype) {
case AOT_INIT_METHOD:
args [3] = LLVMConstNull (IntPtrType ());
break;
case AOT_INIT_METHOD_GSHARED_VTABLE:
args [3] = LLVMGetParam (func, 1);
break;
case AOT_INIT_METHOD_GSHARED_THIS:
/* Load this->vtable */
args [3] = LLVMBuildBitCast (builder, LLVMGetParam (func, 1), LLVMPointerType (IntPtrType (), 0), "");
indexes [0] = const_int32 (MONO_STRUCT_OFFSET (MonoObject, vtable) / SIZEOF_VOID_P);
args [3] = LLVMBuildLoad (builder, LLVMBuildGEP (builder, args [3], indexes, 1, ""), "vtable");
break;
case AOT_INIT_METHOD_GSHARED_MRGCTX:
/* Load mrgctx->vtable */
args [3] = LLVMBuildIntToPtr (builder, LLVMGetParam (func, 1), LLVMPointerType (IntPtrType (), 0), "");
indexes [0] = const_int32 (MONO_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable) / SIZEOF_VOID_P);
args [3] = LLVMBuildLoad (builder, LLVMBuildGEP (builder, args [3], indexes, 1, ""), "vtable");
break;
default:
g_assert_not_reached ();
break;
}
/* Call the mini_llvm_init_method JIT icall */
icall_sig = LLVMFunctionType4 (LLVMVoidType (), IntPtrType (), IntPtrType (), arg_type, IntPtrType (), FALSE);
callee = get_aotconst_module (module, builder, MONO_PATCH_INFO_JIT_ICALL_ID, GINT_TO_POINTER (MONO_JIT_ICALL_mini_llvm_init_method), LLVMPointerType (icall_sig, 0), NULL, NULL);
LLVMBuildCall (builder, callee, args, LLVMCountParamTypes (icall_sig), "");
/*
 * Set the inited flag.
 * This is already done by the LLVM methods themselves, but it's needed by JITted methods.
*/
indexes [0] = const_int32 (0);
indexes [1] = index_var;
LLVMBuildStore (builder, LLVMConstInt (LLVMInt8Type (), 1, FALSE), LLVMBuildGEP (builder, module->inited_var, indexes, 2, ""));
LLVMBuildBr (builder, inited_bb);
LLVMPositionBuilderAtEnd (builder, inited_bb);
LLVMBuildRetVoid (builder);
LLVMVerifyFunction (func, LLVMAbortProcessAction);
LLVMDisposeBuilder (builder);
g_free (name);
return func;
}
/* Emit a wrapper around the parameterless JIT icall ICALL_ID with a cold calling convention */
static LLVMValueRef
emit_icall_cold_wrapper (MonoLLVMModule *module, LLVMModuleRef lmodule, MonoJitICallId icall_id, gboolean aot)
{
LLVMValueRef func, callee;
LLVMBasicBlockRef entry_bb;
LLVMBuilderRef builder;
LLVMTypeRef sig;
char *name;
name = g_strdup_printf ("%s_icall_cold_wrapper_%d", module->global_prefix, icall_id);
func = LLVMAddFunction (lmodule, name, LLVMFunctionType (LLVMVoidType (), NULL, 0, FALSE));
sig = LLVMFunctionType (LLVMVoidType (), NULL, 0, FALSE);
LLVMSetLinkage (func, LLVMInternalLinkage);
mono_llvm_add_func_attr (func, LLVM_ATTR_NO_INLINE);
set_cold_cconv (func);
entry_bb = LLVMAppendBasicBlock (func, "ENTRY");
builder = LLVMCreateBuilder ();
LLVMPositionBuilderAtEnd (builder, entry_bb);
if (aot) {
callee = get_aotconst_module (module, builder, MONO_PATCH_INFO_JIT_ICALL_ID, GUINT_TO_POINTER (icall_id), LLVMPointerType (sig, 0), NULL, NULL);
} else {
MonoJitICallInfo * const info = mono_find_jit_icall_info (icall_id);
gpointer target = (gpointer)mono_icall_get_wrapper_full (info, TRUE);
LLVMValueRef tramp_var = LLVMAddGlobal (lmodule, LLVMPointerType (sig, 0), name);
LLVMSetInitializer (tramp_var, LLVMConstIntToPtr (LLVMConstInt (LLVMInt64Type (), (guint64)(size_t)target, FALSE), LLVMPointerType (sig, 0)));
LLVMSetLinkage (tramp_var, LLVMExternalLinkage);
callee = LLVMBuildLoad (builder, tramp_var, "");
}
LLVMBuildCall (builder, callee, NULL, 0, "");
LLVMBuildRetVoid (builder);
LLVMVerifyFunction(func, LLVMAbortProcessAction);
LLVMDisposeBuilder (builder);
return func;
}
/*
* Emit wrappers around the C icalls used to initialize llvm methods, to
* make the calling code smaller and to enable usage of the llvm
* cold calling convention.
*/
static void
emit_init_funcs (MonoLLVMModule *module)
{
for (int i = 0; i < AOT_INIT_METHOD_NUM; ++i)
module->init_methods [i] = emit_init_func (module, i);
}
static LLVMValueRef
get_init_func (MonoLLVMModule *module, MonoAotInitSubtype subtype)
{
return module->init_methods [subtype];
}
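/*
 * emit_gc_safepoint_poll:
 *
 * Emit the gc.safepoint_poll () helper used by safepoint insertion: load the
 * polling flag and, in the unlikely case it is set, call a cold wrapper around
 * mono_threads_state_poll ().
 */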
static void
emit_gc_safepoint_poll (MonoLLVMModule *module, LLVMModuleRef lmodule, MonoCompile *cfg)
{
gboolean is_aot = cfg == NULL || cfg->compile_aot;
LLVMValueRef func = mono_llvm_get_or_insert_gc_safepoint_poll (lmodule);
mono_llvm_add_func_attr (func, LLVM_ATTR_NO_UNWIND);
if (is_aot) {
#if TARGET_WIN32
if (module->static_link) {
LLVMSetLinkage (func, LLVMInternalLinkage);
/* Prevent it from being optimized away, leading to asserts inside 'opt' */
mark_as_used (module, func);
} else {
LLVMSetLinkage (func, LLVMWeakODRLinkage);
}
#else
LLVMSetLinkage (func, LLVMWeakODRLinkage);
#endif
} else {
mono_llvm_add_func_attr (func, LLVM_ATTR_OPTIMIZE_NONE); // no need to waste time here, the function is already optimized and will be inlined.
mono_llvm_add_func_attr (func, LLVM_ATTR_NO_INLINE); // optnone attribute requires noinline (but it will be inlined anyway)
if (!module->gc_poll_cold_wrapper_compiled) {
ERROR_DECL (error);
/* Compiling a method here is a bit ugly, but it works */
MonoMethod *wrapper = mono_marshal_get_llvm_func_wrapper (LLVM_FUNC_WRAPPER_GC_POLL);
module->gc_poll_cold_wrapper_compiled = mono_jit_compile_method (wrapper, error);
mono_error_assert_ok (error);
}
}
LLVMBasicBlockRef entry_bb = LLVMAppendBasicBlock (func, "gc.safepoint_poll.entry");
LLVMBasicBlockRef poll_bb = LLVMAppendBasicBlock (func, "gc.safepoint_poll.poll");
LLVMBasicBlockRef exit_bb = LLVMAppendBasicBlock (func, "gc.safepoint_poll.exit");
LLVMTypeRef ptr_type = LLVMPointerType (IntPtrType (), 0);
LLVMBuilderRef builder = LLVMCreateBuilder ();
/* entry: */
LLVMPositionBuilderAtEnd (builder, entry_bb);
LLVMValueRef poll_val_ptr;
if (is_aot) {
poll_val_ptr = get_aotconst_module (module, builder, MONO_PATCH_INFO_GC_SAFE_POINT_FLAG, NULL, ptr_type, NULL, NULL);
} else {
LLVMValueRef poll_val_int = LLVMConstInt (IntPtrType (), (guint64) &mono_polling_required, FALSE);
poll_val_ptr = LLVMBuildIntToPtr (builder, poll_val_int, ptr_type, "");
}
LLVMValueRef poll_val_ptr_load = LLVMBuildLoad (builder, poll_val_ptr, ""); // probably needs to be volatile
LLVMValueRef poll_val = LLVMBuildPtrToInt (builder, poll_val_ptr_load, IntPtrType (), "");
LLVMValueRef poll_val_zero = LLVMConstNull (LLVMTypeOf (poll_val));
LLVMValueRef cmp = LLVMBuildICmp (builder, LLVMIntEQ, poll_val, poll_val_zero, "");
mono_llvm_build_weighted_branch (builder, cmp, exit_bb, poll_bb, 1000 /* weight for exit_bb */, 1 /* weight for poll_bb */);
/* poll: */
LLVMPositionBuilderAtEnd (builder, poll_bb);
LLVMValueRef call;
if (is_aot) {
LLVMValueRef icall_wrapper = emit_icall_cold_wrapper (module, lmodule, MONO_JIT_ICALL_mono_threads_state_poll, TRUE);
module->gc_poll_cold_wrapper = icall_wrapper;
call = LLVMBuildCall (builder, icall_wrapper, NULL, 0, "");
} else {
// in JIT mode we have to emit a @gc.safepoint_poll function for each method (module);
// this function calls gc_poll_cold_wrapper_compiled via a global variable.
// @gc.safepoint_poll will be inlined and can be deleted after -place-safepoints pass.
LLVMTypeRef poll_sig = LLVMFunctionType0 (LLVMVoidType (), FALSE);
LLVMTypeRef poll_sig_ptr = LLVMPointerType (poll_sig, 0);
gpointer target = resolve_patch (cfg, MONO_PATCH_INFO_ABS, module->gc_poll_cold_wrapper_compiled);
LLVMValueRef tramp_var = LLVMAddGlobal (lmodule, poll_sig_ptr, "mono_threads_state_poll");
LLVMValueRef target_val = LLVMConstInt (LLVMInt64Type (), (guint64) target, FALSE);
LLVMSetInitializer (tramp_var, LLVMConstIntToPtr (target_val, poll_sig_ptr));
LLVMSetLinkage (tramp_var, LLVMExternalLinkage);
LLVMValueRef callee = LLVMBuildLoad (builder, tramp_var, "");
call = LLVMBuildCall (builder, callee, NULL, 0, "");
}
set_call_cold_cconv (call);
LLVMBuildBr (builder, exit_bb);
/* exit: */
LLVMPositionBuilderAtEnd (builder, exit_bb);
LLVMBuildRetVoid (builder);
LLVMDisposeBuilder (builder);
}
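/* Add a function to mark the end of LLVM code */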
static void
emit_llvm_code_end (MonoLLVMModule *module)
{
LLVMModuleRef lmodule = module->lmodule;
LLVMValueRef func;
LLVMBasicBlockRef entry_bb;
LLVMBuilderRef builder;
func = LLVMAddFunction (lmodule, "llvm_code_end", LLVMFunctionType (LLVMVoidType (), NULL, 0, FALSE));
LLVMSetLinkage (func, LLVMInternalLinkage);
mono_llvm_add_func_attr (func, LLVM_ATTR_NO_UNWIND);
module->code_end = func;
entry_bb = LLVMAppendBasicBlock (func, "ENTRY");
builder = LLVMCreateBuilder ();
LLVMPositionBuilderAtEnd (builder, entry_bb);
LLVMBuildRetVoid (builder);
LLVMDisposeBuilder (builder);
}
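/*
 * emit_div_check:
 *
 * Emit explicit DivideByZeroException/OverflowException checks for division
 * opcodes, since LLVM treats a faulting division as undefined behavior rather
 * than an exception.
 */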
static void
emit_div_check (EmitContext *ctx, LLVMBuilderRef builder, MonoBasicBlock *bb, MonoInst *ins, LLVMValueRef lhs, LLVMValueRef rhs)
{
gboolean need_div_check = ctx->cfg->backend->need_div_check;
if (bb->region)
/* LLVM doesn't know that these can throw an exception since they are not called through an intrinsic */
need_div_check = TRUE;
if (!need_div_check)
return;
switch (ins->opcode) {
case OP_IDIV:
case OP_LDIV:
case OP_IREM:
case OP_LREM:
case OP_IDIV_UN:
case OP_LDIV_UN:
case OP_IREM_UN:
case OP_LREM_UN:
case OP_IDIV_IMM:
case OP_LDIV_IMM:
case OP_IREM_IMM:
case OP_LREM_IMM:
case OP_IDIV_UN_IMM:
case OP_LDIV_UN_IMM:
case OP_IREM_UN_IMM:
case OP_LREM_UN_IMM: {
LLVMValueRef cmp;
gboolean is_signed = (ins->opcode == OP_IDIV || ins->opcode == OP_LDIV || ins->opcode == OP_IREM || ins->opcode == OP_LREM ||
ins->opcode == OP_IDIV_IMM || ins->opcode == OP_LDIV_IMM || ins->opcode == OP_IREM_IMM || ins->opcode == OP_LREM_IMM);
cmp = LLVMBuildICmp (builder, LLVMIntEQ, rhs, LLVMConstInt (LLVMTypeOf (rhs), 0, FALSE), "");
emit_cond_system_exception (ctx, bb, "DivideByZeroException", cmp, FALSE);
if (!ctx_ok (ctx))
break;
builder = ctx->builder;
/* b == -1 && a == 0x80000000 */
if (is_signed) {
LLVMValueRef c = (LLVMTypeOf (lhs) == LLVMInt32Type ()) ? LLVMConstInt (LLVMTypeOf (lhs), 0x80000000, FALSE) : LLVMConstInt (LLVMTypeOf (lhs), 0x8000000000000000LL, FALSE);
LLVMValueRef cond1 = LLVMBuildICmp (builder, LLVMIntEQ, rhs, LLVMConstInt (LLVMTypeOf (rhs), -1, FALSE), "");
LLVMValueRef cond2 = LLVMBuildICmp (builder, LLVMIntEQ, lhs, c, "");
cmp = LLVMBuildICmp (builder, LLVMIntEQ, LLVMBuildAnd (builder, cond1, cond2, ""), LLVMConstInt (LLVMInt1Type (), 1, FALSE), "");
emit_cond_system_exception (ctx, bb, "OverflowException", cmp, FALSE);
if (!ctx_ok (ctx))
break;
builder = ctx->builder;
}
break;
}
default:
break;
}
}
/*
* emit_method_init:
*
* Emit code to initialize the GOT slots used by the method.
*/
static void
emit_method_init (EmitContext *ctx)
{
LLVMValueRef indexes [16], args [16];
LLVMValueRef inited_var, cmp, call;
LLVMBasicBlockRef inited_bb, notinited_bb;
LLVMBuilderRef builder = ctx->builder;
MonoCompile *cfg = ctx->cfg;
MonoAotInitSubtype subtype;
ctx->module->max_inited_idx = MAX (ctx->module->max_inited_idx, cfg->method_index);
indexes [0] = const_int32 (0);
indexes [1] = const_int32 (cfg->method_index);
inited_var = LLVMBuildLoad (builder, LLVMBuildGEP (builder, ctx->module->inited_var, indexes, 2, ""), "is_inited");
args [0] = inited_var;
args [1] = LLVMConstInt (LLVMInt8Type (), 1, FALSE);
inited_var = LLVMBuildCall (ctx->builder, get_intrins (ctx, INTRINS_EXPECT_I8), args, 2, "");
cmp = LLVMBuildICmp (builder, LLVMIntEQ, inited_var, LLVMConstInt (LLVMTypeOf (inited_var), 0, FALSE), "");
inited_bb = ctx->inited_bb;
notinited_bb = gen_bb (ctx, "NOTINITED_BB");
ctx->cfg->llvmonly_init_cond = LLVMBuildCondBr (ctx->builder, cmp, notinited_bb, inited_bb);
builder = ctx->builder = create_builder (ctx);
LLVMPositionBuilderAtEnd (ctx->builder, notinited_bb);
LLVMTypeRef type = LLVMArrayType (LLVMInt8Type (), 0);
char *symbol = g_strdup_printf ("info_dummy_%s", cfg->llvm_method_name);
LLVMValueRef info_var = LLVMAddGlobal (ctx->lmodule, type, symbol);
g_free (symbol);
cfg->llvm_dummy_info_var = info_var;
int nargs = 0;
args [nargs ++] = convert (ctx, info_var, ctx->module->ptr_type);
switch (cfg->rgctx_access) {
case MONO_RGCTX_ACCESS_MRGCTX:
if (ctx->rgctx_arg) {
args [nargs ++] = convert (ctx, ctx->rgctx_arg, IntPtrType ());
subtype = AOT_INIT_METHOD_GSHARED_MRGCTX;
} else {
g_assert (ctx->this_arg);
args [nargs ++] = convert (ctx, ctx->this_arg, ObjRefType ());
subtype = AOT_INIT_METHOD_GSHARED_THIS;
}
break;
case MONO_RGCTX_ACCESS_VTABLE:
args [nargs ++] = convert (ctx, ctx->rgctx_arg, IntPtrType ());
subtype = AOT_INIT_METHOD_GSHARED_VTABLE;
break;
case MONO_RGCTX_ACCESS_THIS:
args [nargs ++] = convert (ctx, ctx->this_arg, ObjRefType ());
subtype = AOT_INIT_METHOD_GSHARED_THIS;
break;
case MONO_RGCTX_ACCESS_NONE:
subtype = AOT_INIT_METHOD;
break;
default:
g_assert_not_reached ();
}
call = LLVMBuildCall (builder, ctx->module->init_methods [subtype], args, nargs, "");
/*
* This enables llvm to keep arguments in their original registers/
* scratch registers, since the call will not clobber them.
*/
set_call_cold_cconv (call);
// Set the inited flag
indexes [0] = const_int32 (0);
indexes [1] = const_int32 (cfg->method_index);
LLVMBuildStore (builder, LLVMConstInt (LLVMInt8Type (), 1, FALSE), LLVMBuildGEP (builder, ctx->module->inited_var, indexes, 2, ""));
LLVMBuildBr (builder, inited_bb);
ctx->bblocks [cfg->bb_entry->block_num].end_bblock = inited_bb;
builder = ctx->builder = create_builder (ctx);
LLVMPositionBuilderAtEnd (ctx->builder, inited_bb);
}
static void
emit_unbox_tramp (EmitContext *ctx, const char *method_name, LLVMTypeRef method_type, LLVMValueRef method, int method_index)
{
/*
* Emit unbox trampoline using a tailcall
*/
LLVMValueRef tramp, call, *args;
LLVMBuilderRef builder;
LLVMBasicBlockRef lbb;
LLVMCallInfo *linfo;
char *tramp_name;
int i, nargs;
tramp_name = g_strdup_printf ("ut_%s", method_name);
tramp = LLVMAddFunction (ctx->module->lmodule, tramp_name, method_type);
LLVMSetLinkage (tramp, LLVMInternalLinkage);
mono_llvm_add_func_attr (tramp, LLVM_ATTR_OPTIMIZE_FOR_SIZE);
//mono_llvm_add_func_attr (tramp, LLVM_ATTR_NO_UNWIND);
linfo = ctx->linfo;
// FIXME: Reduce code duplication with mono_llvm_compile_method () etc.
if (!ctx->llvm_only && ctx->rgctx_arg_pindex != -1)
mono_llvm_add_param_attr (LLVMGetParam (tramp, ctx->rgctx_arg_pindex), LLVM_ATTR_IN_REG);
if (ctx->cfg->vret_addr) {
LLVMSetValueName (LLVMGetParam (tramp, linfo->vret_arg_pindex), "vret");
if (linfo->ret.storage == LLVMArgVtypeByRef) {
mono_llvm_add_param_attr (LLVMGetParam (tramp, linfo->vret_arg_pindex), LLVM_ATTR_STRUCT_RET);
mono_llvm_add_param_attr (LLVMGetParam (tramp, linfo->vret_arg_pindex), LLVM_ATTR_NO_ALIAS);
}
}
lbb = LLVMAppendBasicBlock (tramp, "");
builder = LLVMCreateBuilder ();
LLVMPositionBuilderAtEnd (builder, lbb);
nargs = LLVMCountParamTypes (method_type);
args = g_new0 (LLVMValueRef, nargs);
for (i = 0; i < nargs; ++i) {
args [i] = LLVMGetParam (tramp, i);
if (i == ctx->this_arg_pindex) {
LLVMTypeRef arg_type = LLVMTypeOf (args [i]);
args [i] = LLVMBuildPtrToInt (builder, args [i], IntPtrType (), "");
args [i] = LLVMBuildAdd (builder, args [i], LLVMConstInt (IntPtrType (), MONO_ABI_SIZEOF (MonoObject), FALSE), "");
args [i] = LLVMBuildIntToPtr (builder, args [i], arg_type, "");
}
}
call = LLVMBuildCall (builder, method, args, nargs, "");
if (!ctx->llvm_only && ctx->rgctx_arg_pindex != -1)
mono_llvm_add_instr_attr (call, 1 + ctx->rgctx_arg_pindex, LLVM_ATTR_IN_REG);
if (linfo->ret.storage == LLVMArgVtypeByRef)
mono_llvm_add_instr_attr (call, 1 + linfo->vret_arg_pindex, LLVM_ATTR_STRUCT_RET);
// FIXME: This causes assertions in clang
//mono_llvm_set_must_tailcall (call);
if (LLVMGetReturnType (method_type) == LLVMVoidType ())
LLVMBuildRetVoid (builder);
else
LLVMBuildRet (builder, call);
g_hash_table_insert (ctx->module->idx_to_unbox_tramp, GINT_TO_POINTER (method_index), tramp);
LLVMDisposeBuilder (builder);
}
#ifdef TARGET_WASM
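/*
 * emit_gc_pin:
 *
 * Store the ref vreg VREG into its slot in the stack allocated gc pin area, so
 * the conservative stack scan pins the object it references.
 */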
static void
emit_gc_pin (EmitContext *ctx, LLVMBuilderRef builder, int vreg)
{
LLVMValueRef index0 = LLVMConstInt (LLVMInt32Type (), 0, FALSE);
LLVMValueRef index1 = LLVMConstInt (LLVMInt32Type (), ctx->gc_var_indexes [vreg] - 1, FALSE);
LLVMValueRef indexes [] = { index0, index1 };
LLVMValueRef addr = LLVMBuildGEP (builder, ctx->gc_pin_area, indexes, 2, "");
mono_llvm_build_store (builder, convert (ctx, ctx->values [vreg], IntPtrType ()), addr, TRUE, LLVM_BARRIER_NONE);
}
#endif
/*
* emit_entry_bb:
*
* Emit code to load/convert arguments.
*/
static void
emit_entry_bb (EmitContext *ctx, LLVMBuilderRef builder)
{
int i, j, pindex;
MonoCompile *cfg = ctx->cfg;
MonoMethodSignature *sig = ctx->sig;
LLVMCallInfo *linfo = ctx->linfo;
MonoBasicBlock *bb;
char **names;
LLVMBuilderRef old_builder = ctx->builder;
ctx->builder = builder;
ctx->alloca_builder = create_builder (ctx);
#ifdef TARGET_WASM
/*
* For GC stack scanning to work, allocate an area on the stack and store
 * every ref vreg into it after it's written. Because the stack is scanned
 * conservatively, the objects will be pinned, so the vregs can directly
 * reference the objects; there is no need to load them from the stack
* on every access.
*/
ctx->gc_var_indexes = g_new0 (int, cfg->next_vreg);
int ngc_vars = 0;
for (i = 0; i < cfg->next_vreg; ++i) {
if (vreg_is_ref (cfg, i)) {
ctx->gc_var_indexes [i] = ngc_vars + 1;
ngc_vars ++;
}
}
// FIXME: Count only live vregs
ctx->gc_pin_area = build_alloca_llvm_type_name (ctx, LLVMArrayType (IntPtrType (), ngc_vars), 0, "gc_pin");
#endif
/*
* Handle indirect/volatile variables by allocating memory for them
* using 'alloca', and storing their address in a temporary.
*/
for (i = 0; i < cfg->num_varinfo; ++i) {
MonoInst *var = cfg->varinfo [i];
if ((var->opcode == OP_GSHAREDVT_LOCAL || var->opcode == OP_GSHAREDVT_ARG_REGOFFSET))
continue;
if (var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT) || (mini_type_is_vtype (var->inst_vtype) && !MONO_CLASS_IS_SIMD (ctx->cfg, var->klass))) {
if (!ctx_ok (ctx))
return;
/* Could have already been created by an OP_VPHI */
if (!ctx->addresses [var->dreg]) {
if (var->flags & MONO_INST_LMF) {
// FIXME: Allocate a smaller struct in the deopt case
int size = cfg->deopt ? MONO_ABI_SIZEOF (MonoLMFExt) : MONO_ABI_SIZEOF (MonoLMF);
ctx->addresses [var->dreg] = build_alloca_llvm_type_name (ctx, LLVMArrayType (LLVMInt8Type (), size), sizeof (target_mgreg_t), "lmf");
} else {
char *name = g_strdup_printf ("vreg_loc_%d", var->dreg);
ctx->addresses [var->dreg] = build_named_alloca (ctx, var->inst_vtype, name);
g_free (name);
}
}
ctx->vreg_cli_types [var->dreg] = var->inst_vtype;
}
}
names = g_new (char *, sig->param_count);
mono_method_get_param_names (cfg->method, (const char **) names);
for (i = 0; i < sig->param_count; ++i) {
LLVMArgInfo *ainfo = &linfo->args [i + sig->hasthis];
int reg = cfg->args [i + sig->hasthis]->dreg;
char *name;
pindex = ainfo->pindex;
LLVMValueRef arg = LLVMGetParam (ctx->lmethod, pindex);
switch (ainfo->storage) {
case LLVMArgVtypeInReg:
case LLVMArgAsFpArgs: {
LLVMValueRef args [8];
int j;
pindex += ainfo->ndummy_fpargs;
/* The argument is received as a set of int/fp arguments; store them into the real argument */
memset (args, 0, sizeof (args));
if (ainfo->storage == LLVMArgVtypeInReg) {
args [0] = LLVMGetParam (ctx->lmethod, pindex);
if (ainfo->pair_storage [1] != LLVMArgNone)
args [1] = LLVMGetParam (ctx->lmethod, pindex + 1);
} else {
g_assert (ainfo->nslots <= 8);
for (j = 0; j < ainfo->nslots; ++j)
args [j] = LLVMGetParam (ctx->lmethod, pindex + j);
}
ctx->addresses [reg] = build_alloca (ctx, ainfo->type);
emit_args_to_vtype (ctx, builder, ainfo->type, ctx->addresses [reg], ainfo, args);
break;
}
case LLVMArgVtypeByVal: {
ctx->addresses [reg] = LLVMGetParam (ctx->lmethod, pindex);
break;
}
case LLVMArgVtypeAddr:
case LLVMArgVtypeByRef: {
/* The argument is passed by ref */
ctx->addresses [reg] = LLVMGetParam (ctx->lmethod, pindex);
break;
}
case LLVMArgAsIArgs: {
LLVMValueRef arg = LLVMGetParam (ctx->lmethod, pindex);
int size;
MonoType *t = mini_get_underlying_type (ainfo->type);
/* The argument is received as an array of ints; store it into the real argument */
ctx->addresses [reg] = build_alloca (ctx, t);
size = mono_class_value_size (mono_class_from_mono_type_internal (t), NULL);
if (size == 0) {
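/* Empty struct, nothing to store */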
} else if (size < TARGET_SIZEOF_VOID_P) {
/* The upper bits of the registers might not be valid */
LLVMValueRef val = LLVMBuildExtractValue (builder, arg, 0, "");
LLVMValueRef dest = convert (ctx, ctx->addresses [reg], LLVMPointerType (LLVMIntType (size * 8), 0));
LLVMBuildStore (ctx->builder, LLVMBuildTrunc (builder, val, LLVMIntType (size * 8), ""), dest);
} else {
LLVMBuildStore (ctx->builder, arg, convert (ctx, ctx->addresses [reg], LLVMPointerType (LLVMTypeOf (arg), 0)));
}
break;
}
case LLVMArgVtypeAsScalar:
g_assert_not_reached ();
break;
case LLVMArgWasmVtypeAsScalar: {
MonoType *t = mini_get_underlying_type (ainfo->type);
/* The argument is received as a scalar */
ctx->addresses [reg] = build_alloca (ctx, t);
LLVMValueRef dest = convert (ctx, ctx->addresses [reg], LLVMPointerType (LLVMIntType (ainfo->esize * 8), 0));
LLVMBuildStore (ctx->builder, arg, dest);
break;
}
case LLVMArgGsharedvtFixed: {
/* These are non-gsharedvt arguments passed by ref; the rest of the IR treats them as scalars */
LLVMValueRef arg = LLVMGetParam (ctx->lmethod, pindex);
if (names [i])
name = g_strdup_printf ("arg_%s", names [i]);
else
name = g_strdup_printf ("arg_%d", i);
ctx->values [reg] = LLVMBuildLoad (builder, convert (ctx, arg, LLVMPointerType (type_to_llvm_type (ctx, ainfo->type), 0)), name);
break;
}
case LLVMArgGsharedvtFixedVtype: {
LLVMValueRef arg = LLVMGetParam (ctx->lmethod, pindex);
if (names [i])
name = g_strdup_printf ("vtype_arg_%s", names [i]);
else
name = g_strdup_printf ("vtype_arg_%d", i);
/* Non-gsharedvt vtype argument passed by ref; the rest of the IR treats it as a vtype */
g_assert (ctx->addresses [reg]);
LLVMSetValueName (ctx->addresses [reg], name);
LLVMBuildStore (builder, LLVMBuildLoad (builder, convert (ctx, arg, LLVMPointerType (type_to_llvm_type (ctx, ainfo->type), 0)), ""), ctx->addresses [reg]);
break;
}
case LLVMArgGsharedvtVariable:
/* The IR treats these as variables with addresses */
if (!ctx->addresses [reg])
ctx->addresses [reg] = LLVMGetParam (ctx->lmethod, pindex);
break;
default: {
LLVMTypeRef t;
/* Needed to avoid phi argument mismatch errors since operations on pointers produce i32/i64 */
if (m_type_is_byref (ainfo->type))
t = IntPtrType ();
else
t = type_to_llvm_type (ctx, ainfo->type);
ctx->values [reg] = convert_full (ctx, ctx->values [reg], llvm_type_to_stack_type (cfg, t), type_is_unsigned (ctx, ainfo->type));
break;
}
}
switch (ainfo->storage) {
case LLVMArgVtypeInReg:
case LLVMArgVtypeByVal:
case LLVMArgAsIArgs:
// FIXME: Enabling this fails on windows
case LLVMArgVtypeAddr:
case LLVMArgVtypeByRef:
{
if (MONO_CLASS_IS_SIMD (ctx->cfg, mono_class_from_mono_type_internal (ainfo->type)))
/* Treat these as normal values */
ctx->values [reg] = LLVMBuildLoad (builder, ctx->addresses [reg], "simd_vtype");
break;
}
default:
break;
}
}
g_free (names);
if (sig->hasthis) {
/* Handle this arguments as inputs to phi nodes */
int reg = cfg->args [0]->dreg;
if (ctx->vreg_types [reg])
ctx->values [reg] = convert (ctx, ctx->values [reg], ctx->vreg_types [reg]);
}
if (cfg->vret_addr)
emit_volatile_store (ctx, cfg->vret_addr->dreg);
if (sig->hasthis)
emit_volatile_store (ctx, cfg->args [0]->dreg);
for (i = 0; i < sig->param_count; ++i)
if (!mini_type_is_vtype (sig->params [i]))
emit_volatile_store (ctx, cfg->args [i + sig->hasthis]->dreg);
if (sig->hasthis && !cfg->rgctx_var && cfg->gshared && !cfg->llvm_only) {
LLVMValueRef this_alloc;
/*
* The exception handling code needs the location where the this argument was
* stored for gshared methods. We create a separate alloca to hold it, and mark it
* with the "mono.this" custom metadata to tell llvm that it needs to save its
* location into the LSDA.
*/
this_alloc = mono_llvm_build_alloca (builder, ThisType (), LLVMConstInt (LLVMInt32Type (), 1, FALSE), 0, "");
/* This volatile store will keep the alloca alive */
mono_llvm_build_store (builder, ctx->values [cfg->args [0]->dreg], this_alloc, TRUE, LLVM_BARRIER_NONE);
set_metadata_flag (this_alloc, "mono.this");
}
if (cfg->rgctx_var) {
if (!(cfg->rgctx_var->flags & MONO_INST_VOLATILE)) {
/* FIXME: This could be volatile even in llvmonly mode if used inside a clause etc. */
g_assert (!ctx->addresses [cfg->rgctx_var->dreg]);
ctx->values [cfg->rgctx_var->dreg] = ctx->rgctx_arg;
} else {
LLVMValueRef rgctx_alloc, store;
/*
* We handle the rgctx arg similarly to the this pointer.
*/
g_assert (ctx->addresses [cfg->rgctx_var->dreg]);
rgctx_alloc = ctx->addresses [cfg->rgctx_var->dreg];
/* This volatile store will keep the alloca alive */
store = mono_llvm_build_store (builder, convert (ctx, ctx->rgctx_arg, IntPtrType ()), rgctx_alloc, TRUE, LLVM_BARRIER_NONE);
(void)store; /* unused */
set_metadata_flag (rgctx_alloc, "mono.this");
}
}
#ifdef TARGET_WASM
/*
* Store ref arguments to the pin area.
 * FIXME: This might not be needed, since the caller already does it?
*/
for (i = 0; i < cfg->num_varinfo; ++i) {
MonoInst *var = cfg->varinfo [i];
if (var->opcode == OP_ARG && vreg_is_ref (cfg, var->dreg) && ctx->values [var->dreg])
emit_gc_pin (ctx, builder, var->dreg);
}
#endif
if (cfg->deopt) {
LLVMValueRef addr, index [2];
MonoMethodHeader *header = cfg->header;
int nfields = (sig->ret->type != MONO_TYPE_VOID ? 1 : 0) + sig->hasthis + sig->param_count + header->num_locals + 2;
LLVMTypeRef *types = g_alloca (nfields * sizeof (LLVMTypeRef));
int findex = 0;
/* method */
types [findex ++] = IntPtrType ();
/* il_offset */
types [findex ++] = LLVMInt32Type ();
int data_start = findex;
/* data */
if (sig->ret->type != MONO_TYPE_VOID)
types [findex ++] = IntPtrType ();
if (sig->hasthis)
types [findex ++] = IntPtrType ();
for (int i = 0; i < sig->param_count; ++i)
types [findex ++] = LLVMPointerType (type_to_llvm_type (ctx, sig->params [i]), 0);
for (int i = 0; i < header->num_locals; ++i)
types [findex ++] = LLVMPointerType (type_to_llvm_type (ctx, header->locals [i]), 0);
g_assert (findex == nfields);
char *name = g_strdup_printf ("%s_il_state", ctx->method_name);
LLVMTypeRef il_state_type = LLVMStructCreateNamed (ctx->module->context, name);
LLVMStructSetBody (il_state_type, types, nfields, FALSE);
g_free (name);
ctx->il_state = build_alloca_llvm_type_name (ctx, il_state_type, 0, "il_state");
g_assert (cfg->il_state_var);
ctx->addresses [cfg->il_state_var->dreg] = ctx->il_state;
/* Set il_state->il_offset = -1 */
index [0] = LLVMConstInt (LLVMInt32Type (), 0, FALSE);
index [1] = LLVMConstInt (LLVMInt32Type (), 1, FALSE);
addr = LLVMBuildGEP (builder, ctx->il_state, index, 2, "");
LLVMBuildStore (ctx->builder, LLVMConstInt (types [1], -1, FALSE), addr);
/*
* Set il_state->data [i] to either the address of the arg/local, or NULL.
* Because of mono_liveness_handle_exception_clauses (), all locals used/reachable from
* clauses are supposed to be volatile, so they have an address.
*/
findex = data_start;
if (sig->ret->type != MONO_TYPE_VOID) {
LLVMTypeRef ret_type = type_to_llvm_type (ctx, sig->ret);
ctx->il_state_ret = build_alloca_llvm_type_name (ctx, ret_type, 0, "il_state_ret");
index [0] = LLVMConstInt (LLVMInt32Type (), 0, FALSE);
index [1] = LLVMConstInt (LLVMInt32Type (), findex, FALSE);
addr = LLVMBuildGEP (builder, ctx->il_state, index, 2, "");
LLVMBuildStore (ctx->builder, ctx->il_state_ret, convert (ctx, addr, LLVMPointerType (LLVMTypeOf (ctx->il_state_ret), 0)));
findex ++;
}
for (int i = 0; i < sig->hasthis + sig->param_count; ++i) {
LLVMValueRef var_addr = ctx->addresses [cfg->args [i]->dreg];
index [0] = LLVMConstInt (LLVMInt32Type (), 0, FALSE);
index [1] = LLVMConstInt (LLVMInt32Type (), findex, FALSE);
addr = LLVMBuildGEP (builder, ctx->il_state, index, 2, "");
if (var_addr)
LLVMBuildStore (ctx->builder, var_addr, convert (ctx, addr, LLVMPointerType (LLVMTypeOf (var_addr), 0)));
else
LLVMBuildStore (ctx->builder, LLVMConstNull (types [findex]), addr);
findex ++;
}
for (int i = 0; i < header->num_locals; ++i) {
LLVMValueRef var_addr = ctx->addresses [cfg->locals [i]->dreg];
index [0] = LLVMConstInt (LLVMInt32Type (), 0, FALSE);
index [1] = LLVMConstInt (LLVMInt32Type (), findex, FALSE);
addr = LLVMBuildGEP (builder, ctx->il_state, index, 2, "");
if (var_addr)
LLVMBuildStore (ctx->builder, LLVMBuildBitCast (builder, var_addr, types [findex], ""), addr);
else
LLVMBuildStore (ctx->builder, LLVMConstNull (types [findex]), addr);
findex ++;
}
}
/* Initialize the method if needed */
if (cfg->compile_aot) {
/* Emit a location for the initialization code */
ctx->init_bb = gen_bb (ctx, "INIT_BB");
ctx->inited_bb = gen_bb (ctx, "INITED_BB");
LLVMBuildBr (ctx->builder, ctx->init_bb);
builder = ctx->builder = create_builder (ctx);
LLVMPositionBuilderAtEnd (ctx->builder, ctx->inited_bb);
ctx->bblocks [cfg->bb_entry->block_num].end_bblock = ctx->inited_bb;
}
/* Compute nesting between clauses */
ctx->nested_in = (GSList**)mono_mempool_alloc0 (cfg->mempool, sizeof (GSList*) * cfg->header->num_clauses);
for (i = 0; i < cfg->header->num_clauses; ++i) {
for (j = 0; j < cfg->header->num_clauses; ++j) {
MonoExceptionClause *clause1 = &cfg->header->clauses [i];
MonoExceptionClause *clause2 = &cfg->header->clauses [j];
if (i != j && clause1->try_offset >= clause2->try_offset && clause1->handler_offset <= clause2->handler_offset)
ctx->nested_in [i] = g_slist_prepend_mempool (cfg->mempool, ctx->nested_in [i], GINT_TO_POINTER (j));
}
}
/*
* For finally clauses, create an indicator variable telling OP_ENDFINALLY whenever
* it needs to continue normally, or return back to the exception handling system.
*/
for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
char name [128];
if (!(bb->region != -1 && (bb->flags & BB_EXCEPTION_HANDLER)))
continue;
if (bb->in_scount == 0) {
LLVMValueRef val;
sprintf (name, "finally_ind_bb%d", bb->block_num);
val = LLVMBuildAlloca (builder, LLVMInt32Type (), name);
LLVMBuildStore (builder, LLVMConstInt (LLVMInt32Type (), 0, FALSE), val);
ctx->bblocks [bb->block_num].finally_ind = val;
} else {
/* Create a variable to hold the exception var */
if (!ctx->ex_var)
ctx->ex_var = LLVMBuildAlloca (builder, ObjRefType (), "exvar");
}
}
ctx->builder = old_builder;
}
static gboolean
needs_extra_arg (EmitContext *ctx, MonoMethod *method)
{
WrapperInfo *info = NULL;
/*
 * When targeting wasm, the caller and callee signatures have to match exactly. This means
 * that every method which can be called indirectly needs an extra arg, since the caller
 * will call it through an ftnptr and will pass an extra arg.
*/
if (!ctx->cfg->llvm_only || !ctx->emit_dummy_arg)
return FALSE;
if (method->wrapper_type)
info = mono_marshal_get_wrapper_info (method);
switch (method->wrapper_type) {
case MONO_WRAPPER_OTHER:
if (info->subtype == WRAPPER_SUBTYPE_GSHAREDVT_IN_SIG || info->subtype == WRAPPER_SUBTYPE_GSHAREDVT_OUT_SIG)
/* Already have an explicit extra arg */
return FALSE;
break;
case MONO_WRAPPER_MANAGED_TO_NATIVE:
if (strstr (method->name, "icall_wrapper"))
/* These are JIT icall wrappers which are only called from JITted code directly */
return FALSE;
/* Normal icalls can be virtual methods which need an extra arg */
break;
case MONO_WRAPPER_RUNTIME_INVOKE:
case MONO_WRAPPER_ALLOC:
case MONO_WRAPPER_CASTCLASS:
case MONO_WRAPPER_WRITE_BARRIER:
case MONO_WRAPPER_NATIVE_TO_MANAGED:
return FALSE;
case MONO_WRAPPER_STELEMREF:
if (info->subtype != WRAPPER_SUBTYPE_VIRTUAL_STELEMREF)
return FALSE;
break;
case MONO_WRAPPER_MANAGED_TO_MANAGED:
if (info->subtype == WRAPPER_SUBTYPE_STRING_CTOR)
return FALSE;
break;
default:
break;
}
if (method->string_ctor)
return FALSE;
/* These are called from gsharedvt code with an indirect call which doesn't pass an extra arg */
if (method->klass == mono_get_string_class () && (strstr (method->name, "memcpy") || strstr (method->name, "bzero")))
return FALSE;
return TRUE;
}
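/*
 * is_supported_callconv:
 *
 *   Return whether the calling convention of CALL can be handled by this backend.
 */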
static inline gboolean
is_supported_callconv (EmitContext *ctx, MonoCallInst *call)
{
#if defined(TARGET_WIN32) && defined(TARGET_AMD64)
gboolean result = (call->signature->call_convention == MONO_CALL_DEFAULT) ||
(call->signature->call_convention == MONO_CALL_C) ||
(call->signature->call_convention == MONO_CALL_STDCALL);
#else
gboolean result = (call->signature->call_convention == MONO_CALL_DEFAULT) || ((call->signature->call_convention == MONO_CALL_C) && ctx->llvm_only);
#endif
return result;
}
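/*
 * process_call:
 *
 *   Emit LLVM IR for the call instruction INS: resolve the callee, marshal the
 * arguments according to the LLVMCallInfo, emit the call and convert the result.
 */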
static void
process_call (EmitContext *ctx, MonoBasicBlock *bb, LLVMBuilderRef *builder_ref, MonoInst *ins)
{
MonoCompile *cfg = ctx->cfg;
LLVMValueRef *values = ctx->values;
LLVMValueRef *addresses = ctx->addresses;
MonoCallInst *call = (MonoCallInst*)ins;
MonoMethodSignature *sig = call->signature;
LLVMValueRef callee = NULL, lcall;
LLVMValueRef *args;
LLVMCallInfo *cinfo;
GSList *l;
int i, len, nargs;
gboolean vretaddr;
LLVMTypeRef llvm_sig;
gpointer target;
gboolean is_virtual, calli;
LLVMBuilderRef builder = *builder_ref;
/* If both imt and rgctx arg are required, only pass the imt arg, the rgctx trampoline will pass the rgctx */
if (call->imt_arg_reg)
call->rgctx_arg_reg = 0;
if (!is_supported_callconv (ctx, call)) {
set_failure (ctx, "non-default callconv");
return;
}
cinfo = call->cinfo;
g_assert (cinfo);
if (call->rgctx_arg_reg)
cinfo->rgctx_arg = TRUE;
if (call->imt_arg_reg)
cinfo->imt_arg = TRUE;
if (!call->rgctx_arg_reg && call->method && needs_extra_arg (ctx, call->method))
cinfo->dummy_arg = TRUE;
vretaddr = (cinfo->ret.storage == LLVMArgVtypeRetAddr || cinfo->ret.storage == LLVMArgVtypeByRef || cinfo->ret.storage == LLVMArgGsharedvtFixed || cinfo->ret.storage == LLVMArgGsharedvtVariable || cinfo->ret.storage == LLVMArgGsharedvtFixedVtype);
llvm_sig = sig_to_llvm_sig_full (ctx, sig, cinfo);
if (!ctx_ok (ctx))
return;
int const opcode = ins->opcode;
is_virtual = opcode == OP_VOIDCALL_MEMBASE || opcode == OP_CALL_MEMBASE
|| opcode == OP_VCALL_MEMBASE || opcode == OP_LCALL_MEMBASE
|| opcode == OP_FCALL_MEMBASE || opcode == OP_RCALL_MEMBASE
|| opcode == OP_TAILCALL_MEMBASE;
calli = !call->fptr_is_patch && (opcode == OP_VOIDCALL_REG || opcode == OP_CALL_REG
|| opcode == OP_VCALL_REG || opcode == OP_LCALL_REG || opcode == OP_FCALL_REG
|| opcode == OP_RCALL_REG || opcode == OP_TAILCALL_REG);
/* FIXME: Avoid creating duplicate methods */
if (ins->flags & MONO_INST_HAS_METHOD) {
if (is_virtual) {
callee = NULL;
} else {
if (cfg->compile_aot) {
callee = get_callee (ctx, llvm_sig, MONO_PATCH_INFO_METHOD, call->method);
if (!callee) {
set_failure (ctx, "can't encode patch");
return;
}
} else if (cfg->method == call->method) {
callee = ctx->lmethod;
} else {
ERROR_DECL (error);
static int tramp_index;
char *name;
name = g_strdup_printf ("[tramp_%d] %s", tramp_index, mono_method_full_name (call->method, TRUE));
tramp_index ++;
/*
* Use our trampoline infrastructure for lazy compilation instead of llvm's.
* Make all calls through a global. The address of the global will be saved in
* MonoJitDomainInfo.llvm_jit_callees and updated when the method it refers to is
* compiled.
*/
LLVMValueRef tramp_var = (LLVMValueRef)g_hash_table_lookup (ctx->jit_callees, call->method);
if (!tramp_var) {
target = mono_create_jit_trampoline (call->method, error);
if (!is_ok (error)) {
set_failure (ctx, mono_error_get_message (error));
mono_error_cleanup (error);
return;
}
tramp_var = LLVMAddGlobal (ctx->lmodule, LLVMPointerType (llvm_sig, 0), name);
LLVMSetInitializer (tramp_var, LLVMConstIntToPtr (LLVMConstInt (LLVMInt64Type (), (guint64)(size_t)target, FALSE), LLVMPointerType (llvm_sig, 0)));
LLVMSetLinkage (tramp_var, LLVMExternalLinkage);
g_hash_table_insert (ctx->jit_callees, call->method, tramp_var);
}
callee = LLVMBuildLoad (builder, tramp_var, "");
}
}
if (!cfg->llvm_only && call->method && strstr (m_class_get_name (call->method->klass), "AsyncVoidMethodBuilder")) {
/* LLVM miscompiles async methods */
set_failure (ctx, "#13734");
return;
}
} else if (calli) {
} else {
const MonoJitICallId jit_icall_id = call->jit_icall_id;
if (jit_icall_id) {
if (cfg->compile_aot) {
callee = get_callee (ctx, llvm_sig, MONO_PATCH_INFO_JIT_ICALL_ID, GUINT_TO_POINTER (jit_icall_id));
if (!callee) {
set_failure (ctx, "can't encode patch");
return;
}
} else {
callee = get_jit_callee (ctx, "", llvm_sig, MONO_PATCH_INFO_JIT_ICALL_ID, GUINT_TO_POINTER (jit_icall_id));
}
} else {
if (cfg->compile_aot) {
callee = NULL;
if (cfg->abs_patches) {
MonoJumpInfo *abs_ji = (MonoJumpInfo*)g_hash_table_lookup (cfg->abs_patches, call->fptr);
if (abs_ji) {
callee = get_callee (ctx, llvm_sig, abs_ji->type, abs_ji->data.target);
if (!callee) {
set_failure (ctx, "can't encode patch");
return;
}
}
}
if (!callee) {
set_failure (ctx, "aot");
return;
}
} else {
if (cfg->abs_patches) {
MonoJumpInfo *abs_ji = (MonoJumpInfo*)g_hash_table_lookup (cfg->abs_patches, call->fptr);
if (abs_ji) {
ERROR_DECL (error);
target = mono_resolve_patch_target (cfg->method, NULL, abs_ji, FALSE, error);
mono_error_assert_ok (error);
callee = get_jit_callee (ctx, "", llvm_sig, abs_ji->type, abs_ji->data.target);
} else {
g_assert_not_reached ();
}
} else {
g_assert_not_reached ();
}
}
}
}
if (is_virtual) {
int size = TARGET_SIZEOF_VOID_P;
LLVMValueRef index;
g_assert (ins->inst_offset % size == 0);
index = LLVMConstInt (LLVMInt32Type (), ins->inst_offset / size, FALSE);
callee = convert (ctx, LLVMBuildLoad (builder, LLVMBuildGEP (builder, convert (ctx, values [ins->inst_basereg], LLVMPointerType (LLVMPointerType (IntPtrType (), 0), 0)), &index, 1, ""), ""), LLVMPointerType (llvm_sig, 0));
} else if (calli) {
callee = convert (ctx, values [ins->sreg1], LLVMPointerType (llvm_sig, 0));
} else {
if (ins->flags & MONO_INST_HAS_METHOD) {
}
}
/*
* Collect and convert arguments
*/
nargs = (sig->param_count * 16) + sig->hasthis + vretaddr + call->rgctx_reg + call->imt_arg_reg + call->cinfo->dummy_arg + 1;
len = sizeof (LLVMValueRef) * nargs;
args = g_newa (LLVMValueRef, nargs);
memset (args, 0, len);
l = call->out_ireg_args;
if (call->rgctx_arg_reg) {
g_assert (values [call->rgctx_arg_reg]);
g_assert (cinfo->rgctx_arg_pindex < nargs);
/*
 * On ARM, the imt/rgctx argument is passed in a caller-save register, but some of our trampolines etc. clobber it, leading to
 * problems if LLVM moves the arg assignment earlier. To work around this, save the argument into a stack slot and load
* it using a volatile load.
*/
#ifdef TARGET_ARM
if (!ctx->imt_rgctx_loc)
ctx->imt_rgctx_loc = build_alloca_llvm_type (ctx, ctx->module->ptr_type, TARGET_SIZEOF_VOID_P);
LLVMBuildStore (builder, convert (ctx, ctx->values [call->rgctx_arg_reg], ctx->module->ptr_type), ctx->imt_rgctx_loc);
args [cinfo->rgctx_arg_pindex] = mono_llvm_build_load (builder, ctx->imt_rgctx_loc, "", TRUE);
#else
args [cinfo->rgctx_arg_pindex] = convert (ctx, values [call->rgctx_arg_reg], ctx->module->ptr_type);
#endif
}
if (call->imt_arg_reg) {
g_assert (!ctx->llvm_only);
g_assert (values [call->imt_arg_reg]);
g_assert (cinfo->imt_arg_pindex < nargs);
#ifdef TARGET_ARM
if (!ctx->imt_rgctx_loc)
ctx->imt_rgctx_loc = build_alloca_llvm_type (ctx, ctx->module->ptr_type, TARGET_SIZEOF_VOID_P);
LLVMBuildStore (builder, convert (ctx, ctx->values [call->imt_arg_reg], ctx->module->ptr_type), ctx->imt_rgctx_loc);
args [cinfo->imt_arg_pindex] = mono_llvm_build_load (builder, ctx->imt_rgctx_loc, "", TRUE);
#else
args [cinfo->imt_arg_pindex] = convert (ctx, values [call->imt_arg_reg], ctx->module->ptr_type);
#endif
}
switch (cinfo->ret.storage) {
case LLVMArgGsharedvtVariable: {
MonoInst *var = get_vreg_to_inst (cfg, call->inst.dreg);
if (var && var->opcode == OP_GSHAREDVT_LOCAL) {
args [cinfo->vret_arg_pindex] = convert (ctx, emit_gsharedvt_ldaddr (ctx, var->dreg), IntPtrType ());
} else {
g_assert (addresses [call->inst.dreg]);
args [cinfo->vret_arg_pindex] = convert (ctx, addresses [call->inst.dreg], IntPtrType ());
}
break;
}
default:
if (vretaddr) {
if (!addresses [call->inst.dreg])
addresses [call->inst.dreg] = build_alloca (ctx, sig->ret);
g_assert (cinfo->vret_arg_pindex < nargs);
if (cinfo->ret.storage == LLVMArgVtypeByRef)
args [cinfo->vret_arg_pindex] = addresses [call->inst.dreg];
else
args [cinfo->vret_arg_pindex] = LLVMBuildPtrToInt (builder, addresses [call->inst.dreg], IntPtrType (), "");
}
break;
}
/*
* Sometimes the same method is called with two different signatures (i.e. with and without 'this'), so
* use the real callee for argument type conversion.
*/
LLVMTypeRef callee_type = LLVMGetElementType (LLVMTypeOf (callee));
LLVMTypeRef *param_types = (LLVMTypeRef*)g_alloca (sizeof (LLVMTypeRef) * LLVMCountParamTypes (callee_type));
LLVMGetParamTypes (callee_type, param_types);
for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
guint32 regpair;
int reg, pindex;
LLVMArgInfo *ainfo = &call->cinfo->args [i];
pindex = ainfo->pindex;
regpair = (guint32)(gssize)(l->data);
reg = regpair & 0xffffff;
args [pindex] = values [reg];
switch (ainfo->storage) {
case LLVMArgVtypeInReg:
case LLVMArgAsFpArgs: {
guint32 nargs;
int j;
for (j = 0; j < ainfo->ndummy_fpargs; ++j)
args [pindex + j] = LLVMConstNull (LLVMDoubleType ());
pindex += ainfo->ndummy_fpargs;
g_assert (addresses [reg]);
emit_vtype_to_args (ctx, builder, ainfo->type, addresses [reg], ainfo, args + pindex, &nargs);
pindex += nargs;
// FIXME: alignment
// FIXME: Get rid of the VMOVE
break;
}
case LLVMArgVtypeByVal:
g_assert (addresses [reg]);
args [pindex] = addresses [reg];
break;
case LLVMArgVtypeAddr:
case LLVMArgVtypeByRef: {
g_assert (addresses [reg]);
args [pindex] = convert (ctx, addresses [reg], LLVMPointerType (type_to_llvm_arg_type (ctx, ainfo->type), 0));
break;
}
case LLVMArgAsIArgs:
g_assert (addresses [reg]);
if (ainfo->esize == 8)
args [pindex] = LLVMBuildLoad (ctx->builder, convert (ctx, addresses [reg], LLVMPointerType (LLVMArrayType (LLVMInt64Type (), ainfo->nslots), 0)), "");
else
args [pindex] = LLVMBuildLoad (ctx->builder, convert (ctx, addresses [reg], LLVMPointerType (LLVMArrayType (IntPtrType (), ainfo->nslots), 0)), "");
break;
case LLVMArgVtypeAsScalar:
g_assert_not_reached ();
break;
case LLVMArgWasmVtypeAsScalar:
g_assert (addresses [reg]);
args [pindex] = LLVMBuildLoad (ctx->builder, convert (ctx, addresses [reg], LLVMPointerType (LLVMIntType (ainfo->esize * 8), 0)), "");
break;
case LLVMArgGsharedvtFixed:
case LLVMArgGsharedvtFixedVtype:
g_assert (addresses [reg]);
args [pindex] = convert (ctx, addresses [reg], LLVMPointerType (type_to_llvm_arg_type (ctx, ainfo->type), 0));
break;
case LLVMArgGsharedvtVariable:
g_assert (addresses [reg]);
args [pindex] = convert (ctx, addresses [reg], LLVMPointerType (IntPtrType (), 0));
break;
default:
g_assert (args [pindex]);
if (i == 0 && sig->hasthis)
args [pindex] = convert (ctx, args [pindex], param_types [pindex]);
else
args [pindex] = convert (ctx, args [pindex], type_to_llvm_arg_type (ctx, ainfo->type));
break;
}
g_assert (pindex <= nargs);
l = l->next;
}
if (call->cinfo->dummy_arg) {
g_assert (call->cinfo->dummy_arg_pindex < nargs);
args [call->cinfo->dummy_arg_pindex] = LLVMConstNull (ctx->module->ptr_type);
}
// FIXME: Align call sites
/*
* Emit the call
*/
lcall = emit_call (ctx, bb, &builder, callee, args, LLVMCountParamTypes (llvm_sig));
mono_llvm_nonnull_state_update (ctx, lcall, call->method, args, LLVMCountParamTypes (llvm_sig));
// If we just allocated an object, it's not null.
if (call->method && call->method->wrapper_type == MONO_WRAPPER_ALLOC) {
mono_llvm_set_call_nonnull_ret (lcall);
}
if (ins->opcode != OP_TAILCALL && ins->opcode != OP_TAILCALL_MEMBASE && LLVMGetInstructionOpcode (lcall) == LLVMCall)
mono_llvm_set_call_notailcall (lcall);
// Add original method name we are currently emitting as a custom string metadata (the only way to leave comments in LLVM IR)
if (mono_debug_enabled () && call && call->method)
mono_llvm_add_string_metadata (lcall, "managed_name", mono_method_full_name (call->method, TRUE));
// As per the LLVM docs, a function has a noalias return value if and only if
// it is an allocation function. This is an allocation function.
if (call->method && call->method->wrapper_type == MONO_WRAPPER_ALLOC) {
mono_llvm_set_call_noalias_ret (lcall);
// All objects are expected to be 8-byte aligned (SGEN_ALLOC_ALIGN)
mono_llvm_set_alignment_ret (lcall, 8);
}
/*
* Modify cconv and parameter attributes to pass rgctx/imt correctly.
*/
#if defined(MONO_ARCH_IMT_REG) && defined(MONO_ARCH_RGCTX_REG)
g_assert (MONO_ARCH_IMT_REG == MONO_ARCH_RGCTX_REG);
#endif
/* The two can't be used together, so use only one LLVM calling conv to pass them */
g_assert (!(call->rgctx_arg_reg && call->imt_arg_reg));
if (!sig->pinvoke && !cfg->llvm_only)
LLVMSetInstructionCallConv (lcall, LLVMMono1CallConv);
if (cinfo->ret.storage == LLVMArgVtypeByRef)
mono_llvm_add_instr_attr (lcall, 1 + cinfo->vret_arg_pindex, LLVM_ATTR_STRUCT_RET);
if (!ctx->llvm_only && call->rgctx_arg_reg)
mono_llvm_add_instr_attr (lcall, 1 + cinfo->rgctx_arg_pindex, LLVM_ATTR_IN_REG);
if (call->imt_arg_reg)
mono_llvm_add_instr_attr (lcall, 1 + cinfo->imt_arg_pindex, LLVM_ATTR_IN_REG);
/* Add byval attributes if needed */
for (i = 0; i < sig->param_count; ++i) {
LLVMArgInfo *ainfo = &call->cinfo->args [i + sig->hasthis];
if (ainfo && ainfo->storage == LLVMArgVtypeByVal)
mono_llvm_add_instr_attr (lcall, 1 + ainfo->pindex, LLVM_ATTR_BY_VAL);
#ifdef TARGET_WASM
if (ainfo && ainfo->storage == LLVMArgVtypeByRef)
/* This causes llvm to make a copy of the value which is what we need */
mono_llvm_add_instr_byval_attr (lcall, 1 + ainfo->pindex, LLVMGetElementType (param_types [ainfo->pindex]));
#endif
}
gboolean is_simd = MONO_CLASS_IS_SIMD (ctx->cfg, mono_class_from_mono_type_internal (sig->ret));
gboolean should_promote_to_value = FALSE;
const char *load_name = NULL;
/*
* Convert the result. Non-SIMD value types are manipulated via an
* indirection. SIMD value types are represented directly as LLVM vector
* values, and must have a corresponding LLVM value definition in
* `values`.
*/
switch (cinfo->ret.storage) {
case LLVMArgAsIArgs:
case LLVMArgFpStruct:
if (!addresses [call->inst.dreg])
addresses [call->inst.dreg] = build_alloca (ctx, sig->ret);
LLVMBuildStore (builder, lcall, convert_full (ctx, addresses [call->inst.dreg], LLVMPointerType (LLVMTypeOf (lcall), 0), FALSE));
break;
case LLVMArgVtypeByVal:
/*
* Only used by amd64 and x86. Only ever used when passing
* arguments; never used for return values.
*/
g_assert_not_reached ();
break;
case LLVMArgVtypeInReg: {
if (LLVMTypeOf (lcall) == LLVMVoidType ())
/* Empty struct */
break;
if (!addresses [ins->dreg])
addresses [ins->dreg] = build_alloca (ctx, sig->ret);
LLVMValueRef regs [2] = { 0 };
regs [0] = LLVMBuildExtractValue (builder, lcall, 0, "");
if (cinfo->ret.pair_storage [1] != LLVMArgNone)
regs [1] = LLVMBuildExtractValue (builder, lcall, 1, "");
emit_args_to_vtype (ctx, builder, sig->ret, addresses [ins->dreg], &cinfo->ret, regs);
load_name = "process_call_vtype_in_reg";
should_promote_to_value = is_simd;
break;
}
case LLVMArgVtypeAsScalar:
if (!addresses [call->inst.dreg])
addresses [call->inst.dreg] = build_alloca (ctx, sig->ret);
LLVMBuildStore (builder, lcall, convert_full (ctx, addresses [call->inst.dreg], LLVMPointerType (LLVMTypeOf (lcall), 0), FALSE));
load_name = "process_call_vtype_as_scalar";
should_promote_to_value = is_simd;
break;
case LLVMArgVtypeRetAddr:
case LLVMArgVtypeByRef:
load_name = "process_call_vtype_ret_addr";
should_promote_to_value = is_simd;
break;
case LLVMArgGsharedvtVariable:
break;
case LLVMArgGsharedvtFixed:
case LLVMArgGsharedvtFixedVtype:
values [ins->dreg] = LLVMBuildLoad (builder, convert_full (ctx, addresses [call->inst.dreg], LLVMPointerType (type_to_llvm_type (ctx, sig->ret), 0), FALSE), "");
break;
case LLVMArgWasmVtypeAsScalar:
if (!addresses [call->inst.dreg])
addresses [call->inst.dreg] = build_alloca (ctx, sig->ret);
LLVMBuildStore (builder, lcall, convert_full (ctx, addresses [call->inst.dreg], LLVMPointerType (LLVMTypeOf (lcall), 0), FALSE));
break;
default:
if (sig->ret->type != MONO_TYPE_VOID)
/* If the method returns an unsigned value, need to zext it */
values [ins->dreg] = convert_full (ctx, lcall, llvm_type_to_stack_type (cfg, type_to_llvm_type (ctx, sig->ret)), type_is_unsigned (ctx, sig->ret));
break;
}
if (should_promote_to_value) {
g_assert (addresses [call->inst.dreg]);
LLVMTypeRef addr_type = LLVMPointerType (type_to_llvm_type (ctx, sig->ret), 0);
LLVMValueRef addr = convert_full (ctx, addresses [call->inst.dreg], addr_type, FALSE);
values [ins->dreg] = LLVMBuildLoad (builder, addr, load_name);
}
*builder_ref = ctx->builder;
}
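/*
 * emit_llvmonly_throw:
 *
 *   Emit a call to the mini_llvmonly_throw/rethrow_exception icall, followed by
 * an unreachable instruction.
 */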
static void
emit_llvmonly_throw (EmitContext *ctx, MonoBasicBlock *bb, gboolean rethrow, LLVMValueRef exc)
{
MonoJitICallId icall_id = rethrow ? MONO_JIT_ICALL_mini_llvmonly_rethrow_exception : MONO_JIT_ICALL_mini_llvmonly_throw_exception;
LLVMValueRef callee = rethrow ? ctx->module->rethrow : ctx->module->throw_icall;
LLVMTypeRef exc_type = type_to_llvm_type (ctx, m_class_get_byval_arg (mono_get_exception_class ()));
if (!callee) {
LLVMTypeRef fun_sig = LLVMFunctionType1 (LLVMVoidType (), exc_type, FALSE);
g_assert (ctx->cfg->compile_aot);
callee = get_callee (ctx, fun_sig, MONO_PATCH_INFO_JIT_ICALL_ADDR, GUINT_TO_POINTER (icall_id));
}
LLVMValueRef args [2];
args [0] = convert (ctx, exc, exc_type);
emit_call (ctx, bb, &ctx->builder, callee, args, 1);
LLVMBuildUnreachable (ctx->builder);
ctx->builder = create_builder (ctx);
}
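/*
 * emit_throw:
 *
 *   Emit a call to the mono_arch_throw/rethrow_exception icall, creating the
 * callee on first use.
 */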
static void
emit_throw (EmitContext *ctx, MonoBasicBlock *bb, gboolean rethrow, LLVMValueRef exc)
{
MonoMethodSignature *throw_sig;
LLVMValueRef * const pcallee = rethrow ? &ctx->module->rethrow : &ctx->module->throw_icall;
LLVMValueRef callee = *pcallee;
char const * const icall_name = rethrow ? "mono_arch_rethrow_exception" : "mono_arch_throw_exception";
#ifndef TARGET_X86
const
#endif
MonoJitICallId icall_id = rethrow ? MONO_JIT_ICALL_mono_arch_rethrow_exception : MONO_JIT_ICALL_mono_arch_throw_exception;
if (!callee) {
throw_sig = mono_metadata_signature_alloc (mono_get_corlib (), 1);
throw_sig->ret = m_class_get_byval_arg (mono_get_void_class ());
throw_sig->params [0] = m_class_get_byval_arg (mono_get_object_class ());
if (ctx->cfg->compile_aot) {
callee = get_callee (ctx, sig_to_llvm_sig (ctx, throw_sig), MONO_PATCH_INFO_JIT_ICALL_ID, GUINT_TO_POINTER (icall_id));
} else {
#ifdef TARGET_X86
/*
* LLVM doesn't push the exception argument, so we need a different
* trampoline.
*/
icall_id = rethrow ? MONO_JIT_ICALL_mono_llvm_rethrow_exception_trampoline : MONO_JIT_ICALL_mono_llvm_throw_exception_trampoline;
#endif
callee = get_jit_callee (ctx, icall_name, sig_to_llvm_sig (ctx, throw_sig), MONO_PATCH_INFO_JIT_ICALL_ID, GUINT_TO_POINTER (icall_id));
}
mono_memory_barrier ();
}
LLVMValueRef arg;
arg = convert (ctx, exc, type_to_llvm_type (ctx, m_class_get_byval_arg (mono_get_object_class ())));
emit_call (ctx, bb, &ctx->builder, callee, &arg, 1);
}
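/*
 * emit_resume_eh:
 *
 *   Emit a call to mini_llvmonly_resume_exception to return control to the
 * exception handling system, followed by an unreachable instruction.
 */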
static void
emit_resume_eh (EmitContext *ctx, MonoBasicBlock *bb)
{
const MonoJitICallId icall_id = MONO_JIT_ICALL_mini_llvmonly_resume_exception;
LLVMValueRef callee;
LLVMTypeRef fun_sig = LLVMFunctionType0 (LLVMVoidType (), FALSE);
g_assert (ctx->cfg->compile_aot);
callee = get_callee (ctx, fun_sig, MONO_PATCH_INFO_JIT_ICALL_ID, GUINT_TO_POINTER (icall_id));
emit_call (ctx, bb, &ctx->builder, callee, NULL, 0);
LLVMBuildUnreachable (ctx->builder);
ctx->builder = create_builder (ctx);
}
static LLVMValueRef
mono_llvm_emit_clear_exception_call (EmitContext *ctx, LLVMBuilderRef builder)
{
const MonoJitICallId icall_id = MONO_JIT_ICALL_mini_llvmonly_clear_exception;
LLVMTypeRef call_sig = LLVMFunctionType (LLVMVoidType (), NULL, 0, FALSE);
LLVMValueRef callee = NULL;
if (!callee) {
callee = get_callee (ctx, call_sig, MONO_PATCH_INFO_JIT_ICALL_ID, GUINT_TO_POINTER (icall_id));
}
g_assert (builder && callee);
return LLVMBuildCall (builder, callee, NULL, 0, "");
}
static LLVMValueRef
mono_llvm_emit_load_exception_call (EmitContext *ctx, LLVMBuilderRef builder)
{
const MonoJitICallId icall_id = MONO_JIT_ICALL_mini_llvmonly_load_exception;
LLVMTypeRef call_sig = LLVMFunctionType (ObjRefType (), NULL, 0, FALSE);
LLVMValueRef callee = NULL;
g_assert (ctx->cfg->compile_aot);
if (!callee) {
callee = get_callee (ctx, call_sig, MONO_PATCH_INFO_JIT_ICALL_ID, GUINT_TO_POINTER (icall_id));
}
g_assert (builder && callee);
return LLVMBuildCall (builder, callee, NULL, 0, "load_exception");
}
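/*
 * mono_llvm_emit_match_exception_call:
 *
 *   Emit a call to mini_llvmonly_match_exception to find the clause inside
 * [REGION_START, REGION_END] which matches the currently thrown exception,
 * returning the clause index, or -1 if there is no match.
 */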
static LLVMValueRef
mono_llvm_emit_match_exception_call (EmitContext *ctx, LLVMBuilderRef builder, gint32 region_start, gint32 region_end)
{
const char *icall_name = "mini_llvmonly_match_exception";
const MonoJitICallId icall_id = MONO_JIT_ICALL_mini_llvmonly_match_exception;
ctx->builder = builder;
LLVMValueRef args[5];
const int num_args = G_N_ELEMENTS (args);
args [0] = convert (ctx, get_aotconst (ctx, MONO_PATCH_INFO_AOT_JIT_INFO, GINT_TO_POINTER (ctx->cfg->method_index), LLVMPointerType (IntPtrType (), 0)), IntPtrType ());
args [1] = LLVMConstInt (LLVMInt32Type (), region_start, 0);
args [2] = LLVMConstInt (LLVMInt32Type (), region_end, 0);
if (ctx->cfg->rgctx_var) {
if (ctx->cfg->llvm_only) {
args [3] = convert (ctx, ctx->rgctx_arg, IntPtrType ());
} else {
LLVMValueRef rgctx_alloc = ctx->addresses [ctx->cfg->rgctx_var->dreg];
g_assert (rgctx_alloc);
args [3] = LLVMBuildLoad (builder, convert (ctx, rgctx_alloc, LLVMPointerType (IntPtrType (), 0)), "");
}
} else {
args [3] = LLVMConstInt (IntPtrType (), 0, 0);
}
if (ctx->this_arg)
args [4] = convert (ctx, ctx->this_arg, IntPtrType ());
else
args [4] = LLVMConstInt (IntPtrType (), 0, 0);
LLVMTypeRef match_sig = LLVMFunctionType5 (LLVMInt32Type (), IntPtrType (), LLVMInt32Type (), LLVMInt32Type (), IntPtrType (), IntPtrType (), FALSE);
LLVMValueRef callee;
g_assert (ctx->cfg->compile_aot);
ctx->builder = builder;
// get_callee expects ctx->builder to be the emitting builder
callee = get_callee (ctx, match_sig, MONO_PATCH_INFO_JIT_ICALL_ID, GUINT_TO_POINTER (icall_id));
g_assert (builder && callee);
g_assert (ctx->ex_var);
return LLVMBuildCall (builder, callee, args, num_args, icall_name);
}
// FIXME: This won't work because the code-finding makes this
// not a constant.
/*#define MONO_PERSONALITY_DEBUG*/
#ifdef MONO_PERSONALITY_DEBUG
static const gboolean use_mono_personality_debug = TRUE;
static const char *default_personality_name = "mono_debug_personality";
#else
static const gboolean use_mono_personality_debug = FALSE;
static const char *default_personality_name = "__gxx_personality_v0";
#endif
static LLVMTypeRef
default_cpp_lpad_exc_signature (void)
{
static LLVMTypeRef sig;
if (!sig) {
LLVMTypeRef signature [2];
signature [0] = LLVMPointerType (LLVMInt8Type (), 0);
signature [1] = LLVMInt32Type ();
sig = LLVMStructType (signature, 2, FALSE);
}
return sig;
}
static LLVMValueRef
get_mono_personality (EmitContext *ctx)
{
LLVMValueRef personality = NULL;
LLVMTypeRef personality_type = LLVMFunctionType (LLVMInt32Type (), NULL, 0, TRUE);
g_assert (ctx->cfg->compile_aot);
if (!use_mono_personality_debug) {
personality = LLVMGetNamedFunction (ctx->lmodule, default_personality_name);
} else {
personality = get_callee (ctx, personality_type, MONO_PATCH_INFO_JIT_ICALL_ID, GUINT_TO_POINTER (MONO_JIT_ICALL_mono_debug_personality));
}
g_assert (personality);
return personality;
}
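/*
 * emit_landing_pad:
 *
 *   Emit a landing pad for the GROUP_SIZE clauses starting at GROUP_INDEX and
 * the code to transfer control to the matching handler, or to resume exception
 * handling if no handler matches.
 */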
static LLVMBasicBlockRef
emit_landing_pad (EmitContext *ctx, int group_index, int group_size)
{
MonoCompile *cfg = ctx->cfg;
LLVMBuilderRef old_builder = ctx->builder;
MonoExceptionClause *group_start = cfg->header->clauses + group_index;
LLVMBuilderRef lpadBuilder = create_builder (ctx);
ctx->builder = lpadBuilder;
MonoBasicBlock *handler_bb = cfg->cil_offset_to_bb [CLAUSE_START (group_start)];
g_assert (handler_bb);
// <resultval> = landingpad <somety> personality <type> <pers_fn> <clause>+
LLVMValueRef personality = get_mono_personality (ctx);
g_assert (personality);
char *bb_name = g_strdup_printf ("LPAD%d_BB", group_index);
LLVMBasicBlockRef lpad_bb = gen_bb (ctx, bb_name);
g_free (bb_name);
LLVMPositionBuilderAtEnd (lpadBuilder, lpad_bb);
LLVMValueRef landing_pad = LLVMBuildLandingPad (lpadBuilder, default_cpp_lpad_exc_signature (), personality, 0, "");
g_assert (landing_pad);
LLVMValueRef cast = LLVMBuildBitCast (lpadBuilder, ctx->module->sentinel_exception, LLVMPointerType (LLVMInt8Type (), 0), "int8TypeInfo");
LLVMAddClause (landing_pad, cast);
if (ctx->cfg->deopt) {
/*
* Call mini_llvmonly_resume_exception_il_state (lmf, il_state)
*
* The call will execute the catch clause and the rest of the method and store the return
* value into ctx->il_state_ret.
*/
if (!ctx->has_catch) {
/* Unused */
LLVMBuildUnreachable (lpadBuilder);
return lpad_bb;
}
const MonoJitICallId icall_id = MONO_JIT_ICALL_mini_llvmonly_resume_exception_il_state;
LLVMValueRef callee;
LLVMValueRef args [2];
LLVMTypeRef fun_sig = LLVMFunctionType2 (LLVMVoidType (), IntPtrType (), IntPtrType (), FALSE);
callee = get_callee (ctx, fun_sig, MONO_PATCH_INFO_JIT_ICALL_ID, GUINT_TO_POINTER (icall_id));
g_assert (ctx->cfg->lmf_var);
g_assert (ctx->addresses [ctx->cfg->lmf_var->dreg]);
args [0] = LLVMBuildPtrToInt (ctx->builder, ctx->addresses [ctx->cfg->lmf_var->dreg], IntPtrType (), "");
args [1] = LLVMBuildPtrToInt (ctx->builder, ctx->il_state, IntPtrType (), "");
emit_call (ctx, NULL, &ctx->builder, callee, args, 2);
/* Return the value set in ctx->il_state_ret */
LLVMTypeRef ret_type = LLVMGetReturnType (LLVMGetElementType (LLVMTypeOf (ctx->lmethod)));
LLVMBuilderRef builder = ctx->builder;
LLVMValueRef addr, retval, gep, indexes [2];
switch (ctx->linfo->ret.storage) {
case LLVMArgNone:
LLVMBuildRetVoid (builder);
break;
case LLVMArgNormal:
case LLVMArgWasmVtypeAsScalar:
case LLVMArgVtypeInReg: {
if (ctx->sig->ret->type == MONO_TYPE_VOID) {
LLVMBuildRetVoid (builder);
break;
}
addr = ctx->il_state_ret;
g_assert (addr);
addr = convert (ctx, ctx->il_state_ret, LLVMPointerType (ret_type, 0));
indexes [0] = LLVMConstInt (LLVMInt32Type (), 0, FALSE);
indexes [1] = LLVMConstInt (LLVMInt32Type (), 0, FALSE);
gep = LLVMBuildGEP (builder, addr, indexes, 1, "");
LLVMBuildRet (builder, LLVMBuildLoad (builder, gep, ""));
break;
}
case LLVMArgVtypeRetAddr: {
LLVMValueRef ret_addr;
g_assert (cfg->vret_addr);
ret_addr = ctx->values [cfg->vret_addr->dreg];
addr = ctx->il_state_ret;
g_assert (addr);
/* The ret value is in il_state_ret, copy it to the memory pointed to by the vret arg */
ret_type = type_to_llvm_type (ctx, ctx->sig->ret);
indexes [0] = LLVMConstInt (LLVMInt32Type (), 0, FALSE);
indexes [1] = LLVMConstInt (LLVMInt32Type (), 0, FALSE);
gep = LLVMBuildGEP (builder, addr, indexes, 1, "");
retval = convert (ctx, LLVMBuildLoad (builder, gep, ""), ret_type);
LLVMBuildStore (builder, retval, convert (ctx, ret_addr, LLVMPointerType (ret_type, 0)));
LLVMBuildRetVoid (builder);
break;
}
default:
g_assert_not_reached ();
break;
}
return lpad_bb;
}
LLVMBasicBlockRef resume_bb = gen_bb (ctx, "RESUME_BB");
LLVMBuilderRef resume_builder = create_builder (ctx);
ctx->builder = resume_builder;
LLVMPositionBuilderAtEnd (resume_builder, resume_bb);
emit_resume_eh (ctx, handler_bb);
// Build match
ctx->builder = lpadBuilder;
LLVMPositionBuilderAtEnd (lpadBuilder, lpad_bb);
gboolean finally_only = TRUE;
MonoExceptionClause *group_cursor = group_start;
for (int i = 0; i < group_size; i ++) {
if (!(group_cursor->flags & MONO_EXCEPTION_CLAUSE_FINALLY || group_cursor->flags & MONO_EXCEPTION_CLAUSE_FAULT))
finally_only = FALSE;
group_cursor++;
}
// FIXME:
// Handle landing pad inlining
if (!finally_only) {
// So at each level of the exception stack we will match the exception again.
// During that match, we need to compare against the handler types for the current
// protected region. We send the try start and end so that we can only check against
// handlers for this lexical protected region.
LLVMValueRef match = mono_llvm_emit_match_exception_call (ctx, lpadBuilder, group_start->try_offset, group_start->try_offset + group_start->try_len);
// if returns -1, resume
LLVMValueRef switch_ins = LLVMBuildSwitch (lpadBuilder, match, resume_bb, group_size);
// else move to that target bb
for (int i = 0; i < group_size; i++) {
MonoExceptionClause *clause = group_start + i;
int clause_index = clause - cfg->header->clauses;
MonoBasicBlock *handler_bb = (MonoBasicBlock*)g_hash_table_lookup (ctx->clause_to_handler, GINT_TO_POINTER (clause_index));
g_assert (handler_bb);
g_assert (ctx->bblocks [handler_bb->block_num].call_handler_target_bb);
LLVMAddCase (switch_ins, LLVMConstInt (LLVMInt32Type (), clause_index, FALSE), ctx->bblocks [handler_bb->block_num].call_handler_target_bb);
}
} else {
int clause_index = group_start - cfg->header->clauses;
MonoBasicBlock *finally_bb = (MonoBasicBlock*)g_hash_table_lookup (ctx->clause_to_handler, GINT_TO_POINTER (clause_index));
g_assert (finally_bb);
LLVMBuildBr (ctx->builder, ctx->bblocks [finally_bb->block_num].call_handler_target_bb);
}
ctx->builder = old_builder;
return lpad_bb;
}
static LLVMValueRef
create_const_vector (LLVMTypeRef t, const int *vals, int count)
{
g_assert (count <= MAX_VECTOR_ELEMS);
LLVMValueRef llvm_vals [MAX_VECTOR_ELEMS];
for (int i = 0; i < count; i++)
llvm_vals [i] = LLVMConstInt (t, vals [i], FALSE);
return LLVMConstVector (llvm_vals, count);
}
static LLVMValueRef
create_const_vector_i32 (const int *mask, int count)
{
return create_const_vector (LLVMInt32Type (), mask, count);
}
static LLVMValueRef
create_const_vector_4_i32 (int v0, int v1, int v2, int v3)
{
LLVMValueRef mask [4];
mask [0] = LLVMConstInt (LLVMInt32Type (), v0, FALSE);
mask [1] = LLVMConstInt (LLVMInt32Type (), v1, FALSE);
mask [2] = LLVMConstInt (LLVMInt32Type (), v2, FALSE);
mask [3] = LLVMConstInt (LLVMInt32Type (), v3, FALSE);
return LLVMConstVector (mask, 4);
}
static LLVMValueRef
create_const_vector_2_i32 (int v0, int v1)
{
LLVMValueRef mask [2];
mask [0] = LLVMConstInt (LLVMInt32Type (), v0, FALSE);
mask [1] = LLVMConstInt (LLVMInt32Type (), v1, FALSE);
return LLVMConstVector (mask, 2);
}
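/*
 * broadcast_element:
 *
 *   Return a COUNT element vector with every element set to ELEM, built with an
 * insertelement/shufflevector pair.
 */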
static LLVMValueRef
broadcast_element (EmitContext *ctx, LLVMValueRef elem, int count)
{
LLVMTypeRef t = LLVMTypeOf (elem);
LLVMTypeRef init_vec_t = LLVMVectorType (t, 1);
LLVMValueRef undef = LLVMGetUndef (init_vec_t);
LLVMValueRef vec = LLVMBuildInsertElement (ctx->builder, undef, elem, const_int32 (0), "");
LLVMValueRef select_zero = LLVMConstNull (LLVMVectorType (LLVMInt32Type (), count));
return LLVMBuildShuffleVector (ctx->builder, vec, undef, select_zero, "broadcast");
}
static LLVMValueRef
broadcast_constant (int const_val, LLVMTypeRef elem_t, int count)
{
int vals [MAX_VECTOR_ELEMS];
for (int i = 0; i < count; ++i)
vals [i] = const_val;
return create_const_vector (elem_t, vals, count);
}
static LLVMValueRef
create_shift_vector (EmitContext *ctx, LLVMValueRef type_donor, LLVMValueRef shiftamt)
{
LLVMTypeRef t = LLVMTypeOf (type_donor);
unsigned int elems = LLVMGetVectorSize (t);
LLVMTypeRef elem_t = LLVMGetElementType (t);
shiftamt = convert_full (ctx, shiftamt, elem_t, TRUE);
shiftamt = broadcast_element (ctx, shiftamt, elems);
return shiftamt;
}
static LLVMTypeRef
to_integral_vector_type (LLVMTypeRef t)
{
unsigned int elems = LLVMGetVectorSize (t);
LLVMTypeRef elem_t = LLVMGetElementType (t);
unsigned int bits = mono_llvm_get_prim_size_bits (elem_t);
return LLVMVectorType (LLVMIntType (bits), elems);
}
static LLVMValueRef
bitcast_to_integral (EmitContext *ctx, LLVMValueRef vec)
{
LLVMTypeRef src_t = LLVMTypeOf (vec);
LLVMTypeRef dst_t = to_integral_vector_type (src_t);
if (dst_t != src_t)
return LLVMBuildBitCast (ctx->builder, vec, dst_t, "bc2i");
return vec;
}
static LLVMValueRef
extract_high_elements (EmitContext *ctx, LLVMValueRef src_vec)
{
LLVMTypeRef src_t = LLVMTypeOf (src_vec);
unsigned int src_elems = LLVMGetVectorSize (src_t);
unsigned int dst_elems = src_elems / 2;
int mask [MAX_VECTOR_ELEMS] = { 0 };
for (int i = 0; i < dst_elems; ++i)
mask [i] = dst_elems + i;
return LLVMBuildShuffleVector (ctx->builder, src_vec, LLVMGetUndef (src_t), create_const_vector_i32 (mask, dst_elems), "extract_high");
}
static LLVMValueRef
keep_lowest_element (EmitContext *ctx, LLVMTypeRef dst_t, LLVMValueRef vec)
{
LLVMTypeRef t = LLVMTypeOf (vec);
g_assert (LLVMGetElementType (dst_t) == LLVMGetElementType (t));
unsigned int elems = LLVMGetVectorSize (dst_t);
unsigned int src_elems = LLVMGetVectorSize (t);
int mask [MAX_VECTOR_ELEMS] = { 0 };
mask [0] = 0;
for (unsigned int i = 1; i < elems; ++i)
mask [i] = src_elems;
return LLVMBuildShuffleVector (ctx->builder, vec, LLVMConstNull (t), create_const_vector_i32 (mask, elems), "keep_lowest");
}
static LLVMValueRef
concatenate_vectors (EmitContext *ctx, LLVMValueRef xs, LLVMValueRef ys)
{
LLVMTypeRef t = LLVMTypeOf (xs);
unsigned int elems = LLVMGetVectorSize (t) * 2;
int mask [MAX_VECTOR_ELEMS] = { 0 };
for (int i = 0; i < elems; ++i)
mask [i] = i;
return LLVMBuildShuffleVector (ctx->builder, xs, ys, create_const_vector_i32 (mask, elems), "concat_vecs");
}
static LLVMValueRef
scalar_from_vector (EmitContext *ctx, LLVMValueRef xs)
{
return LLVMBuildExtractElement (ctx->builder, xs, const_int32 (0), "v2s");
}
static LLVMValueRef
vector_from_scalar (EmitContext *ctx, LLVMTypeRef type, LLVMValueRef x)
{
return LLVMBuildInsertElement (ctx->builder, LLVMConstNull (type), x, const_int32 (0), "s2v");
}
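/*
 * The immediate_unroll_* helpers emit a switch over an immediate operand:
 * every case bblock computes one candidate value and branches to a shared
 * continuation bblock, where the results are merged by a phi node.
 */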
typedef struct {
EmitContext *ctx;
MonoBasicBlock *bb;
LLVMBasicBlockRef continuation;
LLVMValueRef phi;
LLVMValueRef switch_ins;
LLVMBasicBlockRef tmp_block;
LLVMBasicBlockRef default_case;
LLVMTypeRef switch_index_type;
const char *name;
int max_cases;
int i;
} ImmediateUnrollCtx;
static ImmediateUnrollCtx
immediate_unroll_begin (
EmitContext *ctx, MonoBasicBlock *bb, int max_cases,
LLVMValueRef switch_index, LLVMTypeRef return_type, const char *name)
{
LLVMBasicBlockRef default_case = gen_bb (ctx, name);
LLVMBasicBlockRef continuation = gen_bb (ctx, name);
LLVMValueRef switch_ins = LLVMBuildSwitch (ctx->builder, switch_index, default_case, max_cases);
LLVMPositionBuilderAtEnd (ctx->builder, continuation);
LLVMValueRef phi = LLVMBuildPhi (ctx->builder, return_type, name);
ImmediateUnrollCtx ictx = { 0 };
ictx.ctx = ctx;
ictx.bb = bb;
ictx.continuation = continuation;
ictx.phi = phi;
ictx.switch_ins = switch_ins;
ictx.default_case = default_case;
ictx.switch_index_type = LLVMTypeOf (switch_index);
ictx.name = name;
ictx.max_cases = max_cases;
return ictx;
}
static gboolean
immediate_unroll_next (ImmediateUnrollCtx *ictx, int *i)
{
if (ictx->i >= ictx->max_cases)
return FALSE;
ictx->tmp_block = gen_bb (ictx->ctx, ictx->name);
LLVMPositionBuilderAtEnd (ictx->ctx->builder, ictx->tmp_block);
*i = ictx->i;
++ictx->i;
return TRUE;
}
static void
immediate_unroll_commit (ImmediateUnrollCtx *ictx, int switch_const, LLVMValueRef value)
{
LLVMBuildBr (ictx->ctx->builder, ictx->continuation);
LLVMAddCase (ictx->switch_ins, LLVMConstInt (ictx->switch_index_type, switch_const, FALSE), ictx->tmp_block);
LLVMAddIncoming (ictx->phi, &value, &ictx->tmp_block, 1);
}
static void
immediate_unroll_default (ImmediateUnrollCtx *ictx)
{
LLVMPositionBuilderAtEnd (ictx->ctx->builder, ictx->default_case);
}
static void
immediate_unroll_commit_default (ImmediateUnrollCtx *ictx, LLVMValueRef value)
{
LLVMBuildBr (ictx->ctx->builder, ictx->continuation);
LLVMAddIncoming (ictx->phi, &value, &ictx->default_case, 1);
}
static void
immediate_unroll_unreachable_default (ImmediateUnrollCtx *ictx)
{
immediate_unroll_default (ictx);
LLVMBuildUnreachable (ictx->ctx->builder);
}
static LLVMValueRef
immediate_unroll_end (ImmediateUnrollCtx *ictx, LLVMBasicBlockRef *continuation)
{
EmitContext *ctx = ictx->ctx;
LLVMBuilderRef builder = ctx->builder;
LLVMPositionBuilderAtEnd (builder, ictx->continuation);
*continuation = ictx->continuation;
ctx->bblocks [ictx->bb->block_num].end_bblock = ictx->continuation;
return ictx->phi;
}
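/*
 * The scalar_op_from_vector_op* helpers emit an operation on the lowest element
 * of a vector. For 8/16 bit integer element types on arm64 a "fake" scalar op
 * is used instead: the operation stays in vector form and every result lane
 * except the lowest one is zeroed out.
 */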
typedef struct {
EmitContext *ctx;
LLVMTypeRef intermediate_type;
LLVMTypeRef return_type;
gboolean needs_fake_scalar_op;
llvm_ovr_tag_t ovr_tag;
} ScalarOpFromVectorOpCtx;
static inline gboolean
check_needs_fake_scalar_op (MonoTypeEnum type)
{
#if defined(TARGET_ARM64)
switch (type) {
case MONO_TYPE_U1:
case MONO_TYPE_I1:
case MONO_TYPE_U2:
case MONO_TYPE_I2:
return TRUE;
}
#endif
return FALSE;
}
static ScalarOpFromVectorOpCtx
scalar_op_from_vector_op (EmitContext *ctx, LLVMTypeRef return_type, MonoInst *ins)
{
ScalarOpFromVectorOpCtx ret = { 0 };
ret.ctx = ctx;
ret.intermediate_type = return_type;
ret.return_type = return_type;
ret.needs_fake_scalar_op = check_needs_fake_scalar_op (inst_c1_type (ins));
ret.ovr_tag = ovr_tag_from_llvm_type (return_type);
if (!ret.needs_fake_scalar_op) {
ret.ovr_tag = ovr_tag_force_scalar (ret.ovr_tag);
ret.intermediate_type = ovr_tag_to_llvm_type (ret.ovr_tag);
}
return ret;
}
static void
scalar_op_from_vector_op_process_args (ScalarOpFromVectorOpCtx *sctx, LLVMValueRef *args, int num_args)
{
if (!sctx->needs_fake_scalar_op)
for (int i = 0; i < num_args; ++i)
args [i] = scalar_from_vector (sctx->ctx, args [i]);
}
static LLVMValueRef
scalar_op_from_vector_op_process_result (ScalarOpFromVectorOpCtx *sctx, LLVMValueRef result)
{
if (sctx->needs_fake_scalar_op)
return keep_lowest_element (sctx->ctx, LLVMTypeOf (result), result);
return vector_from_scalar (sctx->ctx, sctx->return_type, result);
}
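/*
 * emit_llvmonly_handler_start:
 *
 *   Emit the start of a catch/finally handler in llvmonly mode: store the
 * currently thrown exception into the IL level exception variable, clear it,
 * and make the handler code branch to CBB.
 */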
static void
emit_llvmonly_handler_start (EmitContext *ctx, MonoBasicBlock *bb, LLVMBasicBlockRef cbb)
{
int clause_index = MONO_REGION_CLAUSE_INDEX (bb->region);
MonoExceptionClause *clause = &ctx->cfg->header->clauses [clause_index];
// Make exception available to catch blocks
if (!(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY || clause->flags & MONO_EXCEPTION_CLAUSE_FAULT)) {
LLVMValueRef mono_exc = mono_llvm_emit_load_exception_call (ctx, ctx->builder);
g_assert (ctx->ex_var);
LLVMBuildStore (ctx->builder, LLVMBuildBitCast (ctx->builder, mono_exc, ObjRefType (), ""), ctx->ex_var);
if (bb->in_scount == 1) {
MonoInst *exvar = bb->in_stack [0];
g_assert (!ctx->values [exvar->dreg]);
g_assert (ctx->ex_var);
ctx->values [exvar->dreg] = LLVMBuildLoad (ctx->builder, ctx->ex_var, "save_exception");
emit_volatile_store (ctx, exvar->dreg);
}
mono_llvm_emit_clear_exception_call (ctx, ctx->builder);
}
#ifdef TARGET_WASM
if (ctx->cfg->lmf_var && !ctx->cfg->deopt) {
LLVMValueRef callee;
LLVMValueRef args [1];
LLVMTypeRef sig = LLVMFunctionType1 (LLVMVoidType (), ctx->module->ptr_type, FALSE);
/*
* There might be an LMF on the stack inserted to enable stack walking, see
* method_needs_stack_walk (). If an exception is thrown, the LMF popping code
* is not executed, so do it here.
*/
g_assert (ctx->addresses [ctx->cfg->lmf_var->dreg]);
callee = get_callee (ctx, sig, MONO_PATCH_INFO_JIT_ICALL_ADDR, GUINT_TO_POINTER (MONO_JIT_ICALL_mini_llvmonly_pop_lmf));
args [0] = convert (ctx, ctx->addresses [ctx->cfg->lmf_var->dreg], ctx->module->ptr_type);
emit_call (ctx, bb, &ctx->builder, callee, args, 1);
}
#endif
LLVMBuilderRef handler_builder = create_builder (ctx);
LLVMBasicBlockRef target_bb = ctx->bblocks [bb->block_num].call_handler_target_bb;
LLVMPositionBuilderAtEnd (handler_builder, target_bb);
// Make the handler code end with a jump to cbb
LLVMBuildBr (handler_builder, cbb);
}
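/*
 * emit_handler_start:
 *
 *   Emit a landingpad instruction for the clause containing BB, plus a switch
 * on the exception selector value to dispatch to the landing pads of nested
 * clauses.
 */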
static void
emit_handler_start (EmitContext *ctx, MonoBasicBlock *bb, LLVMBuilderRef builder)
{
MonoCompile *cfg = ctx->cfg;
LLVMValueRef *values = ctx->values;
LLVMModuleRef lmodule = ctx->lmodule;
BBInfo *bblocks = ctx->bblocks;
LLVMTypeRef i8ptr;
LLVMValueRef personality;
LLVMValueRef landing_pad;
LLVMBasicBlockRef target_bb;
MonoInst *exvar;
static int ti_generator;
char ti_name [128];
LLVMValueRef type_info;
int clause_index;
GSList *l;
// <resultval> = landingpad <somety> personality <type> <pers_fn> <clause>+
if (cfg->compile_aot) {
/* Use a dummy personality function */
personality = LLVMGetNamedFunction (lmodule, "mono_personality");
g_assert (personality);
} else {
/* Can't cache this as each method is in its own llvm module */
LLVMTypeRef personality_type = LLVMFunctionType (LLVMInt32Type (), NULL, 0, TRUE);
personality = LLVMAddFunction (ctx->lmodule, "mono_personality", personality_type);
mono_llvm_add_func_attr (personality, LLVM_ATTR_NO_UNWIND);
LLVMBasicBlockRef entry_bb = LLVMAppendBasicBlock (personality, "ENTRY");
LLVMBuilderRef builder2 = LLVMCreateBuilder ();
LLVMPositionBuilderAtEnd (builder2, entry_bb);
LLVMBuildRet (builder2, LLVMConstInt (LLVMInt32Type (), 0, FALSE));
LLVMDisposeBuilder (builder2);
}
i8ptr = LLVMPointerType (LLVMInt8Type (), 0);
clause_index = (mono_get_block_region_notry (cfg, bb->region) >> 8) - 1;
/*
* Create the type info
*/
sprintf (ti_name, "type_info_%d", ti_generator);
ti_generator ++;
if (cfg->compile_aot) {
/* decode_eh_frame () in aot-runtime.c will decode this */
type_info = LLVMAddGlobal (lmodule, LLVMInt32Type (), ti_name);
LLVMSetInitializer (type_info, LLVMConstInt (LLVMInt32Type (), clause_index, FALSE));
/*
 * These symbols are not really used; the clause_index is embedded into the EH tables generated by DwarfMonoException in LLVM.
*/
LLVMSetLinkage (type_info, LLVMInternalLinkage);
} else {
type_info = LLVMAddGlobal (lmodule, LLVMInt32Type (), ti_name);
LLVMSetInitializer (type_info, LLVMConstInt (LLVMInt32Type (), clause_index, FALSE));
}
{
LLVMTypeRef members [2], ret_type;
members [0] = i8ptr;
members [1] = LLVMInt32Type ();
ret_type = LLVMStructType (members, 2, FALSE);
landing_pad = LLVMBuildLandingPad (builder, ret_type, personality, 1, "");
LLVMAddClause (landing_pad, type_info);
/* Store the exception into the exvar */
if (ctx->ex_var)
LLVMBuildStore (builder, convert (ctx, LLVMBuildExtractValue (builder, landing_pad, 0, "ex_obj"), ObjRefType ()), ctx->ex_var);
}
/*
* LLVM throw sites are associated with one landing pad, and LLVM-generated
* code expects control to be transferred to this landing pad even in the
* presence of nested clauses. The landing pad needs to branch to the landing
* pads belonging to nested clauses based on the selector value returned by
* the landing pad instruction, which is passed to the landing pad in a
* register by the EH code.
*/
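/*
* A rough sketch of the IR this builds (names are illustrative):
*   %lpad = landingpad { i8*, i32 } personality ... catch i32* @type_info_N
*   %sel  = extractvalue { i8*, i32 } %lpad, 1
*   switch i32 %sel, label %this_clause [ i32 <nested_index>, label %nested_clause ... ]
*/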
target_bb = bblocks [bb->block_num].call_handler_target_bb;
g_assert (target_bb);
/*
* Branch to the correct landing pad
*/
LLVMValueRef ex_selector = LLVMBuildExtractValue (builder, landing_pad, 1, "ex_selector");
LLVMValueRef switch_ins = LLVMBuildSwitch (builder, ex_selector, target_bb, 0);
for (l = ctx->nested_in [clause_index]; l; l = l->next) {
int nesting_clause_index = GPOINTER_TO_INT (l->data);
MonoBasicBlock *handler_bb;
handler_bb = (MonoBasicBlock*)g_hash_table_lookup (ctx->clause_to_handler, GINT_TO_POINTER (nesting_clause_index));
g_assert (handler_bb);
g_assert (ctx->bblocks [handler_bb->block_num].call_handler_target_bb);
LLVMAddCase (switch_ins, LLVMConstInt (LLVMInt32Type (), nesting_clause_index, FALSE), ctx->bblocks [handler_bb->block_num].call_handler_target_bb);
}
/* Start a new bblock which CALL_HANDLER can branch to */
ctx->builder = builder = create_builder (ctx);
LLVMPositionBuilderAtEnd (ctx->builder, target_bb);
ctx->bblocks [bb->block_num].end_bblock = target_bb;
/* Store the exception into the IL level exvar */
if (bb->in_scount == 1) {
g_assert (bb->in_scount == 1);
exvar = bb->in_stack [0];
// FIXME: This is shared with filter clauses ?
g_assert (!values [exvar->dreg]);
g_assert (ctx->ex_var);
values [exvar->dreg] = LLVMBuildLoad (builder, ctx->ex_var, "");
emit_volatile_store (ctx, exvar->dreg);
}
/* Make normal branches to the start of the clause branch to the new bblock */
bblocks [bb->block_num].bblock = target_bb;
}
static LLVMValueRef
get_double_const (MonoCompile *cfg, double val)
{
//#ifdef TARGET_WASM
#if 0
//Wasm requires us to canonicalize NaNs.
if (mono_isnan (val))
*(gint64 *)&val = 0x7FF8000000000000ll;
#endif
return LLVMConstReal (LLVMDoubleType (), val);
}
static LLVMValueRef
get_float_const (MonoCompile *cfg, float val)
{
//#ifdef TARGET_WASM
#if 0
if (mono_isnan (val))
*(int *)&val = 0x7FC00000;
#endif
if (cfg->r4fp)
return LLVMConstReal (LLVMFloatType (), val);
else
return LLVMConstFPExt (LLVMConstReal (LLVMFloatType (), val), LLVMDoubleType ());
}
static LLVMValueRef
call_overloaded_intrins (EmitContext *ctx, int id, llvm_ovr_tag_t ovr_tag, LLVMValueRef *args, const char *name)
{
int key = key_from_id_and_tag (id, ovr_tag);
LLVMValueRef intrins = get_intrins (ctx, key);
int nargs = LLVMCountParamTypes (LLVMGetElementType (LLVMTypeOf (intrins)));
for (int i = 0; i < nargs; ++i) {
LLVMTypeRef t1 = LLVMTypeOf (args [i]);
LLVMTypeRef t2 = LLVMTypeOf (LLVMGetParam (intrins, i));
if (t1 != t2)
args [i] = convert (ctx, args [i], t2);
}
return LLVMBuildCall (ctx->builder, intrins, args, nargs, name);
}
static LLVMValueRef
call_intrins (EmitContext *ctx, int id, LLVMValueRef *args, const char *name)
{
return call_overloaded_intrins (ctx, id, 0, args, name);
}
static void
process_bb (EmitContext *ctx, MonoBasicBlock *bb)
{
MonoCompile *cfg = ctx->cfg;
MonoMethodSignature *sig = ctx->sig;
LLVMValueRef method = ctx->lmethod;
LLVMValueRef *values = ctx->values;
LLVMValueRef *addresses = ctx->addresses;
LLVMCallInfo *linfo = ctx->linfo;
BBInfo *bblocks = ctx->bblocks;
MonoInst *ins;
LLVMBasicBlockRef cbb;
LLVMBuilderRef builder;
gboolean has_terminator;
LLVMValueRef v;
LLVMValueRef lhs, rhs, arg3;
int nins = 0;
cbb = get_end_bb (ctx, bb);
builder = create_builder (ctx);
ctx->builder = builder;
LLVMPositionBuilderAtEnd (builder, cbb);
if (!ctx_ok (ctx))
return;
if (cfg->interp_entry_only && bb != cfg->bb_init && bb != cfg->bb_entry && bb != cfg->bb_exit) {
/* The interp entry code is in bb_entry, skip the rest as we might not be able to compile it */
LLVMBuildUnreachable (builder);
return;
}
if (bb->flags & BB_EXCEPTION_HANDLER) {
if (!ctx->llvm_only && !bblocks [bb->block_num].invoke_target) {
set_failure (ctx, "handler without invokes");
return;
}
if (ctx->llvm_only)
emit_llvmonly_handler_start (ctx, bb, cbb);
else
emit_handler_start (ctx, bb, builder);
if (!ctx_ok (ctx))
return;
builder = ctx->builder;
}
/* Handle PHI nodes first */
/* They should be grouped at the start of the bb */
for (ins = bb->code; ins; ins = ins->next) {
emit_dbg_loc (ctx, builder, ins->cil_code);
if (ins->opcode == OP_NOP)
continue;
if (!MONO_IS_PHI (ins))
break;
if (cfg->interp_entry_only)
break;
int i;
gboolean empty = TRUE;
/* Check that all input bblocks really branch to us */
for (i = 0; i < bb->in_count; ++i) {
if (bb->in_bb [i]->last_ins && bb->in_bb [i]->last_ins->opcode == OP_NOT_REACHED)
ins->inst_phi_args [i + 1] = -1;
else
empty = FALSE;
}
if (empty) {
/* LLVM doesn't like phi instructions with zero operands */
ctx->is_dead [ins->dreg] = TRUE;
continue;
}
/* Created earlier, insert it now */
LLVMInsertIntoBuilder (builder, values [ins->dreg]);
for (i = 0; i < ins->inst_phi_args [0]; i++) {
int sreg1 = ins->inst_phi_args [i + 1];
int count, j;
/*
* Count the number of times the incoming bblock branches to us,
* since llvm requires a separate entry for each.
*/
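/*
* E.g. a predecessor ending in a switch with two case arms that both target
* this bblock contributes two identical incoming entries to the phi.
*/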
if (bb->in_bb [i]->last_ins && bb->in_bb [i]->last_ins->opcode == OP_SWITCH) {
MonoInst *switch_ins = bb->in_bb [i]->last_ins;
count = 0;
for (j = 0; j < GPOINTER_TO_UINT (switch_ins->klass); ++j) {
if (switch_ins->inst_many_bb [j] == bb)
count ++;
}
} else {
count = 1;
}
/* Remember for later */
for (j = 0; j < count; ++j) {
PhiNode *node = (PhiNode*)mono_mempool_alloc0 (ctx->mempool, sizeof (PhiNode));
node->bb = bb;
node->phi = ins;
node->in_bb = bb->in_bb [i];
node->sreg = sreg1;
bblocks [bb->in_bb [i]->block_num].phi_nodes = g_slist_prepend_mempool (ctx->mempool, bblocks [bb->in_bb [i]->block_num].phi_nodes, node);
}
}
}
// Add volatile stores for PHI nodes
// These need to be emitted after the PHI nodes
for (ins = bb->code; ins; ins = ins->next) {
const char *spec = LLVM_INS_INFO (ins->opcode);
if (ins->opcode == OP_NOP)
continue;
if (!MONO_IS_PHI (ins))
break;
if (spec [MONO_INST_DEST] != 'v')
emit_volatile_store (ctx, ins->dreg);
}
has_terminator = FALSE;
for (ins = bb->code; ins; ins = ins->next) {
const char *spec = LLVM_INS_INFO (ins->opcode);
char *dname = NULL;
char dname_buf [128];
emit_dbg_loc (ctx, builder, ins->cil_code);
nins ++;
if (nins > 1000) {
/*
* Some steps in llc are non-linear in the size of basic blocks, see #5714.
* Start a new bblock.
* Prevent the bblocks from being merged by doing a volatile load + cond branch
* from localloc-ed memory.
*/
if (!cfg->llvm_only)
;//set_failure (ctx, "basic block too long");
if (!ctx->long_bb_break_var) {
ctx->long_bb_break_var = build_alloca_llvm_type_name (ctx, LLVMInt32Type (), 0, "long_bb_break");
mono_llvm_build_store (ctx->alloca_builder, LLVMConstInt (LLVMInt32Type (), 0, FALSE), ctx->long_bb_break_var, TRUE, LLVM_BARRIER_NONE);
}
cbb = gen_bb (ctx, "CONT_LONG_BB");
LLVMBasicBlockRef dummy_bb = gen_bb (ctx, "CONT_LONG_BB_DUMMY");
LLVMValueRef load = mono_llvm_build_load (builder, ctx->long_bb_break_var, "", TRUE);
/*
* The long_bb_break_var is initialized to 0 in the prolog, so this branch will always go to 'cbb'
* but llvm doesn't know that, so the branch is not going to be eliminated.
*/
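/*
* Illustrative shape of the split (long_bb_break_var is always 0 at runtime):
*   %v = load volatile i32, i32* %long_bb_break
*   %c = icmp eq i32 %v, 0
*   br i1 %c, label %CONT_LONG_BB, label %CONT_LONG_BB_DUMMY
*/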
LLVMValueRef cmp = LLVMBuildICmp (builder, LLVMIntEQ, load, LLVMConstInt (LLVMInt32Type (), 0, FALSE), "");
LLVMBuildCondBr (builder, cmp, cbb, dummy_bb);
/* Emit a dummy false bblock which does nothing but contains a volatile store so it cannot be eliminated */
ctx->builder = builder = create_builder (ctx);
LLVMPositionBuilderAtEnd (builder, dummy_bb);
mono_llvm_build_store (builder, LLVMConstInt (LLVMInt32Type (), 1, FALSE), ctx->long_bb_break_var, TRUE, LLVM_BARRIER_NONE);
LLVMBuildBr (builder, cbb);
ctx->builder = builder = create_builder (ctx);
LLVMPositionBuilderAtEnd (builder, cbb);
ctx->bblocks [bb->block_num].end_bblock = cbb;
nins = 0;
emit_dbg_loc (ctx, builder, ins->cil_code);
}
if (has_terminator)
/* There could be instructions after a terminator, skip them */
break;
if (spec [MONO_INST_DEST] != ' ' && !MONO_IS_STORE_MEMBASE (ins)) {
sprintf (dname_buf, "t%d", ins->dreg);
dname = dname_buf;
}
if (spec [MONO_INST_SRC1] != ' ' && spec [MONO_INST_SRC1] != 'v') {
MonoInst *var = get_vreg_to_inst (cfg, ins->sreg1);
if (var && var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT) && var->opcode != OP_GSHAREDVT_ARG_REGOFFSET) {
lhs = emit_volatile_load (ctx, ins->sreg1);
} else {
/* It is ok for SETRET to have an uninitialized argument */
if (!values [ins->sreg1] && ins->opcode != OP_SETRET) {
set_failure (ctx, "sreg1");
return;
}
lhs = values [ins->sreg1];
}
} else {
lhs = NULL;
}
if (spec [MONO_INST_SRC2] != ' ' && spec [MONO_INST_SRC2] != 'v') {
MonoInst *var = get_vreg_to_inst (cfg, ins->sreg2);
if (var && var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) {
rhs = emit_volatile_load (ctx, ins->sreg2);
} else {
if (!values [ins->sreg2]) {
set_failure (ctx, "sreg2");
return;
}
rhs = values [ins->sreg2];
}
} else {
rhs = NULL;
}
if (spec [MONO_INST_SRC3] != ' ' && spec [MONO_INST_SRC3] != 'v') {
MonoInst *var = get_vreg_to_inst (cfg, ins->sreg3);
if (var && var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) {
arg3 = emit_volatile_load (ctx, ins->sreg3);
} else {
if (!values [ins->sreg3]) {
set_failure (ctx, "sreg3");
return;
}
arg3 = values [ins->sreg3];
}
} else {
arg3 = NULL;
}
//mono_print_ins (ins);
gboolean skip_volatile_store = FALSE;
switch (ins->opcode) {
case OP_NOP:
case OP_NOT_NULL:
case OP_LIVERANGE_START:
case OP_LIVERANGE_END:
break;
case OP_ICONST:
values [ins->dreg] = LLVMConstInt (LLVMInt32Type (), ins->inst_c0, FALSE);
break;
case OP_I8CONST:
#if TARGET_SIZEOF_VOID_P == 4
values [ins->dreg] = LLVMConstInt (LLVMInt64Type (), GET_LONG_IMM (ins), FALSE);
#else
values [ins->dreg] = LLVMConstInt (LLVMInt64Type (), (gint64)ins->inst_c0, FALSE);
#endif
break;
case OP_R8CONST:
values [ins->dreg] = get_double_const (cfg, *(double*)ins->inst_p0);
break;
case OP_R4CONST:
values [ins->dreg] = get_float_const (cfg, *(float*)ins->inst_p0);
break;
case OP_DUMMY_ICONST:
values [ins->dreg] = LLVMConstInt (LLVMInt32Type (), 0, FALSE);
break;
case OP_DUMMY_I8CONST:
values [ins->dreg] = LLVMConstInt (LLVMInt64Type (), 0, FALSE);
break;
case OP_DUMMY_R8CONST:
values [ins->dreg] = LLVMConstReal (LLVMDoubleType (), 0.0);
break;
case OP_BR: {
LLVMBasicBlockRef target_bb = get_bb (ctx, ins->inst_target_bb);
LLVMBuildBr (builder, target_bb);
has_terminator = TRUE;
break;
}
case OP_SWITCH: {
int i;
LLVMValueRef v;
char bb_name [128];
LLVMBasicBlockRef new_bb;
LLVMBuilderRef new_builder;
// The default branch is already handled
// FIXME: Handle it here
/* Start new bblock */
sprintf (bb_name, "SWITCH_DEFAULT_BB%d", ctx->default_index ++);
new_bb = LLVMAppendBasicBlock (ctx->lmethod, bb_name);
lhs = convert (ctx, lhs, LLVMInt32Type ());
v = LLVMBuildSwitch (builder, lhs, new_bb, GPOINTER_TO_UINT (ins->klass));
for (i = 0; i < GPOINTER_TO_UINT (ins->klass); ++i) {
MonoBasicBlock *target_bb = ins->inst_many_bb [i];
LLVMAddCase (v, LLVMConstInt (LLVMInt32Type (), i, FALSE), get_bb (ctx, target_bb));
}
new_builder = create_builder (ctx);
LLVMPositionBuilderAtEnd (new_builder, new_bb);
LLVMBuildUnreachable (new_builder);
has_terminator = TRUE;
g_assert (!ins->next);
break;
}
case OP_SETRET:
switch (linfo->ret.storage) {
case LLVMArgNormal:
case LLVMArgVtypeInReg:
case LLVMArgVtypeAsScalar:
case LLVMArgWasmVtypeAsScalar: {
LLVMTypeRef ret_type = LLVMGetReturnType (LLVMGetElementType (LLVMTypeOf (method)));
LLVMValueRef retval = LLVMGetUndef (ret_type);
gboolean src_in_reg = FALSE;
gboolean is_simd = MONO_CLASS_IS_SIMD (ctx->cfg, mono_class_from_mono_type_internal (sig->ret));
switch (linfo->ret.storage) {
case LLVMArgNormal: src_in_reg = TRUE; break;
case LLVMArgVtypeInReg: case LLVMArgVtypeAsScalar: src_in_reg = is_simd; break;
}
if (src_in_reg && (!lhs || ctx->is_dead [ins->sreg1])) {
/*
* The method did not set its return value, probably because it
* ends with a throw.
*/
LLVMBuildRet (builder, retval);
break;
}
switch (linfo->ret.storage) {
case LLVMArgNormal:
retval = convert (ctx, lhs, type_to_llvm_type (ctx, sig->ret));
break;
case LLVMArgVtypeInReg:
if (is_simd) {
/* The return type is an LLVM aggregate type, so a bare bitcast cannot be used to do this conversion. */
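/* E.g. on a 64-bit target, a 16-byte SIMD value returned as { i64, i64 }: bitcast to <2 x i64>, then insertvalue each lane. */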
int width = mono_type_size (sig->ret, NULL);
int elems = width / TARGET_SIZEOF_VOID_P;
/* The return value might not be set if there is a throw */
LLVMValueRef val = LLVMBuildBitCast (builder, lhs, LLVMVectorType (IntPtrType (), elems), "");
for (int i = 0; i < elems; ++i) {
LLVMValueRef element = LLVMBuildExtractElement (builder, val, const_int32 (i), "");
retval = LLVMBuildInsertValue (builder, retval, element, i, "setret_simd_vtype_in_reg");
}
} else {
LLVMValueRef addr = LLVMBuildBitCast (builder, addresses [ins->sreg1], LLVMPointerType (ret_type, 0), "");
for (int i = 0; i < 2; ++i) {
if (linfo->ret.pair_storage [i] == LLVMArgInIReg) {
LLVMValueRef indexes [2], part_addr;
indexes [0] = LLVMConstInt (LLVMInt32Type (), 0, FALSE);
indexes [1] = LLVMConstInt (LLVMInt32Type (), i, FALSE);
part_addr = LLVMBuildGEP (builder, addr, indexes, 2, "");
retval = LLVMBuildInsertValue (builder, retval, LLVMBuildLoad (builder, part_addr, ""), i, "");
} else {
g_assert (linfo->ret.pair_storage [i] == LLVMArgNone);
}
}
}
break;
case LLVMArgVtypeAsScalar:
if (is_simd) {
retval = LLVMBuildBitCast (builder, values [ins->sreg1], ret_type, "setret_simd_vtype_as_scalar");
} else {
g_assert (addresses [ins->sreg1]);
retval = LLVMBuildLoad (builder, LLVMBuildBitCast (builder, addresses [ins->sreg1], LLVMPointerType (ret_type, 0), ""), "");
}
break;
case LLVMArgWasmVtypeAsScalar:
g_assert (addresses [ins->sreg1]);
retval = LLVMBuildLoad (builder, LLVMBuildBitCast (builder, addresses [ins->sreg1], LLVMPointerType (ret_type, 0), ""), "");
break;
}
LLVMBuildRet (builder, retval);
break;
}
case LLVMArgVtypeByRef: {
LLVMBuildRetVoid (builder);
break;
}
case LLVMArgGsharedvtFixed: {
LLVMTypeRef ret_type = type_to_llvm_type (ctx, sig->ret);
/* The return value is in lhs, need to store to the vret argument */
/* sreg1 might not be set */
if (lhs) {
g_assert (cfg->vret_addr);
g_assert (values [cfg->vret_addr->dreg]);
LLVMBuildStore (builder, convert (ctx, lhs, ret_type), convert (ctx, values [cfg->vret_addr->dreg], LLVMPointerType (ret_type, 0)));
}
LLVMBuildRetVoid (builder);
break;
}
case LLVMArgGsharedvtFixedVtype: {
/* Already set */
LLVMBuildRetVoid (builder);
break;
}
case LLVMArgGsharedvtVariable: {
/* Already set */
LLVMBuildRetVoid (builder);
break;
}
case LLVMArgVtypeRetAddr: {
LLVMBuildRetVoid (builder);
break;
}
case LLVMArgAsIArgs:
case LLVMArgFpStruct: {
LLVMTypeRef ret_type = LLVMGetReturnType (LLVMGetElementType (LLVMTypeOf (method)));
LLVMValueRef retval;
g_assert (addresses [ins->sreg1]);
retval = LLVMBuildLoad (builder, convert (ctx, addresses [ins->sreg1], LLVMPointerType (ret_type, 0)), "");
LLVMBuildRet (builder, retval);
break;
}
case LLVMArgNone:
LLVMBuildRetVoid (builder);
break;
default:
g_assert_not_reached ();
break;
}
has_terminator = TRUE;
break;
case OP_ICOMPARE:
case OP_FCOMPARE:
case OP_RCOMPARE:
case OP_LCOMPARE:
case OP_COMPARE:
case OP_ICOMPARE_IMM:
case OP_LCOMPARE_IMM:
case OP_COMPARE_IMM: {
CompRelation rel;
LLVMValueRef cmp, args [16];
gboolean likely = (ins->flags & MONO_INST_LIKELY) != 0;
gboolean unlikely = FALSE;
if (MONO_IS_COND_BRANCH_OP (ins->next)) {
if (ins->next->inst_false_bb->out_of_line)
likely = TRUE;
else if (ins->next->inst_true_bb->out_of_line)
unlikely = TRUE;
}
if (ins->next->opcode == OP_NOP)
break;
if (ins->next->opcode == OP_BR)
/* The comparison result is not needed */
continue;
rel = mono_opcode_to_cond (ins->next->opcode);
if (ins->opcode == OP_ICOMPARE_IMM) {
lhs = convert (ctx, lhs, LLVMInt32Type ());
rhs = LLVMConstInt (LLVMInt32Type (), ins->inst_imm, FALSE);
}
if (ins->opcode == OP_LCOMPARE_IMM) {
lhs = convert (ctx, lhs, LLVMInt64Type ());
rhs = LLVMConstInt (LLVMInt64Type (), GET_LONG_IMM (ins), FALSE);
}
if (ins->opcode == OP_LCOMPARE) {
lhs = convert (ctx, lhs, LLVMInt64Type ());
rhs = convert (ctx, rhs, LLVMInt64Type ());
}
if (ins->opcode == OP_ICOMPARE) {
lhs = convert (ctx, lhs, LLVMInt32Type ());
rhs = convert (ctx, rhs, LLVMInt32Type ());
}
if (lhs && rhs) {
if (LLVMGetTypeKind (LLVMTypeOf (lhs)) == LLVMPointerTypeKind)
rhs = convert (ctx, rhs, LLVMTypeOf (lhs));
else if (LLVMGetTypeKind (LLVMTypeOf (rhs)) == LLVMPointerTypeKind)
lhs = convert (ctx, lhs, LLVMTypeOf (rhs));
}
/* We use COMPARE+SETcc/Bcc, llvm uses SETcc+br cond */
if (ins->opcode == OP_FCOMPARE) {
cmp = LLVMBuildFCmp (builder, fpcond_to_llvm_cond [rel], convert (ctx, lhs, LLVMDoubleType ()), convert (ctx, rhs, LLVMDoubleType ()), "");
} else if (ins->opcode == OP_RCOMPARE) {
cmp = LLVMBuildFCmp (builder, fpcond_to_llvm_cond [rel], convert (ctx, lhs, LLVMFloatType ()), convert (ctx, rhs, LLVMFloatType ()), "");
} else if (ins->opcode == OP_COMPARE_IMM) {
LLVMIntPredicate llvm_pred = cond_to_llvm_cond [rel];
if (LLVMGetTypeKind (LLVMTypeOf (lhs)) == LLVMPointerTypeKind && ins->inst_imm == 0) {
// We are emitting a NULL check for a pointer
gboolean nonnull = mono_llvm_is_nonnull (lhs);
if (nonnull && llvm_pred == LLVMIntEQ)
cmp = LLVMConstInt (LLVMInt1Type (), FALSE, FALSE);
else if (nonnull && llvm_pred == LLVMIntNE)
cmp = LLVMConstInt (LLVMInt1Type (), TRUE, FALSE);
else
cmp = LLVMBuildICmp (builder, llvm_pred, lhs, LLVMConstNull (LLVMTypeOf (lhs)), "");
} else {
cmp = LLVMBuildICmp (builder, llvm_pred, convert (ctx, lhs, IntPtrType ()), LLVMConstInt (IntPtrType (), ins->inst_imm, FALSE), "");
}
} else if (ins->opcode == OP_LCOMPARE_IMM) {
cmp = LLVMBuildICmp (builder, cond_to_llvm_cond [rel], lhs, rhs, "");
}
else if (ins->opcode == OP_COMPARE) {
if (LLVMGetTypeKind (LLVMTypeOf (lhs)) == LLVMPointerTypeKind && LLVMTypeOf (lhs) == LLVMTypeOf (rhs))
cmp = LLVMBuildICmp (builder, cond_to_llvm_cond [rel], lhs, rhs, "");
else
cmp = LLVMBuildICmp (builder, cond_to_llvm_cond [rel], convert (ctx, lhs, IntPtrType ()), convert (ctx, rhs, IntPtrType ()), "");
} else
cmp = LLVMBuildICmp (builder, cond_to_llvm_cond [rel], lhs, rhs, "");
if (likely || unlikely) {
args [0] = cmp;
args [1] = LLVMConstInt (LLVMInt1Type (), likely ? 1 : 0, FALSE);
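/* llvm.expect returns its first argument unchanged; it only records a branch-probability hint */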
cmp = call_intrins (ctx, INTRINS_EXPECT_I1, args, "");
}
if (MONO_IS_COND_BRANCH_OP (ins->next)) {
if (ins->next->inst_true_bb == ins->next->inst_false_bb) {
/*
* If the target bb contains PHI instructions, LLVM requires
* two PHI entries for this bblock, while we only generate one.
* So convert this to an unconditional bblock. (bxc #171).
*/
LLVMBuildBr (builder, get_bb (ctx, ins->next->inst_true_bb));
} else {
LLVMBuildCondBr (builder, cmp, get_bb (ctx, ins->next->inst_true_bb), get_bb (ctx, ins->next->inst_false_bb));
}
has_terminator = TRUE;
} else if (MONO_IS_SETCC (ins->next)) {
sprintf (dname_buf, "t%d", ins->next->dreg);
dname = dname_buf;
values [ins->next->dreg] = LLVMBuildZExt (builder, cmp, LLVMInt32Type (), dname);
/* Add stores for volatile variables */
emit_volatile_store (ctx, ins->next->dreg);
} else if (MONO_IS_COND_EXC (ins->next)) {
gboolean force_explicit_branch = FALSE;
if (bb->region != -1) {
/* Don't tag null check branches in exception-handling
* regions with `make.implicit`.
*/
force_explicit_branch = TRUE;
}
emit_cond_system_exception (ctx, bb, (const char*)ins->next->inst_p1, cmp, force_explicit_branch);
if (!ctx_ok (ctx))
break;
builder = ctx->builder;
} else {
set_failure (ctx, "next");
break;
}
ins = ins->next;
break;
}
case OP_FCEQ:
case OP_FCNEQ:
case OP_FCLT:
case OP_FCLT_UN:
case OP_FCGT:
case OP_FCGT_UN:
case OP_FCGE:
case OP_FCLE: {
CompRelation rel;
LLVMValueRef cmp;
rel = mono_opcode_to_cond (ins->opcode);
cmp = LLVMBuildFCmp (builder, fpcond_to_llvm_cond [rel], convert (ctx, lhs, LLVMDoubleType ()), convert (ctx, rhs, LLVMDoubleType ()), "");
values [ins->dreg] = LLVMBuildZExt (builder, cmp, LLVMInt32Type (), dname);
break;
}
case OP_RCEQ:
case OP_RCNEQ:
case OP_RCLT:
case OP_RCLT_UN:
case OP_RCGT:
case OP_RCGT_UN: {
CompRelation rel;
LLVMValueRef cmp;
rel = mono_opcode_to_cond (ins->opcode);
cmp = LLVMBuildFCmp (builder, fpcond_to_llvm_cond [rel], convert (ctx, lhs, LLVMFloatType ()), convert (ctx, rhs, LLVMFloatType ()), "");
values [ins->dreg] = LLVMBuildZExt (builder, cmp, LLVMInt32Type (), dname);
break;
}
case OP_PHI:
case OP_FPHI:
case OP_VPHI:
case OP_XPHI: {
// Handled above
skip_volatile_store = TRUE;
break;
}
case OP_MOVE:
case OP_LMOVE:
case OP_XMOVE:
case OP_SETFRET:
g_assert (lhs);
values [ins->dreg] = lhs;
break;
case OP_FMOVE:
case OP_RMOVE: {
MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
g_assert (lhs);
values [ins->dreg] = lhs;
if (var && m_class_get_byval_arg (var->klass)->type == MONO_TYPE_R4) {
/*
* This is added by the spilling pass in the JIT case,
* but here we have to do it ourselves.
*/
values [ins->dreg] = convert (ctx, values [ins->dreg], LLVMFloatType ());
}
break;
}
case OP_MOVE_F_TO_I4: {
values [ins->dreg] = LLVMBuildBitCast (builder, LLVMBuildFPTrunc (builder, lhs, LLVMFloatType (), ""), LLVMInt32Type (), "");
break;
}
case OP_MOVE_I4_TO_F: {
values [ins->dreg] = LLVMBuildFPExt (builder, LLVMBuildBitCast (builder, lhs, LLVMFloatType (), ""), LLVMDoubleType (), "");
break;
}
case OP_MOVE_F_TO_I8: {
values [ins->dreg] = LLVMBuildBitCast (builder, lhs, LLVMInt64Type (), "");
break;
}
case OP_MOVE_I8_TO_F: {
values [ins->dreg] = LLVMBuildBitCast (builder, lhs, LLVMDoubleType (), "");
break;
}
case OP_IADD:
case OP_ISUB:
case OP_IAND:
case OP_IMUL:
case OP_IDIV:
case OP_IDIV_UN:
case OP_IREM:
case OP_IREM_UN:
case OP_IOR:
case OP_IXOR:
case OP_ISHL:
case OP_ISHR:
case OP_ISHR_UN:
case OP_FADD:
case OP_FSUB:
case OP_FMUL:
case OP_FDIV:
case OP_LADD:
case OP_LSUB:
case OP_LMUL:
case OP_LDIV:
case OP_LDIV_UN:
case OP_LREM:
case OP_LREM_UN:
case OP_LAND:
case OP_LOR:
case OP_LXOR:
case OP_LSHL:
case OP_LSHR:
case OP_LSHR_UN:
lhs = convert (ctx, lhs, regtype_to_llvm_type (spec [MONO_INST_DEST]));
rhs = convert (ctx, rhs, regtype_to_llvm_type (spec [MONO_INST_DEST]));
emit_div_check (ctx, builder, bb, ins, lhs, rhs);
if (!ctx_ok (ctx))
break;
builder = ctx->builder;
switch (ins->opcode) {
case OP_IADD:
case OP_LADD:
values [ins->dreg] = LLVMBuildAdd (builder, lhs, rhs, dname);
break;
case OP_ISUB:
case OP_LSUB:
values [ins->dreg] = LLVMBuildSub (builder, lhs, rhs, dname);
break;
case OP_IMUL:
case OP_LMUL:
values [ins->dreg] = LLVMBuildMul (builder, lhs, rhs, dname);
break;
case OP_IREM:
case OP_LREM:
values [ins->dreg] = LLVMBuildSRem (builder, lhs, rhs, dname);
break;
case OP_IREM_UN:
case OP_LREM_UN:
values [ins->dreg] = LLVMBuildURem (builder, lhs, rhs, dname);
break;
case OP_IDIV:
case OP_LDIV:
values [ins->dreg] = LLVMBuildSDiv (builder, lhs, rhs, dname);
break;
case OP_IDIV_UN:
case OP_LDIV_UN:
values [ins->dreg] = LLVMBuildUDiv (builder, lhs, rhs, dname);
break;
case OP_FDIV:
case OP_RDIV:
values [ins->dreg] = LLVMBuildFDiv (builder, lhs, rhs, dname);
break;
case OP_IAND:
case OP_LAND:
values [ins->dreg] = LLVMBuildAnd (builder, lhs, rhs, dname);
break;
case OP_IOR:
case OP_LOR:
values [ins->dreg] = LLVMBuildOr (builder, lhs, rhs, dname);
break;
case OP_IXOR:
case OP_LXOR:
values [ins->dreg] = LLVMBuildXor (builder, lhs, rhs, dname);
break;
case OP_ISHL:
case OP_LSHL:
values [ins->dreg] = LLVMBuildShl (builder, lhs, rhs, dname);
break;
case OP_ISHR:
case OP_LSHR:
values [ins->dreg] = LLVMBuildAShr (builder, lhs, rhs, dname);
break;
case OP_ISHR_UN:
case OP_LSHR_UN:
values [ins->dreg] = LLVMBuildLShr (builder, lhs, rhs, dname);
break;
case OP_FADD:
values [ins->dreg] = LLVMBuildFAdd (builder, lhs, rhs, dname);
break;
case OP_FSUB:
values [ins->dreg] = LLVMBuildFSub (builder, lhs, rhs, dname);
break;
case OP_FMUL:
values [ins->dreg] = LLVMBuildFMul (builder, lhs, rhs, dname);
break;
default:
g_assert_not_reached ();
}
break;
case OP_RADD:
case OP_RSUB:
case OP_RMUL:
case OP_RDIV: {
lhs = convert (ctx, lhs, LLVMFloatType ());
rhs = convert (ctx, rhs, LLVMFloatType ());
switch (ins->opcode) {
case OP_RADD:
values [ins->dreg] = LLVMBuildFAdd (builder, lhs, rhs, dname);
break;
case OP_RSUB:
values [ins->dreg] = LLVMBuildFSub (builder, lhs, rhs, dname);
break;
case OP_RMUL:
values [ins->dreg] = LLVMBuildFMul (builder, lhs, rhs, dname);
break;
case OP_RDIV:
values [ins->dreg] = LLVMBuildFDiv (builder, lhs, rhs, dname);
break;
default:
g_assert_not_reached ();
break;
}
break;
}
case OP_IADD_IMM:
case OP_ISUB_IMM:
case OP_IMUL_IMM:
case OP_IREM_IMM:
case OP_IREM_UN_IMM:
case OP_IDIV_IMM:
case OP_IDIV_UN_IMM:
case OP_IAND_IMM:
case OP_IOR_IMM:
case OP_IXOR_IMM:
case OP_ISHL_IMM:
case OP_ISHR_IMM:
case OP_ISHR_UN_IMM:
case OP_LADD_IMM:
case OP_LSUB_IMM:
case OP_LMUL_IMM:
case OP_LREM_IMM:
case OP_LAND_IMM:
case OP_LOR_IMM:
case OP_LXOR_IMM:
case OP_LSHL_IMM:
case OP_LSHR_IMM:
case OP_LSHR_UN_IMM:
case OP_ADD_IMM:
case OP_AND_IMM:
case OP_MUL_IMM:
case OP_SHL_IMM:
case OP_SHR_IMM:
case OP_SHR_UN_IMM: {
LLVMValueRef imm;
if (spec [MONO_INST_SRC1] == 'l') {
imm = LLVMConstInt (LLVMInt64Type (), GET_LONG_IMM (ins), FALSE);
} else {
imm = LLVMConstInt (LLVMInt32Type (), ins->inst_imm, FALSE);
}
emit_div_check (ctx, builder, bb, ins, lhs, imm);
if (!ctx_ok (ctx))
break;
builder = ctx->builder;
#if TARGET_SIZEOF_VOID_P == 4
if (ins->opcode == OP_LSHL_IMM || ins->opcode == OP_LSHR_IMM || ins->opcode == OP_LSHR_UN_IMM)
imm = LLVMConstInt (LLVMInt32Type (), ins->inst_imm, FALSE);
#endif
if (LLVMGetTypeKind (LLVMTypeOf (lhs)) == LLVMPointerTypeKind)
lhs = convert (ctx, lhs, IntPtrType ());
imm = convert (ctx, imm, LLVMTypeOf (lhs));
switch (ins->opcode) {
case OP_IADD_IMM:
case OP_LADD_IMM:
case OP_ADD_IMM:
values [ins->dreg] = LLVMBuildAdd (builder, lhs, imm, dname);
break;
case OP_ISUB_IMM:
case OP_LSUB_IMM:
values [ins->dreg] = LLVMBuildSub (builder, lhs, imm, dname);
break;
case OP_IMUL_IMM:
case OP_MUL_IMM:
case OP_LMUL_IMM:
values [ins->dreg] = LLVMBuildMul (builder, lhs, imm, dname);
break;
case OP_IDIV_IMM:
case OP_LDIV_IMM:
values [ins->dreg] = LLVMBuildSDiv (builder, lhs, imm, dname);
break;
case OP_IDIV_UN_IMM:
case OP_LDIV_UN_IMM:
values [ins->dreg] = LLVMBuildUDiv (builder, lhs, imm, dname);
break;
case OP_IREM_IMM:
case OP_LREM_IMM:
values [ins->dreg] = LLVMBuildSRem (builder, lhs, imm, dname);
break;
case OP_IREM_UN_IMM:
values [ins->dreg] = LLVMBuildURem (builder, lhs, imm, dname);
break;
case OP_IAND_IMM:
case OP_LAND_IMM:
case OP_AND_IMM:
values [ins->dreg] = LLVMBuildAnd (builder, lhs, imm, dname);
break;
case OP_IOR_IMM:
case OP_LOR_IMM:
values [ins->dreg] = LLVMBuildOr (builder, lhs, imm, dname);
break;
case OP_IXOR_IMM:
case OP_LXOR_IMM:
values [ins->dreg] = LLVMBuildXor (builder, lhs, imm, dname);
break;
case OP_ISHL_IMM:
case OP_LSHL_IMM:
values [ins->dreg] = LLVMBuildShl (builder, lhs, imm, dname);
break;
case OP_SHL_IMM:
if (TARGET_SIZEOF_VOID_P == 8) {
/* The IL is not regular, so widen both operands to 64 bits */
lhs = convert (ctx, lhs, LLVMInt64Type ());
imm = convert (ctx, imm, LLVMInt64Type ());
}
values [ins->dreg] = LLVMBuildShl (builder, lhs, imm, dname);
break;
case OP_ISHR_IMM:
case OP_LSHR_IMM:
case OP_SHR_IMM:
values [ins->dreg] = LLVMBuildAShr (builder, lhs, imm, dname);
break;
case OP_ISHR_UN_IMM:
/* This is used to implement conv.u4, so the lhs could be an i8 */
lhs = convert (ctx, lhs, LLVMInt32Type ());
imm = convert (ctx, imm, LLVMInt32Type ());
values [ins->dreg] = LLVMBuildLShr (builder, lhs, imm, dname);
break;
case OP_LSHR_UN_IMM:
case OP_SHR_UN_IMM:
values [ins->dreg] = LLVMBuildLShr (builder, lhs, imm, dname);
break;
default:
g_assert_not_reached ();
}
break;
}
case OP_INEG:
values [ins->dreg] = LLVMBuildSub (builder, LLVMConstInt (LLVMInt32Type (), 0, FALSE), convert (ctx, lhs, LLVMInt32Type ()), dname);
break;
case OP_LNEG:
if (LLVMTypeOf (lhs) != LLVMInt64Type ())
lhs = convert (ctx, lhs, LLVMInt64Type ());
values [ins->dreg] = LLVMBuildSub (builder, LLVMConstInt (LLVMInt64Type (), 0, FALSE), lhs, dname);
break;
case OP_FNEG:
lhs = convert (ctx, lhs, LLVMDoubleType ());
values [ins->dreg] = LLVMBuildFNeg (builder, lhs, dname);
break;
case OP_RNEG:
lhs = convert (ctx, lhs, LLVMFloatType ());
values [ins->dreg] = LLVMBuildFNeg (builder, lhs, dname);
break;
case OP_INOT: {
guint32 v = 0xffffffff;
values [ins->dreg] = LLVMBuildXor (builder, LLVMConstInt (LLVMInt32Type (), v, FALSE), convert (ctx, lhs, LLVMInt32Type ()), dname);
break;
}
case OP_LNOT: {
if (LLVMTypeOf (lhs) != LLVMInt64Type ())
lhs = convert (ctx, lhs, LLVMInt64Type ());
guint64 v = 0xffffffffffffffffLL;
values [ins->dreg] = LLVMBuildXor (builder, LLVMConstInt (LLVMInt64Type (), v, FALSE), lhs, dname);
break;
}
#if defined(TARGET_X86) || defined(TARGET_AMD64)
case OP_X86_LEA: {
LLVMValueRef v1, v2;
rhs = LLVMBuildSExt (builder, convert (ctx, rhs, LLVMInt32Type ()), LLVMInt64Type (), "");
v1 = LLVMBuildMul (builder, convert (ctx, rhs, IntPtrType ()), LLVMConstInt (IntPtrType (), ((unsigned long long)1 << ins->backend.shift_amount), FALSE), "");
v2 = LLVMBuildAdd (builder, convert (ctx, lhs, IntPtrType ()), v1, "");
values [ins->dreg] = LLVMBuildAdd (builder, v2, LLVMConstInt (IntPtrType (), ins->inst_imm, FALSE), dname);
break;
}
case OP_X86_BSF32:
case OP_X86_BSF64: {
LLVMValueRef args [] = {
lhs,
LLVMConstInt (LLVMInt1Type (), 1, TRUE),
};
int op = ins->opcode == OP_X86_BSF32 ? INTRINS_CTTZ_I32 : INTRINS_CTTZ_I64;
values [ins->dreg] = call_intrins (ctx, op, args, dname);
break;
}
case OP_X86_BSR32:
case OP_X86_BSR64: {
LLVMValueRef args [] = {
lhs,
LLVMConstInt (LLVMInt1Type (), 1, TRUE),
};
int op = ins->opcode == OP_X86_BSR32 ? INTRINS_CTLZ_I32 : INTRINS_CTLZ_I64;
LLVMValueRef width = ins->opcode == OP_X86_BSR32 ? const_int32 (31) : const_int64 (63);
LLVMValueRef tz = call_intrins (ctx, op, args, "");
values [ins->dreg] = LLVMBuildXor (builder, tz, width, dname);
break;
}
#endif
case OP_ICONV_TO_I1:
case OP_ICONV_TO_I2:
case OP_ICONV_TO_I4:
case OP_ICONV_TO_U1:
case OP_ICONV_TO_U2:
case OP_ICONV_TO_U4:
case OP_LCONV_TO_I1:
case OP_LCONV_TO_I2:
case OP_LCONV_TO_U1:
case OP_LCONV_TO_U2:
case OP_LCONV_TO_U4: {
gboolean sign;
sign = (ins->opcode == OP_ICONV_TO_I1) || (ins->opcode == OP_ICONV_TO_I2) || (ins->opcode == OP_ICONV_TO_I4) || (ins->opcode == OP_LCONV_TO_I1) || (ins->opcode == OP_LCONV_TO_I2);
/* Have to do two casts since our vregs have type int */
v = LLVMBuildTrunc (builder, lhs, op_to_llvm_type (ins->opcode), "");
if (sign)
values [ins->dreg] = LLVMBuildSExt (builder, v, LLVMInt32Type (), dname);
else
values [ins->dreg] = LLVMBuildZExt (builder, v, LLVMInt32Type (), dname);
break;
}
case OP_ICONV_TO_I8:
values [ins->dreg] = LLVMBuildSExt (builder, lhs, LLVMInt64Type (), dname);
break;
case OP_ICONV_TO_U8:
values [ins->dreg] = LLVMBuildZExt (builder, lhs, LLVMInt64Type (), dname);
break;
case OP_FCONV_TO_I4:
case OP_RCONV_TO_I4:
values [ins->dreg] = LLVMBuildFPToSI (builder, lhs, LLVMInt32Type (), dname);
break;
case OP_FCONV_TO_I1:
case OP_RCONV_TO_I1:
values [ins->dreg] = LLVMBuildSExt (builder, LLVMBuildFPToSI (builder, lhs, LLVMInt8Type (), dname), LLVMInt32Type (), "");
break;
case OP_FCONV_TO_U1:
case OP_RCONV_TO_U1:
values [ins->dreg] = LLVMBuildZExt (builder, LLVMBuildTrunc (builder, LLVMBuildFPToUI (builder, lhs, IntPtrType (), dname), LLVMInt8Type (), ""), LLVMInt32Type (), "");
break;
case OP_FCONV_TO_I2:
case OP_RCONV_TO_I2:
values [ins->dreg] = LLVMBuildSExt (builder, LLVMBuildFPToSI (builder, lhs, LLVMInt16Type (), dname), LLVMInt32Type (), "");
break;
case OP_FCONV_TO_U2:
case OP_RCONV_TO_U2:
values [ins->dreg] = LLVMBuildZExt (builder, LLVMBuildFPToUI (builder, lhs, LLVMInt16Type (), dname), LLVMInt32Type (), "");
break;
case OP_FCONV_TO_U4:
case OP_RCONV_TO_U4:
values [ins->dreg] = LLVMBuildFPToUI (builder, lhs, LLVMInt32Type (), dname);
break;
case OP_FCONV_TO_U8:
case OP_RCONV_TO_U8:
values [ins->dreg] = LLVMBuildFPToUI (builder, lhs, LLVMInt64Type (), dname);
break;
case OP_FCONV_TO_I8:
case OP_RCONV_TO_I8:
values [ins->dreg] = LLVMBuildFPToSI (builder, lhs, LLVMInt64Type (), dname);
break;
case OP_FCONV_TO_I:
case OP_RCONV_TO_I:
values [ins->dreg] = LLVMBuildFPToSI (builder, lhs, IntPtrType (), dname);
break;
case OP_ICONV_TO_R8:
case OP_LCONV_TO_R8:
values [ins->dreg] = LLVMBuildSIToFP (builder, lhs, LLVMDoubleType (), dname);
break;
case OP_ICONV_TO_R_UN:
case OP_LCONV_TO_R_UN:
values [ins->dreg] = LLVMBuildUIToFP (builder, lhs, LLVMDoubleType (), dname);
break;
#if TARGET_SIZEOF_VOID_P == 4
case OP_LCONV_TO_U:
#endif
case OP_LCONV_TO_I4:
values [ins->dreg] = LLVMBuildTrunc (builder, lhs, LLVMInt32Type (), dname);
break;
case OP_ICONV_TO_R4:
case OP_LCONV_TO_R4:
v = LLVMBuildSIToFP (builder, lhs, LLVMFloatType (), "");
if (cfg->r4fp)
values [ins->dreg] = v;
else
values [ins->dreg] = LLVMBuildFPExt (builder, v, LLVMDoubleType (), dname);
break;
case OP_FCONV_TO_R4:
v = LLVMBuildFPTrunc (builder, lhs, LLVMFloatType (), "");
if (cfg->r4fp)
values [ins->dreg] = v;
else
values [ins->dreg] = LLVMBuildFPExt (builder, v, LLVMDoubleType (), dname);
break;
case OP_RCONV_TO_R8:
values [ins->dreg] = LLVMBuildFPExt (builder, lhs, LLVMDoubleType (), dname);
break;
case OP_RCONV_TO_R4:
values [ins->dreg] = lhs;
break;
case OP_SEXT_I4:
values [ins->dreg] = LLVMBuildSExt (builder, convert (ctx, lhs, LLVMInt32Type ()), LLVMInt64Type (), dname);
break;
case OP_ZEXT_I4:
values [ins->dreg] = LLVMBuildZExt (builder, convert (ctx, lhs, LLVMInt32Type ()), LLVMInt64Type (), dname);
break;
case OP_TRUNC_I4:
values [ins->dreg] = LLVMBuildTrunc (builder, lhs, LLVMInt32Type (), dname);
break;
case OP_LOCALLOC_IMM: {
LLVMValueRef v;
guint32 size = ins->inst_imm;
size = (size + (MONO_ARCH_FRAME_ALIGNMENT - 1)) & ~ (MONO_ARCH_FRAME_ALIGNMENT - 1);
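/* E.g. with MONO_ARCH_FRAME_ALIGNMENT == 16, a request for 20 bytes is rounded up to 32 */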
v = mono_llvm_build_alloca (builder, LLVMInt8Type (), LLVMConstInt (LLVMInt32Type (), size, FALSE), MONO_ARCH_FRAME_ALIGNMENT, "");
if (ins->flags & MONO_INST_INIT)
emit_memset (ctx, builder, v, const_int32 (size), MONO_ARCH_FRAME_ALIGNMENT);
values [ins->dreg] = v;
break;
}
case OP_LOCALLOC: {
LLVMValueRef v, size;
size = LLVMBuildAnd (builder, LLVMBuildAdd (builder, convert (ctx, lhs, LLVMInt32Type ()), LLVMConstInt (LLVMInt32Type (), MONO_ARCH_FRAME_ALIGNMENT - 1, FALSE), ""), LLVMConstInt (LLVMInt32Type (), ~ (MONO_ARCH_FRAME_ALIGNMENT - 1), FALSE), "");
v = mono_llvm_build_alloca (builder, LLVMInt8Type (), size, MONO_ARCH_FRAME_ALIGNMENT, "");
if (ins->flags & MONO_INST_INIT)
emit_memset (ctx, builder, v, size, MONO_ARCH_FRAME_ALIGNMENT);
values [ins->dreg] = v;
break;
}
case OP_LOADI1_MEMBASE:
case OP_LOADU1_MEMBASE:
case OP_LOADI2_MEMBASE:
case OP_LOADU2_MEMBASE:
case OP_LOADI4_MEMBASE:
case OP_LOADU4_MEMBASE:
case OP_LOADI8_MEMBASE:
case OP_LOADR4_MEMBASE:
case OP_LOADR8_MEMBASE:
case OP_LOAD_MEMBASE:
case OP_LOADI8_MEM:
case OP_LOADU1_MEM:
case OP_LOADU2_MEM:
case OP_LOADI4_MEM:
case OP_LOADU4_MEM:
case OP_LOAD_MEM: {
int size = 8;
LLVMValueRef base, index, addr;
LLVMTypeRef t;
gboolean sext = FALSE, zext = FALSE;
gboolean is_faulting = (ins->flags & MONO_INST_FAULT) != 0;
gboolean is_volatile = (ins->flags & MONO_INST_VOLATILE) != 0;
gboolean is_unaligned = (ins->flags & MONO_INST_UNALIGNED) != 0;
t = load_store_to_llvm_type (ins->opcode, &size, &sext, &zext);
if (sext || zext)
dname = (char*)"";
if ((ins->opcode == OP_LOADI8_MEM) || (ins->opcode == OP_LOAD_MEM) || (ins->opcode == OP_LOADI4_MEM) || (ins->opcode == OP_LOADU4_MEM) || (ins->opcode == OP_LOADU1_MEM) || (ins->opcode == OP_LOADU2_MEM)) {
addr = LLVMConstInt (IntPtrType (), ins->inst_imm, FALSE);
base = addr;
} else {
/* _MEMBASE */
base = lhs;
if (ins->inst_offset == 0) {
LLVMValueRef gep_base, gep_offset;
if (mono_llvm_can_be_gep (base, &gep_base, &gep_offset)) {
addr = LLVMBuildGEP (builder, convert (ctx, gep_base, LLVMPointerType (LLVMInt8Type (), 0)), &gep_offset, 1, "");
} else {
addr = base;
}
} else if (ins->inst_offset % size != 0) {
/* Unaligned load */
index = LLVMConstInt (LLVMInt32Type (), ins->inst_offset, FALSE);
addr = LLVMBuildGEP (builder, convert (ctx, base, LLVMPointerType (LLVMInt8Type (), 0)), &index, 1, "");
} else {
index = LLVMConstInt (LLVMInt32Type (), ins->inst_offset / size, FALSE);
addr = LLVMBuildGEP (builder, convert (ctx, base, LLVMPointerType (t, 0)), &index, 1, "");
}
}
addr = convert (ctx, addr, LLVMPointerType (t, 0));
if (is_unaligned)
values [ins->dreg] = mono_llvm_build_aligned_load (builder, addr, dname, is_volatile, 1);
else
values [ins->dreg] = emit_load (ctx, bb, &builder, size, addr, base, dname, is_faulting, is_volatile, LLVM_BARRIER_NONE);
if (!(is_faulting || is_volatile) && (ins->flags & MONO_INST_INVARIANT_LOAD)) {
/*
* These will signal LLVM that these loads do not alias any stores, and
* they can't fail, allowing them to be hoisted out of loops.
*/
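/* Presumably this attaches LLVM's !invariant.load metadata to the load instruction */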
set_invariant_load_flag (values [ins->dreg]);
}
if (sext)
values [ins->dreg] = LLVMBuildSExt (builder, values [ins->dreg], LLVMInt32Type (), dname);
else if (zext)
values [ins->dreg] = LLVMBuildZExt (builder, values [ins->dreg], LLVMInt32Type (), dname);
else if (!cfg->r4fp && ins->opcode == OP_LOADR4_MEMBASE)
values [ins->dreg] = LLVMBuildFPExt (builder, values [ins->dreg], LLVMDoubleType (), dname);
break;
}
case OP_STOREI1_MEMBASE_REG:
case OP_STOREI2_MEMBASE_REG:
case OP_STOREI4_MEMBASE_REG:
case OP_STOREI8_MEMBASE_REG:
case OP_STORER4_MEMBASE_REG:
case OP_STORER8_MEMBASE_REG:
case OP_STORE_MEMBASE_REG: {
int size = 8;
LLVMValueRef index, addr, base;
LLVMTypeRef t;
gboolean sext = FALSE, zext = FALSE;
gboolean is_faulting = (ins->flags & MONO_INST_FAULT) != 0;
gboolean is_volatile = (ins->flags & MONO_INST_VOLATILE) != 0;
gboolean is_unaligned = (ins->flags & MONO_INST_UNALIGNED) != 0;
if (!values [ins->inst_destbasereg]) {
set_failure (ctx, "inst_destbasereg");
break;
}
t = load_store_to_llvm_type (ins->opcode, &size, &sext, &zext);
base = values [ins->inst_destbasereg];
LLVMValueRef gep_base, gep_offset;
if (ins->inst_offset == 0 && mono_llvm_can_be_gep (base, &gep_base, &gep_offset)) {
addr = LLVMBuildGEP (builder, convert (ctx, gep_base, LLVMPointerType (LLVMInt8Type (), 0)), &gep_offset, 1, "");
} else if (ins->inst_offset % size != 0) {
/* Unaligned store */
index = LLVMConstInt (LLVMInt32Type (), ins->inst_offset, FALSE);
addr = LLVMBuildGEP (builder, convert (ctx, base, LLVMPointerType (LLVMInt8Type (), 0)), &index, 1, "");
} else {
index = LLVMConstInt (LLVMInt32Type (), ins->inst_offset / size, FALSE);
addr = LLVMBuildGEP (builder, convert (ctx, base, LLVMPointerType (t, 0)), &index, 1, "");
}
if (is_faulting && LLVMGetInstructionOpcode (base) == LLVMAlloca && !(ins->flags & MONO_INST_VOLATILE))
/* Storing to an alloca cannot fail */
is_faulting = FALSE;
LLVMValueRef srcval = convert (ctx, values [ins->sreg1], t);
LLVMValueRef ptrdst = convert (ctx, addr, LLVMPointerType (t, 0));
if (is_unaligned)
mono_llvm_build_aligned_store (builder, srcval, ptrdst, is_volatile, 1);
else
emit_store (ctx, bb, &builder, size, srcval, ptrdst, base, is_faulting, is_volatile);
break;
}
case OP_STOREI1_MEMBASE_IMM:
case OP_STOREI2_MEMBASE_IMM:
case OP_STOREI4_MEMBASE_IMM:
case OP_STOREI8_MEMBASE_IMM:
case OP_STORE_MEMBASE_IMM: {
int size = 8;
LLVMValueRef index, addr, base;
LLVMTypeRef t;
gboolean sext = FALSE, zext = FALSE;
gboolean is_faulting = (ins->flags & MONO_INST_FAULT) != 0;
gboolean is_volatile = (ins->flags & MONO_INST_VOLATILE) != 0;
gboolean is_unaligned = (ins->flags & MONO_INST_UNALIGNED) != 0;
t = load_store_to_llvm_type (ins->opcode, &size, &sext, &zext);
base = values [ins->inst_destbasereg];
LLVMValueRef gep_base, gep_offset;
if (ins->inst_offset == 0 && mono_llvm_can_be_gep (base, &gep_base, &gep_offset)) {
addr = LLVMBuildGEP (builder, convert (ctx, gep_base, LLVMPointerType (LLVMInt8Type (), 0)), &gep_offset, 1, "");
} else if (ins->inst_offset % size != 0) {
/* Unaligned store */
index = LLVMConstInt (LLVMInt32Type (), ins->inst_offset, FALSE);
addr = LLVMBuildGEP (builder, convert (ctx, base, LLVMPointerType (LLVMInt8Type (), 0)), &index, 1, "");
} else {
index = LLVMConstInt (LLVMInt32Type (), ins->inst_offset / size, FALSE);
addr = LLVMBuildGEP (builder, convert (ctx, base, LLVMPointerType (t, 0)), &index, 1, "");
}
LLVMValueRef srcval = convert (ctx, LLVMConstInt (IntPtrType (), ins->inst_imm, FALSE), t);
LLVMValueRef ptrdst = convert (ctx, addr, LLVMPointerType (t, 0));
if (is_unaligned)
mono_llvm_build_aligned_store (builder, srcval, ptrdst, is_volatile, 1);
else
emit_store (ctx, bb, &builder, size, srcval, ptrdst, base, is_faulting, is_volatile);
break;
}
case OP_CHECK_THIS:
emit_load (ctx, bb, &builder, TARGET_SIZEOF_VOID_P, convert (ctx, lhs, LLVMPointerType (IntPtrType (), 0)), lhs, "", TRUE, FALSE, LLVM_BARRIER_NONE);
break;
case OP_OUTARG_VTRETADDR:
break;
case OP_VOIDCALL:
case OP_CALL:
case OP_LCALL:
case OP_FCALL:
case OP_RCALL:
case OP_VCALL:
case OP_VOIDCALL_MEMBASE:
case OP_CALL_MEMBASE:
case OP_LCALL_MEMBASE:
case OP_FCALL_MEMBASE:
case OP_RCALL_MEMBASE:
case OP_VCALL_MEMBASE:
case OP_VOIDCALL_REG:
case OP_CALL_REG:
case OP_LCALL_REG:
case OP_FCALL_REG:
case OP_RCALL_REG:
case OP_VCALL_REG: {
process_call (ctx, bb, &builder, ins);
break;
}
case OP_AOTCONST: {
MonoJumpInfoType ji_type = ins->inst_c1;
gpointer ji_data = ins->inst_p0;
if (ji_type == MONO_PATCH_INFO_ICALL_ADDR) {
char *symbol = mono_aot_get_direct_call_symbol (MONO_PATCH_INFO_ICALL_ADDR_CALL, ji_data);
if (symbol) {
/*
* Avoid emitting a got entry for these since the method is directly called, and it might not be
* resolvable at runtime using dlsym ().
*/
g_free (symbol);
values [ins->dreg] = LLVMConstInt (IntPtrType (), 0, FALSE);
break;
}
}
values [ins->dreg] = get_aotconst (ctx, ji_type, ji_data, LLVMPointerType (IntPtrType (), 0));
break;
}
case OP_MEMMOVE: {
int argn = 0;
LLVMValueRef args [5];
args [argn++] = convert (ctx, values [ins->sreg1], LLVMPointerType (LLVMInt8Type (), 0));
args [argn++] = convert (ctx, values [ins->sreg2], LLVMPointerType (LLVMInt8Type (), 0));
args [argn++] = convert (ctx, values [ins->sreg3], LLVMInt64Type ());
args [argn++] = LLVMConstInt (LLVMInt1Type (), 0, FALSE); // is_volatile
call_intrins (ctx, INTRINS_MEMMOVE, args, "");
break;
}
case OP_NOT_REACHED:
LLVMBuildUnreachable (builder);
has_terminator = TRUE;
g_assert (bb->block_num < cfg->max_block_num);
ctx->unreachable [bb->block_num] = TRUE;
/* Might have instructions after this */
while (ins->next) {
MonoInst *next = ins->next;
/*
* FIXME: If later code uses the regs defined by these instructions,
* compilation will fail.
*/
const char *spec = INS_INFO (next->opcode);
if (spec [MONO_INST_DEST] == 'i' && !MONO_IS_STORE_MEMBASE (next))
ctx->values [next->dreg] = LLVMConstNull (LLVMInt32Type ());
MONO_DELETE_INS (bb, next);
}
break;
case OP_LDADDR: {
MonoInst *var = ins->inst_i0;
MonoClass *klass = var->klass;
if (var->opcode == OP_VTARG_ADDR && !MONO_CLASS_IS_SIMD(cfg, klass)) {
/* The variable contains the vtype address */
values [ins->dreg] = values [var->dreg];
} else if (var->opcode == OP_GSHAREDVT_LOCAL) {
values [ins->dreg] = emit_gsharedvt_ldaddr (ctx, var->dreg);
} else {
values [ins->dreg] = addresses [var->dreg];
}
break;
}
case OP_SIN: {
LLVMValueRef args [1];
args [0] = convert (ctx, lhs, LLVMDoubleType ());
values [ins->dreg] = call_intrins (ctx, INTRINS_SIN, args, dname);
break;
}
case OP_SINF: {
LLVMValueRef args [1];
args [0] = convert (ctx, lhs, LLVMFloatType ());
values [ins->dreg] = call_intrins (ctx, INTRINS_SINF, args, dname);
break;
}
case OP_EXP: {
LLVMValueRef args [1];
args [0] = convert (ctx, lhs, LLVMDoubleType ());
values [ins->dreg] = call_intrins (ctx, INTRINS_EXP, args, dname);
break;
}
case OP_EXPF: {
LLVMValueRef args [1];
args [0] = convert (ctx, lhs, LLVMFloatType ());
values [ins->dreg] = call_intrins (ctx, INTRINS_EXPF, args, dname);
break;
}
case OP_LOG2: {
LLVMValueRef args [1];
args [0] = convert (ctx, lhs, LLVMDoubleType ());
values [ins->dreg] = call_intrins (ctx, INTRINS_LOG2, args, dname);
break;
}
case OP_LOG2F: {
LLVMValueRef args [1];
args [0] = convert (ctx, lhs, LLVMFloatType ());
values [ins->dreg] = call_intrins (ctx, INTRINS_LOG2F, args, dname);
break;
}
case OP_LOG10: {
LLVMValueRef args [1];
args [0] = convert (ctx, lhs, LLVMDoubleType ());
values [ins->dreg] = call_intrins (ctx, INTRINS_LOG10, args, dname);
break;
}
case OP_LOG10F: {
LLVMValueRef args [1];
args [0] = convert (ctx, lhs, LLVMFloatType ());
values [ins->dreg] = call_intrins (ctx, INTRINS_LOG10F, args, dname);
break;
}
case OP_LOG: {
LLVMValueRef args [1];
args [0] = convert (ctx, lhs, LLVMDoubleType ());
values [ins->dreg] = call_intrins (ctx, INTRINS_LOG, args, dname);
break;
}
case OP_TRUNC: {
LLVMValueRef args [1];
args [0] = convert (ctx, lhs, LLVMDoubleType ());
values [ins->dreg] = call_intrins (ctx, INTRINS_TRUNC, args, dname);
break;
}
case OP_TRUNCF: {
LLVMValueRef args [1];
args [0] = convert (ctx, lhs, LLVMFloatType ());
values [ins->dreg] = call_intrins (ctx, INTRINS_TRUNCF, args, dname);
break;
}
case OP_COS: {
LLVMValueRef args [1];
args [0] = convert (ctx, lhs, LLVMDoubleType ());
values [ins->dreg] = call_intrins (ctx, INTRINS_COS, args, dname);
break;
}
case OP_COSF: {
LLVMValueRef args [1];
args [0] = convert (ctx, lhs, LLVMFloatType ());
values [ins->dreg] = call_intrins (ctx, INTRINS_COSF, args, dname);
break;
}
case OP_SQRT: {
LLVMValueRef args [1];
args [0] = convert (ctx, lhs, LLVMDoubleType ());
values [ins->dreg] = call_intrins (ctx, INTRINS_SQRT, args, dname);
break;
}
case OP_SQRTF: {
LLVMValueRef args [1];
args [0] = convert (ctx, lhs, LLVMFloatType ());
values [ins->dreg] = call_intrins (ctx, INTRINS_SQRTF, args, dname);
break;
}
case OP_FLOOR: {
LLVMValueRef args [1];
args [0] = convert (ctx, lhs, LLVMDoubleType ());
values [ins->dreg] = call_intrins (ctx, INTRINS_FLOOR, args, dname);
break;
}
case OP_FLOORF: {
LLVMValueRef args [1];
args [0] = convert (ctx, lhs, LLVMFloatType ());
values [ins->dreg] = call_intrins (ctx, INTRINS_FLOORF, args, dname);
break;
}
case OP_CEIL: {
LLVMValueRef args [1];
args [0] = convert (ctx, lhs, LLVMDoubleType ());
values [ins->dreg] = call_intrins (ctx, INTRINS_CEIL, args, dname);
break;
}
case OP_CEILF: {
LLVMValueRef args [1];
args [0] = convert (ctx, lhs, LLVMFloatType ());
values [ins->dreg] = call_intrins (ctx, INTRINS_CEILF, args, dname);
break;
}
case OP_FMA: {
LLVMValueRef args [3];
args [0] = convert (ctx, values [ins->sreg1], LLVMDoubleType ());
args [1] = convert (ctx, values [ins->sreg2], LLVMDoubleType ());
args [2] = convert (ctx, values [ins->sreg3], LLVMDoubleType ());
values [ins->dreg] = call_intrins (ctx, INTRINS_FMA, args, dname);
break;
}
case OP_FMAF: {
LLVMValueRef args [3];
args [0] = convert (ctx, values [ins->sreg1], LLVMFloatType ());
args [1] = convert (ctx, values [ins->sreg2], LLVMFloatType ());
args [2] = convert (ctx, values [ins->sreg3], LLVMFloatType ());
values [ins->dreg] = call_intrins (ctx, INTRINS_FMAF, args, dname);
break;
}
case OP_ABS: {
LLVMValueRef args [1];
args [0] = convert (ctx, lhs, LLVMDoubleType ());
values [ins->dreg] = call_intrins (ctx, INTRINS_FABS, args, dname);
break;
}
case OP_ABSF: {
LLVMValueRef args [1];
#ifdef TARGET_AMD64
args [0] = convert (ctx, lhs, LLVMFloatType ());
values [ins->dreg] = call_intrins (ctx, INTRINS_ABSF, args, dname);
#else
/* llvm.fabs not supported on all platforms */
args [0] = convert (ctx, lhs, LLVMDoubleType ());
values [ins->dreg] = call_intrins (ctx, INTRINS_FABS, args, dname);
values [ins->dreg] = convert (ctx, values [ins->dreg], LLVMFloatType ());
#endif
break;
}
case OP_RPOW: {
LLVMValueRef args [2];
args [0] = convert (ctx, lhs, LLVMFloatType ());
args [1] = convert (ctx, rhs, LLVMFloatType ());
values [ins->dreg] = call_intrins (ctx, INTRINS_POWF, args, dname);
break;
}
case OP_FPOW: {
LLVMValueRef args [2];
args [0] = convert (ctx, lhs, LLVMDoubleType ());
args [1] = convert (ctx, rhs, LLVMDoubleType ());
values [ins->dreg] = call_intrins (ctx, INTRINS_POW, args, dname);
break;
}
case OP_FCOPYSIGN: {
LLVMValueRef args [2];
args [0] = convert (ctx, lhs, LLVMDoubleType ());
args [1] = convert (ctx, rhs, LLVMDoubleType ());
values [ins->dreg] = call_intrins (ctx, INTRINS_COPYSIGN, args, dname);
break;
}
case OP_RCOPYSIGN: {
LLVMValueRef args [2];
args [0] = convert (ctx, lhs, LLVMFloatType ());
args [1] = convert (ctx, rhs, LLVMFloatType ());
values [ins->dreg] = call_intrins (ctx, INTRINS_COPYSIGNF, args, dname);
break;
}
case OP_IMIN:
case OP_LMIN:
case OP_IMAX:
case OP_LMAX:
case OP_IMIN_UN:
case OP_LMIN_UN:
case OP_IMAX_UN:
case OP_LMAX_UN:
case OP_FMIN:
case OP_FMAX:
case OP_RMIN:
case OP_RMAX: {
LLVMValueRef v;
lhs = convert (ctx, lhs, regtype_to_llvm_type (spec [MONO_INST_DEST]));
rhs = convert (ctx, rhs, regtype_to_llvm_type (spec [MONO_INST_DEST]));
switch (ins->opcode) {
case OP_IMIN:
case OP_LMIN:
v = LLVMBuildICmp (builder, LLVMIntSLE, lhs, rhs, "");
break;
case OP_IMAX:
case OP_LMAX:
v = LLVMBuildICmp (builder, LLVMIntSGE, lhs, rhs, "");
break;
case OP_IMIN_UN:
case OP_LMIN_UN:
v = LLVMBuildICmp (builder, LLVMIntULE, lhs, rhs, "");
break;
case OP_IMAX_UN:
case OP_LMAX_UN:
v = LLVMBuildICmp (builder, LLVMIntUGE, lhs, rhs, "");
break;
case OP_FMAX:
case OP_RMAX:
v = LLVMBuildFCmp (builder, LLVMRealUGE, lhs, rhs, "");
break;
case OP_FMIN:
case OP_RMIN:
v = LLVMBuildFCmp (builder, LLVMRealULE, lhs, rhs, "");
break;
default:
g_assert_not_reached ();
break;
}
values [ins->dreg] = LLVMBuildSelect (builder, v, lhs, rhs, dname);
break;
}
/*
* See the ARM64 comment in mono/utils/atomic.h for an explanation of why this
* hack is necessary (for now).
*/
#ifdef TARGET_ARM64
#define ARM64_ATOMIC_FENCE_FIX mono_llvm_build_fence (builder, LLVM_BARRIER_SEQ)
#else
#define ARM64_ATOMIC_FENCE_FIX
#endif
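/*
* With the fix enabled, every atomic op below is bracketed as:
*   fence seq_cst; <atomic op>; fence seq_cst
*/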
case OP_ATOMIC_EXCHANGE_I4:
case OP_ATOMIC_EXCHANGE_I8: {
LLVMValueRef args [2];
LLVMTypeRef t;
if (ins->opcode == OP_ATOMIC_EXCHANGE_I4)
t = LLVMInt32Type ();
else
t = LLVMInt64Type ();
g_assert (ins->inst_offset == 0);
args [0] = convert (ctx, lhs, LLVMPointerType (t, 0));
args [1] = convert (ctx, rhs, t);
ARM64_ATOMIC_FENCE_FIX;
values [ins->dreg] = mono_llvm_build_atomic_rmw (builder, LLVM_ATOMICRMW_OP_XCHG, args [0], args [1]);
ARM64_ATOMIC_FENCE_FIX;
break;
}
case OP_ATOMIC_ADD_I4:
case OP_ATOMIC_ADD_I8:
case OP_ATOMIC_AND_I4:
case OP_ATOMIC_AND_I8:
case OP_ATOMIC_OR_I4:
case OP_ATOMIC_OR_I8: {
LLVMValueRef args [2];
LLVMTypeRef t;
if (ins->type == STACK_I4)
t = LLVMInt32Type ();
else
t = LLVMInt64Type ();
g_assert (ins->inst_offset == 0);
args [0] = convert (ctx, lhs, LLVMPointerType (t, 0));
args [1] = convert (ctx, rhs, t);
ARM64_ATOMIC_FENCE_FIX;
if (ins->opcode == OP_ATOMIC_ADD_I4 || ins->opcode == OP_ATOMIC_ADD_I8)
// Interlocked.Add returns new value (that's why we emit additional Add here)
// see https://github.com/dotnet/runtime/pull/33102
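// atomicrmw add yields the *old* value, so the addend is applied once more below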
values [ins->dreg] = LLVMBuildAdd (builder, mono_llvm_build_atomic_rmw (builder, LLVM_ATOMICRMW_OP_ADD, args [0], args [1]), args [1], dname);
else if (ins->opcode == OP_ATOMIC_AND_I4 || ins->opcode == OP_ATOMIC_AND_I8)
values [ins->dreg] = mono_llvm_build_atomic_rmw (builder, LLVM_ATOMICRMW_OP_AND, args [0], args [1]);
else if (ins->opcode == OP_ATOMIC_OR_I4 || ins->opcode == OP_ATOMIC_OR_I8)
values [ins->dreg] = mono_llvm_build_atomic_rmw (builder, LLVM_ATOMICRMW_OP_OR, args [0], args [1]);
else
g_assert_not_reached ();
ARM64_ATOMIC_FENCE_FIX;
break;
}
case OP_ATOMIC_CAS_I4:
case OP_ATOMIC_CAS_I8: {
LLVMValueRef args [3], val;
LLVMTypeRef t;
if (ins->opcode == OP_ATOMIC_CAS_I4)
t = LLVMInt32Type ();
else
t = LLVMInt64Type ();
args [0] = convert (ctx, lhs, LLVMPointerType (t, 0));
/* comparand */
args [1] = convert (ctx, values [ins->sreg3], t);
/* new value */
args [2] = convert (ctx, values [ins->sreg2], t);
ARM64_ATOMIC_FENCE_FIX;
val = mono_llvm_build_cmpxchg (builder, args [0], args [1], args [2]);
ARM64_ATOMIC_FENCE_FIX;
/* cmpxchg returns a { value, success } pair; element 0 is the value the memory held before */
values [ins->dreg] = LLVMBuildExtractValue (builder, val, 0, "");
break;
}
case OP_MEMORY_BARRIER: {
mono_llvm_build_fence (builder, (BarrierKind) ins->backend.memory_barrier_kind);
break;
}
case OP_ATOMIC_LOAD_I1:
case OP_ATOMIC_LOAD_I2:
case OP_ATOMIC_LOAD_I4:
case OP_ATOMIC_LOAD_I8:
case OP_ATOMIC_LOAD_U1:
case OP_ATOMIC_LOAD_U2:
case OP_ATOMIC_LOAD_U4:
case OP_ATOMIC_LOAD_U8:
case OP_ATOMIC_LOAD_R4:
case OP_ATOMIC_LOAD_R8: {
int size;
gboolean sext, zext;
LLVMTypeRef t;
gboolean is_faulting = (ins->flags & MONO_INST_FAULT) != 0;
gboolean is_volatile = (ins->flags & MONO_INST_VOLATILE) != 0;
BarrierKind barrier = (BarrierKind) ins->backend.memory_barrier_kind;
LLVMValueRef index, addr;
t = load_store_to_llvm_type (ins->opcode, &size, &sext, &zext);
if (sext || zext)
dname = (char *)"";
if (ins->inst_offset != 0) {
index = LLVMConstInt (LLVMInt32Type (), ins->inst_offset / size, FALSE);
addr = LLVMBuildGEP (builder, convert (ctx, lhs, LLVMPointerType (t, 0)), &index, 1, "");
} else {
addr = lhs;
}
addr = convert (ctx, addr, LLVMPointerType (t, 0));
ARM64_ATOMIC_FENCE_FIX;
values [ins->dreg] = emit_load (ctx, bb, &builder, size, addr, lhs, dname, is_faulting, is_volatile, barrier);
ARM64_ATOMIC_FENCE_FIX;
if (sext)
values [ins->dreg] = LLVMBuildSExt (builder, values [ins->dreg], LLVMInt32Type (), dname);
else if (zext)
values [ins->dreg] = LLVMBuildZExt (builder, values [ins->dreg], LLVMInt32Type (), dname);
break;
}
case OP_ATOMIC_STORE_I1:
case OP_ATOMIC_STORE_I2:
case OP_ATOMIC_STORE_I4:
case OP_ATOMIC_STORE_I8:
case OP_ATOMIC_STORE_U1:
case OP_ATOMIC_STORE_U2:
case OP_ATOMIC_STORE_U4:
case OP_ATOMIC_STORE_U8:
case OP_ATOMIC_STORE_R4:
case OP_ATOMIC_STORE_R8: {
int size;
gboolean sext, zext;
LLVMTypeRef t;
gboolean is_faulting = (ins->flags & MONO_INST_FAULT) != 0;
gboolean is_volatile = (ins->flags & MONO_INST_VOLATILE) != 0;
BarrierKind barrier = (BarrierKind) ins->backend.memory_barrier_kind;
LLVMValueRef index, addr, value, base;
if (!values [ins->inst_destbasereg]) {
set_failure (ctx, "inst_destbasereg");
break;
}
t = load_store_to_llvm_type (ins->opcode, &size, &sext, &zext);
base = values [ins->inst_destbasereg];
index = LLVMConstInt (LLVMInt32Type (), ins->inst_offset / size, FALSE);
addr = LLVMBuildGEP (builder, convert (ctx, base, LLVMPointerType (t, 0)), &index, 1, "");
value = convert (ctx, values [ins->sreg1], t);
ARM64_ATOMIC_FENCE_FIX;
emit_store_general (ctx, bb, &builder, size, value, addr, base, is_faulting, is_volatile, barrier);
ARM64_ATOMIC_FENCE_FIX;
break;
}
case OP_RELAXED_NOP: {
#if defined(TARGET_AMD64) || defined(TARGET_X86)
call_intrins (ctx, INTRINS_SSE_PAUSE, NULL, "");
break;
#else
break;
#endif
}
case OP_TLS_GET: {
#if (defined(TARGET_AMD64) || defined(TARGET_X86)) && defined(__linux__)
#ifdef TARGET_AMD64
// 257 == FS segment register
LLVMTypeRef ptrtype = LLVMPointerType (IntPtrType (), 257);
#else
// 256 == GS segment register
LLVMTypeRef ptrtype = LLVMPointerType (IntPtrType (), 256);
#endif
// FIXME: XEN
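// Roughly: load from inttoptr (offset) in the segment's LLVM address space, i.e. a segment-relative TLS load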
values [ins->dreg] = LLVMBuildLoad (builder, LLVMBuildIntToPtr (builder, LLVMConstInt (IntPtrType (), ins->inst_offset, TRUE), ptrtype, ""), "");
#elif defined(TARGET_AMD64) && defined(TARGET_OSX)
/* See mono_amd64_emit_tls_get () */
int offset = mono_amd64_get_tls_gs_offset () + (ins->inst_offset * 8);
// 256 == GS segment register
LLVMTypeRef ptrtype = LLVMPointerType (IntPtrType (), 256);
values [ins->dreg] = LLVMBuildLoad (builder, LLVMBuildIntToPtr (builder, LLVMConstInt (IntPtrType (), offset, TRUE), ptrtype, ""), "");
#else
set_failure (ctx, "opcode tls-get");
break;
#endif
break;
}
case OP_GC_SAFE_POINT: {
LLVMValueRef val, cmp, callee, call;
LLVMBasicBlockRef poll_bb, cont_bb;
LLVMValueRef args [2];
static LLVMTypeRef sig;
const char *icall_name = "mono_threads_state_poll";
/*
* Create the cold wrapper around the icall, along with a managed method for it so
* unwinding works.
*/
if (!cfg->compile_aot && !ctx->module->gc_poll_cold_wrapper_compiled) {
ERROR_DECL (error);
/* Compiling a method here is a bit ugly, but it works */
MonoMethod *wrapper = mono_marshal_get_llvm_func_wrapper (LLVM_FUNC_WRAPPER_GC_POLL);
ctx->module->gc_poll_cold_wrapper_compiled = mono_jit_compile_method (wrapper, error);
mono_error_assert_ok (error);
}
if (!sig)
sig = LLVMFunctionType0 (LLVMVoidType (), FALSE);
/*
* if (!*sreg1)
* mono_threads_state_poll ();
*/
val = mono_llvm_build_load (builder, convert (ctx, lhs, LLVMPointerType (IntPtrType (), 0)), "", TRUE);
cmp = LLVMBuildICmp (builder, LLVMIntEQ, val, LLVMConstNull (LLVMTypeOf (val)), "");
poll_bb = gen_bb (ctx, "POLL_BB");
cont_bb = gen_bb (ctx, "CONT_BB");
args [0] = cmp;
args [1] = LLVMConstInt (LLVMInt1Type (), 1, FALSE);
cmp = call_intrins (ctx, INTRINS_EXPECT_I1, args, "");
mono_llvm_build_weighted_branch (builder, cmp, cont_bb, poll_bb, 1000, 1);
ctx->builder = builder = create_builder (ctx);
LLVMPositionBuilderAtEnd (builder, poll_bb);
if (ctx->cfg->compile_aot) {
callee = get_callee (ctx, sig, MONO_PATCH_INFO_JIT_ICALL_ID, GUINT_TO_POINTER (MONO_JIT_ICALL_mono_threads_state_poll));
call = LLVMBuildCall (builder, callee, NULL, 0, "");
} else {
callee = get_jit_callee (ctx, icall_name, sig, MONO_PATCH_INFO_ABS, ctx->module->gc_poll_cold_wrapper_compiled);
call = LLVMBuildCall (builder, callee, NULL, 0, "");
set_call_cold_cconv (call);
}
LLVMBuildBr (builder, cont_bb);
ctx->builder = builder = create_builder (ctx);
LLVMPositionBuilderAtEnd (builder, cont_bb);
ctx->bblocks [bb->block_num].end_bblock = cont_bb;
break;
}
/*
* Overflow opcodes.
*/
case OP_IADD_OVF:
case OP_IADD_OVF_UN:
case OP_ISUB_OVF:
case OP_ISUB_OVF_UN:
case OP_IMUL_OVF:
case OP_IMUL_OVF_UN:
case OP_LADD_OVF:
case OP_LADD_OVF_UN:
case OP_LSUB_OVF:
case OP_LSUB_OVF_UN:
case OP_LMUL_OVF:
case OP_LMUL_OVF_UN: {
LLVMValueRef args [2], val, ovf;
IntrinsicId intrins;
args [0] = convert (ctx, lhs, op_to_llvm_type (ins->opcode));
args [1] = convert (ctx, rhs, op_to_llvm_type (ins->opcode));
intrins = ovf_op_to_intrins (ins->opcode);
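/* The .with.overflow intrinsics return a { result, overflow bit } pair */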
val = call_intrins (ctx, intrins, args, "");
values [ins->dreg] = LLVMBuildExtractValue (builder, val, 0, dname);
ovf = LLVMBuildExtractValue (builder, val, 1, "");
emit_cond_system_exception (ctx, bb, ins->inst_exc_name, ovf, FALSE);
if (!ctx_ok (ctx))
break;
builder = ctx->builder;
break;
}
/*
* Valuetypes.
* We currently model them using arrays. Promotion to local vregs is
* disabled for them in mono_handle_global_vregs () in the LLVM case,
* so we always have an entry in cfg->varinfo for them.
* FIXME: Is this needed ?
*/
case OP_VZERO: {
MonoClass *klass = ins->klass;
if (!klass) {
// FIXME:
set_failure (ctx, "!klass");
break;
}
if (!addresses [ins->dreg])
addresses [ins->dreg] = build_named_alloca (ctx, m_class_get_byval_arg (klass), "vzero");
LLVMValueRef ptr = LLVMBuildBitCast (builder, addresses [ins->dreg], LLVMPointerType (LLVMInt8Type (), 0), "");
emit_memset (ctx, builder, ptr, const_int32 (mono_class_value_size (klass, NULL)), 0);
break;
}
case OP_DUMMY_VZERO:
break;
case OP_STOREV_MEMBASE:
case OP_LOADV_MEMBASE:
case OP_VMOVE: {
MonoClass *klass = ins->klass;
LLVMValueRef src = NULL, dst, args [5];
gboolean done = FALSE;
gboolean is_volatile = FALSE;
if (!klass) {
// FIXME:
set_failure (ctx, "!klass");
break;
}
if (mini_is_gsharedvt_klass (klass)) {
// FIXME:
set_failure (ctx, "gsharedvt");
break;
}
switch (ins->opcode) {
case OP_STOREV_MEMBASE:
if (cfg->gen_write_barriers && m_class_has_references (klass) && ins->inst_destbasereg != cfg->frame_reg &&
LLVMGetInstructionOpcode (values [ins->inst_destbasereg]) != LLVMAlloca) {
/* Decomposed earlier */
g_assert_not_reached ();
break;
}
if (!addresses [ins->sreg1]) {
/* SIMD */
g_assert (values [ins->sreg1]);
dst = convert (ctx, LLVMBuildAdd (builder, convert (ctx, values [ins->inst_destbasereg], IntPtrType ()), LLVMConstInt (IntPtrType (), ins->inst_offset, FALSE), ""), LLVMPointerType (type_to_llvm_type (ctx, m_class_get_byval_arg (klass)), 0));
LLVMBuildStore (builder, values [ins->sreg1], dst);
done = TRUE;
} else {
src = LLVMBuildBitCast (builder, addresses [ins->sreg1], LLVMPointerType (LLVMInt8Type (), 0), "");
dst = convert (ctx, LLVMBuildAdd (builder, convert (ctx, values [ins->inst_destbasereg], IntPtrType ()), LLVMConstInt (IntPtrType (), ins->inst_offset, FALSE), ""), LLVMPointerType (LLVMInt8Type (), 0));
}
break;
case OP_LOADV_MEMBASE:
if (!addresses [ins->dreg])
addresses [ins->dreg] = build_alloca (ctx, m_class_get_byval_arg (klass));
src = convert (ctx, LLVMBuildAdd (builder, convert (ctx, values [ins->inst_basereg], IntPtrType ()), LLVMConstInt (IntPtrType (), ins->inst_offset, FALSE), ""), LLVMPointerType (LLVMInt8Type (), 0));
dst = LLVMBuildBitCast (builder, addresses [ins->dreg], LLVMPointerType (LLVMInt8Type (), 0), "");
break;
case OP_VMOVE:
if (!addresses [ins->sreg1])
addresses [ins->sreg1] = build_alloca (ctx, m_class_get_byval_arg (klass));
if (!addresses [ins->dreg])
addresses [ins->dreg] = build_alloca (ctx, m_class_get_byval_arg (klass));
src = LLVMBuildBitCast (builder, addresses [ins->sreg1], LLVMPointerType (LLVMInt8Type (), 0), "");
dst = LLVMBuildBitCast (builder, addresses [ins->dreg], LLVMPointerType (LLVMInt8Type (), 0), "");
break;
default:
g_assert_not_reached ();
}
if (!ctx_ok (ctx))
break;
if (done)
break;
#ifdef TARGET_WASM
is_volatile = m_class_has_references (klass);
#endif
int aindex = 0;
args [aindex ++] = dst;
args [aindex ++] = src;
args [aindex ++] = LLVMConstInt (LLVMInt32Type (), mono_class_value_size (klass, NULL), FALSE);
args [aindex ++] = LLVMConstInt (LLVMInt1Type (), is_volatile ? 1 : 0, FALSE);
call_intrins (ctx, INTRINS_MEMCPY, args, "");
break;
}
case OP_LLVM_OUTARG_VT: {
LLVMArgInfo *ainfo = (LLVMArgInfo*)ins->inst_p0;
MonoType *t = mini_get_underlying_type (ins->inst_vtype);
if (ainfo->storage == LLVMArgGsharedvtVariable) {
MonoInst *var = get_vreg_to_inst (cfg, ins->sreg1);
if (var && var->opcode == OP_GSHAREDVT_LOCAL) {
addresses [ins->dreg] = convert (ctx, emit_gsharedvt_ldaddr (ctx, var->dreg), LLVMPointerType (IntPtrType (), 0));
} else {
g_assert (addresses [ins->sreg1]);
addresses [ins->dreg] = addresses [ins->sreg1];
}
} else if (ainfo->storage == LLVMArgGsharedvtFixed) {
if (!addresses [ins->sreg1]) {
addresses [ins->sreg1] = build_alloca (ctx, t);
g_assert (values [ins->sreg1]);
}
LLVMBuildStore (builder, convert (ctx, values [ins->sreg1], LLVMGetElementType (LLVMTypeOf (addresses [ins->sreg1]))), addresses [ins->sreg1]);
addresses [ins->dreg] = addresses [ins->sreg1];
} else {
if (!addresses [ins->sreg1]) {
addresses [ins->sreg1] = build_named_alloca (ctx, t, "llvm_outarg_vt");
g_assert (values [ins->sreg1]);
LLVMBuildStore (builder, convert (ctx, values [ins->sreg1], type_to_llvm_type (ctx, t)), addresses [ins->sreg1]);
addresses [ins->dreg] = addresses [ins->sreg1];
} else if (ainfo->storage == LLVMArgVtypeAddr || values [ins->sreg1] == addresses [ins->sreg1]) {
/* LLVMArgVtypeByRef/LLVMArgVtypeAddr, have to make a copy */
addresses [ins->dreg] = build_alloca (ctx, t);
LLVMValueRef v = LLVMBuildLoad (builder, addresses [ins->sreg1], "llvm_outarg_vt_copy");
LLVMBuildStore (builder, convert (ctx, v, type_to_llvm_type (ctx, t)), addresses [ins->dreg]);
} else {
if (values [ins->sreg1]) {
LLVMTypeRef src_t = LLVMTypeOf (values [ins->sreg1]);
LLVMValueRef dst = convert (ctx, addresses [ins->sreg1], LLVMPointerType (src_t, 0));
LLVMBuildStore (builder, values [ins->sreg1], dst);
}
addresses [ins->dreg] = addresses [ins->sreg1];
}
}
break;
}
case OP_OBJC_GET_SELECTOR: {
const char *name = (const char*)ins->inst_p0;
LLVMValueRef var;
if (!ctx->module->objc_selector_to_var) {
ctx->module->objc_selector_to_var = g_hash_table_new_full (g_str_hash, g_str_equal, g_free, NULL);
LLVMValueRef info_var = LLVMAddGlobal (ctx->lmodule, LLVMArrayType (LLVMInt8Type (), 8), "@OBJC_IMAGE_INFO");
int32_t objc_imageinfo [] = { 0, 16 };
LLVMSetInitializer (info_var, mono_llvm_create_constant_data_array ((uint8_t *) &objc_imageinfo, 8));
LLVMSetLinkage (info_var, LLVMPrivateLinkage);
LLVMSetExternallyInitialized (info_var, TRUE);
LLVMSetSection (info_var, "__DATA, __objc_imageinfo,regular,no_dead_strip");
LLVMSetAlignment (info_var, sizeof (target_mgreg_t));
mark_as_used (ctx->module, info_var);
}
var = (LLVMValueRef)g_hash_table_lookup (ctx->module->objc_selector_to_var, name);
if (!var) {
LLVMValueRef indexes [16];
LLVMValueRef name_var = LLVMAddGlobal (ctx->lmodule, LLVMArrayType (LLVMInt8Type (), strlen (name) + 1), "@OBJC_METH_VAR_NAME_");
LLVMSetInitializer (name_var, mono_llvm_create_constant_data_array ((const uint8_t*)name, strlen (name) + 1));
LLVMSetLinkage (name_var, LLVMPrivateLinkage);
LLVMSetSection (name_var, "__TEXT,__objc_methname,cstring_literals");
mark_as_used (ctx->module, name_var);
LLVMValueRef ref_var = LLVMAddGlobal (ctx->lmodule, LLVMPointerType (LLVMInt8Type (), 0), "@OBJC_SELECTOR_REFERENCES_");
indexes [0] = LLVMConstInt (LLVMInt32Type (), 0, 0);
indexes [1] = LLVMConstInt (LLVMInt32Type (), 0, 0);
LLVMSetInitializer (ref_var, LLVMConstGEP (name_var, indexes, 2));
LLVMSetLinkage (ref_var, LLVMPrivateLinkage);
LLVMSetExternallyInitialized (ref_var, TRUE);
LLVMSetSection (ref_var, "__DATA, __objc_selrefs, literal_pointers, no_dead_strip");
LLVMSetAlignment (ref_var, sizeof (target_mgreg_t));
mark_as_used (ctx->module, ref_var);
g_hash_table_insert (ctx->module->objc_selector_to_var, g_strdup (name), ref_var);
var = ref_var;
}
values [ins->dreg] = LLVMBuildLoad (builder, var, "");
break;
}
#if defined(TARGET_X86) || defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_WASM)
case OP_EXTRACTX_U2:
case OP_XEXTRACT_I1:
case OP_XEXTRACT_I2:
case OP_XEXTRACT_I4:
case OP_XEXTRACT_I8:
case OP_XEXTRACT_R4:
case OP_XEXTRACT_R8:
case OP_EXTRACT_I1:
case OP_EXTRACT_I2:
case OP_EXTRACT_I4:
case OP_EXTRACT_I8:
case OP_EXTRACT_R4:
case OP_EXTRACT_R8: {
MonoTypeEnum mono_elt_t = inst_c1_type (ins);
LLVMTypeRef elt_t = primitive_type_to_llvm_type (mono_elt_t);
gboolean sext = FALSE;
gboolean zext = FALSE;
switch (mono_elt_t) {
case MONO_TYPE_I1: case MONO_TYPE_I2: sext = TRUE; break;
case MONO_TYPE_U1: case MONO_TYPE_U2: zext = TRUE; break;
}
LLVMValueRef element_ix = NULL;
switch (ins->opcode) {
case OP_XEXTRACT_I1:
case OP_XEXTRACT_I2:
case OP_XEXTRACT_I4:
case OP_XEXTRACT_R4:
case OP_XEXTRACT_R8:
case OP_XEXTRACT_I8:
element_ix = rhs;
break;
default:
element_ix = const_int32 (ins->inst_c0);
}
LLVMTypeRef lhs_t = LLVMTypeOf (lhs);
int vec_width = mono_llvm_get_prim_size_bits (lhs_t);
int elem_width = mono_llvm_get_prim_size_bits (elt_t);
int elements = vec_width / elem_width;
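/* Mask the index into range: an out-of-range extractelement index is undefined in LLVM */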
element_ix = LLVMBuildAnd (builder, element_ix, const_int32 (elements - 1), "extract");
LLVMTypeRef ret_t = LLVMVectorType (elt_t, elements);
LLVMValueRef src = LLVMBuildBitCast (builder, lhs, ret_t, "extract");
LLVMValueRef result = LLVMBuildExtractElement (builder, src, element_ix, "extract");
if (zext)
result = LLVMBuildZExt (builder, result, i4_t, "extract_zext");
else if (sext)
result = LLVMBuildSExt (builder, result, i4_t, "extract_sext");
values [ins->dreg] = result;
break;
}
case OP_XINSERT_I1:
case OP_XINSERT_I2:
case OP_XINSERT_I4:
case OP_XINSERT_I8:
case OP_XINSERT_R4:
case OP_XINSERT_R8: {
MonoTypeEnum primty = inst_c1_type (ins);
LLVMTypeRef ret_t = simd_class_to_llvm_type (ctx, ins->klass);
LLVMTypeRef elem_t = LLVMGetElementType (ret_t);
int elements = LLVMGetVectorSize (ret_t);
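/* Mask the index into range: an out-of-range insertelement index is undefined in LLVM */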
LLVMValueRef element_ix = LLVMBuildAnd (builder, arg3, const_int32 (elements - 1), "xinsert");
LLVMValueRef vec = convert (ctx, lhs, ret_t);
LLVMValueRef val = convert_full (ctx, rhs, elem_t, primitive_type_is_unsigned (primty));
LLVMValueRef result = LLVMBuildInsertElement (builder, vec, val, element_ix, "xinsert");
values [ins->dreg] = result;
break;
}
case OP_EXPAND_I1:
case OP_EXPAND_I2:
case OP_EXPAND_I4:
case OP_EXPAND_I8:
case OP_EXPAND_R4:
case OP_EXPAND_R8: {
LLVMTypeRef t;
LLVMValueRef mask [MAX_VECTOR_ELEMS], v;
int i;
t = simd_class_to_llvm_type (ctx, ins->klass);
for (i = 0; i < MAX_VECTOR_ELEMS; ++i)
mask [i] = LLVMConstInt (LLVMInt32Type (), 0, FALSE);
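/* Broadcast: insert the scalar into lane 0, then shuffle with an all-zeroes mask so every lane reads lane 0 */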
v = convert (ctx, values [ins->sreg1], LLVMGetElementType (t));
values [ins->dreg] = LLVMBuildInsertElement (builder, LLVMConstNull (t), v, LLVMConstInt (LLVMInt32Type (), 0, FALSE), "");
values [ins->dreg] = LLVMBuildShuffleVector (builder, values [ins->dreg], LLVMGetUndef (t), LLVMConstVector (mask, LLVMGetVectorSize (t)), "");
break;
}
case OP_XZERO: {
values [ins->dreg] = LLVMConstNull (type_to_llvm_type (ctx, m_class_get_byval_arg (ins->klass)));
break;
}
case OP_LOADX_MEMBASE: {
LLVMTypeRef t = type_to_llvm_type (ctx, m_class_get_byval_arg (ins->klass));
LLVMValueRef src;
src = convert (ctx, LLVMBuildAdd (builder, convert (ctx, values [ins->inst_basereg], IntPtrType ()), LLVMConstInt (IntPtrType (), ins->inst_offset, FALSE), ""), LLVMPointerType (t, 0));
values [ins->dreg] = mono_llvm_build_aligned_load (builder, src, "", FALSE, 1);
break;
}
case OP_STOREX_MEMBASE: {
LLVMTypeRef t = LLVMTypeOf (values [ins->sreg1]);
LLVMValueRef dest;
dest = convert (ctx, LLVMBuildAdd (builder, convert (ctx, values [ins->inst_destbasereg], IntPtrType ()), LLVMConstInt (IntPtrType (), ins->inst_offset, FALSE), ""), LLVMPointerType (t, 0));
mono_llvm_build_aligned_store (builder, values [ins->sreg1], dest, FALSE, 1);
break;
}
case OP_XBINOP:
case OP_XBINOP_SCALAR:
case OP_XBINOP_BYSCALAR: {
gboolean scalar = ins->opcode == OP_XBINOP_SCALAR;
gboolean byscalar = ins->opcode == OP_XBINOP_BYSCALAR;
LLVMValueRef result = NULL;
LLVMValueRef args [] = { lhs, rhs };
if (scalar)
for (int i = 0; i < 2; ++i)
args [i] = scalar_from_vector (ctx, args [i]);
if (byscalar) {
LLVMTypeRef t = LLVMTypeOf (args [0]);
unsigned int elems = LLVMGetVectorSize (t);
args [1] = broadcast_element (ctx, scalar_from_vector (ctx, args [1]), elems);
}
LLVMValueRef l = args [0];
LLVMValueRef r = args [1];
switch (ins->inst_c0) {
case OP_IADD:
result = LLVMBuildAdd (builder, l, r, "");
break;
case OP_ISUB:
result = LLVMBuildSub (builder, l, r, "");
break;
case OP_IMUL:
result = LLVMBuildMul (builder, l, r, "");
break;
case OP_IAND:
result = LLVMBuildAnd (builder, l, r, "");
break;
case OP_IOR:
result = LLVMBuildOr (builder, l, r, "");
break;
case OP_IXOR:
result = LLVMBuildXor (builder, l, r, "");
break;
case OP_FADD:
result = LLVMBuildFAdd (builder, l, r, "");
break;
case OP_FSUB:
result = LLVMBuildFSub (builder, l, r, "");
break;
case OP_FMUL:
result = LLVMBuildFMul (builder, l, r, "");
break;
case OP_FDIV:
result = LLVMBuildFDiv (builder, l, r, "");
break;
case OP_FMAX:
case OP_FMIN: {
LLVMValueRef args [] = { l, r };
#if defined(TARGET_X86) || defined(TARGET_AMD64)
LLVMTypeRef t = LLVMTypeOf (l);
LLVMTypeRef elem_t = LLVMGetElementType (t);
unsigned int elems = LLVMGetVectorSize (t);
unsigned int elem_bits = mono_llvm_get_prim_size_bits (elem_t);
unsigned int v_size = elems * elem_bits;
if (v_size == 128) {
gboolean is_r4 = ins->inst_c1 == MONO_TYPE_R4;
int iid = -1;
if (ins->inst_c0 == OP_FMAX) {
if (elems == 1)
iid = is_r4 ? INTRINS_SSE_MAXSS : INTRINS_SSE_MAXSD;
else
iid = is_r4 ? INTRINS_SSE_MAXPS : INTRINS_SSE_MAXPD;
} else {
if (elems == 1)
iid = is_r4 ? INTRINS_SSE_MINSS : INTRINS_SSE_MINSD;
else
iid = is_r4 ? INTRINS_SSE_MINPS : INTRINS_SSE_MINPD;
}
result = call_intrins (ctx, iid, args, dname);
} else {
LLVMRealPredicate op = ins->inst_c0 == OP_FMAX ? LLVMRealUGE : LLVMRealULE;
LLVMValueRef cmp = LLVMBuildFCmp (builder, op, l, r, "");
result = LLVMBuildSelect (builder, cmp, l, r, "");
}
#elif defined(TARGET_ARM64)
IntrinsicId iid = ins->inst_c0 == OP_FMAX ? INTRINS_AARCH64_ADV_SIMD_FMAX : INTRINS_AARCH64_ADV_SIMD_FMIN;
llvm_ovr_tag_t ovr_tag = ovr_tag_from_mono_vector_class (ins->klass);
result = call_overloaded_intrins (ctx, iid, ovr_tag, args, "");
#else
NOT_IMPLEMENTED;
#endif
break;
}
case OP_IMAX:
case OP_IMIN: {
gboolean is_unsigned = ins->inst_c1 == MONO_TYPE_U1 || ins->inst_c1 == MONO_TYPE_U2 || ins->inst_c1 == MONO_TYPE_U4 || ins->inst_c1 == MONO_TYPE_U8;
LLVMIntPredicate op;
switch (ins->inst_c0) {
case OP_IMAX:
op = is_unsigned ? LLVMIntUGT : LLVMIntSGT;
break;
case OP_IMIN:
op = is_unsigned ? LLVMIntULT : LLVMIntSLT;
break;
default:
g_assert_not_reached ();
}
#if defined(TARGET_ARM64)
if ((ins->inst_c1 == MONO_TYPE_U8) || (ins->inst_c1 == MONO_TYPE_I8)) {
LLVMValueRef cmp = LLVMBuildICmp (builder, op, l, r, "");
result = LLVMBuildSelect (builder, cmp, l, r, "");
} else {
IntrinsicId iid;
switch (ins->inst_c0) {
case OP_IMAX:
iid = is_unsigned ? INTRINS_AARCH64_ADV_SIMD_UMAX : INTRINS_AARCH64_ADV_SIMD_SMAX;
break;
case OP_IMIN:
iid = is_unsigned ? INTRINS_AARCH64_ADV_SIMD_UMIN : INTRINS_AARCH64_ADV_SIMD_SMIN;
break;
default:
g_assert_not_reached ();
}
LLVMValueRef args [] = { l, r };
llvm_ovr_tag_t ovr_tag = ovr_tag_from_mono_vector_class (ins->klass);
result = call_overloaded_intrins (ctx, iid, ovr_tag, args, "");
}
#else
LLVMValueRef cmp = LLVMBuildICmp (builder, op, l, r, "");
result = LLVMBuildSelect (builder, cmp, l, r, "");
#endif
break;
}
default:
g_assert_not_reached ();
}
if (scalar)
result = vector_from_scalar (ctx, LLVMTypeOf (lhs), result);
values [ins->dreg] = result;
break;
}
case OP_XBINOP_FORCEINT: {
LLVMTypeRef t = LLVMTypeOf (lhs);
LLVMTypeRef elem_t = LLVMGetElementType (t);
unsigned int elems = LLVMGetVectorSize (t);
unsigned int elem_bits = mono_llvm_get_prim_size_bits (elem_t);
LLVMTypeRef intermediate_elem_t = LLVMIntType (elem_bits);
LLVMTypeRef intermediate_t = LLVMVectorType (intermediate_elem_t, elems);
LLVMValueRef lhs_int = convert (ctx, lhs, intermediate_t);
LLVMValueRef rhs_int = convert (ctx, rhs, intermediate_t);
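/* Bitwise ops are only defined on integer vectors, so bitcast the operands to same-width integer vectors first */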
LLVMValueRef result = NULL;
switch (ins->inst_c0) {
case XBINOP_FORCEINT_and:
result = LLVMBuildAnd (builder, lhs_int, rhs_int, "");
break;
case XBINOP_FORCEINT_or:
result = LLVMBuildOr (builder, lhs_int, rhs_int, "");
break;
case XBINOP_FORCEINT_ornot:
result = LLVMBuildNot (builder, rhs_int, "");
result = LLVMBuildOr (builder, result, lhs_int, "");
break;
case XBINOP_FORCEINT_xor:
result = LLVMBuildXor (builder, lhs_int, rhs_int, "");
break;
default:
g_assert_not_reached ();
}
values [ins->dreg] = LLVMBuildBitCast (builder, result, t, "");
break;
}
case OP_CREATE_SCALAR:
case OP_CREATE_SCALAR_UNSAFE: {
MonoTypeEnum primty = inst_c1_type (ins);
LLVMTypeRef type = simd_class_to_llvm_type (ctx, ins->klass);
// OP_CREATE_SCALAR zeroes the upper elements, while OP_CREATE_SCALAR_UNSAFE
// leaves them undefined, so they may contain garbage values
LLVMValueRef vector = (ins->opcode == OP_CREATE_SCALAR) ? LLVMConstNull (type) : LLVMGetUndef (type);
LLVMValueRef val = convert_full (ctx, lhs, primitive_type_to_llvm_type (primty), primitive_type_is_unsigned (primty));
values [ins->dreg] = LLVMBuildInsertElement (builder, vector, val, const_int32 (0), "");
break;
}
case OP_INSERT_I1:
values [ins->dreg] = LLVMBuildInsertElement (builder, values [ins->sreg1], convert (ctx, values [ins->sreg2], LLVMInt8Type ()), LLVMConstInt (LLVMInt32Type (), ins->inst_c0, FALSE), dname);
break;
case OP_INSERT_I2:
values [ins->dreg] = LLVMBuildInsertElement (builder, values [ins->sreg1], convert (ctx, values [ins->sreg2], LLVMInt16Type ()), LLVMConstInt (LLVMInt32Type (), ins->inst_c0, FALSE), dname);
break;
case OP_INSERT_I4:
values [ins->dreg] = LLVMBuildInsertElement (builder, values [ins->sreg1], convert (ctx, values [ins->sreg2], LLVMInt32Type ()), LLVMConstInt (LLVMInt32Type (), ins->inst_c0, FALSE), dname);
break;
case OP_INSERT_I8:
values [ins->dreg] = LLVMBuildInsertElement (builder, values [ins->sreg1], convert (ctx, values [ins->sreg2], LLVMInt64Type ()), LLVMConstInt (LLVMInt32Type (), ins->inst_c0, FALSE), dname);
break;
case OP_INSERT_R4:
values [ins->dreg] = LLVMBuildInsertElement (builder, values [ins->sreg1], convert (ctx, values [ins->sreg2], LLVMFloatType ()), LLVMConstInt (LLVMInt32Type (), ins->inst_c0, FALSE), dname);
break;
case OP_INSERT_R8:
values [ins->dreg] = LLVMBuildInsertElement (builder, values [ins->sreg1], convert (ctx, values [ins->sreg2], LLVMDoubleType ()), LLVMConstInt (LLVMInt32Type (), ins->inst_c0, FALSE), dname);
break;
case OP_XCAST: {
LLVMTypeRef t = simd_class_to_llvm_type (ctx, ins->klass);
values [ins->dreg] = LLVMBuildBitCast (builder, lhs, t, "");
break;
}
case OP_XCONCAT: {
values [ins->dreg] = concatenate_vectors (ctx, lhs, rhs);
break;
}
case OP_XINSERT_LOWER:
case OP_XINSERT_UPPER: {
const char *oname = ins->opcode == OP_XINSERT_LOWER ? "xinsert_lower" : "xinsert_upper";
int ix = ins->opcode == OP_XINSERT_LOWER ? 0 : 1;
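/* View the vector as two half-width integer lanes and overwrite lane 0 (lower) or lane 1 (upper) */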
LLVMTypeRef src_t = LLVMTypeOf (lhs);
unsigned int width = mono_llvm_get_prim_size_bits (src_t);
LLVMTypeRef int_t = LLVMIntType (width / 2);
LLVMTypeRef intvec_t = LLVMVectorType (int_t, 2);
LLVMValueRef insval = LLVMBuildBitCast (builder, rhs, int_t, oname);
LLVMValueRef val = LLVMBuildBitCast (builder, lhs, intvec_t, oname);
val = LLVMBuildInsertElement (builder, val, insval, const_int32 (ix), oname);
val = LLVMBuildBitCast (builder, val, src_t, oname);
values [ins->dreg] = val;
break;
}
case OP_XLOWER:
case OP_XUPPER: {
const char *oname = ins->opcode == OP_XLOWER ? "xlower" : "xupper";
LLVMTypeRef src_t = LLVMTypeOf (lhs);
unsigned int elems = LLVMGetVectorSize (src_t);
g_assert (elems >= 2 && elems <= MAX_VECTOR_ELEMS);
unsigned int ret_elems = elems / 2;
int startix = ins->opcode == OP_XLOWER ? 0 : ret_elems;
LLVMValueRef val = LLVMBuildShuffleVector (builder, lhs, LLVMGetUndef (src_t), create_const_vector_i32 (&mask_0_incr_1 [startix], ret_elems), oname);
values [ins->dreg] = val;
break;
}
case OP_XWIDEN:
case OP_XWIDEN_UNSAFE: {
const char *oname = ins->opcode == OP_XWIDEN ? "xwiden" : "xwiden_unsafe";
LLVMTypeRef src_t = LLVMTypeOf (lhs);
unsigned int elems = LLVMGetVectorSize (src_t);
g_assert (elems <= MAX_VECTOR_ELEMS / 2);
unsigned int ret_elems = elems * 2;
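/* OP_XWIDEN zeroes the new upper half, OP_XWIDEN_UNSAFE leaves it undefined */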
LLVMValueRef upper = ins->opcode == OP_XWIDEN ? LLVMConstNull (src_t) : LLVMGetUndef (src_t);
LLVMValueRef val = LLVMBuildShuffleVector (builder, lhs, upper, create_const_vector_i32 (mask_0_incr_1, ret_elems), oname);
values [ins->dreg] = val;
break;
}
#endif // defined(TARGET_X86) || defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_WASM)
#if defined(TARGET_X86) || defined(TARGET_AMD64) || defined(TARGET_WASM)
case OP_PADDB:
case OP_PADDW:
case OP_PADDD:
case OP_PADDQ:
values [ins->dreg] = LLVMBuildAdd (builder, lhs, rhs, "");
break;
case OP_ADDPD:
case OP_ADDPS:
values [ins->dreg] = LLVMBuildFAdd (builder, lhs, rhs, "");
break;
case OP_PSUBB:
case OP_PSUBW:
case OP_PSUBD:
case OP_PSUBQ:
values [ins->dreg] = LLVMBuildSub (builder, lhs, rhs, "");
break;
case OP_SUBPD:
case OP_SUBPS:
values [ins->dreg] = LLVMBuildFSub (builder, lhs, rhs, "");
break;
case OP_MULPD:
case OP_MULPS:
values [ins->dreg] = LLVMBuildFMul (builder, lhs, rhs, "");
break;
case OP_DIVPD:
case OP_DIVPS:
values [ins->dreg] = LLVMBuildFDiv (builder, lhs, rhs, "");
break;
case OP_PAND:
values [ins->dreg] = LLVMBuildAnd (builder, lhs, rhs, "");
break;
case OP_POR:
values [ins->dreg] = LLVMBuildOr (builder, lhs, rhs, "");
break;
case OP_PXOR:
values [ins->dreg] = LLVMBuildXor (builder, lhs, rhs, "");
break;
case OP_PMULW:
case OP_PMULD:
values [ins->dreg] = LLVMBuildMul (builder, lhs, rhs, "");
break;
case OP_ANDPS:
case OP_ANDNPS:
case OP_ORPS:
case OP_XORPS:
case OP_ANDPD:
case OP_ANDNPD:
case OP_ORPD:
case OP_XORPD: {
LLVMTypeRef t, rt;
LLVMValueRef v = NULL;
switch (ins->opcode) {
case OP_ANDPS:
case OP_ANDNPS:
case OP_ORPS:
case OP_XORPS:
t = LLVMVectorType (LLVMInt32Type (), 4);
rt = LLVMVectorType (LLVMFloatType (), 4);
break;
case OP_ANDPD:
case OP_ANDNPD:
case OP_ORPD:
case OP_XORPD:
t = LLVMVectorType (LLVMInt64Type (), 2);
rt = LLVMVectorType (LLVMDoubleType (), 2);
break;
default:
t = LLVMInt32Type ();
rt = LLVMInt32Type ();
g_assert_not_reached ();
}
lhs = LLVMBuildBitCast (builder, lhs, t, "");
rhs = LLVMBuildBitCast (builder, rhs, t, "");
switch (ins->opcode) {
case OP_ANDPS:
case OP_ANDPD:
v = LLVMBuildAnd (builder, lhs, rhs, "");
break;
case OP_ORPS:
case OP_ORPD:
v = LLVMBuildOr (builder, lhs, rhs, "");
break;
case OP_XORPS:
case OP_XORPD:
v = LLVMBuildXor (builder, lhs, rhs, "");
break;
case OP_ANDNPS:
case OP_ANDNPD:
v = LLVMBuildAnd (builder, rhs, LLVMBuildNot (builder, lhs, ""), "");
break;
}
values [ins->dreg] = LLVMBuildBitCast (builder, v, rt, "");
break;
}
case OP_PMIND_UN:
case OP_PMINW_UN:
case OP_PMINB_UN: {
LLVMValueRef cmp = LLVMBuildICmp (builder, LLVMIntULT, lhs, rhs, "");
values [ins->dreg] = LLVMBuildSelect (builder, cmp, lhs, rhs, "");
break;
}
case OP_PMAXD_UN:
case OP_PMAXW_UN:
case OP_PMAXB_UN: {
LLVMValueRef cmp = LLVMBuildICmp (builder, LLVMIntUGT, lhs, rhs, "");
values [ins->dreg] = LLVMBuildSelect (builder, cmp, lhs, rhs, "");
break;
}
case OP_PMINW: {
LLVMValueRef cmp = LLVMBuildICmp (builder, LLVMIntSLT, lhs, rhs, "");
values [ins->dreg] = LLVMBuildSelect (builder, cmp, lhs, rhs, "");
break;
}
case OP_PMAXW: {
LLVMValueRef cmp = LLVMBuildICmp (builder, LLVMIntSGT, lhs, rhs, "");
values [ins->dreg] = LLVMBuildSelect (builder, cmp, lhs, rhs, "");
break;
}
case OP_PAVGB_UN:
case OP_PAVGW_UN: {
LLVMValueRef ones_vec;
LLVMValueRef ones [MAX_VECTOR_ELEMS];
int vector_size = LLVMGetVectorSize (LLVMTypeOf (lhs));
LLVMTypeRef ext_elem_type = vector_size == 16 ? LLVMInt16Type () : LLVMInt32Type ();
for (int i = 0; i < MAX_VECTOR_ELEMS; ++i)
ones [i] = LLVMConstInt (ext_elem_type, 1, FALSE);
ones_vec = LLVMConstVector (ones, vector_size);
LLVMValueRef val;
LLVMTypeRef ext_type = LLVMVectorType (ext_elem_type, vector_size);
/* Have to increase the vector element size to prevent overflows */
/* res = trunc ((zext (lhs) + zext (rhs) + 1) >> 1) */
val = LLVMBuildAdd (builder, LLVMBuildZExt (builder, lhs, ext_type, ""), LLVMBuildZExt (builder, rhs, ext_type, ""), "");
val = LLVMBuildAdd (builder, val, ones_vec, "");
val = LLVMBuildLShr (builder, val, ones_vec, "");
values [ins->dreg] = LLVMBuildTrunc (builder, val, LLVMTypeOf (lhs), "");
break;
}
case OP_PCMPEQB:
case OP_PCMPEQW:
case OP_PCMPEQD:
case OP_PCMPEQQ:
case OP_PCMPGTB: {
LLVMValueRef pcmp;
LLVMTypeRef retType;
LLVMIntPredicate cmpOp;
if (ins->opcode == OP_PCMPGTB)
cmpOp = LLVMIntSGT;
else
cmpOp = LLVMIntEQ;
if (LLVMTypeOf (lhs) == LLVMTypeOf (rhs)) {
pcmp = LLVMBuildICmp (builder, cmpOp, lhs, rhs, "");
retType = LLVMTypeOf (lhs);
} else {
LLVMTypeRef flatType = LLVMVectorType (LLVMInt8Type (), 16);
LLVMValueRef flatRHS = convert (ctx, rhs, flatType);
LLVMValueRef flatLHS = convert (ctx, lhs, flatType);
pcmp = LLVMBuildICmp (builder, cmpOp, flatLHS, flatRHS, "");
retType = flatType;
}
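/* Sign-extend the i1 compare results so each lane becomes all-ones or all-zeroes */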
values [ins->dreg] = LLVMBuildSExt (builder, pcmp, retType, "");
break;
}
case OP_CVTDQ2PS: {
LLVMValueRef i4 = LLVMBuildBitCast (builder, lhs, sse_i4_t, "");
values [ins->dreg] = LLVMBuildSIToFP (builder, i4, sse_r4_t, dname);
break;
}
case OP_CVTDQ2PD: {
LLVMValueRef indexes [16];
indexes [0] = LLVMConstInt (LLVMInt32Type (), 0, FALSE);
indexes [1] = LLVMConstInt (LLVMInt32Type (), 1, FALSE);
LLVMValueRef mask = LLVMConstVector (indexes, 2);
LLVMValueRef shuffle = LLVMBuildShuffleVector (builder, lhs, LLVMConstNull (LLVMTypeOf (lhs)), mask, "");
values [ins->dreg] = LLVMBuildSIToFP (builder, shuffle, LLVMVectorType (LLVMDoubleType (), 2), dname);
break;
}
case OP_SSE2_CVTSS2SD: {
LLVMValueRef rhs_elem = LLVMBuildExtractElement (builder, rhs, const_int32 (0), "");
LLVMValueRef fpext = LLVMBuildFPExt (builder, rhs_elem, LLVMDoubleType (), dname);
values [ins->dreg] = LLVMBuildInsertElement (builder, lhs, fpext, const_int32 (0), "");
break;
}
case OP_CVTPS2PD: {
LLVMValueRef indexes [16];
indexes [0] = LLVMConstInt (LLVMInt32Type (), 0, FALSE);
indexes [1] = LLVMConstInt (LLVMInt32Type (), 1, FALSE);
LLVMValueRef mask = LLVMConstVector (indexes, 2);
LLVMValueRef shuffle = LLVMBuildShuffleVector (builder, lhs, LLVMConstNull (LLVMTypeOf (lhs)), mask, "");
values [ins->dreg] = LLVMBuildFPExt (builder, shuffle, LLVMVectorType (LLVMDoubleType (), 2), dname);
break;
}
case OP_CVTTPS2DQ:
values [ins->dreg] = LLVMBuildFPToSI (builder, lhs, LLVMVectorType (LLVMInt32Type (), 4), dname);
break;
case OP_CVTPD2DQ:
case OP_CVTPS2DQ:
case OP_CVTPD2PS:
case OP_CVTTPD2DQ: {
LLVMValueRef v;
v = convert (ctx, values [ins->sreg1], simd_op_to_llvm_type (ins->opcode));
values [ins->dreg] = call_intrins (ctx, simd_ins_to_intrins (ins->opcode), &v, dname);
break;
}
case OP_COMPPS:
case OP_COMPPD: {
LLVMRealPredicate op;
switch (ins->inst_c0) {
case SIMD_COMP_EQ:
op = LLVMRealOEQ;
break;
case SIMD_COMP_LT:
op = LLVMRealOLT;
break;
case SIMD_COMP_LE:
op = LLVMRealOLE;
break;
case SIMD_COMP_UNORD:
op = LLVMRealUNO;
break;
case SIMD_COMP_NEQ:
op = LLVMRealUNE;
break;
case SIMD_COMP_NLT:
op = LLVMRealUGE;
break;
case SIMD_COMP_NLE:
op = LLVMRealUGT;
break;
case SIMD_COMP_ORD:
op = LLVMRealORD;
break;
default:
g_assert_not_reached ();
}
LLVMValueRef cmp = LLVMBuildFCmp (builder, op, lhs, rhs, "");
if (ins->opcode == OP_COMPPD)
values [ins->dreg] = LLVMBuildBitCast (builder, LLVMBuildSExt (builder, cmp, LLVMVectorType (LLVMInt64Type (), 2), ""), LLVMTypeOf (lhs), "");
else
values [ins->dreg] = LLVMBuildBitCast (builder, LLVMBuildSExt (builder, cmp, LLVMVectorType (LLVMInt32Type (), 4), ""), LLVMTypeOf (lhs), "");
break;
}
case OP_ICONV_TO_X:
/* This is only used for implementing shifts by a non-immediate amount */
values [ins->dreg] = lhs;
break;
case OP_SHUFPS:
case OP_SHUFPD:
case OP_PSHUFLED:
case OP_PSHUFLEW_LOW:
case OP_PSHUFLEW_HIGH: {
int mask [16];
LLVMValueRef v1 = NULL, v2 = NULL, mask_values [16];
int i, mask_size = 0;
int imask = ins->inst_c0;
/* Convert the x86 shuffle mask to LLVM's */
switch (ins->opcode) {
case OP_SHUFPS:
mask_size = 4;
mask [0] = ((imask >> 0) & 3);
mask [1] = ((imask >> 2) & 3);
mask [2] = ((imask >> 4) & 3) + 4;
mask [3] = ((imask >> 6) & 3) + 4;
v1 = values [ins->sreg1];
v2 = values [ins->sreg2];
break;
case OP_SHUFPD:
mask_size = 2;
mask [0] = ((imask >> 0) & 1);
mask [1] = ((imask >> 1) & 1) + 2;
v1 = values [ins->sreg1];
v2 = values [ins->sreg2];
break;
case OP_PSHUFLEW_LOW:
mask_size = 8;
mask [0] = ((imask >> 0) & 3);
mask [1] = ((imask >> 2) & 3);
mask [2] = ((imask >> 4) & 3);
mask [3] = ((imask >> 6) & 3);
mask [4] = 4 + 0;
mask [5] = 4 + 1;
mask [6] = 4 + 2;
mask [7] = 4 + 3;
v1 = values [ins->sreg1];
v2 = LLVMGetUndef (LLVMTypeOf (v1));
break;
case OP_PSHUFLEW_HIGH:
mask_size = 8;
mask [0] = 0;
mask [1] = 1;
mask [2] = 2;
mask [3] = 3;
mask [4] = 4 + ((imask >> 0) & 3);
mask [5] = 4 + ((imask >> 2) & 3);
mask [6] = 4 + ((imask >> 4) & 3);
mask [7] = 4 + ((imask >> 6) & 3);
v1 = values [ins->sreg1];
v2 = LLVMGetUndef (LLVMTypeOf (v1));
break;
case OP_PSHUFLED:
mask_size = 4;
mask [0] = ((imask >> 0) & 3);
mask [1] = ((imask >> 2) & 3);
mask [2] = ((imask >> 4) & 3);
mask [3] = ((imask >> 6) & 3);
v1 = values [ins->sreg1];
v2 = LLVMGetUndef (LLVMTypeOf (v1));
break;
default:
g_assert_not_reached ();
}
for (i = 0; i < mask_size; ++i)
mask_values [i] = LLVMConstInt (LLVMInt32Type (), mask [i], FALSE);
values [ins->dreg] =
LLVMBuildShuffleVector (builder, v1, v2,
LLVMConstVector (mask_values, mask_size), dname);
break;
}
case OP_UNPACK_LOWB:
case OP_UNPACK_LOWW:
case OP_UNPACK_LOWD:
case OP_UNPACK_LOWQ:
case OP_UNPACK_LOWPS:
case OP_UNPACK_LOWPD:
case OP_UNPACK_HIGHB:
case OP_UNPACK_HIGHW:
case OP_UNPACK_HIGHD:
case OP_UNPACK_HIGHQ:
case OP_UNPACK_HIGHPS:
case OP_UNPACK_HIGHPD: {
int mask [16];
LLVMValueRef mask_values [16];
int i, mask_size = 0;
gboolean low = FALSE;
switch (ins->opcode) {
case OP_UNPACK_LOWB:
mask_size = 16;
low = TRUE;
break;
case OP_UNPACK_LOWW:
mask_size = 8;
low = TRUE;
break;
case OP_UNPACK_LOWD:
case OP_UNPACK_LOWPS:
mask_size = 4;
low = TRUE;
break;
case OP_UNPACK_LOWQ:
case OP_UNPACK_LOWPD:
mask_size = 2;
low = TRUE;
break;
case OP_UNPACK_HIGHB:
mask_size = 16;
break;
case OP_UNPACK_HIGHW:
mask_size = 8;
break;
case OP_UNPACK_HIGHD:
case OP_UNPACK_HIGHPS:
mask_size = 4;
break;
case OP_UNPACK_HIGHQ:
case OP_UNPACK_HIGHPD:
mask_size = 2;
break;
default:
g_assert_not_reached ();
}
if (low) {
for (i = 0; i < (mask_size / 2); ++i) {
mask [(i * 2)] = i;
mask [(i * 2) + 1] = mask_size + i;
}
} else {
for (i = 0; i < (mask_size / 2); ++i) {
mask [(i * 2)] = (mask_size / 2) + i;
mask [(i * 2) + 1] = mask_size + (mask_size / 2) + i;
}
}
for (i = 0; i < mask_size; ++i)
mask_values [i] = LLVMConstInt (LLVMInt32Type (), mask [i], FALSE);
values [ins->dreg] =
LLVMBuildShuffleVector (builder, values [ins->sreg1], values [ins->sreg2],
LLVMConstVector (mask_values, mask_size), dname);
break;
}
case OP_DUPPD: {
LLVMTypeRef t = simd_op_to_llvm_type (ins->opcode);
LLVMValueRef v, val;
v = LLVMBuildExtractElement (builder, lhs, LLVMConstInt (LLVMInt32Type (), 0, FALSE), "");
val = LLVMConstNull (t);
val = LLVMBuildInsertElement (builder, val, v, LLVMConstInt (LLVMInt32Type (), 0, FALSE), "");
val = LLVMBuildInsertElement (builder, val, v, LLVMConstInt (LLVMInt32Type (), 1, FALSE), dname);
values [ins->dreg] = val;
break;
}
case OP_DUPPS_LOW:
case OP_DUPPS_HIGH: {
LLVMTypeRef t = simd_op_to_llvm_type (ins->opcode);
LLVMValueRef v1, v2, val;
if (ins->opcode == OP_DUPPS_LOW) {
v1 = LLVMBuildExtractElement (builder, lhs, LLVMConstInt (LLVMInt32Type (), 0, FALSE), "");
v2 = LLVMBuildExtractElement (builder, lhs, LLVMConstInt (LLVMInt32Type (), 2, FALSE), "");
} else {
v1 = LLVMBuildExtractElement (builder, lhs, LLVMConstInt (LLVMInt32Type (), 1, FALSE), "");
v2 = LLVMBuildExtractElement (builder, lhs, LLVMConstInt (LLVMInt32Type (), 3, FALSE), "");
}
val = LLVMConstNull (t);
val = LLVMBuildInsertElement (builder, val, v1, LLVMConstInt (LLVMInt32Type (), 0, FALSE), "");
val = LLVMBuildInsertElement (builder, val, v1, LLVMConstInt (LLVMInt32Type (), 1, FALSE), "");
val = LLVMBuildInsertElement (builder, val, v2, LLVMConstInt (LLVMInt32Type (), 2, FALSE), "");
val = LLVMBuildInsertElement (builder, val, v2, LLVMConstInt (LLVMInt32Type (), 3, FALSE), "");
values [ins->dreg] = val;
break;
}
case OP_FCONV_TO_R8_X: {
values [ins->dreg] = LLVMBuildInsertElement (builder, LLVMConstNull (sse_r8_t), lhs, LLVMConstInt (LLVMInt32Type (), 0, FALSE), "");
break;
}
case OP_FCONV_TO_R4_X: {
values [ins->dreg] = LLVMBuildInsertElement (builder, LLVMConstNull (sse_r4_t), lhs, LLVMConstInt (LLVMInt32Type (), 0, FALSE), "");
break;
}
#if defined(TARGET_X86) || defined(TARGET_AMD64)
case OP_SSE_MOVMSK: {
LLVMValueRef args [1];
if (ins->inst_c1 == MONO_TYPE_R4) {
args [0] = lhs;
values [ins->dreg] = call_intrins (ctx, INTRINS_SSE_MOVMSK_PS, args, dname);
} else if (ins->inst_c1 == MONO_TYPE_R8) {
args [0] = lhs;
values [ins->dreg] = call_intrins (ctx, INTRINS_SSE_MOVMSK_PD, args, dname);
} else {
args [0] = convert (ctx, lhs, sse_i1_t);
values [ins->dreg] = call_intrins (ctx, INTRINS_SSE_PMOVMSKB, args, dname);
}
break;
}
case OP_SSE_MOVS:
case OP_SSE_MOVS2: {
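/* Move scalar: for R4/R8 lane 0 comes from rhs and the upper lanes from lhs; for I8/U8 the upper lane of lhs is zeroed */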
if (ins->inst_c1 == MONO_TYPE_R4)
values [ins->dreg] = LLVMBuildShuffleVector (builder, rhs, lhs, create_const_vector_4_i32 (0, 5, 6, 7), "");
else if (ins->inst_c1 == MONO_TYPE_R8)
values [ins->dreg] = LLVMBuildShuffleVector (builder, rhs, lhs, create_const_vector_2_i32 (0, 3), "");
else if (ins->inst_c1 == MONO_TYPE_I8 || ins->inst_c1 == MONO_TYPE_U8)
values [ins->dreg] = LLVMBuildInsertElement (builder, lhs,
LLVMConstInt (LLVMInt64Type (), 0, FALSE),
LLVMConstInt (LLVMInt32Type (), 1, FALSE), "");
else
g_assert_not_reached (); // will be needed for other types later
break;
}
case OP_SSE_MOVEHL: {
if (ins->inst_c1 == MONO_TYPE_R4)
values [ins->dreg] = LLVMBuildShuffleVector (builder, lhs, rhs, create_const_vector_4_i32 (6, 7, 2, 3), "");
else
g_assert_not_reached ();
break;
}
case OP_SSE_MOVELH: {
if (ins->inst_c1 == MONO_TYPE_R4)
values [ins->dreg] = LLVMBuildShuffleVector (builder, lhs, rhs, create_const_vector_4_i32 (0, 1, 4, 5), "");
else
g_assert_not_reached ();
break;
}
case OP_SSE_UNPACKLO: {
if (ins->inst_c1 == MONO_TYPE_R8 || ins->inst_c1 == MONO_TYPE_I8 || ins->inst_c1 == MONO_TYPE_U8) {
values [ins->dreg] = LLVMBuildShuffleVector (builder, lhs, rhs, create_const_vector_2_i32 (0, 2), "");
} else if (ins->inst_c1 == MONO_TYPE_R4 || ins->inst_c1 == MONO_TYPE_I4 || ins->inst_c1 == MONO_TYPE_U4) {
values [ins->dreg] = LLVMBuildShuffleVector (builder, lhs, rhs, create_const_vector_4_i32 (0, 4, 1, 5), "");
} else if (ins->inst_c1 == MONO_TYPE_I2 || ins->inst_c1 == MONO_TYPE_U2) {
const int mask_values [] = { 0, 8, 1, 9, 2, 10, 3, 11 };
LLVMValueRef shuffled = LLVMBuildShuffleVector (builder,
convert (ctx, lhs, sse_i2_t),
convert (ctx, rhs, sse_i2_t),
create_const_vector_i32 (mask_values, 8), "");
values [ins->dreg] = convert (ctx, shuffled, type_to_sse_type (ins->inst_c1));
} else if (ins->inst_c1 == MONO_TYPE_I1 || ins->inst_c1 == MONO_TYPE_U1) {
const int mask_values [] = { 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23 };
LLVMValueRef shuffled = LLVMBuildShuffleVector (builder,
convert (ctx, lhs, sse_i1_t),
convert (ctx, rhs, sse_i1_t),
create_const_vector_i32 (mask_values, 16), "");
values [ins->dreg] = convert (ctx, shuffled, type_to_sse_type (ins->inst_c1));
} else {
g_assert_not_reached ();
}
break;
}
case OP_SSE_UNPACKHI: {
if (ins->inst_c1 == MONO_TYPE_R8 || ins->inst_c1 == MONO_TYPE_I8 || ins->inst_c1 == MONO_TYPE_U8) {
values [ins->dreg] = LLVMBuildShuffleVector (builder, lhs, rhs, create_const_vector_2_i32 (1, 3), "");
} else if (ins->inst_c1 == MONO_TYPE_R4 || ins->inst_c1 == MONO_TYPE_I4 || ins->inst_c1 == MONO_TYPE_U4) {
values [ins->dreg] = LLVMBuildShuffleVector (builder, lhs, rhs, create_const_vector_4_i32 (2, 6, 3, 7), "");
} else if (ins->inst_c1 == MONO_TYPE_I2 || ins->inst_c1 == MONO_TYPE_U2) {
const int mask_values [] = { 4, 12, 5, 13, 6, 14, 7, 15 };
LLVMValueRef shuffled = LLVMBuildShuffleVector (builder,
convert (ctx, lhs, sse_i2_t),
convert (ctx, rhs, sse_i2_t),
create_const_vector_i32 (mask_values, 8), "");
values [ins->dreg] = convert (ctx, shuffled, type_to_sse_type (ins->inst_c1));
} else if (ins->inst_c1 == MONO_TYPE_I1 || ins->inst_c1 == MONO_TYPE_U1) {
const int mask_values [] = { 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31 };
LLVMValueRef shuffled = LLVMBuildShuffleVector (builder,
convert (ctx, lhs, sse_i1_t),
convert (ctx, rhs, sse_i1_t),
create_const_vector_i32 (mask_values, 16), "");
values [ins->dreg] = convert (ctx, shuffled, type_to_sse_type (ins->inst_c1));
} else {
g_assert_not_reached ();
}
break;
}
case OP_SSE_LOADU: {
LLVMValueRef dst_ptr = convert (ctx, lhs, LLVMPointerType (primitive_type_to_llvm_type (inst_c1_type (ins)), 0));
LLVMValueRef dst_vec = LLVMBuildBitCast (builder, dst_ptr, LLVMPointerType (type_to_sse_type (ins->inst_c1), 0), "");
values [ins->dreg] = mono_llvm_build_aligned_load (builder, dst_vec, "", FALSE, ins->inst_c0); // inst_c0 is alignment
break;
}
case OP_SSE_MOVSS: {
LLVMValueRef addr = convert (ctx, lhs, LLVMPointerType (LLVMFloatType (), 0));
LLVMValueRef val = mono_llvm_build_load (builder, addr, "", FALSE);
values [ins->dreg] = LLVMBuildInsertElement (builder, LLVMConstNull (type_to_sse_type (ins->inst_c1)), val, LLVMConstInt (LLVMInt32Type (), 0, FALSE), "");
break;
}
case OP_SSE_MOVSS_STORE: {
LLVMValueRef addr = convert (ctx, lhs, LLVMPointerType (LLVMFloatType (), 0));
LLVMValueRef val = LLVMBuildExtractElement (builder, rhs, LLVMConstInt (LLVMInt32Type (), 0, FALSE), "");
mono_llvm_build_store (builder, val, addr, FALSE, LLVM_BARRIER_NONE);
break;
}
case OP_SSE2_MOVD:
case OP_SSE2_MOVQ:
case OP_SSE2_MOVUPD: {
LLVMTypeRef rty = NULL;
switch (ins->opcode) {
case OP_SSE2_MOVD: rty = sse_i4_t; break;
case OP_SSE2_MOVQ: rty = sse_i8_t; break;
case OP_SSE2_MOVUPD: rty = sse_r8_t; break;
}
LLVMTypeRef srcty = LLVMGetElementType (rty);
LLVMValueRef zero = LLVMConstNull (rty);
LLVMValueRef addr = convert (ctx, lhs, LLVMPointerType (srcty, 0));
LLVMValueRef val = mono_llvm_build_aligned_load (builder, addr, "", FALSE, 1);
values [ins->dreg] = LLVMBuildInsertElement (builder, zero, val, const_int32 (0), dname);
break;
}
case OP_SSE_MOVLPS_LOAD:
case OP_SSE_MOVHPS_LOAD: {
LLVMTypeRef t = LLVMFloatType ();
int size = 4;
gboolean high = ins->opcode == OP_SSE_MOVHPS_LOAD;
/* Load two floats from rhs and store them in the low/high part of lhs */
LLVMValueRef addr = rhs;
LLVMValueRef addr1 = convert (ctx, addr, LLVMPointerType (t, 0));
LLVMValueRef addr2 = convert (ctx, LLVMBuildAdd (builder, convert (ctx, addr, IntPtrType ()), convert (ctx, LLVMConstInt (LLVMInt32Type (), size, FALSE), IntPtrType ()), ""), LLVMPointerType (t, 0));
LLVMValueRef val1 = mono_llvm_build_load (builder, addr1, "", FALSE);
LLVMValueRef val2 = mono_llvm_build_load (builder, addr2, "", FALSE);
int index1, index2;
index1 = high ? 2 : 0;
index2 = high ? 3 : 1;
values [ins->dreg] = LLVMBuildInsertElement (builder, LLVMBuildInsertElement (builder, lhs, val1, LLVMConstInt (LLVMInt32Type (), index1, FALSE), ""), val2, LLVMConstInt (LLVMInt32Type (), index2, FALSE), "");
break;
}
case OP_SSE2_MOVLPD_LOAD:
case OP_SSE2_MOVHPD_LOAD: {
LLVMTypeRef t = LLVMDoubleType ();
LLVMValueRef addr = convert (ctx, rhs, LLVMPointerType (t, 0));
LLVMValueRef val = mono_llvm_build_load (builder, addr, "", FALSE);
int index = ins->opcode == OP_SSE2_MOVHPD_LOAD ? 1 : 0;
values [ins->dreg] = LLVMBuildInsertElement (builder, lhs, val, const_int32 (index), "");
break;
}
case OP_SSE_MOVLPS_STORE:
case OP_SSE_MOVHPS_STORE: {
/* Store two floats from the low/high part of rhs into lhs */
LLVMValueRef addr = lhs;
LLVMValueRef addr1 = convert (ctx, addr, LLVMPointerType (LLVMFloatType (), 0));
LLVMValueRef addr2 = convert (ctx, LLVMBuildAdd (builder, convert (ctx, addr, IntPtrType ()), convert (ctx, LLVMConstInt (LLVMInt32Type (), 4, FALSE), IntPtrType ()), ""), LLVMPointerType (LLVMFloatType (), 0));
int index1 = ins->opcode == OP_SSE_MOVLPS_STORE ? 0 : 2;
int index2 = ins->opcode == OP_SSE_MOVLPS_STORE ? 1 : 3;
LLVMValueRef val1 = LLVMBuildExtractElement (builder, rhs, LLVMConstInt (LLVMInt32Type (), index1, FALSE), "");
LLVMValueRef val2 = LLVMBuildExtractElement (builder, rhs, LLVMConstInt (LLVMInt32Type (), index2, FALSE), "");
mono_llvm_build_store (builder, val1, addr1, FALSE, LLVM_BARRIER_NONE);
mono_llvm_build_store (builder, val2, addr2, FALSE, LLVM_BARRIER_NONE);
break;
}
case OP_SSE2_MOVLPD_STORE:
case OP_SSE2_MOVHPD_STORE: {
LLVMTypeRef t = LLVMDoubleType ();
LLVMValueRef addr = convert (ctx, lhs, LLVMPointerType (t, 0));
int index = ins->opcode == OP_SSE2_MOVHPD_STORE ? 1 : 0;
LLVMValueRef val = LLVMBuildExtractElement (builder, rhs, const_int32 (index), "");
mono_llvm_build_store (builder, val, addr, FALSE, LLVM_BARRIER_NONE);
break;
}
case OP_SSE_STORE: {
LLVMValueRef dst_vec = convert (ctx, lhs, LLVMPointerType (LLVMTypeOf (rhs), 0));
mono_llvm_build_aligned_store (builder, rhs, dst_vec, FALSE, ins->inst_c0);
break;
}
case OP_SSE_STORES: {
LLVMValueRef first_elem = LLVMBuildExtractElement (builder, rhs, LLVMConstInt (LLVMInt32Type (), 0, FALSE), "");
LLVMValueRef dst = convert (ctx, lhs, LLVMPointerType (LLVMTypeOf (first_elem), 0));
mono_llvm_build_aligned_store (builder, first_elem, dst, FALSE, 1);
break;
}
case OP_SSE_MOVNTPS: {
LLVMValueRef addr = convert (ctx, lhs, LLVMPointerType (LLVMTypeOf (rhs), 0));
LLVMValueRef store = mono_llvm_build_aligned_store (builder, rhs, addr, FALSE, ins->inst_c0);
set_nontemporal_flag (store);
break;
}
case OP_SSE_PREFETCHT0: {
LLVMValueRef addr = convert (ctx, lhs, LLVMPointerType (LLVMInt8Type (), 0));
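/* llvm.prefetch (address, rw: 0 = read, locality: 3 = T0 ... 0 = NTA, cache type: 1 = data) */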
LLVMValueRef args [] = { addr, const_int32 (0), const_int32 (3), const_int32 (1) };
call_intrins (ctx, INTRINS_PREFETCH, args, "");
break;
}
case OP_SSE_PREFETCHT1: {
LLVMValueRef addr = convert (ctx, lhs, LLVMPointerType (LLVMInt8Type (), 0));
LLVMValueRef args [] = { addr, const_int32 (0), const_int32 (2), const_int32 (1) };
call_intrins (ctx, INTRINS_PREFETCH, args, "");
break;
}
case OP_SSE_PREFETCHT2: {
LLVMValueRef addr = convert (ctx, lhs, LLVMPointerType (LLVMInt8Type (), 0));
LLVMValueRef args [] = { addr, const_int32 (0), const_int32 (1), const_int32 (1) };
call_intrins (ctx, INTRINS_PREFETCH, args, "");
break;
}
case OP_SSE_PREFETCHNTA: {
LLVMValueRef addr = convert (ctx, lhs, LLVMPointerType (LLVMInt8Type (), 0));
LLVMValueRef args [] = { addr, const_int32 (0), const_int32 (0), const_int32 (1) };
call_intrins (ctx, INTRINS_PREFETCH, args, "");
break;
}
case OP_SSE_OR: {
LLVMValueRef vec_lhs_i64 = convert (ctx, lhs, sse_i8_t);
LLVMValueRef vec_rhs_i64 = convert (ctx, rhs, sse_i8_t);
LLVMValueRef vec_or = LLVMBuildOr (builder, vec_lhs_i64, vec_rhs_i64, "");
values [ins->dreg] = LLVMBuildBitCast (builder, vec_or, type_to_sse_type (ins->inst_c1), "");
break;
}
case OP_SSE_XOR: {
LLVMValueRef vec_lhs_i64 = convert (ctx, lhs, sse_i8_t);
LLVMValueRef vec_rhs_i64 = convert (ctx, rhs, sse_i8_t);
LLVMValueRef vec_xor = LLVMBuildXor (builder, vec_lhs_i64, vec_rhs_i64, "");
values [ins->dreg] = LLVMBuildBitCast (builder, vec_xor, type_to_sse_type (ins->inst_c1), "");
break;
}
case OP_SSE_AND: {
LLVMValueRef vec_lhs_i64 = convert (ctx, lhs, sse_i8_t);
LLVMValueRef vec_rhs_i64 = convert (ctx, rhs, sse_i8_t);
LLVMValueRef vec_and = LLVMBuildAnd (builder, vec_lhs_i64, vec_rhs_i64, "");
values [ins->dreg] = LLVMBuildBitCast (builder, vec_and, type_to_sse_type (ins->inst_c1), "");
break;
}
case OP_SSE_ANDN: {
LLVMValueRef minus_one [2];
minus_one [0] = LLVMConstInt (LLVMInt64Type (), -1, FALSE);
minus_one [1] = LLVMConstInt (LLVMInt64Type (), -1, FALSE);
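/* ANDN computes (~lhs) & rhs: XOR-ing with all-ones flips every bit of lhs */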
LLVMValueRef vec_lhs_i64 = convert (ctx, lhs, sse_i8_t);
LLVMValueRef vec_xor = LLVMBuildXor (builder, vec_lhs_i64, LLVMConstVector (minus_one, 2), "");
LLVMValueRef vec_rhs_i64 = convert (ctx, rhs, sse_i8_t);
LLVMValueRef vec_and = LLVMBuildAnd (builder, vec_rhs_i64, vec_xor, "");
values [ins->dreg] = LLVMBuildBitCast (builder, vec_and, type_to_sse_type (ins->inst_c1), "");
break;
}
case OP_SSE_ADDSS:
case OP_SSE_SUBSS:
case OP_SSE_DIVSS:
case OP_SSE_MULSS:
case OP_SSE2_ADDSD:
case OP_SSE2_SUBSD:
case OP_SSE2_DIVSD:
case OP_SSE2_MULSD: {
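/* Scalar SSE arithmetic: operate on lane 0 of both operands, leaving the upper lanes of lhs untouched */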
LLVMValueRef v1 = LLVMBuildExtractElement (builder, lhs, LLVMConstInt (LLVMInt32Type (), 0, FALSE), "");
LLVMValueRef v2 = LLVMBuildExtractElement (builder, rhs, LLVMConstInt (LLVMInt32Type (), 0, FALSE), "");
LLVMValueRef v = NULL;
switch (ins->opcode) {
case OP_SSE_ADDSS:
case OP_SSE2_ADDSD:
v = LLVMBuildFAdd (builder, v1, v2, "");
break;
case OP_SSE_SUBSS:
case OP_SSE2_SUBSD:
v = LLVMBuildFSub (builder, v1, v2, "");
break;
case OP_SSE_DIVSS:
case OP_SSE2_DIVSD:
v = LLVMBuildFDiv (builder, v1, v2, "");
break;
case OP_SSE_MULSS:
case OP_SSE2_MULSD:
v = LLVMBuildFMul (builder, v1, v2, "");
break;
default:
g_assert_not_reached ();
}
values [ins->dreg] = LLVMBuildInsertElement (builder, lhs, v, LLVMConstInt (LLVMInt32Type (), 0, FALSE), "");
break;
}
case OP_SSE_CMPSS:
case OP_SSE2_CMPSD: {
int imm = -1;
gboolean swap = FALSE;
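/* Predicates without a direct SSE encoding are implemented by swapping the operands */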
switch (ins->inst_c0) {
case CMP_EQ: imm = SSE_eq_ord_nosignal; break;
case CMP_GT: imm = SSE_lt_ord_signal; swap = TRUE; break;
case CMP_GE: imm = SSE_le_ord_signal; swap = TRUE; break;
case CMP_LT: imm = SSE_lt_ord_signal; break;
case CMP_LE: imm = SSE_le_ord_signal; break;
case CMP_GT_UN: imm = SSE_nle_unord_signal; break;
case CMP_GE_UN: imm = SSE_nlt_unord_signal; break;
case CMP_LT_UN: imm = SSE_nle_unord_signal; swap = TRUE; break;
case CMP_LE_UN: imm = SSE_nlt_unord_signal; swap = TRUE; break;
case CMP_NE: imm = SSE_neq_unord_nosignal; break;
case CMP_ORD: imm = SSE_ord_nosignal; break;
case CMP_UNORD: imm = SSE_unord_nosignal; break;
default: g_assert_not_reached (); break;
}
LLVMValueRef cmp = LLVMConstInt (LLVMInt8Type (), imm, FALSE);
LLVMValueRef args [] = { lhs, rhs, cmp };
if (swap) {
args [0] = rhs;
args [1] = lhs;
}
IntrinsicId id = (IntrinsicId) 0;
switch (ins->opcode) {
case OP_SSE_CMPSS: id = INTRINS_SSE_CMPSS; break;
case OP_SSE2_CMPSD: id = INTRINS_SSE_CMPSD; break;
default: g_assert_not_reached (); break;
}
int elements = LLVMGetVectorSize (LLVMTypeOf (lhs));
int mask_values [MAX_VECTOR_ELEMS] = { 0 };
for (int i = 1; i < elements; ++i) {
mask_values [i] = elements + i;
}
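/* Keep the compare result in lane 0 and take the unchanged upper lanes from lhs */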
LLVMValueRef result = call_intrins (ctx, id, args, "");
result = LLVMBuildShuffleVector (builder, result, lhs, create_const_vector_i32 (mask_values, elements), "");
values [ins->dreg] = result;
break;
}
case OP_SSE_COMISS: {
LLVMValueRef args [] = { lhs, rhs };
IntrinsicId id = (IntrinsicId)0;
switch (ins->inst_c0) {
case CMP_EQ: id = INTRINS_SSE_COMIEQ_SS; break;
case CMP_GT: id = INTRINS_SSE_COMIGT_SS; break;
case CMP_GE: id = INTRINS_SSE_COMIGE_SS; break;
case CMP_LT: id = INTRINS_SSE_COMILT_SS; break;
case CMP_LE: id = INTRINS_SSE_COMILE_SS; break;
case CMP_NE: id = INTRINS_SSE_COMINEQ_SS; break;
default: g_assert_not_reached (); break;
}
values [ins->dreg] = call_intrins (ctx, id, args, "");
break;
}
case OP_SSE_UCOMISS: {
LLVMValueRef args [] = { lhs, rhs };
IntrinsicId id = (IntrinsicId)0;
switch (ins->inst_c0) {
case CMP_EQ: id = INTRINS_SSE_UCOMIEQ_SS; break;
case CMP_GT: id = INTRINS_SSE_UCOMIGT_SS; break;
case CMP_GE: id = INTRINS_SSE_UCOMIGE_SS; break;
case CMP_LT: id = INTRINS_SSE_UCOMILT_SS; break;
case CMP_LE: id = INTRINS_SSE_UCOMILE_SS; break;
case CMP_NE: id = INTRINS_SSE_UCOMINEQ_SS; break;
default: g_assert_not_reached (); break;
}
values [ins->dreg] = call_intrins (ctx, id, args, "");
break;
}
case OP_SSE2_COMISD: {
LLVMValueRef args [] = { lhs, rhs };
IntrinsicId id = (IntrinsicId)0;
switch (ins->inst_c0) {
case CMP_EQ: id = INTRINS_SSE_COMIEQ_SD; break;
case CMP_GT: id = INTRINS_SSE_COMIGT_SD; break;
case CMP_GE: id = INTRINS_SSE_COMIGE_SD; break;
case CMP_LT: id = INTRINS_SSE_COMILT_SD; break;
case CMP_LE: id = INTRINS_SSE_COMILE_SD; break;
case CMP_NE: id = INTRINS_SSE_COMINEQ_SD; break;
default: g_assert_not_reached (); break;
}
values [ins->dreg] = call_intrins (ctx, id, args, "");
break;
}
case OP_SSE2_UCOMISD: {
LLVMValueRef args [] = { lhs, rhs };
IntrinsicId id = (IntrinsicId)0;
switch (ins->inst_c0) {
case CMP_EQ: id = INTRINS_SSE_UCOMIEQ_SD; break;
case CMP_GT: id = INTRINS_SSE_UCOMIGT_SD; break;
case CMP_GE: id = INTRINS_SSE_UCOMIGE_SD; break;
case CMP_LT: id = INTRINS_SSE_UCOMILT_SD; break;
case CMP_LE: id = INTRINS_SSE_UCOMILE_SD; break;
case CMP_NE: id = INTRINS_SSE_UCOMINEQ_SD; break;
default: g_assert_not_reached (); break;
}
values [ins->dreg] = call_intrins (ctx, id, args, "");
break;
}
case OP_SSE_CVTSI2SS:
case OP_SSE_CVTSI2SS64:
case OP_SSE2_CVTSI2SD:
case OP_SSE2_CVTSI2SD64: {
LLVMTypeRef ty = LLVMFloatType ();
switch (ins->opcode) {
case OP_SSE2_CVTSI2SD:
case OP_SSE2_CVTSI2SD64:
ty = LLVMDoubleType ();
break;
}
LLVMValueRef fp = LLVMBuildSIToFP (builder, rhs, ty, "");
values [ins->dreg] = LLVMBuildInsertElement (builder, lhs, fp, const_int32 (0), dname);
break;
}
case OP_SSE2_PMULUDQ: {
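/* PMULUDQ multiplies the low 32 bits of each 64-bit lane; the 64-bit product of two 32-bit values cannot overflow, hence NUW */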
LLVMValueRef i32_max = LLVMConstInt (LLVMInt64Type (), UINT32_MAX, FALSE);
LLVMValueRef maskvals [] = { i32_max, i32_max };
LLVMValueRef mask = LLVMConstVector (maskvals, 2);
LLVMValueRef l = LLVMBuildAnd (builder, convert (ctx, lhs, sse_i8_t), mask, "");
LLVMValueRef r = LLVMBuildAnd (builder, convert (ctx, rhs, sse_i8_t), mask, "");
values [ins->dreg] = LLVMBuildNUWMul (builder, l, r, dname);
break;
}
case OP_SSE_SQRTSS:
case OP_SSE2_SQRTSD: {
LLVMValueRef upper = values [ins->sreg1];
LLVMValueRef lower = values [ins->sreg2];
LLVMValueRef scalar = LLVMBuildExtractElement (builder, lower, const_int32 (0), "");
LLVMValueRef result = call_intrins (ctx, simd_ins_to_intrins (ins->opcode), &scalar, dname);
values [ins->dreg] = LLVMBuildInsertElement (builder, upper, result, const_int32 (0), "");
break;
}
case OP_SSE_RCPSS:
case OP_SSE_RSQRTSS: {
IntrinsicId id = (IntrinsicId)0;
switch (ins->opcode) {
case OP_SSE_RCPSS: id = INTRINS_SSE_RCP_SS; break;
case OP_SSE_RSQRTSS: id = INTRINS_SSE_RSQRT_SS; break;
default: g_assert_not_reached (); break;
}
LLVMValueRef result = call_intrins (ctx, id, &rhs, dname);
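/* Lane 0 from the approximation result, lanes 1-3 from lhs */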
const int mask[] = { 0, 5, 6, 7 };
LLVMValueRef shufmask = create_const_vector_i32 (mask, 4);
values [ins->dreg] = LLVMBuildShuffleVector (builder, result, lhs, shufmask, "");
break;
}
case OP_XOP: {
IntrinsicId id = (IntrinsicId)ins->inst_c0;
call_intrins (ctx, id, NULL, "");
break;
}
case OP_XOP_X_I:
case OP_XOP_X_X:
case OP_XOP_I4_X:
case OP_XOP_I8_X:
case OP_XOP_X_X_X:
case OP_XOP_X_X_I4:
case OP_XOP_X_X_I8: {
IntrinsicId id = (IntrinsicId)ins->inst_c0;
LLVMValueRef args [] = { lhs, rhs };
values [ins->dreg] = call_intrins (ctx, id, args, "");
break;
}
case OP_XOP_I4_X_X: {
gboolean to_i8_t = FALSE;
gboolean ret_bool = FALSE;
IntrinsicId id = (IntrinsicId)ins->inst_c0;
switch (ins->inst_c0) {
case INTRINS_SSE_TESTC: to_i8_t = TRUE; ret_bool = TRUE; break;
case INTRINS_SSE_TESTZ: to_i8_t = TRUE; ret_bool = TRUE; break;
case INTRINS_SSE_TESTNZ: to_i8_t = TRUE; ret_bool = TRUE; break;
default: g_assert_not_reached (); break;
}
LLVMValueRef args [] = { lhs, rhs };
if (to_i8_t) {
args [0] = convert (ctx, args [0], sse_i8_t);
args [1] = convert (ctx, args [1], sse_i8_t);
}
LLVMValueRef call = call_intrins (ctx, id, args, "");
if (ret_bool) {
// if return type is bool (it's still i32) we need to normalize it to 1/0
LLVMValueRef cmp_zero = LLVMBuildICmp (builder, LLVMIntNE, call, LLVMConstInt (LLVMInt32Type (), 0, FALSE), "");
values [ins->dreg] = LLVMBuildZExt (builder, cmp_zero, LLVMInt8Type (), "");
} else {
values [ins->dreg] = call;
}
break;
}
case OP_SSE2_MASKMOVDQU: {
LLVMTypeRef i8ptr = LLVMPointerType (LLVMInt8Type (), 0);
LLVMValueRef dstaddr = convert (ctx, values [ins->sreg3], i8ptr);
LLVMValueRef src = convert (ctx, lhs, sse_i1_t);
LLVMValueRef mask = convert (ctx, rhs, sse_i1_t);
LLVMValueRef args[] = { src, mask, dstaddr };
call_intrins (ctx, INTRINS_SSE_MASKMOVDQU, args, "");
break;
}
case OP_PADDB_SAT:
case OP_PADDW_SAT:
case OP_PSUBB_SAT:
case OP_PSUBW_SAT:
case OP_PADDB_SAT_UN:
case OP_PADDW_SAT_UN:
case OP_PSUBB_SAT_UN:
case OP_PSUBW_SAT_UN:
case OP_SSE2_ADDS:
case OP_SSE2_SUBS: {
IntrinsicId id = (IntrinsicId)0;
int type = 0;
gboolean is_add = TRUE;
switch (ins->opcode) {
case OP_PADDB_SAT: type = MONO_TYPE_I1; break;
case OP_PADDW_SAT: type = MONO_TYPE_I2; break;
case OP_PSUBB_SAT: type = MONO_TYPE_I1; is_add = FALSE; break;
case OP_PSUBW_SAT: type = MONO_TYPE_I2; is_add = FALSE; break;
case OP_PADDB_SAT_UN: type = MONO_TYPE_U1; break;
case OP_PADDW_SAT_UN: type = MONO_TYPE_U2; break;
case OP_PSUBB_SAT_UN: type = MONO_TYPE_U1; is_add = FALSE; break;
case OP_PSUBW_SAT_UN: type = MONO_TYPE_U2; is_add = FALSE; break;
case OP_SSE2_ADDS: type = ins->inst_c1; break;
case OP_SSE2_SUBS: type = ins->inst_c1; is_add = FALSE; break;
default: g_assert_not_reached ();
}
if (is_add) {
switch (type) {
case MONO_TYPE_I1: id = INTRINS_SSE_SADD_SATI8; break;
case MONO_TYPE_U1: id = INTRINS_SSE_UADD_SATI8; break;
case MONO_TYPE_I2: id = INTRINS_SSE_SADD_SATI16; break;
case MONO_TYPE_U2: id = INTRINS_SSE_UADD_SATI16; break;
default: g_assert_not_reached (); break;
}
} else {
switch (type) {
case MONO_TYPE_I1: id = INTRINS_SSE_SSUB_SATI8; break;
case MONO_TYPE_U1: id = INTRINS_SSE_USUB_SATI8; break;
case MONO_TYPE_I2: id = INTRINS_SSE_SSUB_SATI16; break;
case MONO_TYPE_U2: id = INTRINS_SSE_USUB_SATI16; break;
default: g_assert_not_reached (); break;
}
}
LLVMTypeRef vecty = type_to_sse_type (type);
LLVMValueRef args [] = { convert (ctx, lhs, vecty), convert (ctx, rhs, vecty) };
LLVMValueRef result = call_intrins (ctx, id, args, dname);
values [ins->dreg] = convert (ctx, result, vecty);
break;
}
case OP_SSE2_PACKUS: {
LLVMValueRef args [2];
args [0] = convert (ctx, lhs, sse_i2_t);
args [1] = convert (ctx, rhs, sse_i2_t);
values [ins->dreg] = convert (ctx,
call_intrins (ctx, INTRINS_SSE_PACKUSWB, args, dname),
type_to_sse_type (ins->inst_c1));
break;
}
case OP_SSE2_SRLI: {
LLVMValueRef args [] = { lhs, rhs };
values [ins->dreg] = convert (ctx,
call_intrins (ctx, INTRINS_SSE_PSRLI_W, args, dname),
type_to_sse_type (ins->inst_c1));
break;
}
case OP_SSE2_PSLLDQ:
case OP_SSE2_PSRLDQ: {
LLVMBasicBlockRef bbs [16 + 1];
LLVMValueRef switch_ins;
LLVMValueRef value = lhs;
LLVMValueRef index = rhs;
LLVMValueRef phi_values [16 + 1];
LLVMTypeRef t = sse_i1_t;
int nelems = 16;
int i;
gboolean shift_right = (ins->opcode == OP_SSE2_PSRLDQ);
value = convert (ctx, value, t);
// No corresponding LLVM intrinsics
// FIXME: Optimize const count
for (i = 0; i < nelems; ++i)
bbs [i] = gen_bb (ctx, "PSLLDQ_CASE_BB");
bbs [nelems] = gen_bb (ctx, "PSLLDQ_DEF_BB");
cbb = gen_bb (ctx, "PSLLDQ_COND_BB");
switch_ins = LLVMBuildSwitch (builder, index, bbs [nelems], 0);
for (i = 0; i < nelems; ++i) {
LLVMAddCase (switch_ins, LLVMConstInt (LLVMInt32Type (), i, FALSE), bbs [i]);
LLVMPositionBuilderAtEnd (builder, bbs [i]);
int mask_values [16];
// Implement shift using a shuffle
if (shift_right) {
for (int j = 0; j < nelems - i; ++j)
mask_values [j] = i + j;
for (int j = nelems - i; j < nelems; ++j)
mask_values [j] = nelems;
} else {
for (int j = 0; j < i; ++j)
mask_values [j] = nelems;
for (int j = 0; j < nelems - i; ++j)
mask_values [j + i] = j;
}
phi_values [i] = LLVMBuildShuffleVector (builder, value, LLVMGetUndef (t), create_const_vector_i32 (mask_values, nelems), "");
LLVMBuildBr (builder, cbb);
}
/* Default case */
LLVMPositionBuilderAtEnd (builder, bbs [nelems]);
phi_values [nelems] = LLVMConstNull (t);
LLVMBuildBr (builder, cbb);
LLVMPositionBuilderAtEnd (builder, cbb);
values [ins->dreg] = LLVMBuildPhi (builder, LLVMTypeOf (phi_values [0]), "");
LLVMAddIncoming (values [ins->dreg], phi_values, bbs, nelems + 1);
values [ins->dreg] = convert (ctx, values [ins->dreg], type_to_sse_type (ins->inst_c1));
ctx->bblocks [bb->block_num].end_bblock = cbb;
break;
}
case OP_SSE2_PSRAW_IMM:
case OP_SSE2_PSRAD_IMM:
case OP_SSE2_PSRLW_IMM:
case OP_SSE2_PSRLD_IMM:
case OP_SSE2_PSRLQ_IMM: {
LLVMValueRef value = lhs;
LLVMValueRef index = rhs;
IntrinsicId id;
// FIXME: Optimize const index case
/* Use the non-immediate version */
switch (ins->opcode) {
case OP_SSE2_PSRAW_IMM: id = INTRINS_SSE_PSRA_W; break;
case OP_SSE2_PSRAD_IMM: id = INTRINS_SSE_PSRA_D; break;
case OP_SSE2_PSRLW_IMM: id = INTRINS_SSE_PSRL_W; break;
case OP_SSE2_PSRLD_IMM: id = INTRINS_SSE_PSRL_D; break;
case OP_SSE2_PSRLQ_IMM: id = INTRINS_SSE_PSRL_Q; break;
default: g_assert_not_reached (); break;
}
LLVMTypeRef t = LLVMTypeOf (value);
LLVMValueRef index_vect = LLVMBuildInsertElement (builder, LLVMConstNull (t), convert (ctx, index, LLVMGetElementType (t)), const_int32 (0), "");
LLVMValueRef args [] = { value, index_vect };
values [ins->dreg] = call_intrins (ctx, id, args, "");
break;
}
case OP_SSE_SHUFPS:
case OP_SSE2_SHUFPD:
case OP_SSE2_PSHUFD:
case OP_SSE2_PSHUFHW:
case OP_SSE2_PSHUFLW: {
LLVMTypeRef ret_t = LLVMTypeOf (lhs);
LLVMValueRef l = lhs;
LLVMValueRef r = rhs;
LLVMValueRef ctl = arg3;
const char *oname = "";
int ncases = 0;
switch (ins->opcode) {
case OP_SSE_SHUFPS: ncases = 256; break;
case OP_SSE2_SHUFPD: ncases = 4; break;
case OP_SSE2_PSHUFD: case OP_SSE2_PSHUFHW: case OP_SSE2_PSHUFLW: ncases = 256; r = lhs; ctl = rhs; break;
}
switch (ins->opcode) {
case OP_SSE_SHUFPS: oname = "sse_shufps"; break;
case OP_SSE2_SHUFPD: oname = "sse2_shufpd"; break;
case OP_SSE2_PSHUFD: oname = "sse2_pshufd"; break;
case OP_SSE2_PSHUFHW: oname = "sse2_pshufhw"; break;
case OP_SSE2_PSHUFLW: oname = "sse2_pshuflw"; break;
}
ctl = LLVMBuildAnd (builder, ctl, const_int32 (ncases - 1), "");
ImmediateUnrollCtx ictx = immediate_unroll_begin (ctx, bb, ncases, ctl, ret_t, oname);
int mask_values [8];
int mask_len = 0;
int i = 0;
while (immediate_unroll_next (&ictx, &i)) {
switch (ins->opcode) {
case OP_SSE_SHUFPS:
mask_len = 4;
mask_values [0] = ((i >> 0) & 0x3) + 0; // take two elements from lhs
mask_values [1] = ((i >> 2) & 0x3) + 0;
mask_values [2] = ((i >> 4) & 0x3) + 4; // and two from rhs
mask_values [3] = ((i >> 6) & 0x3) + 4;
break;
case OP_SSE2_SHUFPD:
mask_len = 2;
mask_values [0] = ((i >> 0) & 0x1) + 0;
mask_values [1] = ((i >> 1) & 0x1) + 2;
break;
case OP_SSE2_PSHUFD:
/*
* Each 2 bits in mask selects 1 dword from the source and copies it to the
* destination.
*/
mask_len = 4;
for (int j = 0; j < 4; ++j) {
int windex = (i >> (j * 2)) & 0x3;
mask_values [j] = windex;
}
break;
case OP_SSE2_PSHUFHW:
/*
* Each 2 bits in mask selects 1 word from the high quadword of the source and copies it to the
* high quadword of the destination.
*/
mask_len = 8;
/* The low quadword stays the same */
for (int j = 0; j < 4; ++j)
mask_values [j] = j;
for (int j = 0; j < 4; ++j) {
int windex = (i >> (j * 2)) & 0x3;
mask_values [j + 4] = 4 + windex;
}
break;
case OP_SSE2_PSHUFLW:
mask_len = 8;
/* The high quadword stays the same */
for (int j = 0; j < 4; ++j)
mask_values [j + 4] = j + 4;
for (int j = 0; j < 4; ++j) {
int windex = (i >> (j * 2)) & 0x3;
mask_values [j] = windex;
}
break;
}
LLVMValueRef mask = create_const_vector_i32 (mask_values, mask_len);
LLVMValueRef result = LLVMBuildShuffleVector (builder, l, r, mask, oname);
immediate_unroll_commit (&ictx, i, result);
}
immediate_unroll_default (&ictx);
immediate_unroll_commit_default (&ictx, LLVMGetUndef (ret_t));
values [ins->dreg] = immediate_unroll_end (&ictx, &cbb);
break;
}
case OP_SSE3_MOVDDUP: {
int mask [] = { 0, 0 };
values [ins->dreg] = LLVMBuildShuffleVector (builder, lhs,
LLVMGetUndef (LLVMTypeOf (lhs)),
create_const_vector_i32 (mask, 2), "");
break;
}
case OP_SSE3_MOVDDUP_MEM: {
LLVMValueRef undef = LLVMGetUndef (v128_r8_t);
LLVMValueRef addr = convert (ctx, lhs, LLVMPointerType (r8_t, 0));
LLVMValueRef elem = mono_llvm_build_aligned_load (builder, addr, "sse3_movddup_mem", FALSE, 1);
LLVMValueRef val = LLVMBuildInsertElement (builder, undef, elem, const_int32 (0), "sse3_movddup_mem");
values [ins->dreg] = LLVMBuildShuffleVector (builder, val, undef, LLVMConstNull (LLVMVectorType (i4_t, 2)), "sse3_movddup_mem");
break;
}
case OP_SSE3_MOVSHDUP: {
int mask [] = { 1, 1, 3, 3 };
values [ins->dreg] = LLVMBuildShuffleVector (builder, lhs, LLVMConstNull (LLVMTypeOf (lhs)), create_const_vector_i32 (mask, 4), "");
break;
}
case OP_SSE3_MOVSLDUP: {
int mask [] = { 0, 0, 2, 2 };
values [ins->dreg] = LLVMBuildShuffleVector (builder, lhs, LLVMConstNull (LLVMTypeOf (lhs)), create_const_vector_i32 (mask, 4), "");
break;
}
case OP_SSSE3_SHUFFLE: {
LLVMValueRef args [] = { lhs, rhs };
values [ins->dreg] = call_intrins (ctx, INTRINS_SSE_PSHUFB, args, dname);
break;
}
case OP_SSSE3_ABS: {
// %sub = sub <16 x i8> zeroinitializer, %arg
// %cmp = icmp sgt <16 x i8> %arg, zeroinitializer
// %abs = select <16 x i1> %cmp, <16 x i8> %arg, <16 x i8> %sub
LLVMTypeRef typ = type_to_sse_type (ins->inst_c1);
LLVMValueRef sub = LLVMBuildSub (builder, LLVMConstNull (typ), lhs, "");
LLVMValueRef cmp = LLVMBuildICmp (builder, LLVMIntSGT, lhs, LLVMConstNull (typ), "");
LLVMValueRef abs = LLVMBuildSelect (builder, cmp, lhs, sub, "");
values [ins->dreg] = convert (ctx, abs, typ);
break;
}
case OP_SSSE3_ALIGNR: {
LLVMTypeRef ret_t = simd_class_to_llvm_type (ctx, ins->klass);
LLVMValueRef zero = LLVMConstNull (v128_i1_t);
LLVMValueRef hivec = convert (ctx, lhs, v128_i1_t);
LLVMValueRef lovec = convert (ctx, rhs, v128_i1_t);
LLVMValueRef rshift_amount = convert (ctx, arg3, i1_t);
ImmediateUnrollCtx ictx = immediate_unroll_begin (ctx, bb, 32, rshift_amount, v128_i1_t, "ssse3_alignr");
LLVMValueRef mask_values [16]; // 128-bit vector, 8-bit elements, 16 total elements
int i = 0;
while (immediate_unroll_next (&ictx, &i)) {
LLVMValueRef hi = NULL;
LLVMValueRef lo = NULL;
if (i <= 16) {
for (int j = 0; j < 16; j++)
mask_values [j] = const_int32 (i + j);
lo = lovec;
hi = hivec;
} else {
for (int j = 0; j < 16; j++)
mask_values [j] = const_int32 (i + j - 16);
lo = hivec;
hi = zero;
}
LLVMValueRef shuffled = LLVMBuildShuffleVector (builder, lo, hi, LLVMConstVector (mask_values, 16), "ssse3_alignr");
immediate_unroll_commit (&ictx, i, shuffled);
}
immediate_unroll_default (&ictx);
immediate_unroll_commit_default (&ictx, zero);
LLVMValueRef result = immediate_unroll_end (&ictx, &cbb);
values [ins->dreg] = convert (ctx, result, ret_t);
break;
}
case OP_SSE41_ROUNDP: {
LLVMValueRef args [] = { lhs, LLVMConstInt (LLVMInt32Type (), ins->inst_c0, FALSE) };
values [ins->dreg] = call_intrins (ctx, ins->inst_c1 == MONO_TYPE_R4 ? INTRINS_SSE_ROUNDPS : INTRINS_SSE_ROUNDPD, args, dname);
break;
}
case OP_SSE41_ROUNDS: {
LLVMValueRef args [3];
args [0] = lhs;
args [1] = rhs;
args [2] = LLVMConstInt (LLVMInt32Type (), ins->inst_c0, FALSE);
values [ins->dreg] = call_intrins (ctx, ins->inst_c1 == MONO_TYPE_R4 ? INTRINS_SSE_ROUNDSS : INTRINS_SSE_ROUNDSD, args, dname);
break;
}
case OP_SSE41_DPPS:
case OP_SSE41_DPPD: {
/* Bits 0, 1, 4, 5 are meaningful for the control mask
* in dppd; all bits are meaningful for dpps.
*/
LLVMTypeRef ret_t = NULL;
LLVMValueRef mask = NULL;
int mask_bits = 0;
int high_shift = 0;
int low_mask = 0;
IntrinsicId iid = (IntrinsicId) 0;
const char *oname = "";
switch (ins->opcode) {
case OP_SSE41_DPPS:
ret_t = v128_r4_t;
mask = const_int8 (0xff); // 0b11111111
mask_bits = 8;
high_shift = 4;
low_mask = 0xf;
iid = INTRINS_SSE_DPPS;
oname = "sse41_dpps";
break;
case OP_SSE41_DPPD:
ret_t = v128_r8_t;
mask = const_int8 (0x33); // 0b00110011
mask_bits = 4;
high_shift = 2;
low_mask = 0x3;
iid = INTRINS_SSE_DPPD;
oname = "sse41_dppd";
break;
}
LLVMValueRef args [] = { lhs, rhs, NULL };
LLVMValueRef index = LLVMBuildAnd (builder, convert (ctx, arg3, i1_t), mask, oname);
ImmediateUnrollCtx ictx = immediate_unroll_begin (ctx, bb, 1 << mask_bits, index, ret_t, oname);
int i = 0;
while (immediate_unroll_next (&ictx, &i)) {
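// Re-expand the dense case index into the instruction immediate: the low
// (result broadcast) bits stay in place and the high (input mask) bits are
// placed at bit 4. For dpps all 8 bits are meaningful, so this is an identity.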
int imm = ((i >> high_shift) << 4) | (i & low_mask);
args [2] = const_int8 (imm);
LLVMValueRef result = call_intrins (ctx, iid, args, dname);
immediate_unroll_commit (&ictx, imm, result);
}
immediate_unroll_default (&ictx);
immediate_unroll_commit_default (&ictx, LLVMGetUndef (ret_t));
values [ins->dreg] = immediate_unroll_end (&ictx, &cbb);
break;
}
case OP_SSE41_MPSADBW: {
LLVMValueRef args [] = {
convert (ctx, lhs, sse_i1_t),
convert (ctx, rhs, sse_i1_t),
NULL,
};
LLVMValueRef ctl = convert (ctx, arg3, i1_t);
// Only 3 bits (bits 0-2) are used by mpsadbw and llvm.x86.sse41.mpsadbw
int used_bits = 0x7;
ctl = LLVMBuildAnd (builder, ctl, const_int8 (used_bits), "sse41_mpsadbw");
ImmediateUnrollCtx ictx = immediate_unroll_begin (ctx, bb, used_bits + 1, ctl, v128_i2_t, "sse41_mpsadbw");
int i = 0;
while (immediate_unroll_next (&ictx, &i)) {
args [2] = const_int8 (i);
LLVMValueRef result = call_intrins (ctx, INTRINS_SSE_MPSADBW, args, "sse41_mpsadbw");
immediate_unroll_commit (&ictx, i, result);
}
immediate_unroll_unreachable_default (&ictx);
values [ins->dreg] = immediate_unroll_end (&ictx, &cbb);
break;
}
case OP_SSE41_INSERTPS: {
LLVMValueRef ctl = convert (ctx, arg3, i1_t);
LLVMValueRef args [] = { lhs, rhs, NULL };
ImmediateUnrollCtx ictx = immediate_unroll_begin (ctx, bb, 256, ctl, v128_r4_t, "sse41_insertps");
int i = 0;
while (immediate_unroll_next (&ictx, &i)) {
args [2] = const_int8 (i);
LLVMValueRef result = call_intrins (ctx, INTRINS_SSE_INSERTPS, args, dname);
immediate_unroll_commit (&ictx, i, result);
}
immediate_unroll_unreachable_default (&ictx);
values [ins->dreg] = immediate_unroll_end (&ictx, &cbb);
break;
}
case OP_SSE41_BLEND: {
LLVMTypeRef ret_t = LLVMTypeOf (lhs);
int nelem = LLVMGetVectorSize (ret_t);
g_assert (nelem >= 2 && nelem <= 8); // I2, U2, R4, R8
int unique_ctl_patterns = 1 << nelem;
int ctlmask = unique_ctl_patterns - 1;
LLVMValueRef ctl = convert (ctx, arg3, i1_t);
ctl = LLVMBuildAnd (builder, ctl, const_int8 (ctlmask), "sse41_blend");
ImmediateUnrollCtx ictx = immediate_unroll_begin (ctx, bb, unique_ctl_patterns, ctl, ret_t, "sse41_blend");
int i = 0;
int mask_values [MAX_VECTOR_ELEMS] = { 0 };
while (immediate_unroll_next (&ictx, &i)) {
for (int lane = 0; lane < nelem; ++lane) {
// bit 'lane' of the control byte: 1 selects the element from rhs, 0 from lhs
gboolean bit_set = (i & (1 << lane)) >> lane;
mask_values [lane] = lane + (bit_set ? nelem : 0);
}
LLVMValueRef mask = create_const_vector_i32 (mask_values, nelem);
LLVMValueRef result = LLVMBuildShuffleVector (builder, lhs, rhs, mask, "sse41_blend");
immediate_unroll_commit (&ictx, i, result);
}
immediate_unroll_default (&ictx);
immediate_unroll_commit_default (&ictx, LLVMGetUndef (ret_t));
values [ins->dreg] = immediate_unroll_end (&ictx, &cbb);
break;
}
case OP_SSE41_BLENDV: {
LLVMValueRef args [] = { lhs, rhs, values [ins->sreg3] };
if (ins->inst_c1 == MONO_TYPE_R4) {
values [ins->dreg] = call_intrins (ctx, INTRINS_SSE_BLENDVPS, args, dname);
} else if (ins->inst_c1 == MONO_TYPE_R8) {
values [ins->dreg] = call_intrins (ctx, INTRINS_SSE_BLENDVPD, args, dname);
} else {
// for other non-fp type just convert to <16 x i8> and pass to @llvm.x86.sse41.pblendvb
args [0] = LLVMBuildBitCast (ctx->builder, args [0], sse_i1_t, "");
args [1] = LLVMBuildBitCast (ctx->builder, args [1], sse_i1_t, "");
args [2] = LLVMBuildBitCast (ctx->builder, args [2], sse_i1_t, "");
values [ins->dreg] = call_intrins (ctx, INTRINS_SSE_PBLENDVB, args, dname);
}
break;
}
case OP_SSE_CVTII: {
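// pmovsx/pmovzx-style widening conversion: load or bitcast the source as a
// vector of narrow elements, keep the low elements via a shuffle, then sign-
// or zero-extend each element to the destination width.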
gboolean is_signed = (ins->inst_c1 == MONO_TYPE_I1) ||
(ins->inst_c1 == MONO_TYPE_I2) || (ins->inst_c1 == MONO_TYPE_I4);
LLVMTypeRef vec_type;
if ((ins->inst_c1 == MONO_TYPE_I1) || (ins->inst_c1 == MONO_TYPE_U1))
vec_type = sse_i1_t;
else if ((ins->inst_c1 == MONO_TYPE_I2) || (ins->inst_c1 == MONO_TYPE_U2))
vec_type = sse_i2_t;
else
vec_type = sse_i4_t;
LLVMValueRef value;
if (LLVMGetTypeKind (LLVMTypeOf (lhs)) != LLVMVectorTypeKind) {
LLVMValueRef bitcasted = LLVMBuildBitCast (ctx->builder, lhs, LLVMPointerType (vec_type, 0), "");
value = mono_llvm_build_aligned_load (builder, bitcasted, "", FALSE, 1);
} else {
value = LLVMBuildBitCast (ctx->builder, lhs, vec_type, "");
}
LLVMValueRef mask_vec;
LLVMTypeRef dst_type;
if (ins->inst_c0 == MONO_TYPE_I2) {
mask_vec = create_const_vector_i32 (mask_0_incr_1, 8);
dst_type = sse_i2_t;
} else if (ins->inst_c0 == MONO_TYPE_I4) {
mask_vec = create_const_vector_i32 (mask_0_incr_1, 4);
dst_type = sse_i4_t;
} else {
g_assert (ins->inst_c0 == MONO_TYPE_I8);
mask_vec = create_const_vector_i32 (mask_0_incr_1, 2);
dst_type = sse_i8_t;
}
LLVMValueRef shuffled = LLVMBuildShuffleVector (builder, value,
LLVMGetUndef (vec_type), mask_vec, "");
if (is_signed)
values [ins->dreg] = LLVMBuildSExt (ctx->builder, shuffled, dst_type, "");
else
values [ins->dreg] = LLVMBuildZExt (ctx->builder, shuffled, dst_type, "");
break;
}
case OP_SSE41_LOADANT: {
LLVMValueRef dst_ptr = convert (ctx, lhs, LLVMPointerType (primitive_type_to_llvm_type (inst_c1_type (ins)), 0));
LLVMValueRef dst_vec = LLVMBuildBitCast (builder, dst_ptr, LLVMPointerType (type_to_sse_type (ins->inst_c1), 0), "");
LLVMValueRef load = mono_llvm_build_aligned_load (builder, dst_vec, "", FALSE, 16);
set_nontemporal_flag (load);
values [ins->dreg] = load;
break;
}
case OP_SSE41_MUL: {
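// pmuldq: sign-extend the low 32 bits of each 64-bit lane in place (shl 32
// followed by an exact ashr 32), then multiply. nsw is safe because the
// product of two sign-extended 32-bit values always fits in 64 bits.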
const int shift_vals [] = { 32, 32 };
const LLVMValueRef args [] = {
convert (ctx, lhs, sse_i8_t),
convert (ctx, rhs, sse_i8_t),
};
LLVMValueRef mul_args [2] = { 0 };
LLVMValueRef shift_vec = create_const_vector (LLVMInt64Type (), shift_vals, 2);
for (int i = 0; i < 2; ++i) {
LLVMValueRef padded = LLVMBuildShl (builder, args [i], shift_vec, "");
mul_args[i] = mono_llvm_build_exact_ashr (builder, padded, shift_vec);
}
values [ins->dreg] = LLVMBuildNSWMul (builder, mul_args [0], mul_args [1], dname);
break;
}
case OP_SSE41_MULLO: {
values [ins->dreg] = LLVMBuildMul (ctx->builder, lhs, rhs, "");
break;
}
case OP_SSE42_CRC32:
case OP_SSE42_CRC64: {
LLVMValueRef args [2];
args [0] = lhs;
args [1] = convert (ctx, rhs, primitive_type_to_llvm_type (ins->inst_c0));
IntrinsicId id;
switch (ins->inst_c0) {
case MONO_TYPE_U1: id = INTRINS_SSE_CRC32_32_8; break;
case MONO_TYPE_U2: id = INTRINS_SSE_CRC32_32_16; break;
case MONO_TYPE_U4: id = INTRINS_SSE_CRC32_32_32; break;
case MONO_TYPE_U8: id = INTRINS_SSE_CRC32_64_64; break;
default: g_assert_not_reached (); break;
}
values [ins->dreg] = call_intrins (ctx, id, args, "");
break;
}
case OP_PCLMULQDQ: {
LLVMValueRef args [] = { lhs, rhs, NULL };
LLVMValueRef ctl = convert (ctx, arg3, i1_t);
// Only bits 0 and 4 of the immediate operand are used by PCLMULQDQ.
ctl = LLVMBuildAnd (builder, ctl, const_int8 (0x11), "pclmulqdq");
ImmediateUnrollCtx ictx = immediate_unroll_begin (ctx, bb, 1 << 2, ctl, v128_i8_t, "pclmulqdq");
int i = 0;
while (immediate_unroll_next (&ictx, &i)) {
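// Spread the two meaningful bits of the case index to immediate bits 0 and 4,
// matching the values that survive the 0x11 mask on ctl above.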
int imm = ((i & 0x2) << 3) | (i & 0x1);
args [2] = const_int8 (imm);
LLVMValueRef result = call_intrins (ctx, INTRINS_PCLMULQDQ, args, "pclmulqdq");
immediate_unroll_commit (&ictx, imm, result);
}
immediate_unroll_unreachable_default (&ictx);
values [ins->dreg] = immediate_unroll_end (&ictx, &cbb);
break;
}
case OP_AES_KEYGENASSIST: {
LLVMValueRef roundconstant = convert (ctx, rhs, i1_t);
LLVMValueRef args [] = { convert (ctx, lhs, v128_i8_t), NULL };
ImmediateUnrollCtx ictx = immediate_unroll_begin (ctx, bb, 256, roundconstant, v128_i8_t, "aes_keygenassist");
int i = 0;
while (immediate_unroll_next (&ictx, &i)) {
args [1] = const_int8 (i);
LLVMValueRef result = call_intrins (ctx, INTRINS_AESNI_AESKEYGENASSIST, args, "aes_keygenassist");
immediate_unroll_commit (&ictx, i, result);
}
immediate_unroll_unreachable_default (&ictx);
LLVMValueRef result = immediate_unroll_end (&ictx, &cbb);
values [ins->dreg] = convert (ctx, result, v128_i1_t);
break;
}
#endif
case OP_XCOMPARE_FP: {
LLVMRealPredicate pred = fpcond_to_llvm_cond [ins->inst_c0];
LLVMValueRef cmp = LLVMBuildFCmp (builder, pred, lhs, rhs, "");
int nelems = LLVMGetVectorSize (LLVMTypeOf (cmp));
g_assert (LLVMTypeOf (lhs) == LLVMTypeOf (rhs));
if (ins->inst_c1 == MONO_TYPE_R8)
values [ins->dreg] = LLVMBuildBitCast (builder, LLVMBuildSExt (builder, cmp, LLVMVectorType (LLVMInt64Type (), nelems), ""), LLVMTypeOf (lhs), "");
else
values [ins->dreg] = LLVMBuildBitCast (builder, LLVMBuildSExt (builder, cmp, LLVMVectorType (LLVMInt32Type (), nelems), ""), LLVMTypeOf (lhs), "");
break;
}
case OP_XCOMPARE: {
LLVMIntPredicate pred = cond_to_llvm_cond [ins->inst_c0];
LLVMValueRef cmp = LLVMBuildICmp (builder, pred, lhs, rhs, "");
g_assert (LLVMTypeOf (lhs) == LLVMTypeOf (rhs));
values [ins->dreg] = LLVMBuildSExt (builder, cmp, LLVMTypeOf (lhs), "");
break;
}
case OP_POPCNT32:
values [ins->dreg] = call_intrins (ctx, INTRINS_CTPOP_I32, &lhs, "");
break;
case OP_POPCNT64:
values [ins->dreg] = call_intrins (ctx, INTRINS_CTPOP_I64, &lhs, "");
break;
case OP_CTTZ32:
case OP_CTTZ64: {
LLVMValueRef args [2];
args [0] = lhs;
args [1] = LLVMConstInt (LLVMInt1Type (), 0, FALSE);
values [ins->dreg] = call_intrins (ctx, ins->opcode == OP_CTTZ32 ? INTRINS_CTTZ_I32 : INTRINS_CTTZ_I64, args, "");
break;
}
case OP_BMI1_BEXTR32:
case OP_BMI1_BEXTR64: {
LLVMValueRef args [2];
args [0] = lhs;
args [1] = convert (ctx, rhs, ins->opcode == OP_BMI1_BEXTR32 ? i4_t : i8_t); // cast ushort to u32/u64
values [ins->dreg] = call_intrins (ctx, ins->opcode == OP_BMI1_BEXTR32 ? INTRINS_BEXTR_I32 : INTRINS_BEXTR_I64, args, "");
break;
}
case OP_BZHI32:
case OP_BZHI64: {
LLVMValueRef args [2];
args [0] = lhs;
args [1] = rhs;
values [ins->dreg] = call_intrins (ctx, ins->opcode == OP_BZHI32 ? INTRINS_BZHI_I32 : INTRINS_BZHI_I64, args, "");
break;
}
case OP_MULX_H32:
case OP_MULX_H64:
case OP_MULX_HL32:
case OP_MULX_HL64: {
gboolean is_64 = ins->opcode == OP_MULX_H64 || ins->opcode == OP_MULX_HL64;
gboolean only_high = ins->opcode == OP_MULX_H32 || ins->opcode == OP_MULX_H64;
LLVMValueRef lx = LLVMBuildZExt (ctx->builder, lhs, LLVMInt128Type (), "");
LLVMValueRef rx = LLVMBuildZExt (ctx->builder, rhs, LLVMInt128Type (), "");
LLVMValueRef mulx = LLVMBuildMul (ctx->builder, lx, rx, "");
if (!only_high) {
LLVMValueRef addr = convert (ctx, arg3, LLVMPointerType (is_64 ? i8_t : i4_t, 0));
LLVMValueRef lowx = LLVMBuildTrunc (ctx->builder, mulx, is_64 ? LLVMInt64Type () : LLVMInt32Type (), "");
LLVMBuildStore (ctx->builder, lowx, addr);
}
LLVMValueRef shift = LLVMConstInt (LLVMInt128Type (), is_64 ? 64 : 32, FALSE);
LLVMValueRef highx = LLVMBuildLShr (ctx->builder, mulx, shift, "");
values [ins->dreg] = LLVMBuildTrunc (ctx->builder, highx, is_64 ? LLVMInt64Type () : LLVMInt32Type (), "");
break;
}
case OP_PEXT32:
case OP_PEXT64: {
LLVMValueRef args [2];
args [0] = lhs;
args [1] = rhs;
values [ins->dreg] = call_intrins (ctx, ins->opcode == OP_PEXT32 ? INTRINS_PEXT_I32 : INTRINS_PEXT_I64, args, "");
break;
}
case OP_PDEP32:
case OP_PDEP64: {
LLVMValueRef args [2];
args [0] = lhs;
args [1] = rhs;
values [ins->dreg] = call_intrins (ctx, ins->opcode == OP_PDEP32 ? INTRINS_PDEP_I32 : INTRINS_PDEP_I64, args, "");
break;
}
#endif /* defined(TARGET_X86) || defined(TARGET_AMD64) */
// Shared between ARM64 and X86
#if defined(TARGET_ARM64) || defined(TARGET_X86) || defined(TARGET_AMD64)
case OP_LZCNT32:
case OP_LZCNT64: {
IntrinsicId iid = ins->opcode == OP_LZCNT32 ? INTRINS_CTLZ_I32 : INTRINS_CTLZ_I64;
LLVMValueRef args [] = { lhs, const_int1 (FALSE) };
values [ins->dreg] = call_intrins (ctx, iid, args, "");
break;
}
#endif
#if defined(TARGET_ARM64) || defined(TARGET_X86) || defined(TARGET_AMD64) || defined(TARGET_WASM)
case OP_XEQUAL: {
LLVMTypeRef t;
LLVMValueRef cmp, mask [MAX_VECTOR_ELEMS], shuffle;
int nelems;
#if defined(TARGET_WASM)
/* The wasm code generator doesn't understand the shuffle/and code sequence below */
LLVMValueRef val;
if (LLVMIsNull (lhs) || LLVMIsNull (rhs)) {
val = LLVMIsNull (lhs) ? rhs : lhs;
nelems = LLVMGetVectorSize (LLVMTypeOf (lhs));
IntrinsicId intrins = (IntrinsicId)0;
switch (nelems) {
case 16:
intrins = INTRINS_WASM_ANYTRUE_V16;
break;
case 8:
intrins = INTRINS_WASM_ANYTRUE_V8;
break;
case 4:
intrins = INTRINS_WASM_ANYTRUE_V4;
break;
case 2:
intrins = INTRINS_WASM_ANYTRUE_V2;
break;
default:
g_assert_not_reached ();
}
/* res = !wasm.anytrue (val) */
values [ins->dreg] = call_intrins (ctx, intrins, &val, "");
values [ins->dreg] = LLVMBuildZExt (builder, LLVMBuildICmp (builder, LLVMIntEQ, values [ins->dreg], LLVMConstInt (LLVMInt32Type (), 0, FALSE), ""), LLVMInt32Type (), dname);
break;
}
#endif
LLVMTypeRef srcelemt = LLVMGetElementType (LLVMTypeOf (lhs));
// %c = icmp eq <16 x i8> %a0, %a1 (fcmp oeq for FP element types)
if (srcelemt == LLVMDoubleType () || srcelemt == LLVMFloatType ())
cmp = LLVMBuildFCmp (builder, LLVMRealOEQ, lhs, rhs, "");
else
cmp = LLVMBuildICmp (builder, LLVMIntEQ, lhs, rhs, "");
nelems = LLVMGetVectorSize (LLVMTypeOf (cmp));
LLVMTypeRef elemt;
if (srcelemt == LLVMDoubleType ())
elemt = LLVMInt64Type ();
else if (srcelemt == LLVMFloatType ())
elemt = LLVMInt32Type ();
else
elemt = srcelemt;
t = LLVMVectorType (elemt, nelems);
cmp = LLVMBuildSExt (builder, cmp, t, "");
// cmp is a <nelems x elemt> vector, each element is either 0xff... or 0
int half = nelems / 2;
while (half >= 1) {
// AND the top and bottom halves into the bottom half
for (int i = 0; i < half; ++i)
mask [i] = LLVMConstInt (LLVMInt32Type (), half + i, FALSE);
for (int i = half; i < nelems; ++i)
mask [i] = LLVMConstInt (LLVMInt32Type (), 0, FALSE);
shuffle = LLVMBuildShuffleVector (builder, cmp, LLVMGetUndef (t), LLVMConstVector (mask, LLVMGetVectorSize (t)), "");
cmp = LLVMBuildAnd (builder, cmp, shuffle, "");
half = half / 2;
}
// Extract [0]
LLVMValueRef first_elem = LLVMBuildExtractElement (builder, cmp, LLVMConstInt (LLVMInt32Type (), 0, FALSE), "");
// convert to 0/1
LLVMValueRef cmp_zero = LLVMBuildICmp (builder, LLVMIntNE, first_elem, LLVMConstInt (elemt, 0, FALSE), "");
values [ins->dreg] = LLVMBuildZExt (builder, cmp_zero, LLVMInt8Type (), "");
break;
}
#endif
#if defined(TARGET_ARM64)
case OP_XOP_I4_I4:
case OP_XOP_I8_I8: {
IntrinsicId id = (IntrinsicId)ins->inst_c0;
values [ins->dreg] = call_intrins (ctx, id, &lhs, "");
break;
}
case OP_XOP_X_X_X:
case OP_XOP_I4_I4_I4:
case OP_XOP_I4_I4_I8: {
IntrinsicId id = (IntrinsicId)ins->inst_c0;
gboolean zext_last = FALSE, bitcast_result = FALSE, getElement = FALSE;
int element_idx = -1;
switch (id) {
case INTRINS_AARCH64_PMULL64:
getElement = TRUE;
bitcast_result = TRUE;
element_idx = ins->inst_c1;
break;
case INTRINS_AARCH64_CRC32B:
case INTRINS_AARCH64_CRC32H:
case INTRINS_AARCH64_CRC32W:
case INTRINS_AARCH64_CRC32CB:
case INTRINS_AARCH64_CRC32CH:
case INTRINS_AARCH64_CRC32CW:
zext_last = TRUE;
break;
default:
break;
}
LLVMValueRef arg1 = rhs;
if (zext_last)
arg1 = LLVMBuildZExt (ctx->builder, arg1, LLVMInt32Type (), "");
LLVMValueRef args [] = { lhs, arg1 };
if (getElement) {
args [0] = LLVMBuildExtractElement (ctx->builder, args [0], const_int32 (element_idx), "");
args [1] = LLVMBuildExtractElement (ctx->builder, args [1], const_int32 (element_idx), "");
}
values [ins->dreg] = call_intrins (ctx, id, args, "");
if (bitcast_result)
values [ins->dreg] = convert (ctx, values [ins->dreg], LLVMVectorType (LLVMInt64Type (), 2));
break;
}
case OP_XOP_X_X_X_X: {
IntrinsicId id = (IntrinsicId)ins->inst_c0;
gboolean getLowerElement = FALSE;
int arg_idx = -1;
switch (id) {
case INTRINS_AARCH64_SHA1C:
case INTRINS_AARCH64_SHA1M:
case INTRINS_AARCH64_SHA1P:
getLowerElement = TRUE;
arg_idx = 1;
break;
default:
break;
}
LLVMValueRef args [] = { lhs, rhs, arg3 };
if (getLowerElement)
args [arg_idx] = LLVMBuildExtractElement (ctx->builder, args [arg_idx], const_int32 (0), "");
values [ins->dreg] = call_intrins (ctx, id, args, "");
break;
}
case OP_XOP_X_X: {
IntrinsicId id = (IntrinsicId)ins->inst_c0;
LLVMTypeRef ret_t = simd_class_to_llvm_type (ctx, ins->klass);
gboolean getLowerElement = FALSE;
switch (id) {
case INTRINS_AARCH64_SHA1H: getLowerElement = TRUE; break;
default: break;
}
LLVMValueRef arg0 = lhs;
if (getLowerElement)
arg0 = LLVMBuildExtractElement (ctx->builder, arg0, const_int32 (0), "");
LLVMValueRef result = call_intrins (ctx, id, &arg0, "");
if (getLowerElement)
result = vector_from_scalar (ctx, ret_t, result);
values [ins->dreg] = result;
break;
}
case OP_XCOMPARE_FP_SCALAR:
case OP_XCOMPARE_FP: {
g_assert (LLVMTypeOf (lhs) == LLVMTypeOf (rhs));
gboolean scalar = ins->opcode == OP_XCOMPARE_FP_SCALAR;
LLVMRealPredicate pred = fpcond_to_llvm_cond [ins->inst_c0];
LLVMTypeRef ret_t = LLVMTypeOf (lhs);
LLVMTypeRef reti_t = to_integral_vector_type (ret_t);
LLVMValueRef args [] = { lhs, rhs };
if (scalar)
for (int i = 0; i < 2; ++i)
args [i] = scalar_from_vector (ctx, args [i]);
LLVMValueRef result = LLVMBuildFCmp (builder, pred, args [0], args [1], "xcompare_fp");
if (scalar)
result = vector_from_scalar (ctx, LLVMVectorType (LLVMIntType (1), LLVMGetVectorSize (reti_t)), result);
result = LLVMBuildSExt (builder, result, reti_t, "");
result = LLVMBuildBitCast (builder, result, ret_t, "");
values [ins->dreg] = result;
break;
}
case OP_XCOMPARE_SCALAR:
case OP_XCOMPARE: {
g_assert (LLVMTypeOf (lhs) == LLVMTypeOf (rhs));
gboolean scalar = ins->opcode == OP_XCOMPARE_SCALAR;
LLVMIntPredicate pred = cond_to_llvm_cond [ins->inst_c0];
LLVMTypeRef ret_t = LLVMTypeOf (lhs);
LLVMValueRef args [] = { lhs, rhs };
if (scalar)
for (int i = 0; i < 2; ++i)
args [i] = scalar_from_vector (ctx, args [i]);
LLVMValueRef result = LLVMBuildICmp (builder, pred, args [0], args [1], "xcompare");
if (scalar)
result = vector_from_scalar (ctx, LLVMVectorType (LLVMIntType (1), LLVMGetVectorSize (ret_t)), result);
values [ins->dreg] = LLVMBuildSExt (builder, result, ret_t, "");
break;
}
case OP_ARM64_EXT: {
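// ext: extract a window of <elems> elements from the concatenation lhs:rhs,
// starting at a runtime index, by unrolling a shuffle over every possible
// immediate.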
LLVMTypeRef ret_t = LLVMTypeOf (lhs);
unsigned int elems = LLVMGetVectorSize (ret_t);
g_assert (elems <= ARM64_MAX_VECTOR_ELEMS);
LLVMValueRef index = arg3;
LLVMValueRef default_value = lhs;
ImmediateUnrollCtx ictx = immediate_unroll_begin (ctx, bb, elems, index, ret_t, "arm64_ext");
int i = 0;
while (immediate_unroll_next (&ictx, &i)) {
LLVMValueRef mask = create_const_vector_i32 (&mask_0_incr_1 [i], elems);
LLVMValueRef result = LLVMBuildShuffleVector (builder, lhs, rhs, mask, "arm64_ext");
immediate_unroll_commit (&ictx, i, result);
}
immediate_unroll_default (&ictx);
immediate_unroll_commit_default (&ictx, default_value);
values [ins->dreg] = immediate_unroll_end (&ictx, &cbb);
break;
}
case OP_ARM64_MVN: {
LLVMTypeRef ret_t = LLVMTypeOf (lhs);
LLVMValueRef result = bitcast_to_integral (ctx, lhs);
result = LLVMBuildNot (builder, result, "arm64_mvn");
result = convert (ctx, result, ret_t);
values [ins->dreg] = result;
break;
}
case OP_ARM64_BIC: {
LLVMTypeRef ret_t = LLVMTypeOf (lhs);
LLVMValueRef result = bitcast_to_integral (ctx, lhs);
LLVMValueRef mask = bitcast_to_integral (ctx, rhs);
mask = LLVMBuildNot (builder, mask, "");
result = LLVMBuildAnd (builder, mask, result, "arm64_bic");
result = convert (ctx, result, ret_t);
values [ins->dreg] = result;
break;
}
case OP_ARM64_BSL: {
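// Bitwise select: result = (select & left) | (~select & right).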
LLVMTypeRef ret_t = LLVMTypeOf (rhs);
LLVMValueRef select = bitcast_to_integral (ctx, lhs);
LLVMValueRef left = bitcast_to_integral (ctx, rhs);
LLVMValueRef right = bitcast_to_integral (ctx, arg3);
LLVMValueRef result1 = LLVMBuildAnd (builder, select, left, "arm64_bsl");
LLVMValueRef result2 = LLVMBuildAnd (builder, LLVMBuildNot (builder, select, ""), right, "");
LLVMValueRef result = LLVMBuildOr (builder, result1, result2, "");
result = convert (ctx, result, ret_t);
values [ins->dreg] = result;
break;
}
case OP_ARM64_CMTST: {
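// cmtst: lanes where (lhs & rhs) has any bit set become all-ones, the rest
// become zero.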
LLVMTypeRef ret_t = simd_class_to_llvm_type (ctx, ins->klass);
LLVMValueRef l = bitcast_to_integral (ctx, lhs);
LLVMValueRef r = bitcast_to_integral (ctx, rhs);
LLVMValueRef result = LLVMBuildAnd (builder, l, r, "arm64_cmtst");
LLVMTypeRef t = LLVMTypeOf (l);
result = LLVMBuildICmp (builder, LLVMIntNE, result, LLVMConstNull (t), "");
result = LLVMBuildSExt (builder, result, t, "");
result = convert (ctx, result, ret_t);
values [ins->dreg] = result;
break;
}
case OP_ARM64_FCVTL:
case OP_ARM64_FCVTL2: {
LLVMTypeRef ret_t = simd_class_to_llvm_type (ctx, ins->klass);
gboolean high = ins->opcode == OP_ARM64_FCVTL2;
LLVMValueRef result = lhs;
if (high)
result = extract_high_elements (ctx, result);
result = LLVMBuildFPExt (builder, result, ret_t, "arm64_fcvtl");
values [ins->dreg] = result;
break;
}
case OP_ARM64_FCVTXN:
case OP_ARM64_FCVTXN2:
case OP_ARM64_FCVTN:
case OP_ARM64_FCVTN2: {
gboolean high = FALSE;
int iid = 0;
switch (ins->opcode) {
case OP_ARM64_FCVTXN2: high = TRUE; case OP_ARM64_FCVTXN: iid = INTRINS_AARCH64_ADV_SIMD_FCVTXN; break;
case OP_ARM64_FCVTN2: high = TRUE; break;
}
LLVMValueRef result = lhs;
if (high)
result = rhs;
if (iid)
result = call_intrins (ctx, iid, &result, "");
else
result = LLVMBuildFPTrunc (builder, result, v64_r4_t, "");
if (high)
result = concatenate_vectors (ctx, lhs, result);
values [ins->dreg] = result;
break;
}
case OP_ARM64_UCVTF:
case OP_ARM64_SCVTF:
case OP_ARM64_UCVTF_SCALAR:
case OP_ARM64_SCVTF_SCALAR: {
LLVMTypeRef ret_t = simd_class_to_llvm_type (ctx, ins->klass);
gboolean scalar = FALSE;
gboolean is_unsigned = FALSE;
switch (ins->opcode) {
case OP_ARM64_UCVTF_SCALAR: scalar = TRUE; case OP_ARM64_UCVTF: is_unsigned = TRUE; break;
case OP_ARM64_SCVTF_SCALAR: scalar = TRUE; break;
}
LLVMValueRef result = lhs;
LLVMTypeRef cvt_t = ret_t;
if (scalar) {
result = scalar_from_vector (ctx, result);
cvt_t = LLVMGetElementType (ret_t);
}
if (is_unsigned)
result = LLVMBuildUIToFP (builder, result, cvt_t, "arm64_ucvtf");
else
result = LLVMBuildSIToFP (builder, result, cvt_t, "arm64_scvtf");
if (scalar)
result = vector_from_scalar (ctx, ret_t, result);
values [ins->dreg] = result;
break;
}
case OP_ARM64_FCVTZS:
case OP_ARM64_FCVTZS_SCALAR:
case OP_ARM64_FCVTZU:
case OP_ARM64_FCVTZU_SCALAR: {
LLVMTypeRef ret_t = simd_class_to_llvm_type (ctx, ins->klass);
gboolean scalar = FALSE;
gboolean is_unsigned = FALSE;
switch (ins->opcode) {
case OP_ARM64_FCVTZU_SCALAR: scalar = TRUE; case OP_ARM64_FCVTZU: is_unsigned = TRUE; break;
case OP_ARM64_FCVTZS_SCALAR: scalar = TRUE; break;
}
LLVMValueRef result = lhs;
LLVMTypeRef cvt_t = ret_t;
if (scalar) {
result = scalar_from_vector (ctx, result);
cvt_t = LLVMGetElementType (ret_t);
}
if (is_unsigned)
result = LLVMBuildFPToUI (builder, result, cvt_t, "arm64_fcvtzu");
else
result = LLVMBuildFPToSI (builder, result, cvt_t, "arm64_fcvtzs");
if (scalar)
result = vector_from_scalar (ctx, ret_t, result);
values [ins->dreg] = result;
break;
}
case OP_ARM64_SELECT_SCALAR: {
LLVMValueRef result = LLVMBuildExtractElement (builder, lhs, rhs, "");
LLVMTypeRef elem_t = LLVMTypeOf (result);
unsigned int elem_bits = mono_llvm_get_prim_size_bits (elem_t);
LLVMTypeRef t = LLVMVectorType (elem_t, 64 / elem_bits);
result = vector_from_scalar (ctx, t, result);
values [ins->dreg] = result;
break;
}
case OP_ARM64_SELECT_QUAD: {
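// Extract one 32-bit chunk of the source (index in rhs) and replicate it
// across the result vector.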
LLVMTypeRef src_type = simd_class_to_llvm_type (ctx, ins->data.op [1].klass);
LLVMTypeRef ret_type = simd_class_to_llvm_type (ctx, ins->klass);
unsigned int src_type_bits = mono_llvm_get_prim_size_bits (src_type);
unsigned int ret_type_bits = mono_llvm_get_prim_size_bits (ret_type);
unsigned int src_intermediate_elems = src_type_bits / 32;
unsigned int ret_intermediate_elems = ret_type_bits / 32;
LLVMTypeRef intermediate_type = LLVMVectorType (i4_t, src_intermediate_elems);
LLVMValueRef result = LLVMBuildBitCast (builder, lhs, intermediate_type, "arm64_select_quad");
result = LLVMBuildExtractElement (builder, result, rhs, "arm64_select_quad");
result = broadcast_element (ctx, result, ret_intermediate_elems);
result = LLVMBuildBitCast (builder, result, ret_type, "arm64_select_quad");
values [ins->dreg] = result;
break;
}
case OP_LSCNT32:
case OP_LSCNT64: {
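// Count leading sign bits: lscnt(x) = ctlz(((x ^ (x >> (bits - 1))) << 1) | 1)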
// %shr = ashr i32 %x, 31
// %xor = xor i32 %shr, %x
// %mul = shl i32 %xor, 1
// %add = or i32 %mul, 1
// %0 = tail call i32 @llvm.ctlz.i32(i32 %add, i1 false)
LLVMValueRef shr = LLVMBuildAShr (builder, lhs, ins->opcode == OP_LSCNT32 ?
LLVMConstInt (LLVMInt32Type (), 31, FALSE) :
LLVMConstInt (LLVMInt64Type (), 63, FALSE), "");
LLVMValueRef one = ins->opcode == OP_LSCNT32 ?
LLVMConstInt (LLVMInt32Type (), 1, FALSE) :
LLVMConstInt (LLVMInt64Type (), 1, FALSE);
LLVMValueRef xor = LLVMBuildXor (builder, shr, lhs, "");
LLVMValueRef mul = LLVMBuildShl (builder, xor, one, "");
LLVMValueRef add = LLVMBuildOr (builder, mul, one, "");
LLVMValueRef args [2];
args [0] = add;
args [1] = LLVMConstInt (LLVMInt1Type (), 0, FALSE);
values [ins->dreg] = LLVMBuildCall (builder, get_intrins (ctx, ins->opcode == OP_LSCNT32 ? INTRINS_CTLZ_I32 : INTRINS_CTLZ_I64), args, 2, "");
break;
}
case OP_ARM64_SQRDMLAH:
case OP_ARM64_SQRDMLAH_BYSCALAR:
case OP_ARM64_SQRDMLAH_SCALAR:
case OP_ARM64_SQRDMLSH:
case OP_ARM64_SQRDMLSH_BYSCALAR:
case OP_ARM64_SQRDMLSH_SCALAR: {
gboolean byscalar = FALSE;
gboolean scalar = FALSE;
gboolean subtract = FALSE;
switch (ins->opcode) {
case OP_ARM64_SQRDMLAH_BYSCALAR: byscalar = TRUE; break;
case OP_ARM64_SQRDMLAH_SCALAR: scalar = TRUE; break;
case OP_ARM64_SQRDMLSH: subtract = TRUE; break;
case OP_ARM64_SQRDMLSH_BYSCALAR: subtract = TRUE; byscalar = TRUE; break;
case OP_ARM64_SQRDMLSH_SCALAR: subtract = TRUE; scalar = TRUE; break;
}
int acc_iid = subtract ? INTRINS_AARCH64_ADV_SIMD_SQSUB : INTRINS_AARCH64_ADV_SIMD_SQADD;
LLVMTypeRef ret_t = simd_class_to_llvm_type (ctx, ins->klass);
llvm_ovr_tag_t ovr_tag = ovr_tag_from_llvm_type (ret_t);
ScalarOpFromVectorOpCtx sctx = scalar_op_from_vector_op (ctx, ret_t, ins);
LLVMValueRef args [] = { lhs, rhs, arg3 };
if (byscalar) {
unsigned int elems = LLVMGetVectorSize (ret_t);
args [2] = broadcast_element (ctx, scalar_from_vector (ctx, args [2]), elems);
}
if (scalar) {
ovr_tag = sctx.ovr_tag;
scalar_op_from_vector_op_process_args (&sctx, args, 3);
}
LLVMValueRef result = call_overloaded_intrins (ctx, INTRINS_AARCH64_ADV_SIMD_SQRDMULH, ovr_tag, &args [1], "arm64_sqrdmlxh");
args [1] = result;
result = call_overloaded_intrins (ctx, acc_iid, ovr_tag, &args [0], "arm64_sqrdmlxh");
if (scalar)
result = scalar_op_from_vector_op_process_result (&sctx, result);
values [ins->dreg] = result;
break;
}
case OP_ARM64_SMULH:
case OP_ARM64_UMULH: {
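// High 64 bits of a 64x64 multiply: extend to i128 (sign or zero), multiply,
// shift right by 64 and truncate.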
LLVMValueRef op1, op2;
if (ins->opcode == OP_ARM64_SMULH) {
op1 = LLVMBuildSExt (builder, lhs, LLVMInt128Type (), "");
op2 = LLVMBuildSExt (builder, rhs, LLVMInt128Type (), "");
} else {
op1 = LLVMBuildZExt (builder, lhs, LLVMInt128Type (), "");
op2 = LLVMBuildZExt (builder, rhs, LLVMInt128Type (), "");
}
LLVMValueRef mul = LLVMBuildMul (builder, op1, op2, "");
LLVMValueRef hi64 = LLVMBuildLShr (builder, mul,
LLVMConstInt (LLVMInt128Type (), 64, FALSE), "");
values [ins->dreg] = LLVMBuildTrunc (builder, hi64, LLVMInt64Type (), "");
break;
}
case OP_ARM64_XNARROW_SCALAR: {
// Unfortunately, @llvm.aarch64.neon.scalar.sqxtun isn't available for i8 or i16.
LLVMTypeRef ret_t = simd_class_to_llvm_type (ctx, ins->klass);
llvm_ovr_tag_t ovr_tag = ovr_tag_from_llvm_type (ret_t);
LLVMTypeRef elem_t = LLVMGetElementType (ret_t);
LLVMValueRef result = NULL;
int iid = ins->inst_c0;
int scalar_iid = 0;
switch (iid) {
case INTRINS_AARCH64_ADV_SIMD_SQXTUN: scalar_iid = INTRINS_AARCH64_ADV_SIMD_SCALAR_SQXTUN; break;
case INTRINS_AARCH64_ADV_SIMD_SQXTN: scalar_iid = INTRINS_AARCH64_ADV_SIMD_SCALAR_SQXTN; break;
case INTRINS_AARCH64_ADV_SIMD_UQXTN: scalar_iid = INTRINS_AARCH64_ADV_SIMD_SCALAR_UQXTN; break;
default: g_assert_not_reached ();
}
if (elem_t == i4_t) {
LLVMValueRef arg = scalar_from_vector (ctx, lhs);
result = call_intrins (ctx, scalar_iid, &arg, "arm64_xnarrow_scalar");
result = vector_from_scalar (ctx, ret_t, result);
} else {
LLVMTypeRef arg_t = LLVMTypeOf (lhs);
LLVMTypeRef argelem_t = LLVMGetElementType (arg_t);
unsigned int argelems = LLVMGetVectorSize (arg_t);
LLVMValueRef arg = keep_lowest_element (ctx, LLVMVectorType (argelem_t, argelems * 2), lhs);
result = call_overloaded_intrins (ctx, iid, ovr_tag, &arg, "arm64_xnarrow_scalar");
result = keep_lowest_element (ctx, LLVMTypeOf (result), result);
}
values [ins->dreg] = result;
break;
}
case OP_ARM64_SQXTUN2:
case OP_ARM64_UQXTN2:
case OP_ARM64_SQXTN2:
case OP_ARM64_XTN:
case OP_ARM64_XTN2: {
llvm_ovr_tag_t ovr_tag = ovr_tag_from_mono_vector_class (ins->klass);
gboolean high = FALSE;
int iid = 0;
switch (ins->opcode) {
case OP_ARM64_SQXTUN2: high = TRUE; iid = INTRINS_AARCH64_ADV_SIMD_SQXTUN; break;
case OP_ARM64_UQXTN2: high = TRUE; iid = INTRINS_AARCH64_ADV_SIMD_UQXTN; break;
case OP_ARM64_SQXTN2: high = TRUE; iid = INTRINS_AARCH64_ADV_SIMD_SQXTN; break;
case OP_ARM64_XTN2: high = TRUE; break;
}
LLVMValueRef result = lhs;
if (high) {
result = rhs;
ovr_tag = ovr_tag_smaller_vector (ovr_tag);
}
LLVMTypeRef t = LLVMTypeOf (result);
LLVMTypeRef elem_t = LLVMGetElementType (t);
unsigned int elems = LLVMGetVectorSize (t);
unsigned int elem_bits = mono_llvm_get_prim_size_bits (elem_t);
LLVMTypeRef result_t = LLVMVectorType (LLVMIntType (elem_bits / 2), elems);
if (iid != 0)
result = call_overloaded_intrins (ctx, iid, ovr_tag, &result, "");
else
result = LLVMBuildTrunc (builder, result, result_t, "arm64_xtn");
if (high)
result = concatenate_vectors (ctx, lhs, result);
values [ins->dreg] = result;
break;
}
case OP_ARM64_CLZ: {
llvm_ovr_tag_t ovr_tag = ovr_tag_from_mono_vector_class (ins->klass);
LLVMValueRef args [] = { lhs, const_int1 (0) };
LLVMValueRef result = call_overloaded_intrins (ctx, INTRINS_AARCH64_ADV_SIMD_CLZ, ovr_tag, args, "");
values [ins->dreg] = result;
break;
}
case OP_ARM64_FMSUB:
case OP_ARM64_FMSUB_BYSCALAR:
case OP_ARM64_FMSUB_SCALAR:
case OP_ARM64_FNMSUB_SCALAR:
case OP_ARM64_FMADD:
case OP_ARM64_FMADD_BYSCALAR:
case OP_ARM64_FMADD_SCALAR:
case OP_ARM64_FNMADD_SCALAR: {
llvm_ovr_tag_t ovr_tag = ovr_tag_from_mono_vector_class (ins->klass);
gboolean scalar = FALSE;
gboolean negate = FALSE;
gboolean subtract = FALSE;
gboolean byscalar = FALSE;
switch (ins->opcode) {
case OP_ARM64_FMSUB: subtract = TRUE; break;
case OP_ARM64_FMSUB_BYSCALAR: subtract = TRUE; byscalar = TRUE; break;
case OP_ARM64_FMSUB_SCALAR: subtract = TRUE; scalar = TRUE; break;
case OP_ARM64_FNMSUB_SCALAR: subtract = TRUE; scalar = TRUE; negate = TRUE; break;
case OP_ARM64_FMADD: break;
case OP_ARM64_FMADD_BYSCALAR: byscalar = TRUE; break;
case OP_ARM64_FMADD_SCALAR: scalar = TRUE; break;
case OP_ARM64_FNMADD_SCALAR: scalar = TRUE; negate = TRUE; break;
}
// llvm.fma argument order: mulop1, mulop2, addend
LLVMValueRef args [] = { rhs, arg3, lhs };
if (byscalar) {
unsigned int elems = LLVMGetVectorSize (LLVMTypeOf (args [0]));
args [1] = broadcast_element (ctx, scalar_from_vector (ctx, args [1]), elems);
}
if (scalar) {
ovr_tag = ovr_tag_force_scalar (ovr_tag);
for (int i = 0; i < 3; ++i)
args [i] = scalar_from_vector (ctx, args [i]);
}
if (subtract)
args [0] = LLVMBuildFNeg (builder, args [0], "arm64_fma_sub");
if (negate) {
args [0] = LLVMBuildFNeg (builder, args [0], "arm64_fma_negate");
args [2] = LLVMBuildFNeg (builder, args [2], "arm64_fma_negate");
}
LLVMValueRef result = call_overloaded_intrins (ctx, INTRINS_AARCH64_ADV_SIMD_FMA, ovr_tag, args, "arm64_fma");
if (scalar)
result = vector_from_scalar (ctx, LLVMTypeOf (lhs), result);
values [ins->dreg] = result;
break;
}
case OP_ARM64_SQDMULL:
case OP_ARM64_SQDMULL_BYSCALAR:
case OP_ARM64_SQDMULL2:
case OP_ARM64_SQDMULL2_BYSCALAR:
case OP_ARM64_SQDMLAL:
case OP_ARM64_SQDMLAL_BYSCALAR:
case OP_ARM64_SQDMLAL2:
case OP_ARM64_SQDMLAL2_BYSCALAR:
case OP_ARM64_SQDMLSL:
case OP_ARM64_SQDMLSL_BYSCALAR:
case OP_ARM64_SQDMLSL2:
case OP_ARM64_SQDMLSL2_BYSCALAR: {
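// Saturating doubling multiply long, with optional saturating accumulate
// (sqadd/sqsub), optional high-half inputs ("2" forms) and an optional
// broadcast scalar multiplicand.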
llvm_ovr_tag_t ovr_tag = ovr_tag_from_mono_vector_class (ins->klass);
gboolean scalar = FALSE;
gboolean add = FALSE;
gboolean subtract = FALSE;
gboolean high = FALSE;
switch (ins->opcode) {
case OP_ARM64_SQDMULL_BYSCALAR: scalar = TRUE; case OP_ARM64_SQDMULL: break;
case OP_ARM64_SQDMULL2_BYSCALAR: scalar = TRUE; case OP_ARM64_SQDMULL2: high = TRUE; break;
case OP_ARM64_SQDMLAL_BYSCALAR: scalar = TRUE; case OP_ARM64_SQDMLAL: add = TRUE; break;
case OP_ARM64_SQDMLAL2_BYSCALAR: scalar = TRUE; case OP_ARM64_SQDMLAL2: high = TRUE; add = TRUE; break;
case OP_ARM64_SQDMLSL_BYSCALAR: scalar = TRUE; case OP_ARM64_SQDMLSL: subtract = TRUE; break;
case OP_ARM64_SQDMLSL2_BYSCALAR: scalar = TRUE; case OP_ARM64_SQDMLSL2: high = TRUE; subtract = TRUE; break;
}
int iid = 0;
if (add)
iid = INTRINS_AARCH64_ADV_SIMD_SQADD;
else if (subtract)
iid = INTRINS_AARCH64_ADV_SIMD_SQSUB;
LLVMValueRef mul1 = lhs;
LLVMValueRef mul2 = rhs;
if (iid != 0) {
mul1 = rhs;
mul2 = arg3;
}
if (scalar) {
LLVMTypeRef t = LLVMTypeOf (mul1);
unsigned int elems = LLVMGetVectorSize (t);
mul2 = broadcast_element (ctx, scalar_from_vector (ctx, mul2), elems);
}
LLVMValueRef args [] = { mul1, mul2 };
if (high)
for (int i = 0; i < 2; ++i)
args [i] = extract_high_elements (ctx, args [i]);
LLVMValueRef result = call_overloaded_intrins (ctx, INTRINS_AARCH64_ADV_SIMD_SQDMULL, ovr_tag, args, "");
LLVMValueRef args2 [] = { lhs, result };
if (iid != 0)
result = call_overloaded_intrins (ctx, iid, ovr_tag, args2, "");
values [ins->dreg] = result;
break;
}
case OP_ARM64_SQDMULL_SCALAR:
case OP_ARM64_SQDMLAL_SCALAR:
case OP_ARM64_SQDMLSL_SCALAR: {
/*
* define dso_local i32 @__vqdmlslh_lane_s16(i32, i16, <4 x i16>, i32) local_unnamed_addr #0 {
* %5 = insertelement <4 x i16> undef, i16 %1, i64 0
* %6 = shufflevector <4 x i16> %2, <4 x i16> undef, <4 x i32> <i32 3, i32 undef, i32 undef, i32 undef>
* %7 = tail call <4 x i32> @llvm.aarch64.neon.sqdmull.v4i32(<4 x i16> %5, <4 x i16> %6)
* %8 = extractelement <4 x i32> %7, i64 0
* %9 = tail call i32 @llvm.aarch64.neon.sqsub.i32(i32 %0, i32 %8)
* ret i32 %9
* }
*
* define dso_local i64 @__vqdmlals_s32(i64, i32, i32) local_unnamed_addr #0 {
* %4 = tail call i64 @llvm.aarch64.neon.sqdmulls.scalar(i32 %1, i32 %2) #2
* %5 = tail call i64 @llvm.aarch64.neon.sqadd.i64(i64 %0, i64 %4) #2
* ret i64 %5
* }
*/
int mulid = INTRINS_AARCH64_ADV_SIMD_SQDMULL;
int iid = 0;
gboolean scalar_mul_result = FALSE;
gboolean scalar_acc_result = FALSE;
switch (ins->opcode) {
case OP_ARM64_SQDMLAL_SCALAR: iid = INTRINS_AARCH64_ADV_SIMD_SQADD; break;
case OP_ARM64_SQDMLSL_SCALAR: iid = INTRINS_AARCH64_ADV_SIMD_SQSUB; break;
}
LLVMTypeRef ret_t = simd_class_to_llvm_type (ctx, ins->klass);
LLVMValueRef mularg = lhs;
LLVMValueRef selected_scalar = rhs;
if (iid != 0) {
mularg = rhs;
selected_scalar = arg3;
}
llvm_ovr_tag_t multag = ovr_tag_smaller_elements (ovr_tag_from_llvm_type (ret_t));
llvm_ovr_tag_t iidtag = ovr_tag_force_scalar (ovr_tag_from_llvm_type (ret_t));
LLVMTypeRef mularg_t = ovr_tag_to_llvm_type (multag);
if (multag & INTRIN_int32) {
/* The (i32, i32) -> i64 variant of aarch64_neon_sqdmull has
* a unique, non-overloaded name.
*/
mulid = INTRINS_AARCH64_ADV_SIMD_SQDMULL_SCALAR;
multag = 0;
iidtag = INTRIN_int64 | INTRIN_scalar;
scalar_mul_result = TRUE;
scalar_acc_result = TRUE;
} else if (multag & INTRIN_int16) {
/* We were passed a (<4 x i16>, <4 x i16>) but the
* widening multiplication intrinsic will yield a <4 x i32>.
*/
multag = INTRIN_int32 | INTRIN_vector128;
} else
g_assert_not_reached ();
if (scalar_mul_result) {
mularg = scalar_from_vector (ctx, mularg);
selected_scalar = scalar_from_vector (ctx, selected_scalar);
} else {
mularg = keep_lowest_element (ctx, mularg_t, mularg);
selected_scalar = keep_lowest_element (ctx, mularg_t, selected_scalar);
}
LLVMValueRef mulargs [] = { mularg, selected_scalar };
LLVMValueRef result = call_overloaded_intrins (ctx, mulid, multag, mulargs, "arm64_sqdmull_scalar");
if (iid != 0) {
LLVMValueRef acc = scalar_from_vector (ctx, lhs);
if (!scalar_mul_result)
result = scalar_from_vector (ctx, result);
LLVMValueRef subargs [] = { acc, result };
result = call_overloaded_intrins (ctx, iid, iidtag, subargs, "arm64_sqdmlxl_scalar");
scalar_acc_result = TRUE;
}
if (scalar_acc_result)
result = vector_from_scalar (ctx, ret_t, result);
else
result = keep_lowest_element (ctx, ret_t, result);
values [ins->dreg] = result;
break;
}
case OP_ARM64_FMUL_SEL: {
LLVMValueRef mul2 = LLVMBuildExtractElement (builder, rhs, arg3, "");
LLVMValueRef mul1 = scalar_from_vector (ctx, lhs);
LLVMValueRef result = LLVMBuildFMul (builder, mul1, mul2, "arm64_fmul_sel");
result = vector_from_scalar (ctx, LLVMTypeOf (lhs), result);
values [ins->dreg] = result;
break;
}
case OP_ARM64_MLA:
case OP_ARM64_MLA_SCALAR:
case OP_ARM64_MLS:
case OP_ARM64_MLS_SCALAR: {
gboolean scalar = FALSE;
gboolean add = FALSE;
switch (ins->opcode) {
case OP_ARM64_MLA_SCALAR: scalar = TRUE; case OP_ARM64_MLA: add = TRUE; break;
case OP_ARM64_MLS_SCALAR: scalar = TRUE; case OP_ARM64_MLS: break;
}
LLVMTypeRef mul_t = LLVMTypeOf (rhs);
unsigned int elems = LLVMGetVectorSize (mul_t);
LLVMValueRef mul2 = arg3;
if (scalar)
mul2 = broadcast_element (ctx, scalar_from_vector (ctx, mul2), elems);
LLVMValueRef result = LLVMBuildMul (builder, rhs, mul2, "");
if (add)
result = LLVMBuildAdd (builder, lhs, result, "");
else
result = LLVMBuildSub (builder, lhs, result, "");
values [ins->dreg] = result;
break;
}
case OP_ARM64_SMULL:
case OP_ARM64_SMULL_SCALAR:
case OP_ARM64_SMULL2:
case OP_ARM64_SMULL2_SCALAR:
case OP_ARM64_UMULL:
case OP_ARM64_UMULL_SCALAR:
case OP_ARM64_UMULL2:
case OP_ARM64_UMULL2_SCALAR:
case OP_ARM64_SMLAL:
case OP_ARM64_SMLAL_SCALAR:
case OP_ARM64_SMLAL2:
case OP_ARM64_SMLAL2_SCALAR:
case OP_ARM64_UMLAL:
case OP_ARM64_UMLAL_SCALAR:
case OP_ARM64_UMLAL2:
case OP_ARM64_UMLAL2_SCALAR:
case OP_ARM64_SMLSL:
case OP_ARM64_SMLSL_SCALAR:
case OP_ARM64_SMLSL2:
case OP_ARM64_SMLSL2_SCALAR:
case OP_ARM64_UMLSL:
case OP_ARM64_UMLSL_SCALAR:
case OP_ARM64_UMLSL2:
case OP_ARM64_UMLSL2_SCALAR: {
llvm_ovr_tag_t ovr_tag = ovr_tag_from_mono_vector_class (ins->klass);
gboolean is_unsigned = FALSE;
gboolean high = FALSE;
gboolean add = FALSE;
gboolean subtract = FALSE;
gboolean scalar = FALSE;
int opcode = ins->opcode;
switch (opcode) {
case OP_ARM64_SMULL_SCALAR: scalar = TRUE; opcode = OP_ARM64_SMULL; break;
case OP_ARM64_UMULL_SCALAR: scalar = TRUE; opcode = OP_ARM64_UMULL; break;
case OP_ARM64_SMLAL_SCALAR: scalar = TRUE; opcode = OP_ARM64_SMLAL; break;
case OP_ARM64_UMLAL_SCALAR: scalar = TRUE; opcode = OP_ARM64_UMLAL; break;
case OP_ARM64_SMLSL_SCALAR: scalar = TRUE; opcode = OP_ARM64_SMLSL; break;
case OP_ARM64_UMLSL_SCALAR: scalar = TRUE; opcode = OP_ARM64_UMLSL; break;
case OP_ARM64_SMULL2_SCALAR: scalar = TRUE; opcode = OP_ARM64_SMULL2; break;
case OP_ARM64_UMULL2_SCALAR: scalar = TRUE; opcode = OP_ARM64_UMULL2; break;
case OP_ARM64_SMLAL2_SCALAR: scalar = TRUE; opcode = OP_ARM64_SMLAL2; break;
case OP_ARM64_UMLAL2_SCALAR: scalar = TRUE; opcode = OP_ARM64_UMLAL2; break;
case OP_ARM64_SMLSL2_SCALAR: scalar = TRUE; opcode = OP_ARM64_SMLSL2; break;
case OP_ARM64_UMLSL2_SCALAR: scalar = TRUE; opcode = OP_ARM64_UMLSL2; break;
}
switch (opcode) {
case OP_ARM64_SMULL2: high = TRUE; case OP_ARM64_SMULL: break;
case OP_ARM64_UMULL2: high = TRUE; case OP_ARM64_UMULL: is_unsigned = TRUE; break;
case OP_ARM64_SMLAL2: high = TRUE; case OP_ARM64_SMLAL: add = TRUE; break;
case OP_ARM64_UMLAL2: high = TRUE; case OP_ARM64_UMLAL: add = TRUE; is_unsigned = TRUE; break;
case OP_ARM64_SMLSL2: high = TRUE; case OP_ARM64_SMLSL: subtract = TRUE; break;
case OP_ARM64_UMLSL2: high = TRUE; case OP_ARM64_UMLSL: subtract = TRUE; is_unsigned = TRUE; break;
}
int iid = is_unsigned ? INTRINS_AARCH64_ADV_SIMD_UMULL : INTRINS_AARCH64_ADV_SIMD_SMULL;
LLVMValueRef intrin_args [] = { lhs, rhs };
if (add || subtract) {
intrin_args [0] = rhs;
intrin_args [1] = arg3;
}
if (scalar) {
LLVMValueRef sarg = intrin_args [1];
LLVMTypeRef t = LLVMTypeOf (intrin_args [0]);
unsigned int elems = LLVMGetVectorSize (t);
sarg = broadcast_element (ctx, scalar_from_vector (ctx, sarg), elems);
intrin_args [1] = sarg;
}
if (high)
for (int i = 0; i < 2; ++i)
intrin_args [i] = extract_high_elements (ctx, intrin_args [i]);
LLVMValueRef result = call_overloaded_intrins (ctx, iid, ovr_tag, intrin_args, "");
if (add)
result = LLVMBuildAdd (builder, lhs, result, "");
if (subtract)
result = LLVMBuildSub (builder, lhs, result, "");
values [ins->dreg] = result;
break;
}
case OP_ARM64_XNEG:
case OP_ARM64_XNEG_SCALAR: {
gboolean scalar = ins->opcode == OP_ARM64_XNEG_SCALAR;
gboolean is_float = FALSE;
switch (inst_c1_type (ins)) {
case MONO_TYPE_R4: case MONO_TYPE_R8: is_float = TRUE;
}
LLVMValueRef result = lhs;
if (scalar)
result = scalar_from_vector (ctx, result);
if (is_float)
result = LLVMBuildFNeg (builder, result, "arm64_xneg");
else
result = LLVMBuildNeg (builder, result, "arm64_xneg");
if (scalar)
result = vector_from_scalar (ctx, LLVMTypeOf (lhs), result);
values [ins->dreg] = result;
break;
}
case OP_ARM64_PMULL:
case OP_ARM64_PMULL2: {
gboolean high = ins->opcode == OP_ARM64_PMULL2;
LLVMValueRef args [] = { lhs, rhs };
if (high)
for (int i = 0; i < 2; ++i)
args [i] = extract_high_elements (ctx, args [i]);
LLVMValueRef result = call_intrins (ctx, INTRINS_AARCH64_ADV_SIMD_PMULL, args, "arm64_pmull");
values [ins->dreg] = result;
break;
}
case OP_ARM64_REVN: {
LLVMTypeRef t = LLVMTypeOf (lhs);
LLVMTypeRef elem_t = LLVMGetElementType (t);
unsigned int group_bits = mono_llvm_get_prim_size_bits (elem_t);
unsigned int vec_bits = mono_llvm_get_prim_size_bits (t);
unsigned int tmp_bits = ins->inst_c0;
unsigned int tmp_elements = vec_bits / tmp_bits;
const int cycle8 [] = { 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8 };
const int cycle4 [] = { 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12 };
const int cycle2 [] = { 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14 };
const int *cycle = NULL;
switch (group_bits / tmp_bits) {
case 2: cycle = cycle2; break;
case 4: cycle = cycle4; break;
case 8: cycle = cycle8; break;
default: g_assert_not_reached ();
}
g_assert (tmp_elements <= ARM64_MAX_VECTOR_ELEMS);
LLVMTypeRef tmp_t = LLVMVectorType (LLVMIntType (tmp_bits), tmp_elements);
LLVMValueRef tmp = LLVMBuildBitCast (builder, lhs, tmp_t, "arm64_revn");
LLVMValueRef result = LLVMBuildShuffleVector (builder, tmp, LLVMGetUndef (tmp_t), create_const_vector_i32 (cycle, tmp_elements), "");
result = LLVMBuildBitCast (builder, result, t, "");
values [ins->dreg] = result;
break;
}
case OP_ARM64_SHL:
case OP_ARM64_SSHR:
case OP_ARM64_SSRA:
case OP_ARM64_USHR:
case OP_ARM64_USRA: {
gboolean right = FALSE;
gboolean add = FALSE;
gboolean arith = FALSE;
switch (ins->opcode) {
case OP_ARM64_USHR: right = TRUE; break;
case OP_ARM64_USRA: right = TRUE; add = TRUE; break;
case OP_ARM64_SSHR: arith = TRUE; break;
case OP_ARM64_SSRA: arith = TRUE; add = TRUE; break;
}
LLVMValueRef shiftarg = lhs;
LLVMValueRef shift = rhs;
if (add) {
shiftarg = rhs;
shift = arg3;
}
shift = create_shift_vector (ctx, shiftarg, shift);
LLVMValueRef result = NULL;
if (right)
result = LLVMBuildLShr (builder, shiftarg, shift, "");
else if (arith)
result = LLVMBuildAShr (builder, shiftarg, shift, "");
else
result = LLVMBuildShl (builder, shiftarg, shift, "");
if (add)
result = LLVMBuildAdd (builder, lhs, result, "arm64_usra");
values [ins->dreg] = result;
break;
}
case OP_ARM64_SHRN:
case OP_ARM64_SHRN2: {
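// shrn: shift each lane right, then truncate to half-width lanes; the "2"
// form writes the high half of the destination.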
LLVMValueRef shiftarg = lhs;
LLVMValueRef shift = rhs;
gboolean high = ins->opcode == OP_ARM64_SHRN2;
if (high) {
shiftarg = rhs;
shift = arg3;
}
LLVMTypeRef arg_t = LLVMTypeOf (shiftarg);
LLVMTypeRef elem_t = LLVMGetElementType (arg_t);
unsigned int elems = LLVMGetVectorSize (arg_t);
unsigned int bits = mono_llvm_get_prim_size_bits (elem_t);
LLVMTypeRef trunc_t = LLVMVectorType (LLVMIntType (bits / 2), elems);
shift = create_shift_vector (ctx, shiftarg, shift);
LLVMValueRef result = LLVMBuildLShr (builder, shiftarg, shift, "shrn");
result = LLVMBuildTrunc (builder, result, trunc_t, "");
if (high) {
result = concatenate_vectors (ctx, lhs, result);
}
values [ins->dreg] = result;
break;
}
case OP_ARM64_SRSHR:
case OP_ARM64_SRSRA:
case OP_ARM64_URSHR:
case OP_ARM64_URSRA: {
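// Rounding shift right (and accumulate): emitted as the rounding shift-left
// intrinsic with a negated shift amount.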
llvm_ovr_tag_t ovr_tag = ovr_tag_from_mono_vector_class (ins->klass);
LLVMValueRef shiftarg = lhs;
LLVMValueRef shift = rhs;
gboolean right = FALSE;
gboolean add = FALSE;
switch (ins->opcode) {
case OP_ARM64_URSRA: add = TRUE; case OP_ARM64_URSHR: right = TRUE; break;
case OP_ARM64_SRSRA: add = TRUE; case OP_ARM64_SRSHR: right = TRUE; break;
}
int iid = 0;
switch (ins->opcode) {
case OP_ARM64_URSRA: case OP_ARM64_URSHR: iid = INTRINS_AARCH64_ADV_SIMD_URSHL; break;
case OP_ARM64_SRSRA: case OP_ARM64_SRSHR: iid = INTRINS_AARCH64_ADV_SIMD_SRSHL; break;
}
if (add) {
shiftarg = rhs;
shift = arg3;
}
if (right)
shift = LLVMBuildNeg (builder, shift, "");
shift = create_shift_vector (ctx, shiftarg, shift);
LLVMValueRef args [] = { shiftarg, shift };
LLVMValueRef result = call_overloaded_intrins (ctx, iid, ovr_tag, args, "");
if (add)
result = LLVMBuildAdd (builder, result, lhs, "");
values [ins->dreg] = result;
break;
}
case OP_ARM64_XNSHIFT_SCALAR:
case OP_ARM64_XNSHIFT:
case OP_ARM64_XNSHIFT2: {
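// Saturating/rounding narrowing shifts take an immediate in [1, elem_bits/2];
// unroll over that range because the shift amount is a runtime value here.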
LLVMTypeRef intrin_result_t = simd_class_to_llvm_type (ctx, ins->klass);
llvm_ovr_tag_t ovr_tag = ovr_tag_from_llvm_type (intrin_result_t);
LLVMValueRef shift_arg = lhs;
LLVMValueRef shift_amount = rhs;
gboolean high = FALSE;
gboolean scalar = FALSE;
int iid = ins->inst_c0;
switch (ins->opcode) {
case OP_ARM64_XNSHIFT_SCALAR: scalar = TRUE; break;
case OP_ARM64_XNSHIFT2: high = TRUE; break;
}
if (high) {
shift_arg = rhs;
shift_amount = arg3;
ovr_tag = ovr_tag_smaller_vector (ovr_tag);
intrin_result_t = ovr_tag_to_llvm_type (ovr_tag);
}
LLVMTypeRef shift_arg_t = LLVMTypeOf (shift_arg);
LLVMTypeRef shift_arg_elem_t = LLVMGetElementType (shift_arg_t);
unsigned int element_bits = mono_llvm_get_prim_size_bits (shift_arg_elem_t);
int range_min = 1;
int range_max = element_bits / 2;
if (scalar) {
unsigned int elems = LLVMGetVectorSize (shift_arg_t);
LLVMValueRef lo = scalar_from_vector (ctx, shift_arg);
shift_arg = vector_from_scalar (ctx, LLVMVectorType (shift_arg_elem_t, elems * 2), lo);
}
int max_index = range_max - range_min + 1;
ImmediateUnrollCtx ictx = immediate_unroll_begin (ctx, bb, max_index, shift_amount, intrin_result_t, "arm64_xnshift");
int i = 0;
while (immediate_unroll_next (&ictx, &i)) {
int shift_const = i + range_min;
LLVMValueRef intrin_args [] = { shift_arg, const_int32 (shift_const) };
LLVMValueRef result = call_overloaded_intrins (ctx, iid, ovr_tag, intrin_args, "");
immediate_unroll_commit (&ictx, shift_const, result);
}
{
immediate_unroll_default (&ictx);
LLVMValueRef intrin_args [] = { shift_arg, const_int32 (range_max) };
LLVMValueRef result = call_overloaded_intrins (ctx, iid, ovr_tag, intrin_args, "");
immediate_unroll_commit_default (&ictx, result);
}
LLVMValueRef result = immediate_unroll_end (&ictx, &cbb);
if (high)
result = concatenate_vectors (ctx, lhs, result);
if (scalar)
result = keep_lowest_element (ctx, LLVMTypeOf (result), result);
values [ins->dreg] = result;
break;
}
case OP_ARM64_SQSHLU:
case OP_ARM64_SQSHLU_SCALAR: {
gboolean scalar = ins->opcode == OP_ARM64_SQSHLU_SCALAR;
LLVMTypeRef intrin_result_t = simd_class_to_llvm_type (ctx, ins->klass);
LLVMTypeRef elem_t = LLVMGetElementType (intrin_result_t);
unsigned int element_bits = mono_llvm_get_prim_size_bits (elem_t);
llvm_ovr_tag_t ovr_tag = ovr_tag_from_llvm_type (intrin_result_t);
int max_index = element_bits;
ScalarOpFromVectorOpCtx sctx = scalar_op_from_vector_op (ctx, intrin_result_t, ins);
intrin_result_t = scalar ? sctx.intermediate_type : intrin_result_t;
ovr_tag = scalar ? sctx.ovr_tag : ovr_tag;
ImmediateUnrollCtx ictx = immediate_unroll_begin (ctx, bb, max_index, rhs, intrin_result_t, "arm64_sqshlu");
int i = 0;
while (immediate_unroll_next (&ictx, &i)) {
int shift_const = i;
LLVMValueRef args [2] = { lhs, create_shift_vector (ctx, lhs, const_int32 (shift_const)) };
if (scalar)
scalar_op_from_vector_op_process_args (&sctx, args, 2);
LLVMValueRef result = call_overloaded_intrins (ctx, INTRINS_AARCH64_ADV_SIMD_SQSHLU, ovr_tag, args, "");
immediate_unroll_commit (&ictx, shift_const, result);
}
{
immediate_unroll_default (&ictx);
LLVMValueRef srcarg = lhs;
if (scalar)
scalar_op_from_vector_op_process_args (&sctx, &srcarg, 1);
immediate_unroll_commit_default (&ictx, srcarg);
}
LLVMValueRef result = immediate_unroll_end (&ictx, &cbb);
if (scalar)
result = scalar_op_from_vector_op_process_result (&sctx, result);
values [ins->dreg] = result;
break;
}
case OP_ARM64_SSHLL:
case OP_ARM64_SSHLL2:
case OP_ARM64_USHLL:
case OP_ARM64_USHLL2: {
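// (u|s)shll: widen each lane to the destination element size, then shift left
// by the immediate; the "2" forms widen the high half of the input.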
LLVMTypeRef ret_t = simd_class_to_llvm_type (ctx, ins->klass);
gboolean high = FALSE;
gboolean is_unsigned = FALSE;
switch (ins->opcode) {
case OP_ARM64_SSHLL2: high = TRUE; break;
case OP_ARM64_USHLL2: high = TRUE; case OP_ARM64_USHLL: is_unsigned = TRUE; break;
}
LLVMValueRef result = lhs;
if (high)
result = extract_high_elements (ctx, result);
if (is_unsigned)
result = LLVMBuildZExt (builder, result, ret_t, "arm64_ushll");
else
result = LLVMBuildSExt (builder, result, ret_t, "arm64_ushll");
result = LLVMBuildShl (builder, result, create_shift_vector (ctx, result, rhs), "");
values [ins->dreg] = result;
break;
}
case OP_ARM64_SLI:
case OP_ARM64_SRI: {
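// sli/sri: shift-and-insert with an immediate; sri counts run 1..elem_bits
// while sli counts run 0..elem_bits-1, hence the range adjustment below.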
LLVMTypeRef intrin_result_t = simd_class_to_llvm_type (ctx, ins->klass);
llvm_ovr_tag_t ovr_tag = ovr_tag_from_llvm_type (intrin_result_t);
unsigned int element_bits = mono_llvm_get_prim_size_bits (LLVMGetElementType (intrin_result_t));
int range_min = 0;
int range_max = element_bits - 1;
if (ins->opcode == OP_ARM64_SRI) {
++range_min;
++range_max;
}
int iid = ins->opcode == OP_ARM64_SRI ? INTRINS_AARCH64_ADV_SIMD_SRI : INTRINS_AARCH64_ADV_SIMD_SLI;
int max_index = range_max - range_min + 1;
ImmediateUnrollCtx ictx = immediate_unroll_begin (ctx, bb, max_index, arg3, intrin_result_t, "arm64_shift_insert");
LLVMValueRef intrin_args [3] = { lhs, rhs, arg3 };
int i = 0;
while (immediate_unroll_next (&ictx, &i)) {
int shift_const = i + range_min;
intrin_args [2] = const_int32 (shift_const);
LLVMValueRef result = call_overloaded_intrins (ctx, iid, ovr_tag, intrin_args, "");
immediate_unroll_commit (&ictx, shift_const, result);
}
immediate_unroll_default (&ictx);
immediate_unroll_commit_default (&ictx, lhs);
LLVMValueRef result = immediate_unroll_end (&ictx, &cbb);
values [ins->dreg] = result;
break;
}
case OP_ARM64_SQRT_SCALAR: {
int iid = ins->inst_c0 == MONO_TYPE_R8 ? INTRINS_SQRT : INTRINS_SQRTF;
LLVMTypeRef t = LLVMTypeOf (lhs);
LLVMValueRef scalar = LLVMBuildExtractElement (builder, lhs, const_int32 (0), "");
LLVMValueRef result = call_intrins (ctx, iid, &scalar, "arm64_sqrt_scalar");
values [ins->dreg] = LLVMBuildInsertElement (builder, LLVMGetUndef (t), result, const_int32 (0), "");
break;
}
case OP_ARM64_STP:
case OP_ARM64_STP_SCALAR:
case OP_ARM64_STNP:
case OP_ARM64_STNP_SCALAR: {
gboolean nontemporal = FALSE;
gboolean scalar = FALSE;
switch (ins->opcode) {
case OP_ARM64_STNP: nontemporal = TRUE; break;
case OP_ARM64_STNP_SCALAR: nontemporal = TRUE; scalar = TRUE; break;
case OP_ARM64_STP_SCALAR: scalar = TRUE; break;
}
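/* The pair store is emitted as a single store of a double-width value:
 * two scalars are packed with a shuffle, two vectors are concatenated. */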
LLVMTypeRef rhs_t = LLVMTypeOf (rhs);
LLVMValueRef val = NULL;
LLVMTypeRef dst_t = LLVMPointerType (rhs_t, 0);
if (scalar)
val = LLVMBuildShuffleVector (builder, rhs, arg3, create_const_vector_2_i32 (0, 2), "");
else {
unsigned int rhs_elems = LLVMGetVectorSize (rhs_t);
LLVMTypeRef rhs_elt_t = LLVMGetElementType (rhs_t);
dst_t = LLVMPointerType (LLVMVectorType (rhs_elt_t, rhs_elems * 2), 0);
val = concatenate_vectors (ctx, rhs, arg3);
}
LLVMValueRef address = convert (ctx, lhs, dst_t);
LLVMValueRef store = mono_llvm_build_store (builder, val, address, FALSE, LLVM_BARRIER_NONE);
if (nontemporal)
set_nontemporal_flag (store);
break;
}
case OP_ARM64_LD1_INSERT: {
LLVMTypeRef ret_t = simd_class_to_llvm_type (ctx, ins->klass);
LLVMTypeRef elem_t = LLVMGetElementType (ret_t);
LLVMValueRef address = convert (ctx, arg3, LLVMPointerType (elem_t, 0));
unsigned int alignment = mono_llvm_get_prim_size_bits (ret_t) / 8;
LLVMValueRef result = mono_llvm_build_aligned_load (builder, address, "arm64_ld1_insert", FALSE, alignment);
result = LLVMBuildInsertElement (builder, lhs, result, rhs, "arm64_ld1_insert");
values [ins->dreg] = result;
break;
}
case OP_ARM64_LD1R:
case OP_ARM64_LD1: {
gboolean replicate = ins->opcode == OP_ARM64_LD1R;
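/* LD1R loads a single element and broadcasts it to every lane; plain LD1
 * loads the whole vector. */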
LLVMTypeRef ret_t = simd_class_to_llvm_type (ctx, ins->klass);
unsigned int alignment = mono_llvm_get_prim_size_bits (ret_t) / 8;
LLVMValueRef address = lhs;
LLVMTypeRef address_t = LLVMPointerType (ret_t, 0);
if (replicate) {
LLVMTypeRef elem_t = LLVMGetElementType (ret_t);
address_t = LLVMPointerType (elem_t, 0);
}
address = convert (ctx, address, address_t);
LLVMValueRef result = mono_llvm_build_aligned_load (builder, address, "arm64_ld1", FALSE, alignment);
if (replicate) {
unsigned int elems = LLVMGetVectorSize (ret_t);
result = broadcast_element (ctx, result, elems);
}
values [ins->dreg] = result;
break;
}
case OP_ARM64_LDNP:
case OP_ARM64_LDNP_SCALAR:
case OP_ARM64_LDP:
case OP_ARM64_LDP_SCALAR: {
const char *oname = NULL;
gboolean nontemporal = FALSE;
gboolean scalar = FALSE;
switch (ins->opcode) {
case OP_ARM64_LDNP: oname = "arm64_ldnp"; nontemporal = TRUE; break;
case OP_ARM64_LDNP_SCALAR: oname = "arm64_ldnp_scalar"; nontemporal = TRUE; scalar = TRUE; break;
case OP_ARM64_LDP: oname = "arm64_ldp"; break;
case OP_ARM64_LDP_SCALAR: oname = "arm64_ldp_scalar"; scalar = TRUE; break;
}
if (!addresses [ins->dreg])
addresses [ins->dreg] = build_named_alloca (ctx, m_class_get_byval_arg (ins->klass), oname);
LLVMTypeRef ret_t = simd_valuetuple_to_llvm_type (ctx, ins->klass);
LLVMTypeRef vec_t = LLVMGetElementType (ret_t);
LLVMValueRef ix = const_int32 (1);
LLVMTypeRef src_t = LLVMPointerType (scalar ? LLVMGetElementType (vec_t) : vec_t, 0);
LLVMValueRef src0 = convert (ctx, lhs, src_t);
LLVMValueRef src1 = LLVMBuildGEP (builder, src0, &ix, 1, oname);
LLVMValueRef vals [] = { src0, src1 };
for (int i = 0; i < 2; ++i) {
vals [i] = LLVMBuildLoad (builder, vals [i], oname);
if (nontemporal)
set_nontemporal_flag (vals [i]);
}
unsigned int vec_sz = mono_llvm_get_prim_size_bits (vec_t);
if (scalar) {
g_assert (vec_sz == 64);
LLVMValueRef undef = LLVMGetUndef (vec_t);
for (int i = 0; i < 2; ++i)
vals [i] = LLVMBuildInsertElement (builder, undef, vals [i], const_int32 (0), oname);
}
LLVMValueRef val = LLVMGetUndef (ret_t);
for (int i = 0; i < 2; ++i)
val = LLVMBuildInsertValue (builder, val, vals [i], i, oname);
LLVMTypeRef retptr_t = LLVMPointerType (ret_t, 0);
LLVMValueRef dst = convert (ctx, addresses [ins->dreg], retptr_t);
LLVMBuildStore (builder, val, dst);
values [ins->dreg] = vec_sz == 64 ? val : NULL;
break;
}
case OP_ARM64_ST1: {
LLVMTypeRef t = LLVMTypeOf (rhs);
LLVMValueRef address = convert (ctx, lhs, LLVMPointerType (t, 0));
unsigned int alignment = mono_llvm_get_prim_size_bits (t) / 8;
mono_llvm_build_aligned_store (builder, rhs, address, FALSE, alignment);
break;
}
case OP_ARM64_ST1_SCALAR: {
LLVMTypeRef t = LLVMGetElementType (LLVMTypeOf (rhs));
LLVMValueRef val = LLVMBuildExtractElement (builder, rhs, arg3, "arm64_st1_scalar");
LLVMValueRef address = convert (ctx, lhs, LLVMPointerType (t, 0));
unsigned int alignment = mono_llvm_get_prim_size_bits (t) / 8;
mono_llvm_build_aligned_store (builder, val, address, FALSE, alignment);
break;
}
case OP_ARM64_ADDHN:
case OP_ARM64_ADDHN2:
case OP_ARM64_SUBHN:
case OP_ARM64_SUBHN2:
case OP_ARM64_RADDHN:
case OP_ARM64_RADDHN2:
case OP_ARM64_RSUBHN:
case OP_ARM64_RSUBHN2: {
LLVMValueRef args [2] = { lhs, rhs };
gboolean high = FALSE;
gboolean subtract = FALSE;
int iid = 0;
switch (ins->opcode) {
case OP_ARM64_ADDHN2: high = TRUE; case OP_ARM64_ADDHN: break;
case OP_ARM64_SUBHN2: high = TRUE; case OP_ARM64_SUBHN: subtract = TRUE; break;
case OP_ARM64_RSUBHN2: high = TRUE; case OP_ARM64_RSUBHN: iid = INTRINS_AARCH64_ADV_SIMD_RSUBHN; break;
case OP_ARM64_RADDHN2: high = TRUE; case OP_ARM64_RADDHN: iid = INTRINS_AARCH64_ADV_SIMD_RADDHN; break;
}
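/* The rounding variants map to the RADDHN/RSUBHN intrinsics; the plain
 * ones are lowered manually below: e.g. for 8xi16 inputs, add/sub, logical
 * shift right by 8, then truncate to 8xi8 (the high half of each lane). */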
llvm_ovr_tag_t ovr_tag = ovr_tag_from_mono_vector_class (ins->klass);
if (high) {
args [0] = rhs;
args [1] = arg3;
ovr_tag = ovr_tag_smaller_vector (ovr_tag);
}
LLVMValueRef result = NULL;
if (iid != 0)
result = call_overloaded_intrins (ctx, iid, ovr_tag, args, "");
else {
LLVMTypeRef t = LLVMTypeOf (args [0]);
LLVMTypeRef elt_t = LLVMGetElementType (t);
unsigned int elems = LLVMGetVectorSize (t);
unsigned int elem_bits = mono_llvm_get_prim_size_bits (elt_t);
if (subtract)
result = LLVMBuildSub (builder, args [0], args [1], "");
else
result = LLVMBuildAdd (builder, args [0], args [1], "");
result = LLVMBuildLShr (builder, result, broadcast_constant (elem_bits / 2, elt_t, elems), "");
result = LLVMBuildTrunc (builder, result, LLVMVectorType (LLVMIntType (elem_bits / 2), elems), "");
}
if (high)
result = concatenate_vectors (ctx, lhs, result);
values [ins->dreg] = result;
break;
}
case OP_ARM64_SADD:
case OP_ARM64_UADD:
case OP_ARM64_SADD2:
case OP_ARM64_UADD2:
case OP_ARM64_SSUB:
case OP_ARM64_USUB:
case OP_ARM64_SSUB2:
case OP_ARM64_USUB2: {
LLVMTypeRef ret_t = simd_class_to_llvm_type (ctx, ins->klass);
gboolean is_unsigned = FALSE;
gboolean high = FALSE;
gboolean subtract = FALSE;
switch (ins->opcode) {
case OP_ARM64_SADD2: high = TRUE; case OP_ARM64_SADD: break;
case OP_ARM64_UADD2: high = TRUE; case OP_ARM64_UADD: is_unsigned = TRUE; break;
case OP_ARM64_SSUB2: high = TRUE; case OP_ARM64_SSUB: subtract = TRUE; break;
case OP_ARM64_USUB2: high = TRUE; case OP_ARM64_USUB: subtract = TRUE; is_unsigned = TRUE; break;
}
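/* These are the widening forms: both operands are extended to the
 * double-width return type before a full-width add/sub; the '2' variants
 * take their inputs from the high halves. */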
LLVMValueRef args [] = { lhs, rhs };
for (int i = 0; i < 2; ++i) {
LLVMValueRef arg = args [i];
LLVMTypeRef arg_t = LLVMTypeOf (arg);
if (high && arg_t != ret_t)
arg = extract_high_elements (ctx, arg);
if (is_unsigned)
arg = LLVMBuildZExt (builder, arg, ret_t, "");
else
arg = LLVMBuildSExt (builder, arg, ret_t, "");
args [i] = arg;
}
LLVMValueRef result = NULL;
if (subtract)
result = LLVMBuildSub (builder, args [0], args [1], "arm64_sub");
else
result = LLVMBuildAdd (builder, args [0], args [1], "arm64_add");
values [ins->dreg] = result;
break;
}
case OP_ARM64_SABAL:
case OP_ARM64_SABAL2:
case OP_ARM64_UABAL:
case OP_ARM64_UABAL2:
case OP_ARM64_SABDL:
case OP_ARM64_SABDL2:
case OP_ARM64_UABDL:
case OP_ARM64_UABDL2:
case OP_ARM64_SABA:
case OP_ARM64_UABA:
case OP_ARM64_SABD:
case OP_ARM64_UABD: {
LLVMTypeRef ret_t = simd_class_to_llvm_type (ctx, ins->klass);
gboolean is_unsigned = FALSE;
gboolean high = FALSE;
gboolean add = FALSE;
gboolean widen = FALSE;
switch (ins->opcode) {
case OP_ARM64_SABAL2: high = TRUE; case OP_ARM64_SABAL: widen = TRUE; add = TRUE; break;
case OP_ARM64_UABAL2: high = TRUE; case OP_ARM64_UABAL: widen = TRUE; add = TRUE; is_unsigned = TRUE; break;
case OP_ARM64_SABDL2: high = TRUE; case OP_ARM64_SABDL: widen = TRUE; break;
case OP_ARM64_UABDL2: high = TRUE; case OP_ARM64_UABDL: widen = TRUE; is_unsigned = TRUE; break;
case OP_ARM64_SABA: add = TRUE; break;
case OP_ARM64_UABA: add = TRUE; is_unsigned = TRUE; break;
case OP_ARM64_UABD: is_unsigned = TRUE; break;
}
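/* All of these build on the S/UABD intrinsic (|a - b| per lane); the
 * ABDL/ABAL forms additionally widen the result (zext is correct even for
 * signed inputs, since the difference is non-negative), and the ABA/ABAL
 * forms accumulate it into the first operand. */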
LLVMValueRef args [] = { lhs, rhs };
if (add) {
args [0] = rhs;
args [1] = arg3;
}
if (high)
for (int i = 0; i < 2; ++i)
args [i] = extract_high_elements (ctx, args [i]);
int iid = is_unsigned ? INTRINS_AARCH64_ADV_SIMD_UABD : INTRINS_AARCH64_ADV_SIMD_SABD;
llvm_ovr_tag_t ovr_tag = ovr_tag_from_llvm_type (LLVMTypeOf (args [0]));
LLVMValueRef result = call_overloaded_intrins (ctx, iid, ovr_tag, args, "");
if (widen)
result = LLVMBuildZExt (builder, result, ret_t, "");
if (add)
result = LLVMBuildAdd (builder, result, lhs, "");
values [ins->dreg] = result;
break;
}
case OP_ARM64_XHORIZ: {
gboolean truncate = FALSE;
LLVMTypeRef arg_t = LLVMTypeOf (lhs);
LLVMTypeRef elem_t = LLVMGetElementType (arg_t);
LLVMTypeRef ret_t = simd_class_to_llvm_type (ctx, ins->klass);
llvm_ovr_tag_t ovr_tag = ovr_tag_from_llvm_type (arg_t);
if (elem_t == i1_t || elem_t == i2_t)
truncate = TRUE;
LLVMValueRef result = call_overloaded_intrins (ctx, ins->inst_c0, ovr_tag, &lhs, "");
if (truncate) {
// @llvm.aarch64.neon.saddv.i32.v8i16 ought to return an i16, but doesn't in LLVM 9.
result = LLVMBuildTrunc (builder, result, elem_t, "");
}
result = vector_from_scalar (ctx, ret_t, result);
values [ins->dreg] = result;
break;
}
case OP_ARM64_SADDLV:
case OP_ARM64_UADDLV: {
LLVMTypeRef arg_t = LLVMTypeOf (lhs);
LLVMTypeRef elem_t = LLVMGetElementType (arg_t);
LLVMTypeRef ret_t = simd_class_to_llvm_type (ctx, ins->klass);
llvm_ovr_tag_t ovr_tag = ovr_tag_from_llvm_type (arg_t);
gboolean truncate = elem_t == i1_t;
int iid = ins->opcode == OP_ARM64_UADDLV ? INTRINS_AARCH64_ADV_SIMD_UADDLV : INTRINS_AARCH64_ADV_SIMD_SADDLV;
LLVMValueRef result = call_overloaded_intrins (ctx, iid, ovr_tag, &lhs, "");
if (truncate) {
// @llvm.aarch64.neon.saddlv.i32.v16i8 ought to return an i16, but doesn't in LLVM 9.
result = LLVMBuildTrunc (builder, result, i2_t, "");
}
result = vector_from_scalar (ctx, ret_t, result);
values [ins->dreg] = result;
break;
}
case OP_ARM64_UADALP:
case OP_ARM64_SADALP: {
llvm_ovr_tag_t ovr_tag = ovr_tag_from_mono_vector_class (ins->klass);
int iid = ins->opcode == OP_ARM64_UADALP ? INTRINS_AARCH64_ADV_SIMD_UADDLP : INTRINS_AARCH64_ADV_SIMD_SADDLP;
LLVMValueRef result = call_overloaded_intrins (ctx, iid, ovr_tag, &rhs, "");
result = LLVMBuildAdd (builder, result, lhs, "");
values [ins->dreg] = result;
break;
}
case OP_ARM64_ADDP_SCALAR: {
llvm_ovr_tag_t ovr_tag = INTRIN_vector128 | INTRIN_int64;
LLVMValueRef result = call_overloaded_intrins (ctx, INTRINS_AARCH64_ADV_SIMD_UADDV, ovr_tag, &lhs, "arm64_addp_scalar");
result = LLVMBuildInsertElement (builder, LLVMConstNull (v64_i8_t), result, const_int32 (0), "");
values [ins->dreg] = result;
break;
}
case OP_ARM64_FADDP_SCALAR: {
LLVMTypeRef ret_t = simd_class_to_llvm_type (ctx, ins->klass);
LLVMValueRef hi = LLVMBuildExtractElement (builder, lhs, const_int32 (0), "");
LLVMValueRef lo = LLVMBuildExtractElement (builder, lhs, const_int32 (1), "");
LLVMValueRef result = LLVMBuildFAdd (builder, hi, lo, "arm64_faddp_scalar");
result = LLVMBuildInsertElement (builder, LLVMConstNull (ret_t), result, const_int32 (0), "");
values [ins->dreg] = result;
break;
}
case OP_ARM64_SXTL:
case OP_ARM64_SXTL2:
case OP_ARM64_UXTL:
case OP_ARM64_UXTL2: {
gboolean high = FALSE;
gboolean is_unsigned = FALSE;
switch (ins->opcode) {
case OP_ARM64_SXTL2: high = TRUE; break;
case OP_ARM64_UXTL2: high = TRUE; case OP_ARM64_UXTL: is_unsigned = TRUE; break;
}
LLVMTypeRef t = LLVMTypeOf (lhs);
unsigned int elem_bits = LLVMGetIntTypeWidth (LLVMGetElementType (t));
unsigned int src_elems = LLVMGetVectorSize (t);
unsigned int dst_elems = src_elems;
LLVMValueRef arg = lhs;
if (high) {
arg = extract_high_elements (ctx, lhs);
dst_elems = LLVMGetVectorSize (LLVMTypeOf (arg));
}
LLVMTypeRef result_t = LLVMVectorType (LLVMIntType (elem_bits * 2), dst_elems);
LLVMValueRef result = NULL;
if (is_unsigned)
result = LLVMBuildZExt (builder, arg, result_t, "arm64_uxtl");
else
result = LLVMBuildSExt (builder, arg, result_t, "arm64_sxtl");
values [ins->dreg] = result;
break;
}
case OP_ARM64_TRN1:
case OP_ARM64_TRN2: {
gboolean high = ins->opcode == OP_ARM64_TRN2;
LLVMTypeRef t = LLVMTypeOf (lhs);
unsigned int src_elems = LLVMGetVectorSize (t);
int mask [MAX_VECTOR_ELEMS] = { 0 };
int laneix = high ? 1 : 0;
for (unsigned int i = 0; i < src_elems; i += 2) {
mask [i] = laneix;
mask [i + 1] = laneix + src_elems;
laneix += 2;
}
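/* e.g. for 4 lanes this yields TRN1 = <0, 4, 2, 6> and TRN2 = <1, 5, 3, 7>. */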
values [ins->dreg] = LLVMBuildShuffleVector (builder, lhs, rhs, create_const_vector_i32 (mask, src_elems), "arm64_trn");
break;
}
case OP_ARM64_UZP1:
case OP_ARM64_UZP2: {
gboolean high = ins->opcode == OP_ARM64_UZP2;
LLVMTypeRef t = LLVMTypeOf (lhs);
unsigned int src_elems = LLVMGetVectorSize (t);
int mask [MAX_VECTOR_ELEMS] = { 0 };
int laneix = high ? 1 : 0;
for (unsigned int i = 0; i < src_elems; ++i) {
mask [i] = laneix;
laneix += 2;
}
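/* e.g. for 4 lanes this yields UZP1 = <0, 2, 4, 6> and UZP2 = <1, 3, 5, 7>. */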
values [ins->dreg] = LLVMBuildShuffleVector (builder, lhs, rhs, create_const_vector_i32 (mask, src_elems), "arm64_uzp");
break;
}
case OP_ARM64_ZIP1:
case OP_ARM64_ZIP2: {
gboolean high = ins->opcode == OP_ARM64_ZIP2;
LLVMTypeRef t = LLVMTypeOf (lhs);
unsigned int src_elems = LLVMGetVectorSize (t);
int mask [MAX_VECTOR_ELEMS] = { 0 };
int laneix = high ? src_elems / 2 : 0;
for (unsigned int i = 0; i < src_elems; i += 2) {
mask [i] = laneix;
mask [i + 1] = laneix + src_elems;
++laneix;
}
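/* e.g. for 4 lanes this yields ZIP1 = <0, 4, 1, 5> and ZIP2 = <2, 6, 3, 7>. */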
values [ins->dreg] = LLVMBuildShuffleVector (builder, lhs, rhs, create_const_vector_i32 (mask, src_elems), "arm64_zip");
break;
}
case OP_ARM64_ABSCOMPARE: {
IntrinsicId iid = (IntrinsicId) ins->inst_c0;
gboolean scalar = ins->inst_c1;
LLVMTypeRef ret_t = simd_class_to_llvm_type (ctx, ins->klass);
LLVMTypeRef elem_t = LLVMGetElementType (ret_t);
llvm_ovr_tag_t ovr_tag = ovr_tag_from_mono_vector_class (ins->klass);
ovr_tag = ovr_tag_corresponding_integer (ovr_tag);
LLVMValueRef args [] = { lhs, rhs };
LLVMTypeRef result_t = ret_t;
if (scalar) {
ovr_tag = ovr_tag_force_scalar (ovr_tag);
result_t = elem_t;
for (int i = 0; i < 2; ++i)
args [i] = scalar_from_vector (ctx, args [i]);
}
LLVMValueRef result = call_overloaded_intrins (ctx, iid, ovr_tag, args, "");
result = LLVMBuildBitCast (builder, result, result_t, "");
if (scalar)
result = vector_from_scalar (ctx, ret_t, result);
values [ins->dreg] = result;
break;
}
case OP_XOP_OVR_X_X: {
IntrinsicId iid = (IntrinsicId) ins->inst_c0;
llvm_ovr_tag_t ovr_tag = ovr_tag_from_mono_vector_class (ins->klass);
values [ins->dreg] = call_overloaded_intrins (ctx, iid, ovr_tag, &lhs, "");
break;
}
case OP_XOP_OVR_X_X_X: {
IntrinsicId iid = (IntrinsicId) ins->inst_c0;
llvm_ovr_tag_t ovr_tag = ovr_tag_from_mono_vector_class (ins->klass);
LLVMValueRef args [] = { lhs, rhs };
values [ins->dreg] = call_overloaded_intrins (ctx, iid, ovr_tag, args, "");
break;
}
case OP_XOP_OVR_X_X_X_X: {
IntrinsicId iid = (IntrinsicId) ins->inst_c0;
llvm_ovr_tag_t ovr_tag = ovr_tag_from_mono_vector_class (ins->klass);
LLVMValueRef args [] = { lhs, rhs, arg3 };
values [ins->dreg] = call_overloaded_intrins (ctx, iid, ovr_tag, args, "");
break;
}
case OP_XOP_OVR_BYSCALAR_X_X_X: {
IntrinsicId iid = (IntrinsicId) ins->inst_c0;
llvm_ovr_tag_t ovr_tag = ovr_tag_from_mono_vector_class (ins->klass);
LLVMTypeRef t = LLVMTypeOf (lhs);
unsigned int elems = LLVMGetVectorSize (t);
LLVMValueRef arg2 = broadcast_element (ctx, scalar_from_vector (ctx, rhs), elems);
LLVMValueRef args [] = { lhs, arg2 };
values [ins->dreg] = call_overloaded_intrins (ctx, iid, ovr_tag, args, "");
break;
}
case OP_XOP_OVR_SCALAR_X_X:
case OP_XOP_OVR_SCALAR_X_X_X:
case OP_XOP_OVR_SCALAR_X_X_X_X: {
int num_args = 0;
IntrinsicId iid = (IntrinsicId) ins->inst_c0;
LLVMTypeRef ret_t = simd_class_to_llvm_type (ctx, ins->klass);
switch (ins->opcode) {
case OP_XOP_OVR_SCALAR_X_X: num_args = 1; break;
case OP_XOP_OVR_SCALAR_X_X_X: num_args = 2; break;
case OP_XOP_OVR_SCALAR_X_X_X_X: num_args = 3; break;
}
/* LLVM 9 NEON intrinsic functions have scalar overloads. Unfortunately
* only overloads for 32 and 64-bit integers and floating point types are
* supported. 8 and 16-bit integers are unsupported, and will fail during
* instruction selection. This is worked around by using a vector
* operation and then explicitly clearing the upper bits of the register.
*/
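/* e.g. (roughly) a 16-bit scalar saturating op is performed as an 8xi16
 * vector op whose result is then narrowed back to its lowest element. */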
ScalarOpFromVectorOpCtx sctx = scalar_op_from_vector_op (ctx, ret_t, ins);
LLVMValueRef args [3] = { lhs, rhs, arg3 };
scalar_op_from_vector_op_process_args (&sctx, args, num_args);
LLVMValueRef result = call_overloaded_intrins (ctx, iid, sctx.ovr_tag, args, "");
result = scalar_op_from_vector_op_process_result (&sctx, result);
values [ins->dreg] = result;
break;
}
#endif
case OP_DUMMY_USE:
break;
/*
* EXCEPTION HANDLING
*/
case OP_IMPLICIT_EXCEPTION:
/* This marks a place where an implicit exception can happen */
if (bb->region != -1)
set_failure (ctx, "implicit-exception");
break;
case OP_THROW:
case OP_RETHROW: {
gboolean rethrow = (ins->opcode == OP_RETHROW);
if (ctx->llvm_only) {
emit_llvmonly_throw (ctx, bb, rethrow, lhs);
has_terminator = TRUE;
ctx->unreachable [bb->block_num] = TRUE;
} else {
emit_throw (ctx, bb, rethrow, lhs);
builder = ctx->builder;
}
break;
}
case OP_CALL_HANDLER: {
/*
* We don't 'call' handlers, but instead simply branch to them.
* The code generated by ENDFINALLY will branch back to us.
*/
LLVMBasicBlockRef noex_bb;
GSList *bb_list;
BBInfo *info = &bblocks [ins->inst_target_bb->block_num];
bb_list = info->call_handler_return_bbs;
/*
* Set the indicator variable for the finally clause.
*/
lhs = info->finally_ind;
g_assert (lhs);
LLVMBuildStore (builder, LLVMConstInt (LLVMInt32Type (), g_slist_length (bb_list) + 1, FALSE), lhs);
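/* The stored value is the 1-based position that noex_bb (created below)
 * will have in call_handler_return_bbs; the switch emitted for ENDFINALLY
 * uses it to branch back here once the finally clause finishes. */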
/* Branch to the finally clause */
LLVMBuildBr (builder, info->call_handler_target_bb);
noex_bb = gen_bb (ctx, "CALL_HANDLER_CONT_BB");
info->call_handler_return_bbs = g_slist_append_mempool (cfg->mempool, info->call_handler_return_bbs, noex_bb);
builder = ctx->builder = create_builder (ctx);
LLVMPositionBuilderAtEnd (ctx->builder, noex_bb);
bblocks [bb->block_num].end_bblock = noex_bb;
break;
}
case OP_START_HANDLER: {
break;
}
case OP_ENDFINALLY: {
LLVMBasicBlockRef resume_bb;
MonoBasicBlock *handler_bb;
LLVMValueRef val, switch_ins, callee;
GSList *bb_list;
BBInfo *info;
gboolean is_fault = MONO_REGION_FLAGS (bb->region) == MONO_EXCEPTION_CLAUSE_FAULT;
/*
* Fault clauses are like finally clauses, but they are only called if an exception is thrown.
*/
if (!is_fault) {
handler_bb = (MonoBasicBlock*)g_hash_table_lookup (ctx->region_to_handler, GUINT_TO_POINTER (mono_get_block_region_notry (cfg, bb->region)));
g_assert (handler_bb);
info = &bblocks [handler_bb->block_num];
lhs = info->finally_ind;
g_assert (lhs);
bb_list = info->call_handler_return_bbs;
resume_bb = gen_bb (ctx, "ENDFINALLY_RESUME_BB");
/* Load the finally variable */
val = LLVMBuildLoad (builder, lhs, "");
/* Reset the variable */
LLVMBuildStore (builder, LLVMConstInt (LLVMInt32Type (), 0, FALSE), lhs);
/* Branch to either resume_bb, or to the bblocks in bb_list */
switch_ins = LLVMBuildSwitch (builder, val, resume_bb, g_slist_length (bb_list));
/*
* The other targets are added at the end to handle OP_CALL_HANDLER
* opcodes processed later.
*/
info->endfinally_switch_ins_list = g_slist_append_mempool (cfg->mempool, info->endfinally_switch_ins_list, switch_ins);
builder = ctx->builder = create_builder (ctx);
LLVMPositionBuilderAtEnd (ctx->builder, resume_bb);
}
if (ctx->llvm_only) {
if (!cfg->deopt) {
emit_resume_eh (ctx, bb);
} else {
/* Not needed */
LLVMBuildUnreachable (builder);
}
} else {
LLVMTypeRef icall_sig = LLVMFunctionType (LLVMVoidType (), NULL, 0, FALSE);
if (ctx->cfg->compile_aot) {
callee = get_callee (ctx, icall_sig, MONO_PATCH_INFO_JIT_ICALL_ID, GUINT_TO_POINTER (MONO_JIT_ICALL_mono_llvm_resume_unwind_trampoline));
} else {
callee = get_jit_callee (ctx, "llvm_resume_unwind_trampoline", icall_sig, MONO_PATCH_INFO_JIT_ICALL_ID, GUINT_TO_POINTER (MONO_JIT_ICALL_mono_llvm_resume_unwind_trampoline));
}
LLVMBuildCall (builder, callee, NULL, 0, "");
LLVMBuildUnreachable (builder);
}
has_terminator = TRUE;
break;
}
case OP_ENDFILTER: {
g_assert (cfg->llvm_only && cfg->deopt);
LLVMBuildUnreachable (builder);
has_terminator = TRUE;
break;
}
case OP_IL_SEQ_POINT:
break;
default: {
char reason [128];
sprintf (reason, "opcode %s", mono_inst_name (ins->opcode));
set_failure (ctx, reason);
break;
}
}
if (!ctx_ok (ctx))
break;
/* Convert the value to the type required by phi nodes */
if (spec [MONO_INST_DEST] != ' ' && !MONO_IS_STORE_MEMBASE (ins) && ctx->vreg_types [ins->dreg]) {
if (ctx->is_vphi [ins->dreg])
/* vtypes */
values [ins->dreg] = addresses [ins->dreg];
else
values [ins->dreg] = convert (ctx, values [ins->dreg], ctx->vreg_types [ins->dreg]);
}
/* Add stores for volatile/ref variables */
if (spec [MONO_INST_DEST] != ' ' && spec [MONO_INST_DEST] != 'v' && !MONO_IS_STORE_MEMBASE (ins)) {
if (!skip_volatile_store)
emit_volatile_store (ctx, ins->dreg);
#ifdef TARGET_WASM
if (vreg_is_ref (cfg, ins->dreg) && ctx->values [ins->dreg])
emit_gc_pin (ctx, builder, ins->dreg);
#endif
}
}
if (!ctx_ok (ctx))
return;
if (!has_terminator && bb->next_bb && (bb == cfg->bb_entry || bb->in_count > 0)) {
LLVMBuildBr (builder, get_bb (ctx, bb->next_bb));
}
if (bb == cfg->bb_exit && sig->ret->type == MONO_TYPE_VOID) {
emit_dbg_loc (ctx, builder, cfg->header->code + cfg->header->code_size - 1);
LLVMBuildRetVoid (builder);
}
if (bb == cfg->bb_entry)
ctx->last_alloca = LLVMGetLastInstruction (get_bb (ctx, cfg->bb_entry));
}
/*
* mono_llvm_check_method_supported:
*
* Do some quick checks to decide whether cfg->method can be compiled by LLVM, to avoid
* compiling a method twice.
*/
void
mono_llvm_check_method_supported (MonoCompile *cfg)
{
int i, j;
#ifdef TARGET_WASM
if (mono_method_signature_internal (cfg->method)->call_convention == MONO_CALL_VARARG) {
cfg->exception_message = g_strdup ("vararg callconv");
cfg->disable_llvm = TRUE;
return;
}
#endif
if (cfg->llvm_only)
return;
if (cfg->method->save_lmf) {
cfg->exception_message = g_strdup ("lmf");
cfg->disable_llvm = TRUE;
}
if (cfg->disable_llvm)
return;
/*
* Nested clauses where one of the clauses is a finally clause is
* not supported, because LLVM can't figure out the control flow,
* probably because we resume exception handling by calling our
* own function instead of using the 'resume' llvm instruction.
*/
for (i = 0; i < cfg->header->num_clauses; ++i) {
for (j = 0; j < cfg->header->num_clauses; ++j) {
MonoExceptionClause *clause1 = &cfg->header->clauses [i];
MonoExceptionClause *clause2 = &cfg->header->clauses [j];
// FIXME: Nested try clauses fail in some cases too, i.e. #37273
if (i != j && clause1->try_offset >= clause2->try_offset && clause1->handler_offset <= clause2->handler_offset) {
/* Disabled extra condition: && (clause1->flags == MONO_EXCEPTION_CLAUSE_FINALLY || clause2->flags == MONO_EXCEPTION_CLAUSE_FINALLY) */
cfg->exception_message = g_strdup ("nested clauses");
cfg->disable_llvm = TRUE;
break;
}
}
}
if (cfg->disable_llvm)
return;
/* FIXME: */
if (cfg->method->dynamic) {
cfg->exception_message = g_strdup ("dynamic.");
cfg->disable_llvm = TRUE;
}
if (cfg->disable_llvm)
return;
}
static LLVMCallInfo*
get_llvm_call_info (MonoCompile *cfg, MonoMethodSignature *sig)
{
LLVMCallInfo *linfo;
int i;
if (cfg->gsharedvt && cfg->llvm_only && mini_is_gsharedvt_variable_signature (sig)) {
int i, n, pindex;
/*
* Gsharedvt methods have the following calling convention:
* - all arguments are passed by ref, even non generic ones
* - the return value is returned by ref too, using a vret
* argument passed after 'this'.
*/
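/*
 * Illustrative only (not actual generated names): 'T Foo<T> (T arg)' is
 * called roughly as 'void Foo (gpointer vret, gpointer arg)', with 'this'
 * preceding 'vret' when present.
 */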
n = sig->param_count + sig->hasthis;
linfo = (LLVMCallInfo*)mono_mempool_alloc0 (cfg->mempool, sizeof (LLVMCallInfo) + (sizeof (LLVMArgInfo) * n));
pindex = 0;
if (sig->hasthis)
linfo->args [pindex ++].storage = LLVMArgNormal;
if (sig->ret->type != MONO_TYPE_VOID) {
if (mini_is_gsharedvt_variable_type (sig->ret))
linfo->ret.storage = LLVMArgGsharedvtVariable;
else if (mini_type_is_vtype (sig->ret))
linfo->ret.storage = LLVMArgGsharedvtFixedVtype;
else
linfo->ret.storage = LLVMArgGsharedvtFixed;
linfo->vret_arg_index = pindex;
} else {
linfo->ret.storage = LLVMArgNone;
}
for (i = 0; i < sig->param_count; ++i) {
if (m_type_is_byref (sig->params [i]))
linfo->args [pindex].storage = LLVMArgNormal;
else if (mini_is_gsharedvt_variable_type (sig->params [i]))
linfo->args [pindex].storage = LLVMArgGsharedvtVariable;
else if (mini_type_is_vtype (sig->params [i]))
linfo->args [pindex].storage = LLVMArgGsharedvtFixedVtype;
else
linfo->args [pindex].storage = LLVMArgGsharedvtFixed;
linfo->args [pindex].type = sig->params [i];
pindex ++;
}
return linfo;
}
linfo = mono_arch_get_llvm_call_info (cfg, sig);
linfo->dummy_arg_pindex = -1;
for (i = 0; i < sig->param_count; ++i)
linfo->args [i + sig->hasthis].type = sig->params [i];
return linfo;
}
static void
emit_method_inner (EmitContext *ctx);
static void
free_ctx (EmitContext *ctx)
{
GSList *l;
g_free (ctx->values);
g_free (ctx->addresses);
g_free (ctx->vreg_types);
g_free (ctx->is_vphi);
g_free (ctx->vreg_cli_types);
g_free (ctx->is_dead);
g_free (ctx->unreachable);
g_free (ctx->gc_var_indexes);
g_ptr_array_free (ctx->phi_values, TRUE);
g_free (ctx->bblocks);
g_hash_table_destroy (ctx->region_to_handler);
g_hash_table_destroy (ctx->clause_to_handler);
g_hash_table_destroy (ctx->jit_callees);
g_ptr_array_free (ctx->callsite_list, TRUE);
g_free (ctx->method_name);
g_ptr_array_free (ctx->bblock_list, TRUE);
for (l = ctx->builders; l; l = l->next) {
LLVMBuilderRef builder = (LLVMBuilderRef)l->data;
LLVMDisposeBuilder (builder);
}
g_free (ctx);
}
static gboolean
is_linkonce_method (MonoMethod *method)
{
#ifdef TARGET_WASM
/*
* Under wasm, linkonce works, so use it instead of the dedup pass for wrappers at least.
* FIXME: Use for everything, i.e. can_dedup ().
* FIXME: Fails System.Core tests
* -> amodule->sorted_methods contains duplicates, screwing up jit tables.
*/
// FIXME: This works, but the aot data for the methods is still kept, so size still increases
#if 0
if (method->wrapper_type == MONO_WRAPPER_OTHER) {
WrapperInfo *info = mono_marshal_get_wrapper_info (method);
if (info->subtype == WRAPPER_SUBTYPE_GSHAREDVT_IN_SIG || info->subtype == WRAPPER_SUBTYPE_GSHAREDVT_OUT_SIG)
return TRUE;
}
#endif
#endif
return FALSE;
}
/*
* mono_llvm_emit_method:
*
* Emit LLVM IL from the mono IL, and compile it to native code using LLVM.
*/
void
mono_llvm_emit_method (MonoCompile *cfg)
{
EmitContext *ctx;
char *method_name;
gboolean is_linkonce = FALSE;
int i;
if (cfg->skip)
return;
/* The code below might acquire the loader lock, so use it for global locking */
mono_loader_lock ();
ctx = g_new0 (EmitContext, 1);
ctx->cfg = cfg;
ctx->mempool = cfg->mempool;
/*
* This maps vregs to the LLVM instruction defining them
*/
ctx->values = g_new0 (LLVMValueRef, cfg->next_vreg);
/*
* This maps vregs for volatile variables to the LLVM instruction defining their
* address.
*/
ctx->addresses = g_new0 (LLVMValueRef, cfg->next_vreg);
ctx->vreg_types = g_new0 (LLVMTypeRef, cfg->next_vreg);
ctx->is_vphi = g_new0 (gboolean, cfg->next_vreg);
ctx->vreg_cli_types = g_new0 (MonoType*, cfg->next_vreg);
ctx->phi_values = g_ptr_array_sized_new (256);
/*
* This signals whether the vreg was defined by a phi node with no input vars
* (i.e. all its input bblocks end with NOT_REACHABLE).
*/
ctx->is_dead = g_new0 (gboolean, cfg->next_vreg);
/* Whether the bblock is unreachable */
ctx->unreachable = g_new0 (gboolean, cfg->max_block_num);
ctx->bblock_list = g_ptr_array_sized_new (256);
ctx->region_to_handler = g_hash_table_new (NULL, NULL);
ctx->clause_to_handler = g_hash_table_new (NULL, NULL);
ctx->callsite_list = g_ptr_array_new ();
ctx->jit_callees = g_hash_table_new (NULL, NULL);
if (cfg->compile_aot) {
ctx->module = &aot_module;
/*
* Allow the linker to discard duplicate copies of wrappers, generic instances etc. by using the 'linkonce'
* linkage for them. This requires the following:
* - the method needs to have a unique mangled name
* - llvmonly mode, since the code in aot-runtime.c would initialize got slots in the wrong aot image etc.
*/
if (ctx->module->llvm_only && ctx->module->static_link && is_linkonce_method (cfg->method))
is_linkonce = TRUE;
if (is_linkonce || mono_aot_is_externally_callable (cfg->method))
method_name = mono_aot_get_mangled_method_name (cfg->method);
else
method_name = mono_aot_get_method_name (cfg);
cfg->llvm_method_name = g_strdup (method_name);
} else {
ctx->module = init_jit_module ();
method_name = mono_method_full_name (cfg->method, TRUE);
}
ctx->method_name = method_name;
ctx->is_linkonce = is_linkonce;
if (cfg->compile_aot) {
ctx->lmodule = ctx->module->lmodule;
} else {
ctx->lmodule = LLVMModuleCreateWithName (g_strdup_printf ("jit-module-%s", cfg->method->name));
}
ctx->llvm_only = ctx->module->llvm_only;
#ifdef TARGET_WASM
ctx->emit_dummy_arg = TRUE;
#endif
emit_method_inner (ctx);
if (!ctx_ok (ctx)) {
if (ctx->lmethod) {
/* Need to add unused phi nodes as they can be referenced by other values */
LLVMBasicBlockRef phi_bb = LLVMAppendBasicBlock (ctx->lmethod, "PHI_BB");
LLVMBuilderRef builder;
builder = create_builder (ctx);
LLVMPositionBuilderAtEnd (builder, phi_bb);
for (i = 0; i < ctx->phi_values->len; ++i) {
LLVMValueRef v = (LLVMValueRef)g_ptr_array_index (ctx->phi_values, i);
if (LLVMGetInstructionParent (v) == NULL)
LLVMInsertIntoBuilder (builder, v);
}
if (ctx->module->llvm_only && ctx->module->static_link && cfg->interp) {
/* The caller will retry compilation */
LLVMDeleteFunction (ctx->lmethod);
} else if (ctx->module->llvm_only && ctx->module->static_link) {
// Keep a stub for the function since it might be called directly
int nbbs = LLVMCountBasicBlocks (ctx->lmethod);
LLVMBasicBlockRef *bblocks = g_new0 (LLVMBasicBlockRef, nbbs);
LLVMGetBasicBlocks (ctx->lmethod, bblocks);
for (int i = 0; i < nbbs; ++i)
LLVMDeleteBasicBlock (bblocks [i]);
LLVMBasicBlockRef entry_bb = LLVMAppendBasicBlock (ctx->lmethod, "ENTRY");
builder = create_builder (ctx);
LLVMPositionBuilderAtEnd (builder, entry_bb);
ctx->builder = builder;
LLVMTypeRef sig = LLVMFunctionType0 (LLVMVoidType (), FALSE);
LLVMValueRef callee = get_callee (ctx, sig, MONO_PATCH_INFO_JIT_ICALL_ADDR, GUINT_TO_POINTER (MONO_JIT_ICALL_mini_llvmonly_throw_nullref_exception));
LLVMBuildCall (builder, callee, NULL, 0, "");
LLVMBuildUnreachable (builder);
} else {
LLVMDeleteFunction (ctx->lmethod);
}
}
}
free_ctx (ctx);
mono_loader_unlock ();
}
static void
emit_method_inner (EmitContext *ctx)
{
MonoCompile *cfg = ctx->cfg;
MonoMethodSignature *sig;
MonoBasicBlock *bb;
LLVMTypeRef method_type;
LLVMValueRef method = NULL;
LLVMValueRef *values = ctx->values;
int i, max_block_num, bb_index;
gboolean llvmonly_fail = FALSE;
LLVMCallInfo *linfo;
LLVMModuleRef lmodule = ctx->lmodule;
BBInfo *bblocks;
GPtrArray *bblock_list = ctx->bblock_list;
MonoMethodHeader *header;
MonoExceptionClause *clause;
char **names;
LLVMBuilderRef entry_builder = NULL;
LLVMBasicBlockRef entry_bb = NULL;
if (cfg->gsharedvt && !cfg->llvm_only) {
set_failure (ctx, "gsharedvt");
return;
}
#if 0
{
static int count = 0;
count ++;
char *llvm_count_str = g_getenv ("LLVM_COUNT");
if (llvm_count_str) {
int lcount = atoi (llvm_count_str);
g_free (llvm_count_str);
if (count == lcount) {
printf ("LAST: %s\n", mono_method_full_name (cfg->method, TRUE));
fflush (stdout);
}
if (count > lcount) {
set_failure (ctx, "count");
return;
}
}
}
#endif
// If we come upon one of the init_method wrappers, find the LLVM
// function we have already emitted for it and associate the wrapper's
// managed method info with that function.
//
// This is necessary to unwind through the init_method in case it
// has to run a static cctor that throws an exception.
if (cfg->method->wrapper_type == MONO_WRAPPER_OTHER) {
WrapperInfo *info = mono_marshal_get_wrapper_info (cfg->method);
if (info->subtype == WRAPPER_SUBTYPE_AOT_INIT) {
method = get_init_func (ctx->module, info->d.aot_init.subtype);
ctx->lmethod = method;
ctx->module->max_method_idx = MAX (ctx->module->max_method_idx, cfg->method_index);
const char *init_name = mono_marshal_get_aot_init_wrapper_name (info->d.aot_init.subtype);
ctx->method_name = g_strdup_printf ("%s_%s", ctx->module->global_prefix, init_name);
ctx->cfg->asm_symbol = g_strdup (ctx->method_name);
if (!cfg->llvm_only && ctx->module->external_symbols) {
LLVMSetLinkage (method, LLVMExternalLinkage);
LLVMSetVisibility (method, LLVMHiddenVisibility);
}
/* Not looked up at runtime */
g_hash_table_insert (ctx->module->no_method_table_lmethods, method, method);
goto after_codegen;
} else if (info->subtype == WRAPPER_SUBTYPE_LLVM_FUNC) {
g_assert (info->d.llvm_func.subtype == LLVM_FUNC_WRAPPER_GC_POLL);
if (cfg->compile_aot) {
method = ctx->module->gc_poll_cold_wrapper;
g_assert (method);
} else {
method = emit_icall_cold_wrapper (ctx->module, lmodule, MONO_JIT_ICALL_mono_threads_state_poll, FALSE);
}
ctx->lmethod = method;
ctx->module->max_method_idx = MAX (ctx->module->max_method_idx, cfg->method_index);
ctx->method_name = g_strdup (LLVMGetValueName (method)); //g_strdup_printf ("%s_%s", ctx->module->global_prefix, LLVMGetValueName (method));
ctx->cfg->asm_symbol = g_strdup (ctx->method_name);
if (!cfg->llvm_only && ctx->module->external_symbols) {
LLVMSetLinkage (method, LLVMExternalLinkage);
LLVMSetVisibility (method, LLVMHiddenVisibility);
}
goto after_codegen;
}
}
sig = mono_method_signature_internal (cfg->method);
ctx->sig = sig;
linfo = get_llvm_call_info (cfg, sig);
ctx->linfo = linfo;
if (!ctx_ok (ctx))
return;
if (cfg->rgctx_var)
linfo->rgctx_arg = TRUE;
else if (needs_extra_arg (ctx, cfg->method))
linfo->dummy_arg = TRUE;
ctx->method_type = method_type = sig_to_llvm_sig_full (ctx, sig, linfo);
if (!ctx_ok (ctx))
return;
method = LLVMAddFunction (lmodule, ctx->method_name, method_type);
ctx->lmethod = method;
if (!cfg->llvm_only)
LLVMSetFunctionCallConv (method, LLVMMono1CallConv);
/* If the method contains
 * (1) no calls (so it's a leaf method) and
 * (2) no loops,
 * we can skip the GC safepoint on method entry. */
gboolean requires_safepoint;
requires_safepoint = cfg->has_calls;
if (!requires_safepoint) {
for (bb = cfg->bb_entry->next_bb; bb; bb = bb->next_bb) {
if (bb->loop_body_start || (bb->flags & BB_EXCEPTION_HANDLER)) {
requires_safepoint = TRUE;
}
}
}
if (cfg->method->wrapper_type) {
if (cfg->method->wrapper_type == MONO_WRAPPER_ALLOC || cfg->method->wrapper_type == MONO_WRAPPER_WRITE_BARRIER) {
requires_safepoint = FALSE;
} else {
WrapperInfo *info = mono_marshal_get_wrapper_info (cfg->method);
switch (info->subtype) {
case WRAPPER_SUBTYPE_GSHAREDVT_IN:
case WRAPPER_SUBTYPE_GSHAREDVT_OUT:
case WRAPPER_SUBTYPE_GSHAREDVT_IN_SIG:
case WRAPPER_SUBTYPE_GSHAREDVT_OUT_SIG:
/* Arguments are not used after the call */
requires_safepoint = FALSE;
break;
}
}
}
ctx->has_safepoints = requires_safepoint;
if (!cfg->llvm_only && mono_threads_are_safepoints_enabled () && requires_safepoint) {
if (!cfg->compile_aot) {
LLVMSetGC (method, "coreclr");
emit_gc_safepoint_poll (ctx->module, ctx->lmodule, cfg);
} else {
LLVMSetGC (method, "coreclr");
}
}
LLVMSetLinkage (method, LLVMPrivateLinkage);
mono_llvm_add_func_attr (method, LLVM_ATTR_UW_TABLE);
if (cfg->disable_omit_fp)
mono_llvm_add_func_attr_nv (method, "frame-pointer", "all");
if (cfg->compile_aot) {
if (mono_aot_is_externally_callable (cfg->method)) {
LLVMSetLinkage (method, LLVMExternalLinkage);
} else {
LLVMSetLinkage (method, LLVMInternalLinkage);
//all methods have internal visibility when doing llvm_only
if (!cfg->llvm_only && ctx->module->external_symbols) {
LLVMSetLinkage (method, LLVMExternalLinkage);
LLVMSetVisibility (method, LLVMHiddenVisibility);
}
}
if (ctx->is_linkonce) {
LLVMSetLinkage (method, LLVMLinkOnceAnyLinkage);
LLVMSetVisibility (method, LLVMDefaultVisibility);
}
} else {
LLVMSetLinkage (method, LLVMExternalLinkage);
}
if (cfg->method->save_lmf && !cfg->llvm_only) {
set_failure (ctx, "lmf");
return;
}
if (sig->pinvoke && cfg->method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE && !cfg->llvm_only) {
set_failure (ctx, "pinvoke signature");
return;
}
#ifdef TARGET_WASM
if (ctx->module->interp && cfg->header->code_size > 100000 && !cfg->interp_entry_only) {
/* Large methods slow down llvm too much */
set_failure (ctx, "il code too large.");
return;
}
#endif
header = cfg->header;
for (i = 0; i < header->num_clauses; ++i) {
clause = &header->clauses [i];
if (clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY && clause->flags != MONO_EXCEPTION_CLAUSE_FAULT && clause->flags != MONO_EXCEPTION_CLAUSE_NONE) {
if (cfg->llvm_only) {
if (!cfg->deopt && !cfg->interp_entry_only)
llvmonly_fail = TRUE;
} else {
set_failure (ctx, "non-finally/catch/fault clause.");
return;
}
}
}
if (header->num_clauses || (cfg->method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) || cfg->no_inline)
/* We can't handle inlined methods with clauses */
mono_llvm_add_func_attr (method, LLVM_ATTR_NO_INLINE);
for (int i = 0; i < cfg->header->num_clauses; i++) {
MonoExceptionClause *clause = &cfg->header->clauses [i];
if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE || clause->flags == MONO_EXCEPTION_CLAUSE_FILTER)
ctx->has_catch = TRUE;
}
if (linfo->rgctx_arg) {
ctx->rgctx_arg = LLVMGetParam (method, linfo->rgctx_arg_pindex);
ctx->rgctx_arg_pindex = linfo->rgctx_arg_pindex;
/*
* We mark the rgctx parameter with the inreg attribute, which is mapped to
* MONO_ARCH_RGCTX_REG in the Mono calling convention in llvm, i.e.
* CC_X86_64_Mono in X86CallingConv.td.
*/
if (!ctx->llvm_only)
mono_llvm_add_param_attr (ctx->rgctx_arg, LLVM_ATTR_IN_REG);
LLVMSetValueName (ctx->rgctx_arg, "rgctx");
} else {
ctx->rgctx_arg_pindex = -1;
}
if (cfg->vret_addr) {
values [cfg->vret_addr->dreg] = LLVMGetParam (method, linfo->vret_arg_pindex);
LLVMSetValueName (values [cfg->vret_addr->dreg], "vret");
if (linfo->ret.storage == LLVMArgVtypeByRef) {
mono_llvm_add_param_attr (LLVMGetParam (method, linfo->vret_arg_pindex), LLVM_ATTR_STRUCT_RET);
mono_llvm_add_param_attr (LLVMGetParam (method, linfo->vret_arg_pindex), LLVM_ATTR_NO_ALIAS);
}
}
if (sig->hasthis) {
ctx->this_arg_pindex = linfo->this_arg_pindex;
ctx->this_arg = LLVMGetParam (method, linfo->this_arg_pindex);
values [cfg->args [0]->dreg] = ctx->this_arg;
LLVMSetValueName (values [cfg->args [0]->dreg], "this");
}
if (linfo->dummy_arg)
LLVMSetValueName (LLVMGetParam (method, linfo->dummy_arg_pindex), "dummy_arg");
names = g_new (char *, sig->param_count);
mono_method_get_param_names (cfg->method, (const char **) names);
/* Set parameter names/attributes */
for (i = 0; i < sig->param_count; ++i) {
LLVMArgInfo *ainfo = &linfo->args [i + sig->hasthis];
char *name;
int pindex = ainfo->pindex + ainfo->ndummy_fpargs;
int j;
for (j = 0; j < ainfo->ndummy_fpargs; ++j) {
name = g_strdup_printf ("dummy_%d_%d", i, j);
LLVMSetValueName (LLVMGetParam (method, ainfo->pindex + j), name);
g_free (name);
}
if (ainfo->storage == LLVMArgVtypeInReg && ainfo->pair_storage [0] == LLVMArgNone && ainfo->pair_storage [1] == LLVMArgNone)
continue;
values [cfg->args [i + sig->hasthis]->dreg] = LLVMGetParam (method, pindex);
if (ainfo->storage == LLVMArgGsharedvtFixed || ainfo->storage == LLVMArgGsharedvtFixedVtype) {
if (names [i] && names [i][0] != '\0')
name = g_strdup_printf ("p_arg_%s", names [i]);
else
name = g_strdup_printf ("p_arg_%d", i);
} else {
if (names [i] && names [i][0] != '\0')
name = g_strdup_printf ("arg_%s", names [i]);
else
name = g_strdup_printf ("arg_%d", i);
}
LLVMSetValueName (LLVMGetParam (method, pindex), name);
g_free (name);
if (ainfo->storage == LLVMArgVtypeByVal)
mono_llvm_add_param_attr (LLVMGetParam (method, pindex), LLVM_ATTR_BY_VAL);
if (ainfo->storage == LLVMArgVtypeByRef || ainfo->storage == LLVMArgVtypeAddr) {
/* For OP_LDADDR */
cfg->args [i + sig->hasthis]->opcode = OP_VTARG_ADDR;
}
#ifdef TARGET_WASM
if (ainfo->storage == LLVMArgVtypeByRef) {
/* This causes llvm to make a copy of the value which is what we need */
mono_llvm_add_param_byval_attr (LLVMGetParam (method, pindex), LLVMGetElementType (LLVMTypeOf (LLVMGetParam (method, pindex))));
}
#endif
}
g_free (names);
if (ctx->module->emit_dwarf && cfg->compile_aot && mono_debug_enabled ()) {
ctx->minfo = mono_debug_lookup_method (cfg->method);
ctx->dbg_md = emit_dbg_subprogram (ctx, cfg, method, ctx->method_name);
}
max_block_num = 0;
for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
max_block_num = MAX (max_block_num, bb->block_num);
ctx->bblocks = bblocks = g_new0 (BBInfo, max_block_num + 1);
/* Add branches between non-consecutive bblocks */
for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
if (bb->last_ins && MONO_IS_COND_BRANCH_OP (bb->last_ins) &&
bb->next_bb != bb->last_ins->inst_false_bb) {
MonoInst *inst = (MonoInst*)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst));
inst->opcode = OP_BR;
inst->inst_target_bb = bb->last_ins->inst_false_bb;
mono_bblock_add_inst (bb, inst);
}
}
/*
* Make a first pass over the code to precreate PHI nodes/set INDIRECT flags.
*/
for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
MonoInst *ins;
LLVMBuilderRef builder;
char *dname;
char dname_buf[128];
builder = create_builder (ctx);
for (ins = bb->code; ins; ins = ins->next) {
switch (ins->opcode) {
case OP_PHI:
case OP_FPHI:
case OP_VPHI:
case OP_XPHI: {
LLVMTypeRef phi_type = llvm_type_to_stack_type (cfg, type_to_llvm_type (ctx, m_class_get_byval_arg (ins->klass)));
if (!ctx_ok (ctx))
return;
if (cfg->interp_entry_only)
break;
if (ins->opcode == OP_VPHI) {
/* Treat valuetype PHI nodes as operating on the address itself */
g_assert (ins->klass);
phi_type = LLVMPointerType (type_to_llvm_type (ctx, m_class_get_byval_arg (ins->klass)), 0);
}
/*
* Have to precreate these, as they can be referenced by
* earlier instructions.
*/
sprintf (dname_buf, "t%d", ins->dreg);
dname = dname_buf;
values [ins->dreg] = LLVMBuildPhi (builder, phi_type, dname);
if (ins->opcode == OP_VPHI)
ctx->addresses [ins->dreg] = values [ins->dreg];
g_ptr_array_add (ctx->phi_values, values [ins->dreg]);
/*
* Set the expected type of the incoming arguments since these have
* to have the same type.
*/
for (i = 0; i < ins->inst_phi_args [0]; i++) {
int sreg1 = ins->inst_phi_args [i + 1];
if (sreg1 != -1) {
if (ins->opcode == OP_VPHI)
ctx->is_vphi [sreg1] = TRUE;
ctx->vreg_types [sreg1] = phi_type;
}
}
break;
}
case OP_LDADDR:
((MonoInst*)ins->inst_p0)->flags |= MONO_INST_INDIRECT;
break;
default:
break;
}
}
}
/*
* Create an ordering for bblocks, use the depth first order first, then
* put the exception handling bblocks last.
*/
for (bb_index = 0; bb_index < cfg->num_bblocks; ++bb_index) {
bb = cfg->bblocks [bb_index];
if (!(bb->region != -1 && !MONO_BBLOCK_IS_IN_REGION (bb, MONO_REGION_TRY))) {
g_ptr_array_add (bblock_list, bb);
bblocks [bb->block_num].added = TRUE;
}
}
for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
if (!bblocks [bb->block_num].added)
g_ptr_array_add (bblock_list, bb);
}
/*
* Second pass: generate code.
*/
// Emit entry point
entry_builder = create_builder (ctx);
entry_bb = get_bb (ctx, cfg->bb_entry);
LLVMPositionBuilderAtEnd (entry_builder, entry_bb);
emit_entry_bb (ctx, entry_builder);
if (llvmonly_fail)
/*
* In llvmonly mode, we want to emit an llvm method for every method even if it fails to compile,
* so direct calls can be made from outside the assembly.
*/
goto after_codegen_1;
for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
int clause_index;
char name [128];
if (ctx->cfg->interp_entry_only || !(bb->region != -1 && (bb->flags & BB_EXCEPTION_HANDLER)))
continue;
if (ctx->cfg->deopt && MONO_REGION_FLAGS (bb->region) == MONO_EXCEPTION_CLAUSE_FILTER)
continue;
clause_index = MONO_REGION_CLAUSE_INDEX (bb->region);
g_hash_table_insert (ctx->region_to_handler, GUINT_TO_POINTER (mono_get_block_region_notry (cfg, bb->region)), bb);
g_hash_table_insert (ctx->clause_to_handler, GINT_TO_POINTER (clause_index), bb);
/*
* Create a new bblock which CALL_HANDLER/landing pads can branch to, because branching to the
* LLVM bblock containing a landing pad causes problems for the
* LLVM optimizer passes.
*/
sprintf (name, "BB%d_CALL_HANDLER_TARGET", bb->block_num);
ctx->bblocks [bb->block_num].call_handler_target_bb = LLVMAppendBasicBlock (ctx->lmethod, name);
}
// Make landing pads first
ctx->exc_meta = g_hash_table_new_full (NULL, NULL, NULL, NULL);
if (ctx->llvm_only && !ctx->cfg->interp_entry_only) {
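/* Clauses covering the same protected range share one landing pad,
 * keyed in exc_meta by the end offset of that range. */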
size_t group_index = 0;
while (group_index < cfg->header->num_clauses) {
if (cfg->clause_is_dead [group_index]) {
group_index ++;
continue;
}
int count = 0;
size_t cursor = group_index;
while (cursor < cfg->header->num_clauses &&
CLAUSE_START (&cfg->header->clauses [cursor]) == CLAUSE_START (&cfg->header->clauses [group_index]) &&
CLAUSE_END (&cfg->header->clauses [cursor]) == CLAUSE_END (&cfg->header->clauses [group_index])) {
count++;
cursor++;
}
LLVMBasicBlockRef lpad_bb = emit_landing_pad (ctx, group_index, count);
intptr_t key = CLAUSE_END (&cfg->header->clauses [group_index]);
g_hash_table_insert (ctx->exc_meta, (gpointer)key, lpad_bb);
group_index = cursor;
}
}
for (bb_index = 0; bb_index < bblock_list->len; ++bb_index) {
bb = (MonoBasicBlock*)g_ptr_array_index (bblock_list, bb_index);
// Prune unreachable mono BBs.
if (!(bb == cfg->bb_entry || bb->in_count > 0))
continue;
process_bb (ctx, bb);
if (!ctx_ok (ctx))
return;
}
g_hash_table_destroy (ctx->exc_meta);
mono_memory_barrier ();
/* Add incoming phi values */
for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
GSList *l, *ins_list;
ins_list = bblocks [bb->block_num].phi_nodes;
for (l = ins_list; l; l = l->next) {
PhiNode *node = (PhiNode*)l->data;
MonoInst *phi = node->phi;
int sreg1 = node->sreg;
LLVMBasicBlockRef in_bb;
if (sreg1 == -1)
continue;
in_bb = get_end_bb (ctx, node->in_bb);
if (ctx->unreachable [node->in_bb->block_num])
continue;
if (phi->opcode == OP_VPHI) {
g_assert (LLVMTypeOf (ctx->addresses [sreg1]) == LLVMTypeOf (values [phi->dreg]));
LLVMAddIncoming (values [phi->dreg], &ctx->addresses [sreg1], &in_bb, 1);
} else {
if (!values [sreg1]) {
/* Can happen with values in EH clauses */
set_failure (ctx, "incoming phi sreg1");
return;
}
if (LLVMTypeOf (values [sreg1]) != LLVMTypeOf (values [phi->dreg])) {
set_failure (ctx, "incoming phi arg type mismatch");
return;
}
g_assert (LLVMTypeOf (values [sreg1]) == LLVMTypeOf (values [phi->dreg]));
LLVMAddIncoming (values [phi->dreg], &values [sreg1], &in_bb, 1);
}
}
}
/* Nullify empty phi instructions */
for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
GSList *l, *ins_list;
ins_list = bblocks [bb->block_num].phi_nodes;
for (l = ins_list; l; l = l->next) {
PhiNode *node = (PhiNode*)l->data;
MonoInst *phi = node->phi;
LLVMValueRef phi_ins = values [phi->dreg];
if (!phi_ins)
/* Already removed */
continue;
if (LLVMCountIncoming (phi_ins) == 0) {
mono_llvm_replace_uses_of (phi_ins, LLVMConstNull (LLVMTypeOf (phi_ins)));
LLVMInstructionEraseFromParent (phi_ins);
values [phi->dreg] = NULL;
}
}
}
/* Create the SWITCH statements for ENDFINALLY instructions */
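/* Each case value i + 1 matches the value the corresponding OP_CALL_HANDLER
 * stored into the finally indicator variable, so control returns to the
 * bblock following that handler call. */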
for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
BBInfo *info = &bblocks [bb->block_num];
GSList *l;
for (l = info->endfinally_switch_ins_list; l; l = l->next) {
LLVMValueRef switch_ins = (LLVMValueRef)l->data;
GSList *bb_list = info->call_handler_return_bbs;
GSList *bb_list_iter;
i = 0;
for (bb_list_iter = bb_list; bb_list_iter; bb_list_iter = g_slist_next (bb_list_iter)) {
LLVMAddCase (switch_ins, LLVMConstInt (LLVMInt32Type (), i + 1, FALSE), (LLVMBasicBlockRef)bb_list_iter->data);
i ++;
}
}
}
ctx->module->max_method_idx = MAX (ctx->module->max_method_idx, cfg->method_index);
after_codegen_1:
if (llvmonly_fail) {
/*
* FIXME: Maybe fallback to interpreter
*/
static LLVMTypeRef sig;
ctx->builder = create_builder (ctx);
LLVMPositionBuilderAtEnd (ctx->builder, ctx->inited_bb);
char *name = mono_method_get_full_name (cfg->method);
int len = strlen (name);
LLVMTypeRef type = LLVMArrayType (LLVMInt8Type (), len + 1);
LLVMValueRef name_var = LLVMAddGlobal (ctx->lmodule, type, "missing_method_name");
LLVMSetVisibility (name_var, LLVMHiddenVisibility);
LLVMSetLinkage (name_var, LLVMInternalLinkage);
LLVMSetInitializer (name_var, mono_llvm_create_constant_data_array ((guint8*)name, len + 1));
mono_llvm_set_is_constant (name_var);
g_free (name);
if (!sig)
sig = LLVMFunctionType1 (LLVMVoidType (), ctx->module->ptr_type, FALSE);
LLVMValueRef callee = get_callee (ctx, sig, MONO_PATCH_INFO_JIT_ICALL_ADDR, GUINT_TO_POINTER (MONO_JIT_ICALL_mini_llvmonly_throw_aot_failed_exception));
LLVMValueRef args [] = { convert (ctx, name_var, ctx->module->ptr_type) };
LLVMBuildCall (ctx->builder, callee, args, 1, "");
LLVMBuildUnreachable (ctx->builder);
}
/* Initialize the method if needed */
if (cfg->compile_aot) {
// FIXME: Add more shared got entries
ctx->builder = create_builder (ctx);
LLVMPositionBuilderAtEnd (ctx->builder, ctx->init_bb);
// FIXME: beforefieldinit
/*
* NATIVE_TO_MANAGED methods might be called on a thread not attached to the runtime, so they are initialized when loaded
* in load_method ().
*/
gboolean needs_init = ctx->cfg->got_access_count > 0;
MonoMethod *cctor = NULL;
if (!needs_init && (cctor = mono_class_get_cctor (cfg->method->klass))) {
/* Needs init to run the cctor */
if (cfg->method->flags & METHOD_ATTRIBUTE_STATIC)
needs_init = TRUE;
if (cctor == cfg->method)
needs_init = FALSE;
// If we are a constructor, we need to init so the static
// constructor gets called.
if (!strcmp (cfg->method->name, ".ctor"))
needs_init = TRUE;
}
if (cfg->method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
needs_init = FALSE;
if (needs_init)
emit_method_init (ctx);
else
LLVMBuildBr (ctx->builder, ctx->inited_bb);
// LLVM was observed moving field accesses into the caller's method
// body before the (inlined) init call, leading to NULL derefs after
// init_method returns (even though the GOT is filled out).
if (needs_init)
mono_llvm_add_func_attr (method, LLVM_ATTR_NO_INLINE);
}
if (mini_get_debug_options ()->llvm_disable_inlining)
mono_llvm_add_func_attr (method, LLVM_ATTR_NO_INLINE);
after_codegen:
if (cfg->compile_aot)
g_ptr_array_add (ctx->module->cfgs, cfg);
if (cfg->llvm_only) {
/*
* Add the contents of ctx->callsite_list to module->callsite_list.
* We can't do this earlier, as it contains llvm instructions which can be
* freed if compilation fails.
* FIXME: Get rid of this when all methods can be llvm compiled.
*/
for (int i = 0; i < ctx->callsite_list->len; ++i)
g_ptr_array_add (ctx->module->callsite_list, g_ptr_array_index (ctx->callsite_list, i));
}
if (cfg->verbose_level > 1) {
g_print ("\n*** Unoptimized LLVM IR for %s ***\n", mono_method_full_name (cfg->method, TRUE));
if (cfg->compile_aot) {
mono_llvm_dump_value (method);
} else {
mono_llvm_dump_module (ctx->lmodule);
}
g_print ("***\n\n");
}
if (cfg->compile_aot && !cfg->llvm_only)
mark_as_used (ctx->module, method);
if (!cfg->llvm_only) {
LLVMValueRef md_args [16];
LLVMValueRef md_node;
int method_index;
if (cfg->compile_aot)
method_index = mono_aot_get_method_index (cfg->orig_method);
else
method_index = 1;
md_args [0] = LLVMMDString (ctx->method_name, strlen (ctx->method_name));
md_args [1] = LLVMConstInt (LLVMInt32Type (), method_index, FALSE);
md_node = LLVMMDNode (md_args, 2);
LLVMAddNamedMetadataOperand (lmodule, "mono.function_indexes", md_node);
//LLVMSetMetadata (method, md_kind, LLVMMDNode (&md_arg, 1));
}
if (cfg->compile_aot) {
/* Don't generate native code, keep the LLVM IR */
if (cfg->verbose_level) {
char *name = mono_method_get_full_name (cfg->method);
printf ("%s emitted as %s\n", name, ctx->method_name);
g_free (name);
}
#if 0
int err = LLVMVerifyFunction (ctx->lmethod, LLVMPrintMessageAction);
if (err != 0)
LLVMDumpValue (ctx->lmethod);
g_assert (err == 0);
#endif
} else {
//LLVMVerifyFunction (method, 0);
llvm_jit_finalize_method (ctx);
}
if (ctx->module->method_to_lmethod)
g_hash_table_insert (ctx->module->method_to_lmethod, cfg->method, ctx->lmethod);
if (ctx->module->idx_to_lmethod)
g_hash_table_insert (ctx->module->idx_to_lmethod, GINT_TO_POINTER (cfg->method_index), ctx->lmethod);
if (ctx->llvm_only && m_class_is_valuetype (cfg->orig_method->klass) && !(cfg->orig_method->flags & METHOD_ATTRIBUTE_STATIC))
emit_unbox_tramp (ctx, ctx->method_name, ctx->method_type, ctx->lmethod, cfg->method_index);
}
/*
* mono_llvm_create_vars:
*
* Same as mono_arch_create_vars () for LLVM.
*/
void
mono_llvm_create_vars (MonoCompile *cfg)
{
MonoMethodSignature *sig;
sig = mono_method_signature_internal (cfg->method);
if (cfg->gsharedvt && cfg->llvm_only) {
gboolean vretaddr = FALSE;
if (mini_is_gsharedvt_variable_signature (sig) && sig->ret->type != MONO_TYPE_VOID) {
vretaddr = TRUE;
} else {
MonoMethodSignature *sig = mono_method_signature_internal (cfg->method);
LLVMCallInfo *linfo;
linfo = get_llvm_call_info (cfg, sig);
vretaddr = (linfo->ret.storage == LLVMArgVtypeRetAddr || linfo->ret.storage == LLVMArgVtypeByRef || linfo->ret.storage == LLVMArgGsharedvtFixed || linfo->ret.storage == LLVMArgGsharedvtVariable || linfo->ret.storage == LLVMArgGsharedvtFixedVtype);
}
if (vretaddr) {
/*
* Creating vret_addr forces CEE_SETRET to store the result into it,
* so we don't have to generate any code in our OP_SETRET case.
*/
cfg->vret_addr = mono_compile_create_var (cfg, m_class_get_byval_arg (mono_get_intptr_class ()), OP_ARG);
if (G_UNLIKELY (cfg->verbose_level > 1)) {
printf ("vret_addr = ");
mono_print_ins (cfg->vret_addr);
}
}
} else {
mono_arch_create_vars (cfg);
}
cfg->lmf_ir = TRUE;
}
/*
* mono_llvm_emit_call:
*
* Same as mono_arch_emit_call () for LLVM.
*/
void
mono_llvm_emit_call (MonoCompile *cfg, MonoCallInst *call)
{
MonoInst *in;
MonoMethodSignature *sig;
int i, n;
LLVMArgInfo *ainfo;
sig = call->signature;
n = sig->param_count + sig->hasthis;
if (sig->call_convention == MONO_CALL_VARARG) {
cfg->exception_message = g_strdup ("varargs");
cfg->disable_llvm = TRUE;
return;
}
call->cinfo = get_llvm_call_info (cfg, sig);
if (cfg->disable_llvm)
return;
for (i = 0; i < n; ++i) {
MonoInst *ins;
ainfo = call->cinfo->args + i;
in = call->args [i];
/* Simply remember the arguments */
switch (ainfo->storage) {
case LLVMArgNormal: {
MonoType *t = (sig->hasthis && i == 0) ? m_class_get_byval_arg (mono_get_intptr_class ()) : ainfo->type;
int opcode;
opcode = mono_type_to_regmove (cfg, t);
if (opcode == OP_FMOVE) {
MONO_INST_NEW (cfg, ins, OP_FMOVE);
ins->dreg = mono_alloc_freg (cfg);
} else if (opcode == OP_LMOVE) {
MONO_INST_NEW (cfg, ins, OP_LMOVE);
ins->dreg = mono_alloc_lreg (cfg);
} else if (opcode == OP_RMOVE) {
MONO_INST_NEW (cfg, ins, OP_RMOVE);
ins->dreg = mono_alloc_freg (cfg);
} else {
MONO_INST_NEW (cfg, ins, OP_MOVE);
ins->dreg = mono_alloc_ireg (cfg);
}
ins->sreg1 = in->dreg;
break;
}
case LLVMArgVtypeByVal:
case LLVMArgVtypeByRef:
case LLVMArgVtypeInReg:
case LLVMArgVtypeAddr:
case LLVMArgVtypeAsScalar:
case LLVMArgAsIArgs:
case LLVMArgAsFpArgs:
case LLVMArgGsharedvtVariable:
case LLVMArgGsharedvtFixed:
case LLVMArgGsharedvtFixedVtype:
case LLVMArgWasmVtypeAsScalar:
MONO_INST_NEW (cfg, ins, OP_LLVM_OUTARG_VT);
ins->dreg = mono_alloc_ireg (cfg);
ins->sreg1 = in->dreg;
ins->inst_p0 = mono_mempool_alloc0 (cfg->mempool, sizeof (LLVMArgInfo));
memcpy (ins->inst_p0, ainfo, sizeof (LLVMArgInfo));
ins->inst_vtype = ainfo->type;
ins->klass = mono_class_from_mono_type_internal (ainfo->type);
break;
default:
cfg->exception_message = g_strdup ("ainfo->storage");
cfg->disable_llvm = TRUE;
return;
}
if (!cfg->disable_llvm) {
MONO_ADD_INS (cfg->cbb, ins);
mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, 0, FALSE);
}
}
}
static inline void
add_func (LLVMModuleRef module, const char *name, LLVMTypeRef ret_type, LLVMTypeRef *param_types, int nparams)
{
LLVMAddFunction (module, name, LLVMFunctionType (ret_type, param_types, nparams, FALSE));
}
static LLVMValueRef
add_intrins (LLVMModuleRef module, IntrinsicId id, LLVMTypeRef *params, int nparams)
{
return mono_llvm_register_overloaded_intrinsic (module, id, params, nparams);
}
static LLVMValueRef
add_intrins1 (LLVMModuleRef module, IntrinsicId id, LLVMTypeRef param1)
{
return mono_llvm_register_overloaded_intrinsic (module, id, ¶m1, 1);
}
static LLVMValueRef
add_intrins2 (LLVMModuleRef module, IntrinsicId id, LLVMTypeRef param1, LLVMTypeRef param2)
{
LLVMTypeRef params [] = { param1, param2 };
return mono_llvm_register_overloaded_intrinsic (module, id, params, 2);
}
static LLVMValueRef
add_intrins3 (LLVMModuleRef module, IntrinsicId id, LLVMTypeRef param1, LLVMTypeRef param2, LLVMTypeRef param3)
{
LLVMTypeRef params [] = { param1, param2, param3 };
return mono_llvm_register_overloaded_intrinsic (module, id, params, 3);
}
static void
add_intrinsic (LLVMModuleRef module, int id)
{
/* Register simple intrinsics */
LLVMValueRef intrins = mono_llvm_register_intrinsic (module, (IntrinsicId)id);
if (intrins) {
g_hash_table_insert (intrins_id_to_intrins, GINT_TO_POINTER (id), intrins);
return;
}
if (intrin_arm64_ovr [id] != 0) {
llvm_ovr_tag_t spec = intrin_arm64_ovr [id];
for (int vw = 0; vw < INTRIN_vectorwidths; ++vw) {
for (int ew = 0; ew < INTRIN_elementwidths; ++ew) {
llvm_ovr_tag_t vec_bit = INTRIN_vector128 >> ((INTRIN_vectorwidths - 1) - vw);
llvm_ovr_tag_t elem_bit = INTRIN_int8 << ew;
llvm_ovr_tag_t test = vec_bit | elem_bit;
if ((spec & test) == test) {
uint8_t kind = intrin_kind [id];
LLVMTypeRef distinguishing_type = intrin_types [vw][ew];
if (kind == INTRIN_kind_ftoi && (elem_bit & (INTRIN_int32 | INTRIN_int64))) {
/*
* @llvm.aarch64.neon.fcvtas.v4i32.v4f32
* @llvm.aarch64.neon.fcvtas.v2i64.v2f64
*/
intrins = add_intrins2 (module, id, distinguishing_type, intrin_types [vw][ew + 2]);
} else if (kind == INTRIN_kind_widen) {
/*
* @llvm.aarch64.neon.saddlp.v2i64.v4i32
* @llvm.aarch64.neon.saddlp.v4i16.v8i8
*/
intrins = add_intrins2 (module, id, distinguishing_type, intrin_types [vw][ew - 1]);
} else if (kind == INTRIN_kind_widen_across) {
/*
* @llvm.aarch64.neon.saddlv.i64.v4i32
* @llvm.aarch64.neon.saddlv.i32.v8i16
* @llvm.aarch64.neon.saddlv.i32.v16i8
* i8/i16 return types for NEON intrinsics will make isel fail as of LLVM 9.
*/
int associated_prim = MAX(ew + 1, 2);
LLVMTypeRef associated_scalar_type = intrin_types [0][associated_prim];
intrins = add_intrins2 (module, id, associated_scalar_type, distinguishing_type);
} else if (kind == INTRIN_kind_across) {
/*
* @llvm.aarch64.neon.uaddv.i64.v4i64
* @llvm.aarch64.neon.uaddv.i32.v4i32
* @llvm.aarch64.neon.uaddv.i32.v8i16
* @llvm.aarch64.neon.uaddv.i32.v16i8
* i8/i16 return types for NEON intrinsics will make isel fail as of LLVM 9.
*/
int associated_prim = MAX(ew, 2);
LLVMTypeRef associated_scalar_type = intrin_types [0][associated_prim];
intrins = add_intrins2 (module, id, associated_scalar_type, distinguishing_type);
} else if (kind == INTRIN_kind_arm64_dot_prod) {
/*
* @llvm.aarch64.neon.sdot.v2i32.v8i8
* @llvm.aarch64.neon.sdot.v4i32.v16i8
*/
LLVMTypeRef associated_type = intrin_types [vw][0];
intrins = add_intrins2 (module, id, distinguishing_type, associated_type);
} else
intrins = add_intrins1 (module, id, distinguishing_type);
int key = key_from_id_and_tag (id, test);
g_hash_table_insert (intrins_id_to_intrins, GINT_TO_POINTER (key), intrins);
}
}
}
return;
}
/* Register overloaded intrinsics */
switch (id) {
#define INTRINS(intrin_name, llvm_id, arch)
#define INTRINS_OVR(intrin_name, llvm_id, arch, llvm_type) case INTRINS_ ## intrin_name: intrins = add_intrins1(module, id, llvm_type); break;
#define INTRINS_OVR_2_ARG(intrin_name, llvm_id, arch, llvm_type1, llvm_type2) case INTRINS_ ## intrin_name: intrins = add_intrins2(module, id, llvm_type1, llvm_type2); break;
#define INTRINS_OVR_3_ARG(intrin_name, llvm_id, arch, llvm_type1, llvm_type2, llvm_type3) case INTRINS_ ## intrin_name: intrins = add_intrins3(module, id, llvm_type1, llvm_type2, llvm_type3); break;
#define INTRINS_OVR_TAG(...)
#define INTRINS_OVR_TAG_KIND(...)
#include "llvm-intrinsics.h"
default:
g_assert_not_reached ();
break;
}
g_assert (intrins);
g_hash_table_insert (intrins_id_to_intrins, GINT_TO_POINTER (id), intrins);
}
static LLVMValueRef
get_intrins_from_module (LLVMModuleRef lmodule, int id)
{
LLVMValueRef res;
res = (LLVMValueRef)g_hash_table_lookup (intrins_id_to_intrins, GINT_TO_POINTER (id));
g_assert (res);
return res;
}
static LLVMValueRef
get_intrins (EmitContext *ctx, int id)
{
return get_intrins_from_module (ctx->lmodule, id);
}
static void
add_intrinsics (LLVMModuleRef module)
{
int i;
/* Emit declarations of intrinsics */
/*
* It would be nicer to emit only the intrinsics actually used, but LLVM's Module
* type doesn't seem to do any locking.
*/
for (i = 0; i < INTRINS_NUM; ++i)
add_intrinsic (module, i);
/* EH intrinsics */
add_func (module, "mono_personality", LLVMVoidType (), NULL, 0);
add_func (module, "llvm_resume_unwind_trampoline", LLVMVoidType (), NULL, 0);
}
static void
add_types (MonoLLVMModule *module)
{
module->ptr_type = LLVMPointerType (TARGET_SIZEOF_VOID_P == 8 ? LLVMInt64Type () : LLVMInt32Type (), 0);
}
void
mono_llvm_init (gboolean enable_jit)
{
intrin_types [0][0] = i1_t = LLVMInt8Type ();
intrin_types [0][1] = i2_t = LLVMInt16Type ();
intrin_types [0][2] = i4_t = LLVMInt32Type ();
intrin_types [0][3] = i8_t = LLVMInt64Type ();
intrin_types [0][4] = r4_t = LLVMFloatType ();
intrin_types [0][5] = r8_t = LLVMDoubleType ();
intrin_types [1][0] = v64_i1_t = LLVMVectorType (LLVMInt8Type (), 8);
intrin_types [1][1] = v64_i2_t = LLVMVectorType (LLVMInt16Type (), 4);
intrin_types [1][2] = v64_i4_t = LLVMVectorType (LLVMInt32Type (), 2);
intrin_types [1][3] = v64_i8_t = LLVMVectorType (LLVMInt64Type (), 1);
intrin_types [1][4] = v64_r4_t = LLVMVectorType (LLVMFloatType (), 2);
intrin_types [1][5] = v64_r8_t = LLVMVectorType (LLVMDoubleType (), 1);
intrin_types [2][0] = v128_i1_t = sse_i1_t = type_to_sse_type (MONO_TYPE_I1);
intrin_types [2][1] = v128_i2_t = sse_i2_t = type_to_sse_type (MONO_TYPE_I2);
intrin_types [2][2] = v128_i4_t = sse_i4_t = type_to_sse_type (MONO_TYPE_I4);
intrin_types [2][3] = v128_i8_t = sse_i8_t = type_to_sse_type (MONO_TYPE_I8);
intrin_types [2][4] = v128_r4_t = sse_r4_t = type_to_sse_type (MONO_TYPE_R4);
intrin_types [2][5] = v128_r8_t = sse_r8_t = type_to_sse_type (MONO_TYPE_R8);
intrins_id_to_intrins = g_hash_table_new (NULL, NULL);
void_func_t = LLVMFunctionType0 (LLVMVoidType (), FALSE);
if (enable_jit)
mono_llvm_jit_init ();
}
void
mono_llvm_free_mem_manager (MonoJitMemoryManager *mem_manager)
{
MonoLLVMModule *module = (MonoLLVMModule*)mem_manager->llvm_module;
int i;
if (!module)
return;
g_hash_table_destroy (module->llvm_types);
mono_llvm_dispose_ee (module->mono_ee);
if (module->bb_names) {
for (i = 0; i < module->bb_names_len; ++i)
g_free (module->bb_names [i]);
g_free (module->bb_names);
}
//LLVMDisposeModule (module->module);
g_free (module);
mem_manager->llvm_module = NULL;
}
void
mono_llvm_create_aot_module (MonoAssembly *assembly, const char *global_prefix, int initial_got_size, LLVMModuleFlags flags)
{
MonoLLVMModule *module = &aot_module;
gboolean emit_dwarf = (flags & LLVM_MODULE_FLAG_DWARF) ? 1 : 0;
#ifdef TARGET_WIN32_MSVC
gboolean emit_codeview = (flags & LLVM_MODULE_FLAG_CODEVIEW) ? 1 : 0;
#endif
gboolean static_link = (flags & LLVM_MODULE_FLAG_STATIC) ? 1 : 0;
gboolean llvm_only = (flags & LLVM_MODULE_FLAG_LLVM_ONLY) ? 1 : 0;
gboolean interp = (flags & LLVM_MODULE_FLAG_INTERP) ? 1 : 0;
/* Delete previous module */
g_hash_table_destroy (module->plt_entries);
if (module->lmodule)
LLVMDisposeModule (module->lmodule);
memset (module, 0, sizeof (aot_module));
module->lmodule = LLVMModuleCreateWithName ("aot");
module->assembly = assembly;
module->global_prefix = g_strdup (global_prefix);
module->eh_frame_symbol = g_strdup_printf ("%s_eh_frame", global_prefix);
module->get_method_symbol = g_strdup_printf ("%s_get_method", global_prefix);
module->get_unbox_tramp_symbol = g_strdup_printf ("%s_get_unbox_tramp", global_prefix);
module->init_aotconst_symbol = g_strdup_printf ("%s_init_aotconst", global_prefix);
module->external_symbols = TRUE;
module->emit_dwarf = emit_dwarf;
module->static_link = static_link;
module->llvm_only = llvm_only;
module->interp = interp;
/* The first few entries are reserved */
module->max_got_offset = initial_got_size;
module->context = LLVMGetGlobalContext ();
module->cfgs = g_ptr_array_new ();
module->aotconst_vars = g_hash_table_new (NULL, NULL);
module->llvm_types = g_hash_table_new (NULL, NULL);
module->plt_entries = g_hash_table_new (g_str_hash, g_str_equal);
module->plt_entries_ji = g_hash_table_new (NULL, NULL);
module->direct_callables = g_hash_table_new (g_str_hash, g_str_equal);
module->idx_to_lmethod = g_hash_table_new (NULL, NULL);
module->method_to_lmethod = g_hash_table_new (NULL, NULL);
module->method_to_call_info = g_hash_table_new (NULL, NULL);
module->idx_to_unbox_tramp = g_hash_table_new (NULL, NULL);
module->no_method_table_lmethods = g_hash_table_new (NULL, NULL);
module->callsite_list = g_ptr_array_new ();
if (llvm_only)
/* clang ignores our debug info because it has an invalid version */
module->emit_dwarf = FALSE;
add_intrinsics (module->lmodule);
add_types (module);
#ifdef MONO_ARCH_LLVM_TARGET_LAYOUT
LLVMSetDataLayout (module->lmodule, MONO_ARCH_LLVM_TARGET_LAYOUT);
#else
g_assert_not_reached ();
#endif
#ifdef MONO_ARCH_LLVM_TARGET_TRIPLE
LLVMSetTarget (module->lmodule, MONO_ARCH_LLVM_TARGET_TRIPLE);
#endif
if (module->emit_dwarf) {
char *dir, *build_info, *s, *cu_name;
module->di_builder = mono_llvm_create_di_builder (module->lmodule);
// FIXME:
dir = g_strdup (".");
build_info = mono_get_runtime_build_info ();
s = g_strdup_printf ("Mono AOT Compiler %s (LLVM)", build_info);
cu_name = g_path_get_basename (assembly->image->name);
module->cu = mono_llvm_di_create_compile_unit (module->di_builder, cu_name, dir, s);
g_free (dir);
g_free (build_info);
g_free (s);
}
#ifdef TARGET_WIN32_MSVC
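/* Illustrative sketch of the result below (node numbering depends on other
 * module flags): emitting CodeView appends the operand
 * !{i32 2, !"CodeView", i32 1} to !llvm.module.flags. */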
if (emit_codeview) {
LLVMValueRef codeview_option_args[3];
codeview_option_args[0] = LLVMConstInt (LLVMInt32Type (), 2, FALSE);
codeview_option_args[1] = LLVMMDString ("CodeView", 8);
codeview_option_args[2] = LLVMConstInt (LLVMInt32Type (), 1, FALSE);
LLVMAddNamedMetadataOperand (module->lmodule, "llvm.module.flags", LLVMMDNode (codeview_option_args, G_N_ELEMENTS (codeview_option_args)));
}
if (!static_link) {
const char linker_options[] = "Linker Options";
const char *default_dynamic_lib_names[] = { "/DEFAULTLIB:msvcrt",
"/DEFAULTLIB:ucrt.lib",
"/DEFAULTLIB:vcruntime.lib" };
LLVMValueRef default_lib_args[G_N_ELEMENTS (default_dynamic_lib_names)];
LLVMValueRef default_lib_nodes[G_N_ELEMENTS(default_dynamic_lib_names)];
for (int i = 0; i < G_N_ELEMENTS (default_dynamic_lib_names); ++i) {
const char *default_lib_name = default_dynamic_lib_names[i];
default_lib_args[i] = LLVMMDString (default_lib_name, strlen (default_lib_name));
default_lib_nodes[i] = LLVMMDNode (default_lib_args + i, 1);
}
LLVMAddNamedMetadataOperand (module->lmodule, "llvm.linker.options", LLVMMDNode (default_lib_args, G_N_ELEMENTS (default_lib_args)));
}
#endif
{
LLVMTypeRef got_type = LLVMArrayType (module->ptr_type, 16);
module->dummy_got_var = LLVMAddGlobal (module->lmodule, got_type, "dummy_got");
module->got_idx_to_type = g_hash_table_new (NULL, NULL);
LLVMSetInitializer (module->dummy_got_var, LLVMConstNull (got_type));
LLVMSetVisibility (module->dummy_got_var, LLVMHiddenVisibility);
LLVMSetLinkage (module->dummy_got_var, LLVMInternalLinkage);
}
/* Add initialization array */
LLVMTypeRef inited_type = LLVMArrayType (LLVMInt8Type (), 0);
module->inited_var = LLVMAddGlobal (aot_module.lmodule, inited_type, "mono_inited_tmp");
LLVMSetInitializer (module->inited_var, LLVMConstNull (inited_type));
create_aot_info_var (module);
emit_gc_safepoint_poll (module, module->lmodule, NULL);
emit_llvm_code_start (module);
// Needs idx_to_lmethod
emit_init_funcs (module);
/* Add a dummy personality function */
if (!use_mono_personality_debug) {
LLVMValueRef personality = LLVMAddFunction (module->lmodule, default_personality_name, LLVMFunctionType (LLVMInt32Type (), NULL, 0, TRUE));
LLVMSetLinkage (personality, LLVMExternalLinkage);
// EMCC chokes if the personality function is referenced in the 'used' array
#ifndef TARGET_WASM
mark_as_used (module, personality);
#endif
}
/* Add a reference to the c++ exception we throw/catch */
{
LLVMTypeRef exc = LLVMPointerType (LLVMInt8Type (), 0);
module->sentinel_exception = LLVMAddGlobal (module->lmodule, exc, "_ZTIPi");
LLVMSetLinkage (module->sentinel_exception, LLVMExternalLinkage);
mono_llvm_set_is_constant (module->sentinel_exception);
}
}
void
mono_llvm_fixup_aot_module (void)
{
MonoLLVMModule *module = &aot_module;
MonoMethod *method;
/*
* Replace GOT entries for directly callable methods with the methods themselves.
* It would be easier to implement this by predefining all methods before compiling
* their bodies, but that couldn't handle the case when a method fails to compile
* with llvm.
*/
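/*
 * Illustrative sketch (method name hypothetical): a call site recorded below as
 *   %m = load i8*, i8** <placeholder for Foo>
 *   call ... %m (...)
 * has its placeholder replaced with the compiled @Foo itself, making the call
 * direct; sites which cannot be called directly get a real aotconst load instead.
 */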
GHashTable *specializable = g_hash_table_new (NULL, NULL);
GHashTable *patches_to_null = g_hash_table_new (mono_patch_info_hash, mono_patch_info_equal);
for (int sindex = 0; sindex < module->callsite_list->len; ++sindex) {
CallSite *site = (CallSite*)g_ptr_array_index (module->callsite_list, sindex);
method = site->method;
LLVMValueRef lmethod = (LLVMValueRef)g_hash_table_lookup (module->method_to_lmethod, method);
LLVMValueRef placeholder = (LLVMValueRef)site->load;
LLVMValueRef load;
gboolean can_direct_call = FALSE;
/* Replace sharable instances with their shared version */
if (!lmethod && method->is_inflated) {
if (mono_method_is_generic_sharable_full (method, FALSE, TRUE, FALSE)) {
ERROR_DECL (error);
MonoMethod *shared = mini_get_shared_method_full (method, SHARE_MODE_NONE, error);
if (is_ok (error)) {
lmethod = (LLVMValueRef)g_hash_table_lookup (module->method_to_lmethod, shared);
if (lmethod)
method = shared;
}
}
}
if (lmethod && !m_method_is_synchronized (method)) {
can_direct_call = TRUE;
} else if (m_method_is_wrapper (method) && !method->is_inflated) {
WrapperInfo *info = mono_marshal_get_wrapper_info (method);
/* This is a call from the synchronized wrapper to the real method */
if (info->subtype == WRAPPER_SUBTYPE_SYNCHRONIZED_INNER) {
method = info->d.synchronized.method;
lmethod = (LLVMValueRef)g_hash_table_lookup (module->method_to_lmethod, method);
if (lmethod)
can_direct_call = TRUE;
}
}
if (can_direct_call) {
mono_llvm_replace_uses_of (placeholder, lmethod);
if (mono_aot_can_specialize (method))
g_hash_table_insert (specializable, lmethod, method);
g_hash_table_insert (patches_to_null, site->ji, site->ji);
} else {
// FIXME:
LLVMBuilderRef builder = LLVMCreateBuilder ();
LLVMPositionBuilderBefore (builder, placeholder);
load = get_aotconst_module (module, builder, site->ji->type, site->ji->data.target, site->type, NULL, NULL);
LLVMReplaceAllUsesWith (placeholder, load);
}
g_free (site);
}
mono_llvm_propagate_nonnull_final (specializable, module);
g_hash_table_destroy (specializable);
for (int i = 0; i < module->cfgs->len; ++i) {
/*
* Nullify the patches pointing to direct calls. This is needed to
* avoid allocating extra got slots, which is a perf problem and it
* makes module->max_got_offset invalid.
* It would be better to just store the patch_info in CallSite, but
* cfg->patch_info is copied in aot-compiler.c.
*/
MonoCompile *cfg = (MonoCompile *)g_ptr_array_index (module->cfgs, i);
for (MonoJumpInfo *patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
if (patch_info->type == MONO_PATCH_INFO_METHOD) {
if (g_hash_table_lookup (patches_to_null, patch_info)) {
patch_info->type = MONO_PATCH_INFO_NONE;
/* Nullify the call to init_method () if possible */
g_assert (cfg->got_access_count);
cfg->got_access_count --;
if (cfg->got_access_count == 0) {
LLVMValueRef br = (LLVMValueRef)cfg->llvmonly_init_cond;
if (br)
LLVMSetSuccessor (br, 0, LLVMGetSuccessor (br, 1));
}
}
}
}
}
g_hash_table_destroy (patches_to_null);
}
static LLVMValueRef
llvm_array_from_uints (LLVMTypeRef el_type, guint32 *values, int nvalues)
{
int i;
LLVMValueRef res, *vals;
vals = g_new0 (LLVMValueRef, nvalues);
for (i = 0; i < nvalues; ++i)
vals [i] = LLVMConstInt (LLVMInt32Type (), values [i], FALSE);
res = LLVMConstArray (LLVMInt32Type (), vals, nvalues);
g_free (vals);
return res;
}
static LLVMValueRef
llvm_array_from_bytes (guint8 *values, int nvalues)
{
int i;
LLVMValueRef res, *vals;
vals = g_new0 (LLVMValueRef, nvalues);
for (i = 0; i < nvalues; ++i)
vals [i] = LLVMConstInt (LLVMInt8Type (), values [i], FALSE);
res = LLVMConstArray (LLVMInt8Type (), vals, nvalues);
g_free (vals);
return res;
}
/*
* mono_llvm_emit_aot_file_info:
*
* Emit the MonoAotFileInfo structure.
* Same as emit_aot_file_info () in aot-compiler.c.
*/
void
mono_llvm_emit_aot_file_info (MonoAotFileInfo *info, gboolean has_jitted_code)
{
MonoLLVMModule *module = &aot_module;
/* Save these for later */
memcpy (&module->aot_info, info, sizeof (MonoAotFileInfo));
module->has_jitted_code = has_jitted_code;
}
/*
* mono_llvm_emit_aot_data:
*
* Emit the binary data DATA pointed to by symbol SYMBOL.
* Return the LLVM variable for the data.
*/
gpointer
mono_llvm_emit_aot_data_aligned (const char *symbol, guint8 *data, int data_len, int align)
{
MonoLLVMModule *module = &aot_module;
LLVMTypeRef type;
LLVMValueRef d;
type = LLVMArrayType (LLVMInt8Type (), data_len);
d = LLVMAddGlobal (module->lmodule, type, symbol);
LLVMSetVisibility (d, LLVMHiddenVisibility);
LLVMSetLinkage (d, LLVMInternalLinkage);
LLVMSetInitializer (d, mono_llvm_create_constant_data_array (data, data_len));
if (align != 1)
LLVMSetAlignment (d, align);
mono_llvm_set_is_constant (d);
return d;
}
gpointer
mono_llvm_emit_aot_data (const char *symbol, guint8 *data, int data_len)
{
return mono_llvm_emit_aot_data_aligned (symbol, data, data_len, 8);
}
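/*
 * Illustrative usage (symbol name hypothetical), roughly as the aot compiler
 * would call it:
 *   guint8 blob [] = { 0x01, 0x02, 0x03 };
 *   mono_llvm_emit_aot_data ("example_blob", blob, sizeof (blob));
 * This becomes a hidden, internal, constant [3 x i8] global named
 * "example_blob" with the default 8 byte alignment.
 */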
/* Add a reference to a global defined in JITted code */
static LLVMValueRef
AddJitGlobal (MonoLLVMModule *module, LLVMTypeRef type, const char *name)
{
char *s;
LLVMValueRef v;
s = g_strdup_printf ("%s%s", module->global_prefix, name);
v = LLVMAddGlobal (module->lmodule, LLVMInt8Type (), s);
LLVMSetVisibility (v, LLVMHiddenVisibility);
g_free (s);
return v;
}
#define FILE_INFO_NUM_HEADER_FIELDS 2
#define FILE_INFO_NUM_SCALAR_FIELDS 23
#define FILE_INFO_NUM_ARRAY_FIELDS 5
#define FILE_INFO_NUM_AOTID_FIELDS 1
#define FILE_INFO_NFIELDS (FILE_INFO_NUM_HEADER_FIELDS + MONO_AOT_FILE_INFO_NUM_SYMBOLS + FILE_INFO_NUM_SCALAR_FIELDS + FILE_INFO_NUM_ARRAY_FIELDS + FILE_INFO_NUM_AOTID_FIELDS)
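/* These counts mirror the MonoAotFileInfo layout; create_aot_info_var () below
 * builds the matching LLVM struct type, so the two must stay in sync. */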
static void
create_aot_info_var (MonoLLVMModule *module)
{
LLVMTypeRef file_info_type;
LLVMTypeRef *eltypes;
LLVMValueRef info_var;
int i, nfields, tindex;
LLVMModuleRef lmodule = module->lmodule;
/* Create an LLVM type to represent MonoAotFileInfo */
nfields = FILE_INFO_NFIELDS;
eltypes = g_new (LLVMTypeRef, nfields);
tindex = 0;
eltypes [tindex ++] = LLVMInt32Type ();
eltypes [tindex ++] = LLVMInt32Type ();
/* Symbols */
for (i = 0; i < MONO_AOT_FILE_INFO_NUM_SYMBOLS; ++i)
eltypes [tindex ++] = LLVMPointerType (LLVMInt8Type (), 0);
/* Scalars */
for (i = 0; i < FILE_INFO_NUM_SCALAR_FIELDS; ++i)
eltypes [tindex ++] = LLVMInt32Type ();
/* Arrays */
eltypes [tindex ++] = LLVMArrayType (LLVMInt32Type (), MONO_AOT_TABLE_NUM);
for (i = 0; i < FILE_INFO_NUM_ARRAY_FIELDS - 1; ++i)
eltypes [tindex ++] = LLVMArrayType (LLVMInt32Type (), MONO_AOT_TRAMP_NUM);
eltypes [tindex ++] = LLVMArrayType (LLVMInt8Type (), 16);
g_assert (tindex == nfields);
file_info_type = LLVMStructCreateNamed (module->context, "MonoAotFileInfo");
LLVMStructSetBody (file_info_type, eltypes, nfields, FALSE);
info_var = LLVMAddGlobal (lmodule, file_info_type, "mono_aot_file_info");
module->info_var = info_var;
module->info_var_eltypes = eltypes;
}
static void
emit_aot_file_info (MonoLLVMModule *module)
{
LLVMTypeRef *eltypes, eltype;
LLVMValueRef info_var;
LLVMValueRef *fields;
int i, nfields, tindex;
MonoAotFileInfo *info;
LLVMModuleRef lmodule = module->lmodule;
info = &module->aot_info;
info_var = module->info_var;
eltypes = module->info_var_eltypes;
nfields = FILE_INFO_NFIELDS;
if (module->static_link) {
LLVMSetVisibility (info_var, LLVMHiddenVisibility);
LLVMSetLinkage (info_var, LLVMInternalLinkage);
}
#ifdef TARGET_WIN32
if (!module->static_link) {
LLVMSetDLLStorageClass (info_var, LLVMDLLExportStorageClass);
}
#endif
fields = g_new (LLVMValueRef, nfields);
tindex = 0;
fields [tindex ++] = LLVMConstInt (LLVMInt32Type (), info->version, FALSE);
fields [tindex ++] = LLVMConstInt (LLVMInt32Type (), info->dummy, FALSE);
/* Symbols */
/*
* We use LLVMGetNamedGlobal () for symbols which are defined in LLVM code, and LLVMAddGlobal ()
* for symbols defined in the .s file emitted by the aot compiler.
*/
eltype = eltypes [tindex];
if (module->llvm_only)
fields [tindex ++] = LLVMConstNull (eltype);
else
fields [tindex ++] = AddJitGlobal (module, eltype, "jit_got");
/* llc defines this directly */
if (!module->llvm_only) {
fields [tindex ++] = LLVMAddGlobal (lmodule, eltype, module->eh_frame_symbol);
fields [tindex ++] = LLVMConstNull (eltype);
fields [tindex ++] = LLVMConstNull (eltype);
} else {
fields [tindex ++] = LLVMConstNull (eltype);
fields [tindex ++] = module->get_method;
fields [tindex ++] = module->get_unbox_tramp ? module->get_unbox_tramp : LLVMConstNull (eltype);
}
fields [tindex ++] = module->init_aotconst_func;
if (module->has_jitted_code) {
fields [tindex ++] = AddJitGlobal (module, eltype, "jit_code_start");
fields [tindex ++] = AddJitGlobal (module, eltype, "jit_code_end");
} else {
fields [tindex ++] = LLVMConstNull (eltype);
fields [tindex ++] = LLVMConstNull (eltype);
}
if (!module->llvm_only)
fields [tindex ++] = AddJitGlobal (module, eltype, "method_addresses");
else
fields [tindex ++] = LLVMConstNull (eltype);
if (module->llvm_only && module->unbox_tramp_indexes) {
fields [tindex ++] = module->unbox_tramp_indexes;
fields [tindex ++] = module->unbox_trampolines;
} else {
fields [tindex ++] = LLVMConstNull (eltype);
fields [tindex ++] = LLVMConstNull (eltype);
}
if (info->flags & MONO_AOT_FILE_FLAG_SEPARATE_DATA) {
for (i = 0; i < MONO_AOT_TABLE_NUM; ++i)
fields [tindex ++] = LLVMConstNull (eltype);
} else {
fields [tindex ++] = LLVMGetNamedGlobal (lmodule, "blob");
fields [tindex ++] = LLVMGetNamedGlobal (lmodule, "class_name_table");
fields [tindex ++] = LLVMGetNamedGlobal (lmodule, "class_info_offsets");
fields [tindex ++] = LLVMGetNamedGlobal (lmodule, "method_info_offsets");
fields [tindex ++] = LLVMGetNamedGlobal (lmodule, "ex_info_offsets");
fields [tindex ++] = LLVMGetNamedGlobal (lmodule, "extra_method_info_offsets");
fields [tindex ++] = LLVMGetNamedGlobal (lmodule, "extra_method_table");
fields [tindex ++] = LLVMGetNamedGlobal (lmodule, "got_info_offsets");
fields [tindex ++] = LLVMGetNamedGlobal (lmodule, "llvm_got_info_offsets");
fields [tindex ++] = LLVMGetNamedGlobal (lmodule, "image_table");
fields [tindex ++] = LLVMGetNamedGlobal (lmodule, "weak_field_indexes");
fields [tindex ++] = LLVMGetNamedGlobal (lmodule, "method_flags_table");
}
/* Not needed (mem_end) */
fields [tindex ++] = LLVMConstNull (eltype);
fields [tindex ++] = LLVMGetNamedGlobal (lmodule, "assembly_guid");
fields [tindex ++] = LLVMGetNamedGlobal (lmodule, "runtime_version");
if (info->trampoline_size [0]) {
fields [tindex ++] = AddJitGlobal (module, eltype, "specific_trampolines");
fields [tindex ++] = AddJitGlobal (module, eltype, "static_rgctx_trampolines");
fields [tindex ++] = AddJitGlobal (module, eltype, "imt_trampolines");
fields [tindex ++] = AddJitGlobal (module, eltype, "gsharedvt_arg_trampolines");
fields [tindex ++] = AddJitGlobal (module, eltype, "ftnptr_arg_trampolines");
fields [tindex ++] = AddJitGlobal (module, eltype, "unbox_arbitrary_trampolines");
} else {
fields [tindex ++] = LLVMConstNull (eltype);
fields [tindex ++] = LLVMConstNull (eltype);
fields [tindex ++] = LLVMConstNull (eltype);
fields [tindex ++] = LLVMConstNull (eltype);
fields [tindex ++] = LLVMConstNull (eltype);
fields [tindex ++] = LLVMConstNull (eltype);
}
if (module->static_link && !module->llvm_only)
fields [tindex ++] = AddJitGlobal (module, eltype, "globals");
else
fields [tindex ++] = LLVMConstNull (eltype);
fields [tindex ++] = LLVMGetNamedGlobal (lmodule, "assembly_name");
if (!module->llvm_only) {
fields [tindex ++] = AddJitGlobal (module, eltype, "plt");
fields [tindex ++] = AddJitGlobal (module, eltype, "plt_end");
fields [tindex ++] = AddJitGlobal (module, eltype, "unwind_info");
fields [tindex ++] = AddJitGlobal (module, eltype, "unbox_trampolines");
fields [tindex ++] = AddJitGlobal (module, eltype, "unbox_trampolines_end");
fields [tindex ++] = AddJitGlobal (module, eltype, "unbox_trampoline_addresses");
} else {
fields [tindex ++] = LLVMConstNull (eltype);
fields [tindex ++] = LLVMConstNull (eltype);
fields [tindex ++] = LLVMConstNull (eltype);
fields [tindex ++] = LLVMConstNull (eltype);
fields [tindex ++] = LLVMConstNull (eltype);
fields [tindex ++] = LLVMConstNull (eltype);
}
for (i = 0; i < MONO_AOT_FILE_INFO_NUM_SYMBOLS; ++i) {
g_assert (fields [FILE_INFO_NUM_HEADER_FIELDS + i]);
fields [FILE_INFO_NUM_HEADER_FIELDS + i] = LLVMConstBitCast (fields [FILE_INFO_NUM_HEADER_FIELDS + i], eltype);
}
/* Scalars */
fields [tindex ++] = LLVMConstInt (LLVMInt32Type (), info->plt_got_offset_base, FALSE);
fields [tindex ++] = LLVMConstInt (LLVMInt32Type (), info->plt_got_info_offset_base, FALSE);
fields [tindex ++] = LLVMConstInt (LLVMInt32Type (), info->got_size, FALSE);
fields [tindex ++] = LLVMConstInt (LLVMInt32Type (), info->llvm_got_size, FALSE);
fields [tindex ++] = LLVMConstInt (LLVMInt32Type (), info->plt_size, FALSE);
fields [tindex ++] = LLVMConstInt (LLVMInt32Type (), info->nmethods, FALSE);
fields [tindex ++] = LLVMConstInt (LLVMInt32Type (), info->nextra_methods, FALSE);
fields [tindex ++] = LLVMConstInt (LLVMInt32Type (), info->flags, FALSE);
fields [tindex ++] = LLVMConstInt (LLVMInt32Type (), info->opts, FALSE);
fields [tindex ++] = LLVMConstInt (LLVMInt32Type (), info->simd_opts, FALSE);
fields [tindex ++] = LLVMConstInt (LLVMInt32Type (), info->gc_name_index, FALSE);
fields [tindex ++] = LLVMConstInt (LLVMInt32Type (), info->num_rgctx_fetch_trampolines, FALSE);
fields [tindex ++] = LLVMConstInt (LLVMInt32Type (), info->double_align, FALSE);
fields [tindex ++] = LLVMConstInt (LLVMInt32Type (), info->long_align, FALSE);
fields [tindex ++] = LLVMConstInt (LLVMInt32Type (), info->generic_tramp_num, FALSE);
fields [tindex ++] = LLVMConstInt (LLVMInt32Type (), info->card_table_shift_bits, FALSE);
fields [tindex ++] = LLVMConstInt (LLVMInt32Type (), info->card_table_mask, FALSE);
fields [tindex ++] = LLVMConstInt (LLVMInt32Type (), info->tramp_page_size, FALSE);
fields [tindex ++] = LLVMConstInt (LLVMInt32Type (), info->call_table_entry_size, FALSE);
fields [tindex ++] = LLVMConstInt (LLVMInt32Type (), info->nshared_got_entries, FALSE);
fields [tindex ++] = LLVMConstInt (LLVMInt32Type (), info->datafile_size, FALSE);
fields [tindex ++] = LLVMConstInt (LLVMInt32Type (), module->unbox_tramp_num, FALSE);
fields [tindex ++] = LLVMConstInt (LLVMInt32Type (), module->unbox_tramp_elemsize, FALSE);
/* Arrays */
fields [tindex ++] = llvm_array_from_uints (LLVMInt32Type (), info->table_offsets, MONO_AOT_TABLE_NUM);
fields [tindex ++] = llvm_array_from_uints (LLVMInt32Type (), info->num_trampolines, MONO_AOT_TRAMP_NUM);
fields [tindex ++] = llvm_array_from_uints (LLVMInt32Type (), info->trampoline_got_offset_base, MONO_AOT_TRAMP_NUM);
fields [tindex ++] = llvm_array_from_uints (LLVMInt32Type (), info->trampoline_size, MONO_AOT_TRAMP_NUM);
fields [tindex ++] = llvm_array_from_uints (LLVMInt32Type (), info->tramp_page_code_offsets, MONO_AOT_TRAMP_NUM);
fields [tindex ++] = llvm_array_from_bytes (info->aotid, 16);
g_assert (tindex == nfields);
LLVMSetInitializer (info_var, LLVMConstNamedStruct (LLVMGetElementType (LLVMTypeOf (info_var)), fields, nfields));
if (module->static_link) {
char *s, *p;
LLVMValueRef var;
s = g_strdup_printf ("mono_aot_module_%s_info", module->assembly->aname.name);
/* Get rid of characters which cannot occur in symbols */
for (p = s; *p; ++p) {
if (!(isalnum (*p) || *p == '_'))
*p = '_';
}
var = LLVMAddGlobal (module->lmodule, LLVMPointerType (LLVMInt8Type (), 0), s);
g_free (s);
LLVMSetInitializer (var, LLVMConstBitCast (LLVMGetNamedGlobal (module->lmodule, "mono_aot_file_info"), LLVMPointerType (LLVMInt8Type (), 0)));
LLVMSetLinkage (var, LLVMExternalLinkage);
}
}
typedef struct {
LLVMValueRef lmethod;
int argument;
} NonnullPropWorkItem;
static void
mono_llvm_nonnull_state_update (EmitContext *ctx, LLVMValueRef lcall, MonoMethod *call_method, LLVMValueRef *args, int num_params)
{
if (mono_aot_can_specialize (call_method)) {
int num_passed = LLVMGetNumArgOperands (lcall);
g_assert (num_params <= num_passed);
g_assert (ctx->module->method_to_call_info);
GArray *call_site_union = (GArray *) g_hash_table_lookup (ctx->module->method_to_call_info, call_method);
if (!call_site_union) {
call_site_union = g_array_sized_new (FALSE, TRUE, sizeof (gint32), num_params);
int zero = 0;
for (int i = 0; i < num_params; i++)
g_array_insert_val (call_site_union, i, zero);
}
for (int i = 0; i < num_params; i++) {
if (mono_llvm_is_nonnull (args [i])) {
g_assert (i < LLVMGetNumArgOperands (lcall));
mono_llvm_set_call_nonnull_arg (lcall, i);
} else {
gint32 *nullable_count = &g_array_index (call_site_union, gint32, i);
*nullable_count = *nullable_count + 1;
}
}
g_hash_table_insert (ctx->module->method_to_call_info, call_method, call_site_union);
}
}
static void
mono_llvm_propagate_nonnull_final (GHashTable *all_specializable, MonoLLVMModule *module)
{
// When we first traverse the mini IL, we mark the things that are
// nonnull (the roots). Then, for all of the methods that can be specialized, we
// see if their call sites have nonnull attributes.
// If so, we mark the function's param. This param has uses to propagate
// the attribute to. This propagation can trigger a need to mark more attributes
// non-null, and so on and so forth.
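//
// Hedged example of the fixpoint (method names hypothetical): if every recorded
// call site of Foo passes a nonnull value for argument 0, Foo's nullable count
// for that slot stays 0 and (Foo, 0) seeds the queue below. Marking Foo's
// param 0 nonnull can then zero the count of some Bar which Foo calls with that
// param, queueing (Bar, 0) in turn, until the queue drains.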
GSList *queue = NULL;
GHashTableIter iter;
LLVMValueRef lmethod;
MonoMethod *method;
g_hash_table_iter_init (&iter, all_specializable);
while (g_hash_table_iter_next (&iter, (void**)&lmethod, (void**)&method)) {
GArray *call_site_union = (GArray *) g_hash_table_lookup (module->method_to_call_info, method);
// Basic sanity checking
if (call_site_union)
g_assert (call_site_union->len == LLVMCountParams (lmethod));
// Add root to work queue
for (int i = 0; call_site_union && i < call_site_union->len; i++) {
if (g_array_index (call_site_union, gint32, i) == 0) {
NonnullPropWorkItem *item = g_malloc (sizeof (NonnullPropWorkItem));
item->lmethod = lmethod;
item->argument = i;
queue = g_slist_prepend (queue, item);
}
}
}
// This is essentially reference counting, and we are propagating
// the refcount decrement here. We have less work to do than we may otherwise
// because we are only working with a set of subgraphs of specializable functions.
//
// We rely on being able to see all of the references in the graph.
// This is ensured by the function mono_aot_can_specialize. Everything in
// all_specializable is a function that can be specialized, and is the resulting
// node in the graph after all of the substitutions are done.
//
// Anything disrupting the direct calls made with self-init will break this optimization.
while (queue) {
// Update the queue state.
// Our only other per-iteration responsibility is now to free current
NonnullPropWorkItem *current = (NonnullPropWorkItem *) queue->data;
queue = queue->next;
g_assert (current->argument < LLVMCountParams (current->lmethod));
// Does the actual leaf-node work here
// Mark the function argument as nonnull for LLVM
mono_llvm_set_func_nonnull_arg (current->lmethod, current->argument);
// The rest of this is for propagating forward nullability changes
// to calls that use the argument that is now nullable.
// Get the actual LLVM value of the argument, so we can see which call instructions
// used that argument
LLVMValueRef caller_argument = LLVMGetParam (current->lmethod, current->argument);
// Iterate over the calls using the newly-non-nullable argument
GSList *calls = mono_llvm_calls_using (caller_argument);
for (GSList *cursor = calls; cursor != NULL; cursor = cursor->next) {
LLVMValueRef lcall = (LLVMValueRef) cursor->data;
LLVMValueRef callee_lmethod = LLVMGetCalledValue (lcall);
// If this wasn't a direct call for which mono_aot_can_specialize is true,
// this lookup won't find a MonoMethod.
MonoMethod *callee_method = (MonoMethod *) g_hash_table_lookup (all_specializable, callee_lmethod);
if (!callee_method)
continue;
// Decrement number of nullable refs at that func's arg offset
GArray *call_site_union = (GArray *) g_hash_table_lookup (module->method_to_call_info, callee_method);
// It has module-local callers and is specializable, should have seen this call site
// and inited this
g_assert (call_site_union);
// The function *definition* parameter arity should always be consistent
int max_params = LLVMCountParams (callee_lmethod);
if (call_site_union->len != max_params) {
mono_llvm_dump_value (callee_lmethod);
g_assert_not_reached ();
}
// Get the values that correspond to the parameters passed to the call
// that used our argument
LLVMValueRef *operands = mono_llvm_call_args (lcall);
for (int call_argument = 0; call_argument < max_params; call_argument++) {
// Every time we used the newly-non-nullable argument, decrement the nullable
// refcount for that function.
if (caller_argument == operands [call_argument]) {
gint32 *nullable_count = &g_array_index (call_site_union, gint32, call_argument);
g_assert (*nullable_count > 0);
*nullable_count = *nullable_count - 1;
// If we caused that callee's parameter to become newly nullable, add to work queue
if (*nullable_count == 0) {
NonnullPropWorkItem *item = g_malloc (sizeof (NonnullPropWorkItem));
item->lmethod = callee_lmethod;
item->argument = call_argument;
queue = g_slist_prepend (queue, item);
}
}
}
g_free (operands);
// Update nullability refcount information for the callee now
g_hash_table_insert (module->method_to_call_info, callee_method, call_site_union);
}
g_slist_free (calls);
g_free (current);
}
}
/*
* Emit the aot module into the LLVM bitcode file FILENAME.
*/
void
mono_llvm_emit_aot_module (const char *filename, const char *cu_name)
{
LLVMTypeRef inited_type;
LLVMValueRef real_inited;
MonoLLVMModule *module = &aot_module;
emit_llvm_code_end (module);
/*
* Create the real init_var and replace all uses of the dummy variable with
* the real one.
*/
inited_type = LLVMArrayType (LLVMInt8Type (), module->max_inited_idx + 1);
real_inited = LLVMAddGlobal (module->lmodule, inited_type, "mono_inited");
LLVMSetInitializer (real_inited, LLVMConstNull (inited_type));
LLVMSetLinkage (real_inited, LLVMInternalLinkage);
mono_llvm_replace_uses_of (module->inited_var, real_inited);
LLVMDeleteGlobal (module->inited_var);
/* Replace the dummy info_ variables with the real ones */
for (int i = 0; i < module->cfgs->len; ++i) {
MonoCompile *cfg = (MonoCompile *)g_ptr_array_index (module->cfgs, i);
// FIXME: Eliminate unused vars
// FIXME: Speed this up
if (cfg->llvm_dummy_info_var) {
if (cfg->llvm_info_var) {
mono_llvm_replace_uses_of (cfg->llvm_dummy_info_var, cfg->llvm_info_var);
LLVMDeleteGlobal (cfg->llvm_dummy_info_var);
} else {
// FIXME: How can this happen ?
LLVMSetInitializer (cfg->llvm_dummy_info_var, mono_llvm_create_constant_data_array (NULL, 0));
}
}
}
if (module->llvm_only) {
emit_get_method (&aot_module);
emit_get_unbox_tramp (&aot_module);
}
emit_init_aotconst (module);
emit_llvm_used (&aot_module);
emit_dbg_info (&aot_module, filename, cu_name);
emit_aot_file_info (&aot_module);
/* Replace PLT entries for directly callable methods with the methods themselves */
{
GHashTableIter iter;
MonoJumpInfo *ji;
LLVMValueRef callee;
GHashTable *specializable = g_hash_table_new (NULL, NULL);
g_hash_table_iter_init (&iter, module->plt_entries_ji);
while (g_hash_table_iter_next (&iter, (void**)&ji, (void**)&callee)) {
if (mono_aot_is_direct_callable (ji)) {
LLVMValueRef lmethod;
lmethod = (LLVMValueRef)g_hash_table_lookup (module->method_to_lmethod, ji->data.method);
/* The types might not match because the caller might pass an rgctx */
if (lmethod && LLVMTypeOf (callee) == LLVMTypeOf (lmethod)) {
mono_llvm_replace_uses_of (callee, lmethod);
if (mono_aot_can_specialize (ji->data.method))
g_hash_table_insert (specializable, lmethod, ji->data.method);
mono_aot_mark_unused_llvm_plt_entry (ji);
}
}
}
mono_llvm_propagate_nonnull_final (specializable, module);
g_hash_table_destroy (specializable);
}
#if 0
{
char *verifier_err;
if (LLVMVerifyModule (module->lmodule, LLVMReturnStatusAction, &verifier_err)) {
printf ("%s\n", verifier_err);
g_assert_not_reached ();
}
}
#endif
/* Note: you can still dump an invalid bitcode file by running `llvm-dis`
 * under a debugger, setting a breakpoint on `LLVMVerifyModule` and faking its
 * result to 0 (indicating success). */
LLVMWriteBitcodeToFile (module->lmodule, filename);
}
static LLVMValueRef
md_string (const char *s)
{
return LLVMMDString (s, strlen (s));
}
/* Debugging support */
static void
emit_dbg_info (MonoLLVMModule *module, const char *filename, const char *cu_name)
{
LLVMModuleRef lmodule = module->lmodule;
LLVMValueRef args [16], ver;
/*
* This can only be enabled when LLVM code is emitted into a separate object
* file, since the AOT compiler also emits dwarf info,
* and the abbrev indexes will not be correct since llvm has added its own
* abbrevs.
*/
if (!module->emit_dwarf)
return;
mono_llvm_di_builder_finalize (module->di_builder);
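/*
 * Illustrative result of the two nodes built below (node numbers depend on the
 * module): !{i32 2, !"Dwarf Version", i32 2} and
 * !{i32 2, !"Debug Info Version", i64 3} are appended to !llvm.module.flags.
 */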
args [0] = LLVMConstInt (LLVMInt32Type (), 2, FALSE);
args [1] = LLVMMDString ("Dwarf Version", strlen ("Dwarf Version"));
args [2] = LLVMConstInt (LLVMInt32Type (), 2, FALSE);
ver = LLVMMDNode (args, 3);
LLVMAddNamedMetadataOperand (lmodule, "llvm.module.flags", ver);
args [0] = LLVMConstInt (LLVMInt32Type (), 2, FALSE);
args [1] = LLVMMDString ("Debug Info Version", strlen ("Debug Info Version"));
args [2] = LLVMConstInt (LLVMInt64Type (), 3, FALSE);
ver = LLVMMDNode (args, 3);
LLVMAddNamedMetadataOperand (lmodule, "llvm.module.flags", ver);
}
static LLVMValueRef
emit_dbg_subprogram (EmitContext *ctx, MonoCompile *cfg, LLVMValueRef method, const char *name)
{
MonoLLVMModule *module = ctx->module;
MonoDebugMethodInfo *minfo = ctx->minfo;
char *source_file, *dir, *filename;
MonoSymSeqPoint *sym_seq_points;
int n_seq_points;
if (!minfo)
return NULL;
mono_debug_get_seq_points (minfo, &source_file, NULL, NULL, &sym_seq_points, &n_seq_points);
if (!source_file)
source_file = g_strdup ("<unknown>");
dir = g_path_get_dirname (source_file);
filename = g_path_get_basename (source_file);
g_free (source_file);
return (LLVMValueRef)mono_llvm_di_create_function (module->di_builder, module->cu, method, cfg->method->name, name, dir, filename, n_seq_points ? sym_seq_points [0].line : 1);
}
static void
emit_dbg_loc (EmitContext *ctx, LLVMBuilderRef builder, const unsigned char *cil_code)
{
MonoCompile *cfg = ctx->cfg;
if (ctx->minfo && cil_code && cil_code >= cfg->header->code && cil_code < cfg->header->code + cfg->header->code_size) {
MonoDebugSourceLocation *loc;
LLVMValueRef loc_md;
loc = mono_debug_method_lookup_location (ctx->minfo, cil_code - cfg->header->code);
if (loc) {
loc_md = (LLVMValueRef)mono_llvm_di_create_location (ctx->module->di_builder, ctx->dbg_md, loc->row, loc->column);
mono_llvm_di_set_location (builder, loc_md);
mono_debug_free_source_location (loc);
}
}
}
static void
emit_default_dbg_loc (EmitContext *ctx, LLVMBuilderRef builder)
{
if (ctx->minfo) {
LLVMValueRef loc_md;
loc_md = (LLVMValueRef)mono_llvm_di_create_location (ctx->module->di_builder, ctx->dbg_md, 0, 0);
mono_llvm_di_set_location (builder, loc_md);
}
}
/*
DESIGN:
- Emit LLVM IR from the mono IR using the LLVM C API.
- The original arch specific code remains, so we can fall back to it if we run
into something we can't handle.
*/
/*
A partial list of issues:
- Handling of opcodes which can throw exceptions.
In the mono JIT, these are implemented using code like this:
method:
<compare>
throw_pos:
b<cond> ex_label
<rest of code>
ex_label:
push throw_pos - method
call <exception trampoline>
The problematic part is push throw_pos - method, which cannot be represented
in the LLVM IR, since it does not support label values.
-> this can be implemented in AOT mode using inline asm + labels, but cannot
be implemented in JIT mode ?
-> a possible but slower implementation would use the normal exception
throwing code but it would need to control the placement of the throw code
(it needs to be exactly after the compare+branch).
-> perhaps add a PC offset intrinsic ?
- efficient implementation of .ovf opcodes.
These are currently implemented as:
<ins which sets the condition codes>
b<cond> ex_label
Some overflow opcodes are now supported by LLVM SVN.
- exception handling, unwinding.
- SSA is disabled for methods with exception handlers
- How to obtain unwind info for LLVM compiled methods ?
-> this is now solved by converting the unwind info generated by LLVM
into our format.
- LLVM uses the c++ exception handling framework, while we use our home-grown
code, and couldn't use the c++ one:
- it's not supported under VC++ and other exotic platforms.
- it might be impossible to support filter clauses with it.
- trampolines.
The trampolines need a predictable call sequence, since they need to disasm
the calling code to obtain register numbers / offsets.
LLVM currently generates this code in non-JIT mode:
mov -0x98(%rax),%eax
callq *%rax
Here, the vtable pointer is lost.
-> solution: use one vtable trampoline per class.
- passing/receiving the IMT pointer/RGCTX.
-> solution: pass them as normal arguments ?
- argument passing.
LLVM does not allow the specification of argument registers etc. This means
that all calls are made according to the platform ABI.
- passing/receiving vtypes.
Vtypes passed/received in registers are handled by the front end by using
a signature with scalar arguments, and loading the parts of the vtype into those
arguments.
Vtypes passed on the stack are handled using the 'byval' attribute.
- ldaddr.
Supported through alloca; we need to emit the load/store code.
- types.
The mono JIT uses pointer sized iregs/double fregs, while LLVM uses precisely
typed registers, so we have to keep track of the precise LLVM type of each vreg.
This is made easier because the IR is already in SSA form.
An additional problem is that our IR is not consistent with types, i.e. i32/i64
types are frequently used incorrectly.
*/
/*
AOT SUPPORT:
Emit LLVM bytecode into a .bc file, compile it using llc into a .s file, then link
it with the file containing the methods emitted by the JIT and the AOT data
structures.
*/
/* FIXME: Normalize some aspects of the mono IR to allow easier translation, like:
* - each bblock should end with a branch
* - setting the return value, making cfg->ret non-volatile
* - avoid some transformations in the JIT which make it harder for us to generate
* code.
* - use pointer types to help optimizations.
*/
#else /* DISABLE_JIT */
void
mono_llvm_cleanup (void)
{
}
void
mono_llvm_free_mem_manager (MonoJitMemoryManager *mem_manager)
{
}
void
mono_llvm_init (gboolean enable_jit)
{
}
#endif /* DISABLE_JIT */
#if !defined(DISABLE_JIT) && !defined(MONO_CROSS_COMPILE)
/* LLVM JIT support */
/*
* decode_llvm_eh_info:
*
* Decode the EH table emitted by llvm in jit mode, and store
* the result into cfg.
*/
static void
decode_llvm_eh_info (EmitContext *ctx, gpointer eh_frame)
{
MonoCompile *cfg = ctx->cfg;
guint8 *cie, *fde;
int fde_len;
MonoLLVMFDEInfo info;
MonoJitExceptionInfo *ei;
guint8 *p = (guint8*)eh_frame;
int version, fde_count, fde_offset;
guint32 ei_len, i, nested_len;
gpointer *type_info;
gint32 *table;
guint8 *unw_info;
/*
* Decode the one element EH table emitted by the MonoException class
* in llvm.
*/
/* Similar to decode_llvm_mono_eh_frame () in aot-runtime.c */
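/*
 * Rough layout being parsed here (a sketch inferred from the reads below, not
 * a spec):
 *   byte    version (== 3)
 *   byte    unused/flags (skipped)
 *   <pad to 4 byte alignment>
 *   guint32 fde_count
 *   gint32  table [fde_count * 2]   (entry kind/index, fde offset) pairs
 *   gint32  code_len, fde_end_offset  extra terminating pair
 *   ...     CIE data; the FDE itself lives at eh_frame + fde_offset
 */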
version = *p;
g_assert (version == 3);
p ++;
p ++;
p = (guint8 *)ALIGN_PTR_TO (p, 4);
fde_count = *(guint32*)p;
p += 4;
table = (gint32*)p;
g_assert (fde_count <= 2);
/* The first entry is the real method */
g_assert (table [0] == 1);
fde_offset = table [1];
table += fde_count * 2;
/* Extra entry */
cfg->code_len = table [0];
fde_len = table [1] - fde_offset;
table += 2;
fde = (guint8*)eh_frame + fde_offset;
cie = (guint8*)table;
/* Compute lengths */
mono_unwind_decode_llvm_mono_fde (fde, fde_len, cie, cfg->native_code, &info, NULL, NULL, NULL);
ei = (MonoJitExceptionInfo *)g_malloc0 (info.ex_info_len * sizeof (MonoJitExceptionInfo));
type_info = (gpointer *)g_malloc0 (info.ex_info_len * sizeof (gpointer));
unw_info = (guint8*)g_malloc0 (info.unw_info_len);
mono_unwind_decode_llvm_mono_fde (fde, fde_len, cie, cfg->native_code, &info, ei, type_info, unw_info);
cfg->encoded_unwind_ops = unw_info;
cfg->encoded_unwind_ops_len = info.unw_info_len;
if (cfg->verbose_level > 1)
mono_print_unwind_info (cfg->encoded_unwind_ops, cfg->encoded_unwind_ops_len);
if (info.this_reg != -1) {
cfg->llvm_this_reg = info.this_reg;
cfg->llvm_this_offset = info.this_offset;
}
ei_len = info.ex_info_len;
// Nested clauses are currently disabled
nested_len = 0;
cfg->llvm_ex_info = (MonoJitExceptionInfo*)mono_mempool_alloc0 (cfg->mempool, (ei_len + nested_len) * sizeof (MonoJitExceptionInfo));
cfg->llvm_ex_info_len = ei_len + nested_len;
memcpy (cfg->llvm_ex_info, ei, ei_len * sizeof (MonoJitExceptionInfo));
/* Fill the rest of the information from the type info */
for (i = 0; i < ei_len; ++i) {
gint32 clause_index = *(gint32*)type_info [i];
MonoExceptionClause *clause = &cfg->header->clauses [clause_index];
cfg->llvm_ex_info [i].flags = clause->flags;
cfg->llvm_ex_info [i].data.catch_class = clause->data.catch_class;
cfg->llvm_ex_info [i].clause_index = clause_index;
}
}
static MonoLLVMModule*
init_jit_module (void)
{
MonoJitMemoryManager *jit_mm;
MonoLLVMModule *module;
// FIXME:
jit_mm = get_default_jit_mm ();
if (jit_mm->llvm_module)
return (MonoLLVMModule*)jit_mm->llvm_module;
mono_loader_lock ();
if (jit_mm->llvm_module) {
mono_loader_unlock ();
return (MonoLLVMModule*)jit_mm->llvm_module;
}
module = g_new0 (MonoLLVMModule, 1);
module->context = LLVMGetGlobalContext ();
module->mono_ee = (MonoEERef*)mono_llvm_create_ee (&module->ee);
// This contains just the intrinsics
module->lmodule = LLVMModuleCreateWithName ("jit-global-module");
add_intrinsics (module->lmodule);
add_types (module);
module->llvm_types = g_hash_table_new (NULL, NULL);
mono_memory_barrier ();
jit_mm->llvm_module = module;
mono_loader_unlock ();
return (MonoLLVMModule*)jit_mm->llvm_module;
}
static void
llvm_jit_finalize_method (EmitContext *ctx)
{
MonoCompile *cfg = ctx->cfg;
int nvars = g_hash_table_size (ctx->jit_callees);
LLVMValueRef *callee_vars = g_new0 (LLVMValueRef, nvars);
gpointer *callee_addrs = g_new0 (gpointer, nvars);
GHashTableIter iter;
LLVMValueRef var;
MonoMethod *callee;
gpointer eh_frame;
int i;
/*
* Compute the addresses of the LLVM globals pointing to the
* methods called by the current method. Pass it to the trampoline
* code so it can update them after their corresponding method was
* compiled.
*/
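/*
 * Sketch (names hypothetical): a call to Foo () made through an LLVM global
 * @foo_addr ends up with @foo_addr in callee_vars []; callee_addrs [] then
 * receives the slot which the trampoline code patches once Foo is compiled.
 */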
g_hash_table_iter_init (&iter, ctx->jit_callees);
i = 0;
while (g_hash_table_iter_next (&iter, NULL, (void**)&var))
callee_vars [i ++] = var;
mono_llvm_optimize_method (ctx->lmethod);
if (cfg->verbose_level > 1) {
g_print ("\n*** Optimized LLVM IR for %s ***\n", mono_method_full_name (cfg->method, TRUE));
if (cfg->compile_aot) {
mono_llvm_dump_value (ctx->lmethod);
} else {
mono_llvm_dump_module (ctx->lmodule);
}
g_print ("***\n\n");
}
mono_codeman_enable_write ();
cfg->native_code = (guint8*)mono_llvm_compile_method (ctx->module->mono_ee, cfg, ctx->lmethod, nvars, callee_vars, callee_addrs, &eh_frame);
mono_llvm_remove_gc_safepoint_poll (ctx->lmodule);
mono_codeman_disable_write ();
decode_llvm_eh_info (ctx, eh_frame);
// FIXME:
MonoJitMemoryManager *jit_mm = get_default_jit_mm ();
jit_mm_lock (jit_mm);
if (!jit_mm->llvm_jit_callees)
jit_mm->llvm_jit_callees = g_hash_table_new (NULL, NULL);
g_hash_table_iter_init (&iter, ctx->jit_callees);
i = 0;
while (g_hash_table_iter_next (&iter, (void**)&callee, (void**)&var)) {
GSList *addrs = (GSList*)g_hash_table_lookup (jit_mm->llvm_jit_callees, callee);
addrs = g_slist_prepend (addrs, callee_addrs [i]);
g_hash_table_insert (jit_mm->llvm_jit_callees, callee, addrs);
i ++;
}
jit_mm_unlock (jit_mm);
}
#else
static MonoLLVMModule*
init_jit_module (void)
{
g_assert_not_reached ();
}
static void
llvm_jit_finalize_method (EmitContext *ctx)
{
g_assert_not_reached ();
}
#endif
static MonoCPUFeatures cpu_features;
MonoCPUFeatures mono_llvm_get_cpu_features (void)
{
static const CpuFeatureAliasFlag flags_map [] = {
#if defined(TARGET_X86) || defined(TARGET_AMD64)
{ "sse", MONO_CPU_X86_SSE },
{ "sse2", MONO_CPU_X86_SSE2 },
{ "pclmul", MONO_CPU_X86_PCLMUL },
{ "aes", MONO_CPU_X86_AES },
{ "sse2", MONO_CPU_X86_SSE2 },
{ "sse3", MONO_CPU_X86_SSE3 },
{ "ssse3", MONO_CPU_X86_SSSE3 },
{ "sse4.1", MONO_CPU_X86_SSE41 },
{ "sse4.2", MONO_CPU_X86_SSE42 },
{ "popcnt", MONO_CPU_X86_POPCNT },
{ "avx", MONO_CPU_X86_AVX },
{ "avx2", MONO_CPU_X86_AVX2 },
{ "fma", MONO_CPU_X86_FMA },
{ "lzcnt", MONO_CPU_X86_LZCNT },
{ "bmi", MONO_CPU_X86_BMI1 },
{ "bmi2", MONO_CPU_X86_BMI2 },
#endif
#if defined(TARGET_ARM64)
{ "crc", MONO_CPU_ARM64_CRC },
{ "crypto", MONO_CPU_ARM64_CRYPTO },
{ "neon", MONO_CPU_ARM64_NEON },
{ "rdm", MONO_CPU_ARM64_RDM },
{ "dotprod", MONO_CPU_ARM64_DP },
#endif
#if defined(TARGET_WASM)
{ "simd", MONO_CPU_WASM_SIMD },
#endif
// flags_map cannot be zero length in MSVC, so add useless dummy entry for arm32
#if defined(TARGET_ARM) && defined(HOST_WIN32)
{ "inited", MONO_CPU_INITED},
#endif
};
if (!cpu_features)
cpu_features = MONO_CPU_INITED | (MonoCPUFeatures)mono_llvm_check_cpu_features (flags_map, G_N_ELEMENTS (flags_map));
return cpu_features;
}
| 1 |
dotnet/runtime | 66,006 | Revert "[mono][jit] Remove support for -O=-float32, i.e. treating r4 values a…" | Reverts this as x86 still uses it.
| vargaz | 2022-03-01T15:15:53Z | 2022-03-01T20:09:47Z | af71404d9a3b38e63408af47cc847ac1a1fcf4e1 | 2dd232a53c38ac874b15fe504df275b660988294 | Revert "[mono][jit] Remove support for -O=-float32, i.e. treating r4 values a…". Reverts this as x86 still uses it.
| ./src/mono/mono/mini/mini-native-types.c | /**
* \file
* intrinsics for variable sized int/floats
*
* Author:
* Rodrigo Kumpera ([email protected])
*
* (C) 2013 Xamarin
* Licensed under the MIT license. See LICENSE file in the project root for full license information.
*/
#include <config.h>
#include <stdio.h>
#include "mini.h"
#include "ir-emit.h"
#include "glib.h"
typedef struct {
const char *op_name;
short op_table[4];
} IntIntrisic;
typedef struct {
short op_index;
MonoStackType big_stack_type;
MonoStackType small_stack_type;
MonoStackType stack_type;
short conv_4_to_8;
short conv_8_to_4;
short move;
short inc_op;
short dec_op;
short store_op;
short compare_op;
} MagicTypeInfo;
#if TARGET_SIZEOF_VOID_P == 8
#define OP_PT_ADD OP_LADD
#define OP_PT_SUB OP_LSUB
#define OP_PT_MUL OP_LMUL
#define OP_PT_DIV OP_LDIV
#define OP_PT_REM OP_LREM
#define OP_PT_NEG OP_LNEG
#define OP_PT_AND OP_LAND
#define OP_PT_OR OP_LOR
#define OP_PT_XOR OP_LXOR
#define OP_PT_NOT OP_LNOT
#define OP_PT_SHL OP_LSHL
#define OP_PT_SHR OP_LSHR
#define OP_PT_DIV_UN OP_LDIV_UN
#define OP_PT_REM_UN OP_LREM_UN
#define OP_PT_SHR_UN OP_LSHR_UN
#define OP_PT_ADD_IMM OP_LADD_IMM
#define OP_PT_SUB_IMM OP_LSUB_IMM
#define OP_PT_STORE_FP_MEMBASE_REG OP_STORER8_MEMBASE_REG
#define OP_PCOMPARE OP_LCOMPARE
#else
#define OP_PT_ADD OP_IADD
#define OP_PT_SUB OP_ISUB
#define OP_PT_MUL OP_IMUL
#define OP_PT_DIV OP_IDIV
#define OP_PT_REM OP_IREM
#define OP_PT_NEG OP_INEG
#define OP_PT_AND OP_IAND
#define OP_PT_OR OP_IOR
#define OP_PT_XOR OP_IXOR
#define OP_PT_NOT OP_INOT
#define OP_PT_SHL OP_ISHL
#define OP_PT_SHR OP_ISHR
#define OP_PT_DIV_UN OP_IDIV_UN
#define OP_PT_REM_UN OP_IREM_UN
#define OP_PT_SHR_UN OP_ISHR_UN
#define OP_PT_ADD_IMM OP_IADD_IMM
#define OP_PT_SUB_IMM OP_ISUB_IMM
#define OP_PT_STORE_FP_MEMBASE_REG OP_STORER4_MEMBASE_REG
#define OP_PCOMPARE OP_ICOMPARE
#endif
gsize
mini_magic_type_size (MonoCompile *cfg, MonoType *type)
{
if (type->type == MONO_TYPE_I4 || type->type == MONO_TYPE_U4)
return 4;
else if (type->type == MONO_TYPE_I8 || type->type == MONO_TYPE_U8)
return 8;
else if (type->type == MONO_TYPE_R4 && !m_type_is_byref (type))
return 4;
else if (type->type == MONO_TYPE_R8 && !m_type_is_byref (type))
return 8;
return TARGET_SIZEOF_VOID_P;
}
#ifndef DISABLE_JIT
static const IntIntrisic int_binop[] = {
{ "op_Addition", { OP_PT_ADD, OP_PT_ADD, OP_FADD, OP_RADD } },
{ "op_Subtraction", { OP_PT_SUB, OP_PT_SUB, OP_FSUB, OP_RSUB } },
{ "op_Multiply", { OP_PT_MUL, OP_PT_MUL, OP_FMUL, OP_RMUL } },
{ "op_Division", { OP_PT_DIV, OP_PT_DIV_UN, OP_FDIV, OP_RDIV } },
{ "op_Modulus", { OP_PT_REM, OP_PT_REM_UN, OP_FREM, OP_RREM } },
{ "op_BitwiseAnd", { OP_PT_AND, OP_PT_AND } },
{ "op_BitwiseOr", { OP_PT_OR, OP_PT_OR } },
{ "op_ExclusiveOr", { OP_PT_XOR, OP_PT_XOR } },
{ "op_LeftShift", { OP_PT_SHL, OP_PT_SHL } },
{ "op_RightShift", { OP_PT_SHR, OP_PT_SHR_UN } },
};
static const IntIntrisic int_unnop[] = {
{ "op_UnaryPlus", { OP_MOVE, OP_MOVE, OP_FMOVE, OP_RMOVE } },
{ "op_UnaryNegation", { OP_PT_NEG, OP_PT_NEG, OP_FNEG, OP_RNEG } },
{ "op_OnesComplement", { OP_PT_NOT, OP_PT_NOT, OP_FNOT, OP_RNOT } },
};
static const IntIntrisic int_cmpop[] = {
{ "op_Inequality", { OP_ICNEQ, OP_ICNEQ, OP_FCNEQ, OP_RCNEQ } },
{ "op_Equality", { OP_ICEQ, OP_ICEQ, OP_FCEQ, OP_RCEQ } },
{ "op_GreaterThan", { OP_ICGT, OP_ICGT_UN, OP_FCGT, OP_RCGT } },
{ "op_GreaterThanOrEqual", { OP_ICGE, OP_ICGE_UN, OP_FCLT_UN, OP_RCLT_UN } },
{ "op_LessThan", { OP_ICLT, OP_ICLT_UN, OP_FCLT, OP_RCLT } },
{ "op_LessThanOrEqual", { OP_ICLE, OP_ICLE_UN, OP_FCGT_UN, OP_RCGT_UN } },
};
static const MagicTypeInfo type_info[] = {
//nint
{ 0, STACK_I8, STACK_I4, STACK_PTR, OP_ICONV_TO_I8, OP_LCONV_TO_I4, OP_MOVE, OP_PT_ADD_IMM, OP_PT_SUB_IMM, OP_STORE_MEMBASE_REG, OP_PCOMPARE },
//nuint
{ 1, STACK_I8, STACK_I4, STACK_PTR, OP_ICONV_TO_U8, OP_LCONV_TO_U4, OP_MOVE, OP_PT_ADD_IMM, OP_PT_SUB_IMM, OP_STORE_MEMBASE_REG, OP_PCOMPARE },
//nfloat
{ 2, STACK_R8, STACK_R8, STACK_R8, OP_FCONV_TO_R8, OP_FCONV_TO_R4, OP_FMOVE, 0, 0, OP_PT_STORE_FP_MEMBASE_REG, 0 },
};
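/*
 * Illustrative walk-through (not part of the tables): for "nint.op_Addition"
 * on a 64-bit target, emit_intrinsics () below uses type_info [0]
 * (op_index 0), finds "op_Addition" in int_binop and emits
 * op_table [0] == OP_PT_ADD, which the macros above map to OP_LADD.
 */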
static MonoInst*
emit_narrow (MonoCompile *cfg, const MagicTypeInfo *info, int sreg)
{
MonoInst *ins;
MONO_INST_NEW (cfg, ins, info->conv_8_to_4);
ins->sreg1 = sreg;
if (info->conv_8_to_4 == OP_FCONV_TO_R4)
ins->type = cfg->r4_stack_type;
else
ins->type = info->small_stack_type;
ins->dreg = alloc_dreg (cfg, (MonoStackType)ins->type);
MONO_ADD_INS (cfg->cbb, ins);
return mono_decompose_opcode (cfg, ins);
}
static MonoInst*
emit_widen (MonoCompile *cfg, const MagicTypeInfo *info, int sreg)
{
MonoInst *ins;
if (info->conv_4_to_8 == OP_FCONV_TO_R8)
MONO_INST_NEW (cfg, ins, OP_RCONV_TO_R8);
else
MONO_INST_NEW (cfg, ins, info->conv_4_to_8);
ins->sreg1 = sreg;
ins->type = info->big_stack_type;
ins->dreg = alloc_dreg (cfg, info->big_stack_type);
MONO_ADD_INS (cfg->cbb, ins);
return mono_decompose_opcode (cfg, ins);
}
static MonoInst*
emit_intrinsics (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args, const MagicTypeInfo *info)
{
int i = 0;
const char *name = cmethod->name;
MonoInst *ins;
int type_index;
MonoStackType stack_type;
if (info->op_index == 2 && TARGET_SIZEOF_VOID_P == 4) {
type_index = 3;
stack_type = STACK_R4;
} else {
type_index = info->op_index;
stack_type = info->stack_type;
}
if (!strcmp ("op_Implicit", name) || !strcmp ("op_Explicit", name)) {
int source_size = mini_magic_type_size (cfg, fsig->params [0]);
int dest_size = mini_magic_type_size (cfg, fsig->ret);
switch (info->big_stack_type) {
case STACK_I8:
if (!mini_magic_is_int_type (fsig->params [0]) || !mini_magic_is_int_type (fsig->ret))
return NULL;
break;
case STACK_R8:
if (!mini_magic_is_float_type (fsig->params [0]) || !mini_magic_is_float_type (fsig->ret))
return NULL;
break;
default:
g_assert_not_reached ();
}
//4 -> 4 or 8 -> 8
if (source_size == dest_size)
return args [0];
//4 -> 8
if (source_size < dest_size)
return emit_widen (cfg, info, args [0]->dreg);
//8 -> 4
return emit_narrow (cfg, info, args [0]->dreg);
}
if (!strcmp (".ctor", name)) {
gboolean is_ldaddr = args [0]->opcode == OP_LDADDR;
int arg0 = args [1]->dreg;
int arg_size = mini_magic_type_size (cfg, fsig->params [0]);
if (arg_size > TARGET_SIZEOF_VOID_P) //8 -> 4
arg0 = emit_narrow (cfg, info, arg0)->dreg;
else if (arg_size < TARGET_SIZEOF_VOID_P) //4 -> 8
arg0 = emit_widen (cfg, info, arg0)->dreg;
		if (is_ldaddr) { /* Eliminate LDADDR if it's initializing a local var */
int dreg = ((MonoInst*)args [0]->inst_p0)->dreg;
NULLIFY_INS (args [0]);
EMIT_NEW_UNALU (cfg, ins, info->move, dreg, arg0);
cfg->has_indirection = TRUE;
} else {
EMIT_NEW_STORE_MEMBASE (cfg, ins, info->store_op, args [0]->dreg, 0, arg0);
}
return ins;
}
if (!strcmp ("op_Increment", name) || !strcmp ("op_Decrement", name)) {
gboolean inc = !strcmp ("op_Increment", name);
		/* FIXME: float inc is too complex to bother with */
		// this is broken with ints too
		// if (!info->inc_op)
return NULL;
/* We have IR for inc/dec */
MONO_INST_NEW (cfg, ins, inc ? info->inc_op : info->dec_op);
ins->dreg = alloc_dreg (cfg, (MonoStackType)info->stack_type);
ins->sreg1 = args [0]->dreg;
ins->inst_imm = 1;
ins->type = info->stack_type;
MONO_ADD_INS (cfg->cbb, ins);
return ins;
}
for (i = 0; i < sizeof (int_binop) / sizeof (IntIntrisic); ++i) {
if (!strcmp (int_binop [i].op_name, name)) {
if (!int_binop [i].op_table [info->op_index])
return NULL;
g_assert (int_binop [i].op_table [type_index]);
MONO_INST_NEW (cfg, ins, int_binop [i].op_table [type_index]);
ins->dreg = alloc_dreg (cfg, stack_type);
ins->sreg1 = args [0]->dreg;
ins->sreg2 = args [1]->dreg;
ins->type = stack_type;
MONO_ADD_INS (cfg->cbb, ins);
return mono_decompose_opcode (cfg, ins);
}
}
for (i = 0; i < sizeof (int_unnop) / sizeof (IntIntrisic); ++i) {
if (!strcmp (int_unnop [i].op_name, name)) {
g_assert (int_unnop [i].op_table [type_index]);
MONO_INST_NEW (cfg, ins, int_unnop [i].op_table [type_index]);
ins->dreg = alloc_dreg (cfg, stack_type);
ins->sreg1 = args [0]->dreg;
ins->type = stack_type;
MONO_ADD_INS (cfg->cbb, ins);
return ins;
}
}
for (i = 0; i < sizeof (int_cmpop) / sizeof (IntIntrisic); ++i) {
if (!strcmp (int_cmpop [i].op_name, name)) {
short op_cmp = int_cmpop [i].op_table [type_index];
g_assert (op_cmp);
if (info->compare_op) {
MONO_INST_NEW (cfg, ins, info->compare_op);
ins->dreg = -1;
ins->sreg1 = args [0]->dreg;
ins->sreg2 = args [1]->dreg;
MONO_ADD_INS (cfg->cbb, ins);
MONO_INST_NEW (cfg, ins, op_cmp);
ins->dreg = alloc_preg (cfg);
ins->type = STACK_I4;
MONO_ADD_INS (cfg->cbb, ins);
} else {
MONO_INST_NEW (cfg, ins, op_cmp);
guint32 fcmp_dreg = ins->dreg = alloc_ireg (cfg);
ins->sreg1 = args [0]->dreg;
ins->sreg2 = args [1]->dreg;
MONO_ADD_INS (cfg->cbb, ins);
if (op_cmp == OP_FCLT_UN || op_cmp == OP_FCGT_UN || op_cmp == OP_RCLT_UN || op_cmp == OP_RCGT_UN) {
/* we have to negate the result of this comparison:
* - op_GreaterThanOrEqual maps to NOT x OP_FCLT_UN / OP_RCLT_UN
* - op_LessThanOrEqual maps to NOT x OP_FCGT_UN / OP_RCGT_UN
*
				 * this matches the bytecode generated by C# for the
				 * same operations on float/double. The `_UN` suffix means
				 * that if an operand is NaN, the result is true. If
* OP_FCGE/OP_FCLE is used, it is mapped to instructions
* on some architectures that don't detect NaN. For
* example, on arm64 the condition `eq` doesn't respect
* NaN results of a `fcmp` instruction.
*/
MONO_INST_NEW (cfg, ins, OP_ICOMPARE_IMM);
ins->dreg = -1;
ins->sreg1 = fcmp_dreg;
ins->inst_imm = 0;
MONO_ADD_INS (cfg->cbb, ins);
MONO_INST_NEW (cfg, ins, OP_CEQ);
ins->dreg = alloc_preg (cfg);
ins->type = STACK_I4;
MONO_ADD_INS (cfg->cbb, ins);
}
}
return ins;
}
}
return NULL;
}
MonoInst*
mono_emit_native_types_intrinsics (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
{
if (mono_class_is_magic_int (cmethod->klass)) {
const char *class_name = m_class_get_name (cmethod->klass);
if (!strcmp ("nint", class_name))
return emit_intrinsics (cfg, cmethod, fsig, args, &type_info [0]);
else
return emit_intrinsics (cfg, cmethod, fsig, args, &type_info [1]);
} else if (mono_class_is_magic_float (cmethod->klass))
return emit_intrinsics (cfg, cmethod, fsig, args, &type_info [2]);
return NULL;
}
#endif /* !DISABLE_JIT */
static gboolean
mono_class_is_magic_assembly (MonoClass *klass)
{
const char *aname = m_class_get_image (klass)->assembly_name;
if (!aname)
return FALSE;
if (!strcmp ("Xamarin.iOS", aname))
return TRUE;
if (!strcmp ("Xamarin.Mac", aname))
return TRUE;
if (!strcmp ("Xamarin.WatchOS", aname))
return TRUE;
if (!strcmp ("Xamarin.MacCatalyst", aname))
return TRUE;
if (!strcmp ("Microsoft.iOS", aname))
return TRUE;
if (!strcmp ("Microsoft.macOS", aname))
return TRUE;
if (!strcmp ("Microsoft.watchOS", aname))
return TRUE;
if (!strcmp ("Microsoft.MacCatalyst", aname))
return TRUE;
/* regression test suite */
if (!strcmp ("builtin-types", aname))
return TRUE;
if (!strcmp ("mini_tests", aname))
return TRUE;
return FALSE;
}
gboolean
mono_class_is_magic_int (MonoClass *klass)
{
static MonoClass *magic_nint_class;
static MonoClass *magic_nuint_class;
if (klass == magic_nint_class)
return TRUE;
if (klass == magic_nuint_class)
return TRUE;
if (magic_nint_class && magic_nuint_class)
return FALSE;
if (!mono_class_is_magic_assembly (klass))
return FALSE;
if (strcmp ("System", m_class_get_name_space (klass)) != 0)
return FALSE;
if (strcmp ("nint", m_class_get_name (klass)) == 0) {
magic_nint_class = klass;
return TRUE;
}
if (strcmp ("nuint", m_class_get_name (klass)) == 0){
magic_nuint_class = klass;
return TRUE;
}
return FALSE;
}
gboolean
mono_class_is_magic_float (MonoClass *klass)
{
static MonoClass *magic_nfloat_class;
if (klass == magic_nfloat_class)
return TRUE;
if (magic_nfloat_class)
return FALSE;
if (!mono_class_is_magic_assembly (klass))
return FALSE;
if (strcmp ("System", m_class_get_name_space (klass)) != 0 && strcmp ("ObjCRuntime", m_class_get_name_space (klass)) != 0)
return FALSE;
if (strcmp ("nfloat", m_class_get_name (klass)) == 0) {
magic_nfloat_class = klass;
/* Assert that we are using the matching assembly */
MonoClassField *value_field = mono_class_get_field_from_name_full (klass, "v", NULL);
g_assert (value_field);
MonoType *t = mono_field_get_type_internal (value_field);
MonoType *native = mini_native_type_replace_type (m_class_get_byval_arg (klass));
if (t->type != native->type)
g_error ("Assembly used for native types '%s' doesn't match this runtime, %s is mapped to %s, expecting %s.\n", m_class_get_image (klass)->name, m_class_get_name (klass), mono_type_full_name (t), mono_type_full_name (native));
return TRUE;
}
return FALSE;
}
gboolean
mini_magic_is_int_type (MonoType *t)
{
if (t->type != MONO_TYPE_I && t->type != MONO_TYPE_I4 && t->type != MONO_TYPE_I8 && t->type != MONO_TYPE_U4 && t->type != MONO_TYPE_U8 && !mono_class_is_magic_int (mono_class_from_mono_type_internal (t)))
return FALSE;
return TRUE;
}
gboolean
mini_magic_is_float_type (MonoType *t)
{
if (t->type != MONO_TYPE_R4 && t->type != MONO_TYPE_R8 && !mono_class_is_magic_float (mono_class_from_mono_type_internal (t)))
return FALSE;
return TRUE;
}
MonoType*
mini_native_type_replace_type (MonoType *type)
{
MonoClass *klass;
if (type->type != MONO_TYPE_VALUETYPE)
return type;
klass = type->data.klass;
if (mono_class_is_magic_int (klass))
return m_type_is_byref (type) ? mono_class_get_byref_type (mono_defaults.int_class) : mono_get_int_type ();
if (mono_class_is_magic_float (klass))
#if TARGET_SIZEOF_VOID_P == 8
return m_type_is_byref (type) ? mono_class_get_byref_type (mono_defaults.double_class) : m_class_get_byval_arg (mono_defaults.double_class);
#else
return m_type_is_byref (type) ? mono_class_get_byref_type (mono_defaults.single_class) : m_class_get_byval_arg (mono_defaults.single_class);
#endif
return type;
}
| /**
* \file
* intrinsics for variable sized int/floats
*
* Author:
* Rodrigo Kumpera ([email protected])
*
* (C) 2013 Xamarin
* Licensed under the MIT license. See LICENSE file in the project root for full license information.
*/
#include <config.h>
#include <stdio.h>
#include "mini.h"
#include "ir-emit.h"
#include "glib.h"
typedef struct {
const char *op_name;
short op_table[4];
} IntIntrisic;
typedef struct {
short op_index;
MonoStackType big_stack_type;
MonoStackType small_stack_type;
MonoStackType stack_type;
short conv_4_to_8;
short conv_8_to_4;
short move;
short inc_op;
short dec_op;
short store_op;
short compare_op;
} MagicTypeInfo;
#if TARGET_SIZEOF_VOID_P == 8
#define OP_PT_ADD OP_LADD
#define OP_PT_SUB OP_LSUB
#define OP_PT_MUL OP_LMUL
#define OP_PT_DIV OP_LDIV
#define OP_PT_REM OP_LREM
#define OP_PT_NEG OP_LNEG
#define OP_PT_AND OP_LAND
#define OP_PT_OR OP_LOR
#define OP_PT_XOR OP_LXOR
#define OP_PT_NOT OP_LNOT
#define OP_PT_SHL OP_LSHL
#define OP_PT_SHR OP_LSHR
#define OP_PT_DIV_UN OP_LDIV_UN
#define OP_PT_REM_UN OP_LREM_UN
#define OP_PT_SHR_UN OP_LSHR_UN
#define OP_PT_ADD_IMM OP_LADD_IMM
#define OP_PT_SUB_IMM OP_LSUB_IMM
#define OP_PT_STORE_FP_MEMBASE_REG OP_STORER8_MEMBASE_REG
#define OP_PCOMPARE OP_LCOMPARE
#else
#define OP_PT_ADD OP_IADD
#define OP_PT_SUB OP_ISUB
#define OP_PT_MUL OP_IMUL
#define OP_PT_DIV OP_IDIV
#define OP_PT_REM OP_IREM
#define OP_PT_NEG OP_INEG
#define OP_PT_AND OP_IAND
#define OP_PT_OR OP_IOR
#define OP_PT_XOR OP_IXOR
#define OP_PT_NOT OP_INOT
#define OP_PT_SHL OP_ISHL
#define OP_PT_SHR OP_ISHR
#define OP_PT_DIV_UN OP_IDIV_UN
#define OP_PT_REM_UN OP_IREM_UN
#define OP_PT_SHR_UN OP_ISHR_UN
#define OP_PT_ADD_IMM OP_IADD_IMM
#define OP_PT_SUB_IMM OP_ISUB_IMM
#define OP_PT_STORE_FP_MEMBASE_REG OP_STORER4_MEMBASE_REG
#define OP_PCOMPARE OP_ICOMPARE
#endif
gsize
mini_magic_type_size (MonoCompile *cfg, MonoType *type)
{
if (type->type == MONO_TYPE_I4 || type->type == MONO_TYPE_U4)
return 4;
else if (type->type == MONO_TYPE_I8 || type->type == MONO_TYPE_U8)
return 8;
else if (type->type == MONO_TYPE_R4 && !m_type_is_byref (type) && (!cfg || cfg->r4fp))
return 4;
else if (type->type == MONO_TYPE_R8 && !m_type_is_byref (type))
return 8;
return TARGET_SIZEOF_VOID_P;
}
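/*
 * Example (illustrative): on a 64-bit target this returns 4 for
 * int32/uint32, 8 for int64/uint64/double, 4 for a non-byref float
 * when r4 values are kept in single precision (cfg->r4fp set, or no
 * cfg), and TARGET_SIZEOF_VOID_P for everything else, including the
 * magic nint/nuint/nfloat types themselves.
 */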
#ifndef DISABLE_JIT
static const IntIntrisic int_binop[] = {
{ "op_Addition", { OP_PT_ADD, OP_PT_ADD, OP_FADD, OP_RADD } },
{ "op_Subtraction", { OP_PT_SUB, OP_PT_SUB, OP_FSUB, OP_RSUB } },
{ "op_Multiply", { OP_PT_MUL, OP_PT_MUL, OP_FMUL, OP_RMUL } },
{ "op_Division", { OP_PT_DIV, OP_PT_DIV_UN, OP_FDIV, OP_RDIV } },
{ "op_Modulus", { OP_PT_REM, OP_PT_REM_UN, OP_FREM, OP_RREM } },
{ "op_BitwiseAnd", { OP_PT_AND, OP_PT_AND } },
{ "op_BitwiseOr", { OP_PT_OR, OP_PT_OR } },
{ "op_ExclusiveOr", { OP_PT_XOR, OP_PT_XOR } },
{ "op_LeftShift", { OP_PT_SHL, OP_PT_SHL } },
{ "op_RightShift", { OP_PT_SHR, OP_PT_SHR_UN } },
};
static const IntIntrisic int_unnop[] = {
{ "op_UnaryPlus", { OP_MOVE, OP_MOVE, OP_FMOVE, OP_RMOVE } },
{ "op_UnaryNegation", { OP_PT_NEG, OP_PT_NEG, OP_FNEG, OP_RNEG } },
{ "op_OnesComplement", { OP_PT_NOT, OP_PT_NOT, OP_FNOT, OP_RNOT } },
};
static const IntIntrisic int_cmpop[] = {
{ "op_Inequality", { OP_ICNEQ, OP_ICNEQ, OP_FCNEQ, OP_RCNEQ } },
{ "op_Equality", { OP_ICEQ, OP_ICEQ, OP_FCEQ, OP_RCEQ } },
{ "op_GreaterThan", { OP_ICGT, OP_ICGT_UN, OP_FCGT, OP_RCGT } },
{ "op_GreaterThanOrEqual", { OP_ICGE, OP_ICGE_UN, OP_FCLT_UN, OP_RCLT_UN } },
{ "op_LessThan", { OP_ICLT, OP_ICLT_UN, OP_FCLT, OP_RCLT } },
{ "op_LessThanOrEqual", { OP_ICLE, OP_ICLE_UN, OP_FCGT_UN, OP_RCGT_UN } },
};
static const MagicTypeInfo type_info[] = {
//nint
{ 0, STACK_I8, STACK_I4, STACK_PTR, OP_ICONV_TO_I8, OP_LCONV_TO_I4, OP_MOVE, OP_PT_ADD_IMM, OP_PT_SUB_IMM, OP_STORE_MEMBASE_REG, OP_PCOMPARE },
//nuint
{ 1, STACK_I8, STACK_I4, STACK_PTR, OP_ICONV_TO_U8, OP_LCONV_TO_U4, OP_MOVE, OP_PT_ADD_IMM, OP_PT_SUB_IMM, OP_STORE_MEMBASE_REG, OP_PCOMPARE },
//nfloat
{ 2, STACK_R8, STACK_R8, STACK_R8, OP_FCONV_TO_R8, OP_FCONV_TO_R4, OP_FMOVE, 0, 0, OP_PT_STORE_FP_MEMBASE_REG, 0 },
};
static MonoInst*
emit_narrow (MonoCompile *cfg, const MagicTypeInfo *info, int sreg)
{
MonoInst *ins;
MONO_INST_NEW (cfg, ins, info->conv_8_to_4);
ins->sreg1 = sreg;
if (info->conv_8_to_4 == OP_FCONV_TO_R4)
ins->type = cfg->r4_stack_type;
else
ins->type = info->small_stack_type;
ins->dreg = alloc_dreg (cfg, (MonoStackType)ins->type);
MONO_ADD_INS (cfg->cbb, ins);
return mono_decompose_opcode (cfg, ins);
}
static MonoInst*
emit_widen (MonoCompile *cfg, const MagicTypeInfo *info, int sreg)
{
MonoInst *ins;
if (cfg->r4fp && info->conv_4_to_8 == OP_FCONV_TO_R8)
MONO_INST_NEW (cfg, ins, OP_RCONV_TO_R8);
else
MONO_INST_NEW (cfg, ins, info->conv_4_to_8);
ins->sreg1 = sreg;
ins->type = info->big_stack_type;
ins->dreg = alloc_dreg (cfg, info->big_stack_type);
MONO_ADD_INS (cfg->cbb, ins);
return mono_decompose_opcode (cfg, ins);
}
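/*
 * Usage sketch (illustrative): emit_narrow/emit_widen bridge the 4
 * and 8 byte representations of the magic types. E.g. constructing
 * an nint from an int64 on a 32-bit target narrows via
 * OP_LCONV_TO_I4, while widening a single to an nfloat held as
 * double uses OP_RCONV_TO_R8 when r4 values live in single
 * precision registers (cfg->r4fp).
 */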
static MonoInst*
emit_intrinsics (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args, const MagicTypeInfo *info)
{
int i = 0;
const char *name = cmethod->name;
MonoInst *ins;
int type_index;
MonoStackType stack_type;
if (info->op_index == 2 && cfg->r4fp && TARGET_SIZEOF_VOID_P == 4) {
type_index = 3;
stack_type = STACK_R4;
} else {
type_index = info->op_index;
stack_type = info->stack_type;
}
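	/*
	 * Note (assumption): cfg->r4fp means r4 values are kept in
	 * single precision instead of being widened to double, so on a
	 * 32-bit target nfloat (op_index 2) switches to the r4 opcode
	 * column (type_index 3) and the STACK_R4 stack type.
	 */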
if (!strcmp ("op_Implicit", name) || !strcmp ("op_Explicit", name)) {
int source_size = mini_magic_type_size (cfg, fsig->params [0]);
int dest_size = mini_magic_type_size (cfg, fsig->ret);
switch (info->big_stack_type) {
case STACK_I8:
if (!mini_magic_is_int_type (fsig->params [0]) || !mini_magic_is_int_type (fsig->ret))
return NULL;
break;
case STACK_R8:
if (!mini_magic_is_float_type (fsig->params [0]) || !mini_magic_is_float_type (fsig->ret))
return NULL;
break;
default:
g_assert_not_reached ();
}
//4 -> 4 or 8 -> 8
if (source_size == dest_size)
return args [0];
//4 -> 8
if (source_size < dest_size)
return emit_widen (cfg, info, args [0]->dreg);
//8 -> 4
return emit_narrow (cfg, info, args [0]->dreg);
}
if (!strcmp (".ctor", name)) {
gboolean is_ldaddr = args [0]->opcode == OP_LDADDR;
int arg0 = args [1]->dreg;
int arg_size = mini_magic_type_size (cfg, fsig->params [0]);
if (arg_size > TARGET_SIZEOF_VOID_P) //8 -> 4
arg0 = emit_narrow (cfg, info, arg0)->dreg;
else if (arg_size < TARGET_SIZEOF_VOID_P) //4 -> 8
arg0 = emit_widen (cfg, info, arg0)->dreg;
		if (is_ldaddr) { /* Eliminate LDADDR if it's initializing a local var */
int dreg = ((MonoInst*)args [0]->inst_p0)->dreg;
NULLIFY_INS (args [0]);
EMIT_NEW_UNALU (cfg, ins, info->move, dreg, arg0);
cfg->has_indirection = TRUE;
} else {
EMIT_NEW_STORE_MEMBASE (cfg, ins, info->store_op, args [0]->dreg, 0, arg0);
}
return ins;
}
if (!strcmp ("op_Increment", name) || !strcmp ("op_Decrement", name)) {
gboolean inc = !strcmp ("op_Increment", name);
		/* FIXME: float inc is too complex to bother with */
		// this is broken with ints too
		// if (!info->inc_op)
return NULL;
/* We have IR for inc/dec */
MONO_INST_NEW (cfg, ins, inc ? info->inc_op : info->dec_op);
ins->dreg = alloc_dreg (cfg, (MonoStackType)info->stack_type);
ins->sreg1 = args [0]->dreg;
ins->inst_imm = 1;
ins->type = info->stack_type;
MONO_ADD_INS (cfg->cbb, ins);
return ins;
}
for (i = 0; i < sizeof (int_binop) / sizeof (IntIntrisic); ++i) {
if (!strcmp (int_binop [i].op_name, name)) {
if (!int_binop [i].op_table [info->op_index])
return NULL;
g_assert (int_binop [i].op_table [type_index]);
MONO_INST_NEW (cfg, ins, int_binop [i].op_table [type_index]);
ins->dreg = alloc_dreg (cfg, stack_type);
ins->sreg1 = args [0]->dreg;
ins->sreg2 = args [1]->dreg;
ins->type = stack_type;
MONO_ADD_INS (cfg->cbb, ins);
return mono_decompose_opcode (cfg, ins);
}
}
for (i = 0; i < sizeof (int_unnop) / sizeof (IntIntrisic); ++i) {
if (!strcmp (int_unnop [i].op_name, name)) {
g_assert (int_unnop [i].op_table [type_index]);
MONO_INST_NEW (cfg, ins, int_unnop [i].op_table [type_index]);
ins->dreg = alloc_dreg (cfg, stack_type);
ins->sreg1 = args [0]->dreg;
ins->type = stack_type;
MONO_ADD_INS (cfg->cbb, ins);
return ins;
}
}
for (i = 0; i < sizeof (int_cmpop) / sizeof (IntIntrisic); ++i) {
if (!strcmp (int_cmpop [i].op_name, name)) {
short op_cmp = int_cmpop [i].op_table [type_index];
g_assert (op_cmp);
if (info->compare_op) {
MONO_INST_NEW (cfg, ins, info->compare_op);
ins->dreg = -1;
ins->sreg1 = args [0]->dreg;
ins->sreg2 = args [1]->dreg;
MONO_ADD_INS (cfg->cbb, ins);
MONO_INST_NEW (cfg, ins, op_cmp);
ins->dreg = alloc_preg (cfg);
ins->type = STACK_I4;
MONO_ADD_INS (cfg->cbb, ins);
} else {
MONO_INST_NEW (cfg, ins, op_cmp);
guint32 fcmp_dreg = ins->dreg = alloc_ireg (cfg);
ins->sreg1 = args [0]->dreg;
ins->sreg2 = args [1]->dreg;
MONO_ADD_INS (cfg->cbb, ins);
if (op_cmp == OP_FCLT_UN || op_cmp == OP_FCGT_UN || op_cmp == OP_RCLT_UN || op_cmp == OP_RCGT_UN) {
/* we have to negate the result of this comparison:
* - op_GreaterThanOrEqual maps to NOT x OP_FCLT_UN / OP_RCLT_UN
* - op_LessThanOrEqual maps to NOT x OP_FCGT_UN / OP_RCGT_UN
*
				 * this matches the bytecode generated by C# for the
				 * same operations on float/double. The `_UN` suffix means
				 * that if an operand is NaN, the result is true. If
* OP_FCGE/OP_FCLE is used, it is mapped to instructions
* on some architectures that don't detect NaN. For
* example, on arm64 the condition `eq` doesn't respect
* NaN results of a `fcmp` instruction.
*/
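				/*
				 * Worked example (illustrative): for doubles,
				 *   op_GreaterThanOrEqual (a, b)
				 * becomes
				 *   t = OP_FCLT_UN (a, b); // 1 if a < b or either is NaN
				 *   res = (t == 0); // OP_ICOMPARE_IMM 0 + OP_CEQ below
				 * so a NaN operand makes t nonzero and res false,
				 * matching C# semantics for >= on floating point.
				 */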
MONO_INST_NEW (cfg, ins, OP_ICOMPARE_IMM);
ins->dreg = -1;
ins->sreg1 = fcmp_dreg;
ins->inst_imm = 0;
MONO_ADD_INS (cfg->cbb, ins);
MONO_INST_NEW (cfg, ins, OP_CEQ);
ins->dreg = alloc_preg (cfg);
ins->type = STACK_I4;
MONO_ADD_INS (cfg->cbb, ins);
}
}
return ins;
}
}
return NULL;
}
MonoInst*
mono_emit_native_types_intrinsics (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
{
if (mono_class_is_magic_int (cmethod->klass)) {
const char *class_name = m_class_get_name (cmethod->klass);
if (!strcmp ("nint", class_name))
return emit_intrinsics (cfg, cmethod, fsig, args, &type_info [0]);
else
return emit_intrinsics (cfg, cmethod, fsig, args, &type_info [1]);
} else if (mono_class_is_magic_float (cmethod->klass))
return emit_intrinsics (cfg, cmethod, fsig, args, &type_info [2]);
return NULL;
}
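/*
 * Caller sketch (illustrative, assuming the usual mini pipeline):
 * the IL-to-IR pass invokes this hook for calls whose declaring
 * class is one of the magic types, e.g.
 *
 *   MonoInst *ins = mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
 *   if (ins)
 *       return ins; // the intrinsic expansion replaced the call
 *
 * A NULL return means the operator has no inline expansion and the
 * call is compiled normally.
 */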
#endif /* !DISABLE_JIT */
static gboolean
mono_class_is_magic_assembly (MonoClass *klass)
{
const char *aname = m_class_get_image (klass)->assembly_name;
if (!aname)
return FALSE;
if (!strcmp ("Xamarin.iOS", aname))
return TRUE;
if (!strcmp ("Xamarin.Mac", aname))
return TRUE;
if (!strcmp ("Xamarin.WatchOS", aname))
return TRUE;
if (!strcmp ("Xamarin.MacCatalyst", aname))
return TRUE;
if (!strcmp ("Microsoft.iOS", aname))
return TRUE;
if (!strcmp ("Microsoft.macOS", aname))
return TRUE;
if (!strcmp ("Microsoft.watchOS", aname))
return TRUE;
if (!strcmp ("Microsoft.MacCatalyst", aname))
return TRUE;
/* regression test suite */
if (!strcmp ("builtin-types", aname))
return TRUE;
if (!strcmp ("mini_tests", aname))
return TRUE;
return FALSE;
}
gboolean
mono_class_is_magic_int (MonoClass *klass)
{
static MonoClass *magic_nint_class;
static MonoClass *magic_nuint_class;
if (klass == magic_nint_class)
return TRUE;
if (klass == magic_nuint_class)
return TRUE;
if (magic_nint_class && magic_nuint_class)
return FALSE;
if (!mono_class_is_magic_assembly (klass))
return FALSE;
if (strcmp ("System", m_class_get_name_space (klass)) != 0)
return FALSE;
if (strcmp ("nint", m_class_get_name (klass)) == 0) {
magic_nint_class = klass;
return TRUE;
}
if (strcmp ("nuint", m_class_get_name (klass)) == 0){
magic_nuint_class = klass;
return TRUE;
}
return FALSE;
}
gboolean
mono_class_is_magic_float (MonoClass *klass)
{
static MonoClass *magic_nfloat_class;
if (klass == magic_nfloat_class)
return TRUE;
if (magic_nfloat_class)
return FALSE;
if (!mono_class_is_magic_assembly (klass))
return FALSE;
if (strcmp ("System", m_class_get_name_space (klass)) != 0 && strcmp ("ObjCRuntime", m_class_get_name_space (klass)) != 0)
return FALSE;
if (strcmp ("nfloat", m_class_get_name (klass)) == 0) {
magic_nfloat_class = klass;
/* Assert that we are using the matching assembly */
MonoClassField *value_field = mono_class_get_field_from_name_full (klass, "v", NULL);
g_assert (value_field);
MonoType *t = mono_field_get_type_internal (value_field);
MonoType *native = mini_native_type_replace_type (m_class_get_byval_arg (klass));
if (t->type != native->type)
g_error ("Assembly used for native types '%s' doesn't match this runtime, %s is mapped to %s, expecting %s.\n", m_class_get_image (klass)->name, m_class_get_name (klass), mono_type_full_name (t), mono_type_full_name (native));
return TRUE;
}
return FALSE;
}
gboolean
mini_magic_is_int_type (MonoType *t)
{
if (t->type != MONO_TYPE_I && t->type != MONO_TYPE_I4 && t->type != MONO_TYPE_I8 && t->type != MONO_TYPE_U4 && t->type != MONO_TYPE_U8 && !mono_class_is_magic_int (mono_class_from_mono_type_internal (t)))
return FALSE;
return TRUE;
}
gboolean
mini_magic_is_float_type (MonoType *t)
{
if (t->type != MONO_TYPE_R4 && t->type != MONO_TYPE_R8 && !mono_class_is_magic_float (mono_class_from_mono_type_internal (t)))
return FALSE;
return TRUE;
}
MonoType*
mini_native_type_replace_type (MonoType *type)
{
MonoClass *klass;
if (type->type != MONO_TYPE_VALUETYPE)
return type;
klass = type->data.klass;
if (mono_class_is_magic_int (klass))
return m_type_is_byref (type) ? mono_class_get_byref_type (mono_defaults.int_class) : mono_get_int_type ();
if (mono_class_is_magic_float (klass))
#if TARGET_SIZEOF_VOID_P == 8
return m_type_is_byref (type) ? mono_class_get_byref_type (mono_defaults.double_class) : m_class_get_byval_arg (mono_defaults.double_class);
#else
return m_type_is_byref (type) ? mono_class_get_byref_type (mono_defaults.single_class) : m_class_get_byval_arg (mono_defaults.single_class);
#endif
return type;
}
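/*
 * Example (illustrative): on a 64-bit target
 *   nint / nuint -> native int (pointer width)
 *   nfloat       -> double
 * while 32-bit targets map nfloat to single instead; byref flavors
 * are preserved via mono_class_get_byref_type.
 */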
| 1 |
dotnet/runtime | 66,006 | Revert "[mono][jit] Remove support for -O=-float32, i.e. treating r4 values a…" | Reverts this as x86 still uses it.
| vargaz | 2022-03-01T15:15:53Z | 2022-03-01T20:09:47Z | af71404d9a3b38e63408af47cc847ac1a1fcf4e1 | 2dd232a53c38ac874b15fe504df275b660988294 | Revert "[mono][jit] Remove support for -O=-float32, i.e. treating r4 values a…". Reverts this as x86 still uses it.
| ./src/mono/mono/mini/mini-s390x.c | /**
* @file
* @author - Neale Ferguson ([email protected])
*
* @section description
* Function - S/390 backend for the Mono code generator.
*
* Date - January, 2004
*
* Derivation - From mini-x86 & mini-ppc by -
* Paolo Molaro ([email protected])
* Dietmar Maurer ([email protected])
*
*/
/*------------------------------------------------------------------*/
/* D e f i n e s */
/*------------------------------------------------------------------*/
#define MAX_ARCH_DELEGATE_PARAMS 10
#define EMIT_COND_BRANCH(ins,cond) \
{ \
if (ins->inst_true_bb->native_offset) { \
int displace; \
displace = ((cfg->native_code + \
ins->inst_true_bb->native_offset) - code) / 2; \
if (s390_is_imm16(displace)) { \
s390_brc (code, cond, displace); \
} else { \
s390_jcl (code, cond, displace); \
} \
} else { \
mono_add_patch_info_rel (cfg, code - cfg->native_code, \
MONO_PATCH_INFO_BB, ins->inst_true_bb, \
MONO_R_S390_RELINS); \
s390_jcl (code, cond, 0); \
} \
}
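/*
 * Illustrative note: the displacement is counted in halfwords
 * (bytes / 2). When the target block is already emitted and the
 * displacement fits in 16 bits, the short form s390_brc is used,
 * otherwise the long form s390_jcl; forward branches to unemitted
 * blocks always reserve the long form and are patched later via
 * MONO_PATCH_INFO_BB.
 */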
#define EMIT_UNCOND_BRANCH(ins) \
{ \
if (ins->inst_target_bb->native_offset) { \
int displace; \
displace = ((cfg->native_code + \
ins->inst_target_bb->native_offset) - code) / 2; \
if (s390_is_imm16(displace)) { \
s390_brc (code, S390_CC_UN, displace); \
} else { \
s390_jcl (code, S390_CC_UN, displace); \
} \
} else { \
mono_add_patch_info_rel (cfg, code - cfg->native_code, \
MONO_PATCH_INFO_BB, ins->inst_target_bb, \
MONO_R_S390_RELINS); \
s390_jcl (code, S390_CC_UN, 0); \
} \
}
#define EMIT_COND_SYSTEM_EXCEPTION(cond,exc_name) \
do { \
mono_add_patch_info (cfg, code - cfg->native_code, \
MONO_PATCH_INFO_EXC, exc_name); \
s390_jcl (code, cond, 0); \
} while (0);
#define EMIT_COMP_AND_BRANCH(ins, cab, cmp) \
{ \
if (ins->inst_true_bb->native_offset) { \
int displace; \
displace = ((cfg->native_code + \
ins->inst_true_bb->native_offset) - code) / 2; \
if (s390_is_imm16(displace)) { \
s390_##cab (code, ins->sreg1, ins->sreg2, \
ins->sreg3, displace); \
} else { \
s390_##cmp (code, ins->sreg1, ins->sreg2); \
displace = ((cfg->native_code + \
ins->inst_true_bb->native_offset) - code) / 2; \
s390_jcl (code, ins->sreg3, displace); \
} \
} else { \
s390_##cmp (code, ins->sreg1, ins->sreg2); \
mono_add_patch_info_rel (cfg, code - cfg->native_code, \
MONO_PATCH_INFO_BB, ins->inst_true_bb, \
MONO_R_S390_RELINS); \
s390_jcl (code, ins->sreg3, 0); \
} \
}
#define EMIT_COMP_AND_BRANCH_IMM(ins, cab, cmp, lat, logical) \
{ \
if (ins->inst_true_bb->native_offset) { \
int displace; \
if ((ins->backend.data == 0) && (!logical)) { \
s390_##lat (code, ins->sreg1, ins->sreg1); \
displace = ((cfg->native_code + \
ins->inst_true_bb->native_offset) - code) / 2; \
if (s390_is_imm16(displace)) { \
s390_brc (code, ins->sreg3, displace); \
} else { \
s390_jcl (code, ins->sreg3, displace); \
} \
} else { \
S390_SET (code, s390_r0, ins->backend.data); \
displace = ((cfg->native_code + \
ins->inst_true_bb->native_offset) - code) / 2; \
if (s390_is_imm16(displace)) { \
s390_##cab (code, ins->sreg1, s390_r0, \
ins->sreg3, displace); \
} else { \
s390_##cmp (code, ins->sreg1, s390_r0); \
displace = ((cfg->native_code + \
ins->inst_true_bb->native_offset) - code) / 2; \
s390_jcl (code, ins->sreg3, displace); \
} \
} \
} else { \
if ((ins->backend.data == 0) && (!logical)) { \
s390_##lat (code, ins->sreg1, ins->sreg1); \
} else { \
S390_SET (code, s390_r0, ins->backend.data); \
s390_##cmp (code, ins->sreg1, s390_r0); \
} \
mono_add_patch_info_rel (cfg, code - cfg->native_code, \
MONO_PATCH_INFO_BB, ins->inst_true_bb, \
MONO_R_S390_RELINS); \
s390_jcl (code, ins->sreg3, 0); \
} \
}
#define CHECK_SRCDST_COM \
if (ins->dreg == ins->sreg2) { \
src2 = ins->sreg1; \
} else { \
src2 = ins->sreg2; \
if (ins->dreg != ins->sreg1) { \
s390_lgr (code, ins->dreg, ins->sreg1); \
} \
}
#define CHECK_SRCDST_NCOM \
if (ins->dreg == ins->sreg2) { \
src2 = s390_r13; \
s390_lgr (code, s390_r13, ins->sreg2); \
} else { \
src2 = ins->sreg2; \
} \
if (ins->dreg != ins->sreg1) { \
s390_lgr (code, ins->dreg, ins->sreg1); \
}
#define CHECK_SRCDST_COM_I \
if (ins->dreg == ins->sreg2) { \
src2 = ins->sreg1; \
} else { \
src2 = ins->sreg2; \
if (ins->dreg != ins->sreg1) { \
s390_lgfr (code, ins->dreg, ins->sreg1); \
} \
}
#define CHECK_SRCDST_NCOM_I \
if (ins->dreg == ins->sreg2) { \
src2 = s390_r13; \
s390_lgfr (code, s390_r13, ins->sreg2); \
} else { \
src2 = ins->sreg2; \
} \
if (ins->dreg != ins->sreg1) { \
s390_lgfr (code, ins->dreg, ins->sreg1); \
}
#define CHECK_SRCDST_COM_F \
if (ins->dreg == ins->sreg2) { \
src2 = ins->sreg1; \
} else { \
src2 = ins->sreg2; \
if (ins->dreg != ins->sreg1) { \
s390_ldr (code, ins->dreg, ins->sreg1); \
} \
}
#define CHECK_SRCDST_NCOM_F(op) \
if (ins->dreg == ins->sreg2) { \
s390_lgdr (code, s390_r0, s390_f15); \
s390_ldr (code, s390_f15, ins->sreg2); \
if (ins->dreg != ins->sreg1) { \
s390_ldr (code, ins->dreg, ins->sreg1); \
} \
s390_ ## op (code, ins->dreg, s390_f15); \
s390_ldgr (code, s390_f15, s390_r0); \
} else { \
if (ins->dreg != ins->sreg1) { \
s390_ldr (code, ins->dreg, ins->sreg1); \
} \
s390_ ## op (code, ins->dreg, ins->sreg2); \
}
#define CHECK_SRCDST_NCOM_FR(op, m) \
s390_lgdr (code, s390_r1, s390_f14); \
if (ins->dreg == ins->sreg2) { \
s390_lgdr (code, s390_r0, s390_f15); \
s390_ldr (code, s390_f15, ins->sreg2); \
if (ins->dreg != ins->sreg1) { \
s390_ldr (code, ins->dreg, ins->sreg1); \
} \
s390_ ## op (code, ins->dreg, s390_f15, m, s390_f14); \
s390_ldgr (code, s390_f15, s390_r0); \
} else { \
if (ins->dreg != ins->sreg1) { \
s390_ldr (code, ins->dreg, ins->sreg1); \
} \
s390_ ## op (code, ins->dreg, ins->sreg2, m, s390_f14); \
} \
s390_ldgr (code, s390_f14, s390_r1);
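/*
 * Illustrative note on the CHECK_SRCDST_* helpers: most s390x
 * arithmetic is two-address (op dreg, src2 computes dreg = dreg OP
 * src2), so each macro first makes dreg hold sreg1. Commutative ops
 * simply swap operands when dreg == sreg2; non-commutative ops stash
 * sreg2 in a scratch register (r13, or f15/f14 for floating point)
 * first, e.g. dreg = sreg1 - sreg2 with dreg == sreg2 becomes
 *   r13 = sreg2; dreg = sreg1; dreg -= r13;
 */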
#undef DEBUG
#define DEBUG(a) if (cfg->verbose_level > 1) a
#define MAX_EXC 16
#define S390_TRACE_STACK_SIZE (5*sizeof(gpointer)+4*sizeof(gdouble))
#define MAX(a, b) ((a) > (b) ? (a) : (b))
/*
* imt trampoline size values
*/
#define CMP_SIZE 24
#define LOADCON_SIZE 20
#define LOAD_SIZE 6
#define BR_SIZE 2
#define JUMP_SIZE 6
#define ENABLE_WRONG_METHOD_CHECK 0
/*========================= End of Defines =========================*/
/*------------------------------------------------------------------*/
/* I n c l u d e s */
/*------------------------------------------------------------------*/
#include "mini.h"
#include <string.h>
#include <sys/types.h>
#include <unistd.h>
#include <mono/metadata/abi-details.h>
#include <mono/metadata/appdomain.h>
#include <mono/metadata/debug-helpers.h>
#include <mono/metadata/profiler-private.h>
#include <mono/utils/mono-error.h>
#include <mono/utils/mono-error-internals.h>
#include <mono/utils/mono-math.h>
#include <mono/utils/mono-mmap.h>
#include <mono/utils/mono-hwcap.h>
#include <mono/utils/mono-threads.h>
#include <mono/utils/unlocked.h>
#include "mini-s390x.h"
#include "cpu-s390x.h"
#include "jit-icalls.h"
#include "ir-emit.h"
#include "mini-gc.h"
#include "aot-runtime.h"
#include "mini-runtime.h"
/*========================= End of Includes ========================*/
/*------------------------------------------------------------------*/
/* T y p e d e f s */
/*------------------------------------------------------------------*/
/**
* Track stack use
*/
typedef struct {
guint stack_size,
code_size,
parm_size,
retStruct;
} size_data;
/**
* ABI - register use in calls etc.
*/
typedef enum {
RegTypeGeneral,
RegTypeBase,
RegTypeFP,
RegTypeFPR4,
RegTypeStructByVal,
RegTypeStructByValInFP,
RegTypeStructByAddr
} ArgStorage;
/**
* Track method arguments
*/
typedef struct {
gint32 offset; /* offset from caller's stack */
guint16 vtsize; /* in param area */
guint8 reg;
ArgStorage regtype;
guint32 size; /* Size of structure used by RegTypeStructByVal */
gint32 type; /* Data type of argument */
} ArgInfo;
/**
* Call information - parameters and stack use for s390x ABI
*/
struct CallInfo {
int nargs;
int lastgr;
guint32 stack_usage;
guint32 struct_ret;
ArgInfo ret;
ArgInfo sigCookie;
size_data sz;
int vret_arg_index;
MonoMethodSignature *sig;
ArgInfo args [1];
};
/**
* Registers used in parameter passing
*/
typedef struct {
gint64 gr[5]; /* R2-R6 */
gdouble fp[3]; /* F0-F2 */
} __attribute__ ((__packed__)) RegParm;
/*========================= End of Typedefs ========================*/
/*------------------------------------------------------------------*/
/* P r o t o t y p e s */
/*------------------------------------------------------------------*/
static guint8 * backUpStackPtr(MonoCompile *, guint8 *);
static void add_general (guint *, size_data *, ArgInfo *);
static void add_stackParm (guint *, size_data *, ArgInfo *, gint, ArgStorage);
static void add_float (guint *, size_data *, ArgInfo *, gboolean);
static CallInfo * get_call_info (MonoMemPool *, MonoMethodSignature *);
static guchar * emit_float_to_int (MonoCompile *, guchar *, int, int, int, gboolean);
static __inline__ void emit_unwind_regs(MonoCompile *, guint8 *, int, int, long);
static void compare_and_branch(MonoBasicBlock *, MonoInst *, int, gboolean);
static __inline__ guint8 * emit_call(MonoCompile *, guint8 *, MonoJumpInfoType, gconstpointer);
static guint8 * emit_thunk(guint8 *, gconstpointer);
static void create_thunk(MonoCompile *, guint8 *, guint8 *, gpointer);
static void update_thunk(MonoCompile *, guint8 *, gpointer);
static void emit_patch_full (MonoCompile *, MonoJumpInfo *, guint8 *, gpointer, int);
/*========================= End of Prototypes ======================*/
/*------------------------------------------------------------------*/
/* G l o b a l V a r i a b l e s */
/*------------------------------------------------------------------*/
/**
* The single-step trampoline
*/
static gpointer ss_trampoline;
/**
* The breakpoint trampoline
*/
static gpointer bp_trampoline;
/**
* Constants used in debugging - map general register names
*/
static const char * grNames[] = {
"s390_r0", "s390_sp", "s390_r2", "s390_r3", "s390_r4",
"s390_r5", "s390_r6", "s390_r7", "s390_r8", "s390_r9",
"s390_r10", "s390_r11", "s390_r12", "s390_r13", "s390_r14",
"s390_r15"
};
/**
* Constants used in debugging - map floating point register names
*/
static const char * fpNames[] = {
"s390_f0", "s390_f1", "s390_f2", "s390_f3", "s390_f4",
"s390_f5", "s390_f6", "s390_f7", "s390_f8", "s390_f9",
"s390_f10", "s390_f11", "s390_f12", "s390_f13", "s390_f14",
"s390_f15"
};
/**
* Constants used in debugging - map vector register names
*/
static const char * vrNames[] = {
"vr0", "vr1", "vr2", "vr3", "vr4", "vr5", "vr6", "vr7",
"vr8", "vr9", "vr10", "vr11", "vr12", "vr13", "vr14", "vr15",
"vr16", "vr17", "vr18", "vr19", "vr20", "vr21", "vr22", "vr23",
"vr24", "vr25", "vr26", "vr27", "vr28", "vr29", "vr30", "vr31"
};
#if 0
/**
* Constants used in debugging - ABI register types
*/
static const char *typeParm[] = { "General", "Base", "FPR8", "FPR4", "StructByVal",
"StructByValInFP", "ByAddr"};
#endif
/*====================== End of Global Variables ===================*/
static GENERATE_TRY_GET_CLASS_WITH_CACHE (math, "System", "Math")
static GENERATE_TRY_GET_CLASS_WITH_CACHE (mathf, "System", "MathF")
/**
*
* @brief Return general register name
*
* @param[in] register number
* @returns Name of register
*
* Returns the name of the general register specified by the input parameter.
*/
const char*
mono_arch_regname (int reg)
{
if (reg >= 0 && reg < 16)
return grNames [reg];
else
return "unknown";
}
/*========================= End of Function ========================*/
/**
*
* @brief Return floating point register name
*
* @param[in] register number
* @returns Name of register
*
* Returns the name of the FP register specified by the input parameter.
*/
const char*
mono_arch_fregname (int reg)
{
if (reg >= 0 && reg < 16)
return fpNames [reg];
else
return "unknown";
}
/*========================= End of Function ========================*/
/**
*
* @brief Return vector register name
*
* @param[in] register number
* @returns Name of register
*
* Returns the name of the vector register specified by the input parameter.
*/
const char *
mono_arch_xregname (int reg)
{
if (reg < s390_VR_NREG)
return vrNames [reg];
else
return "unknown";
}
/*========================= End of Function ========================*/
/**
*
* @brief Architecture-specific return argument information
*
* @param[in] @csig - Method signature
* @param[in] @param_count - Number of parameters to consider
* @param[out] @arg_info - An array in which to store results
* @returns Size of the activation frame
*
* Gathers information on parameters such as size, alignment, and padding.
 * arg_info should be large enough to hold param_count + 1 entries.
*/
int
mono_arch_get_argument_info (MonoMethodSignature *csig,
int param_count,
MonoJitArgumentInfo *arg_info)
{
int k, frame_size = 0;
int size, align, pad;
int offset = 8;
if (MONO_TYPE_ISSTRUCT (csig->ret)) {
frame_size += sizeof (target_mgreg_t);
offset += 8;
}
arg_info [0].offset = offset;
if (csig->hasthis) {
frame_size += sizeof (target_mgreg_t);
offset += 8;
}
arg_info [0].size = frame_size;
for (k = 0; k < param_count; k++) {
if (csig->pinvoke && !csig->marshalling_disabled)
size = mono_type_native_stack_size (csig->params [k], (guint32 *) &align);
else
size = mini_type_stack_size (csig->params [k], &align);
frame_size += pad = (align - (frame_size & (align - 1))) & (align - 1);
arg_info [k].pad = pad;
frame_size += size;
arg_info [k + 1].pad = 0;
arg_info [k + 1].size = size;
offset += pad;
arg_info [k + 1].offset = offset;
offset += size;
}
align = MONO_ARCH_FRAME_ALIGNMENT;
frame_size += pad = (align - (frame_size & (align - 1))) & (align - 1);
arg_info [k].pad = pad;
return frame_size;
}
/*========================= End of Function ========================*/
/**
*
* @brief Emit an s390x move operation
*
* @param[in] @cfg - MonoCompile control block
* @param[in] @dr - Destination register
* @param[in] @ins - Current instruction
* @param[in] @src - Instruction representing the source of move
*
* Emit a move instruction for VT parameters
*/
static void __inline__
emit_new_move(MonoCompile *cfg, int dr, MonoInst *ins, MonoInst *src)
{
MonoCallInst *call = (MonoCallInst *) ins->inst_p0;
ArgInfo *ainfo = (ArgInfo *) ins->inst_p1;
MonoInst *vtcopy = mono_compile_create_var (cfg, m_class_get_byval_arg (src->klass), OP_LOCAL);
MonoInst *load;
MonoInst *move;
int size;
if (call->signature->pinvoke && !call->signature->marshalling_disabled) {
size = mono_type_native_stack_size (m_class_get_byval_arg (src->klass), NULL);
vtcopy->backend.is_pinvoke = 1;
} else {
size = ins->backend.size;
}
EMIT_NEW_VARLOADA (cfg, load, vtcopy, vtcopy->inst_vtype);
MONO_INST_NEW (cfg, move, OP_S390_MOVE);
move->sreg2 = load->dreg;
move->inst_offset = 0;
move->sreg1 = src->dreg;
move->inst_imm = 0;
move->backend.size = size;
MONO_ADD_INS (cfg->cbb, move);
if (dr != 0)
MONO_EMIT_NEW_UNALU(cfg, OP_MOVE, dr, load->dreg);
else
MONO_EMIT_NEW_STORE_MEMBASE(cfg, OP_STORE_MEMBASE_REG,
ainfo->reg, ainfo->offset, load->dreg);
}
/*========================= End of Function ========================*/
/**
*
* @brief Generate output sequence for VT register parameters
*
* @param[in] @cfg - MonoCompile control block
* @param[in] @dr - Destination register
* @param[in] @ins - Current instruction
* @param[in] @src - Instruction representing the source
*
* Emit the output of structures for calls whose address is placed in a register.
*/
static void __inline__
emit_outarg_vtr(MonoCompile *cfg, MonoInst *ins, MonoInst *src)
{
MonoCallInst *call = (MonoCallInst *) ins->inst_p0;
ArgInfo *ainfo = (ArgInfo *) ins->inst_p1;
int reg = mono_alloc_preg (cfg);
switch (ins->backend.size) {
case 0:
MONO_EMIT_NEW_ICONST(cfg, reg, 0);
break;
case 1:
MONO_EMIT_NEW_LOAD_MEMBASE_OP(cfg, OP_LOADU1_MEMBASE,
reg, src->dreg, 0);
break;
case 2:
MONO_EMIT_NEW_LOAD_MEMBASE_OP(cfg, OP_LOADU2_MEMBASE,
reg, src->dreg, 0);
break;
case 4:
MONO_EMIT_NEW_LOAD_MEMBASE_OP(cfg, OP_LOADI4_MEMBASE,
reg, src->dreg, 0);
break;
case 8:
MONO_EMIT_NEW_LOAD_MEMBASE_OP(cfg, OP_LOADI8_MEMBASE,
reg, src->dreg, 0);
break;
default:
emit_new_move (cfg, reg, ins, src);
}
mono_call_inst_add_outarg_reg(cfg, call, reg, ainfo->reg, FALSE);
}
/*========================= End of Function ========================*/
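/*
 * Dispatch sketch (illustrative): value types of size 0/1/2/4/8 are
 * loaded directly into the argument register (sub-word sizes via the
 * unsigned OP_LOADU1/OP_LOADU2 loads), while any other size goes
 * through emit_new_move, which copies the struct to a local and
 * passes its address instead.
 */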
/**
*
* @brief Generate output sequence for VT stack parameters
*
* @param[in] @cfg - MonoCompile control block
* @param[in] @dr - Destination register
* @param[in] @ins - Current instruction
* @param[in] @src - Instruction representing the source
*
* Emit the output of structures for calls whose address is placed on the stack
*/
static void __inline__
emit_outarg_vts(MonoCompile *cfg, MonoInst *ins, MonoInst *src)
{
ArgInfo *ainfo = (ArgInfo *) ins->inst_p1;
int tmpr = mono_alloc_preg (cfg);
switch (ins->backend.size) {
case 0:
MONO_EMIT_NEW_ICONST(cfg, tmpr, 0);
MONO_EMIT_NEW_STORE_MEMBASE(cfg, OP_STORE_MEMBASE_REG,
ainfo->reg, ainfo->offset, tmpr);
break;
case 1:
MONO_EMIT_NEW_LOAD_MEMBASE_OP(cfg, OP_LOADU1_MEMBASE,
tmpr, src->dreg, 0);
MONO_EMIT_NEW_STORE_MEMBASE(cfg, OP_STORE_MEMBASE_REG,
ainfo->reg, ainfo->offset, tmpr);
break;
case 2:
MONO_EMIT_NEW_LOAD_MEMBASE_OP(cfg, OP_LOADU2_MEMBASE,
tmpr, src->dreg, 0);
MONO_EMIT_NEW_STORE_MEMBASE(cfg, OP_STORE_MEMBASE_REG,
ainfo->reg, ainfo->offset, tmpr);
break;
case 4:
MONO_EMIT_NEW_LOAD_MEMBASE_OP(cfg, OP_LOADI4_MEMBASE,
tmpr, src->dreg, 0);
MONO_EMIT_NEW_STORE_MEMBASE(cfg, OP_STORE_MEMBASE_REG,
ainfo->reg, ainfo->offset, tmpr);
break;
case 8:
MONO_EMIT_NEW_LOAD_MEMBASE_OP(cfg, OP_LOADI8_MEMBASE,
tmpr, src->dreg, 0);
MONO_EMIT_NEW_STORE_MEMBASE(cfg, OP_STORE_MEMBASE_REG,
ainfo->reg, ainfo->offset, tmpr);
break;
default: {
emit_new_move (cfg, 0, ins, src);
}
}
}
/*========================= End of Function ========================*/
/**
*
* @brief Generate unwind information for range of registers
*
* @param[in] @cfg - MonoCompile control block
* @param[in] @code - Location of code
* @param[in] @start - Starting register
* @param[in] @end - Ending register
* @param[in] @offset - Offset in stack
*
* Emit unwind information for a range of registers.
*/
static void __inline__
emit_unwind_regs(MonoCompile *cfg, guint8 *code, int start, int end, long offset)
{
int i;
for (i = start; i <= end; i++) {
mono_emit_unwind_op_offset (cfg, code, i, offset);
mini_gc_set_slot_type_from_cfa (cfg, offset, SLOT_NOREF);
offset += sizeof(gulong);
}
}
/*========================= End of Function ========================*/
/**
*
* @brief Get previous stack frame pointer
*
* @param[in] @cfg - MonoCompile control block
* @param[in] @code - Location of code
* @returns Previous stack pointer
*
* Retrieve the stack pointer of the previous frame
*/
static guint8 *
backUpStackPtr(MonoCompile *cfg, guint8 *code)
{
int stackSize = cfg->stack_usage;
if (cfg->flags & MONO_CFG_HAS_ALLOCA) {
s390_lg (code, STK_BASE, 0, STK_BASE, 0);
} else {
if (cfg->frame_reg != STK_BASE)
s390_lgr (code, STK_BASE, cfg->frame_reg);
if (s390_is_imm16 (stackSize)) {
s390_aghi (code, STK_BASE, stackSize);
} else if (s390_is_imm32 (stackSize)) {
s390_agfi (code, STK_BASE, stackSize);
} else {
while (stackSize > INT_MAX) {
s390_aghi (code, STK_BASE, INT_MAX);
stackSize -= INT_MAX;
}
s390_agfi (code, STK_BASE, stackSize);
}
}
return (code);
}
/*========================= End of Function ========================*/
/**
*
* @brief Architecture-specific CPU initialization
*
* Perform CPU specific initialization to execute managed code.
*/
void
mono_arch_cpu_init (void)
{
}
/*========================= End of Function ========================*/
/**
*
 * @brief Architecture-specific initialization
 *
 * Initialize architecture-specific code:
* - Define trigger pages for debugger
* - Generate breakpoint code stub
*/
void
mono_arch_init (void)
{
mono_set_partial_sharing_supported (FALSE);
if (!mono_aot_only)
bp_trampoline = mini_get_breakpoint_trampoline();
}
/*========================= End of Function ========================*/
/**
*
 * @brief Architecture-specific cleanup code
 *
 * Clean up before termination:
* - Free the trigger pages
*/
void
mono_arch_cleanup (void)
{
}
/*========================= End of Function ========================*/
/**
*
* @brief Architecture-specific check for fast TLS access
*
* @returns True
*
 * Returns whether we use fast, inlined managed access to thread-local
 * storage instead of falling back to native code.
*/
gboolean
mono_arch_have_fast_tls (void)
{
return TRUE;
}
/*========================= End of Function ========================*/
/**
*
* @brief Architecture-specific check of mono optimizations
*
* @param[out] @exclude_mask - Optimization exclusion mask
* @returns Optimizations supported on this CPU
*
* Returns the optimizations supported on this CPU
*/
guint32
mono_arch_cpu_optimizations (guint32 *exclude_mask)
{
guint32 opts = 0;
/*
* No s390-specific optimizations yet
*/
*exclude_mask = 0;
return opts;
}
/*========================= End of Function ========================*/
/**
*
* @brief Architecture-specific allocation of integer variables
*
* @param[in] @cfg - MonoCompile control block
* @returns A list of integer variables
*
* Returns a list of allocatable integer variables
*/
GList *
mono_arch_get_allocatable_int_vars (MonoCompile *cfg)
{
GList *vars = NULL;
int i;
for (i = 0; i < cfg->num_varinfo; i++) {
MonoInst *ins = cfg->varinfo [i];
MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
/* unused vars */
if (vmv->range.first_use.abs_pos >= vmv->range.last_use.abs_pos)
continue;
if (ins->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT) ||
(ins->opcode != OP_LOCAL && ins->opcode != OP_ARG))
continue;
/* we can only allocate 32 bit values */
if (mono_is_regsize_var(ins->inst_vtype)) {
g_assert (MONO_VARINFO (cfg, i)->reg == -1);
g_assert (i == vmv->idx);
vars = mono_varlist_insert_sorted (cfg, vars, vmv, FALSE);
}
}
return vars;
}
/*========================= End of Function ========================*/
/**
*
* @brief Architecture-specific determination of usable integer registers
*
* @param[in] @cfg - MonoCompile control block
* @returns A list of allocatable registers
*
* Returns a list of usable integer registers
*/
GList *
mono_arch_get_global_int_regs (MonoCompile *cfg)
{
GList *regs = NULL;
MonoMethodHeader *header;
int i, top = 13;
header = cfg->header;
if ((cfg->flags & MONO_CFG_HAS_ALLOCA) || header->num_clauses)
cfg->frame_reg = s390_r11;
/* FIXME: s390_r12 is reserved for bkchain_reg. Only reserve it if needed */
top = 12;
for (i = 8; i < top; ++i) {
if ((cfg->frame_reg != i) &&
//!((cfg->uses_rgctx_reg) && (i == MONO_ARCH_IMT_REG)))
(i != MONO_ARCH_IMT_REG))
regs = g_list_prepend (regs, GUINT_TO_POINTER (i));
}
return regs;
}
/*========================= End of Function ========================*/
/**
*
* @brief Architecture-specific flush of instruction cache
*
* @param[in] @code - Start of code
* @param[in] @size - Amount to be flushed
*
* Flush the CPU icache.
*/
void
mono_arch_flush_icache (guint8 *code, gint size)
{
}
/*========================= End of Function ========================*/
/**
*
* @brief Add an integer register parameter
*
* @param[in] @gr - Address of current register number
* @param[in] @sz - Stack size data
* @param[in] @ainfo - Parameter information
*
* Assign a parameter to a general register or spill it onto the stack
*/
static void inline
add_general (guint *gr, size_data *sz, ArgInfo *ainfo)
{
if (*gr > S390_LAST_ARG_REG) {
sz->stack_size = S390_ALIGN(sz->stack_size, sizeof(long));
ainfo->offset = sz->stack_size;
ainfo->reg = STK_BASE;
ainfo->regtype = RegTypeBase;
sz->stack_size += sizeof(long);
sz->code_size += 12;
} else {
ainfo->reg = *gr;
ainfo->regtype = RegTypeGeneral;
sz->code_size += 8;
}
(*gr) ++;
}
/*========================= End of Function ========================*/
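/*
 * Example (illustrative): integer arguments are assigned to r2..r6
 * (up to S390_LAST_ARG_REG) in order; a sixth integer argument no
 * longer fits and gets RegTypeBase with an 8-byte aligned offset in
 * the caller-visible stack area.
 */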
/**
*
* @brief Add a structure variable to parameter list
*
* @param[in] @gr - Address of current register number
* @param[in] @sz - Stack size data
* @param[in] @ainfo - Parameter information
* @param[in] @size - Size of parameter
* @param[in] @type - Type of stack parameter (reference or value)
*
* Assign a structure address to a register or spill it onto the stack
*/
static void inline
add_stackParm (guint *gr, size_data *sz, ArgInfo *ainfo, gint size, ArgStorage type)
{
if (*gr > S390_LAST_ARG_REG) {
sz->stack_size = S390_ALIGN(sz->stack_size, sizeof(long));
ainfo->reg = STK_BASE;
ainfo->offset = sz->stack_size;
sz->stack_size += sizeof (target_mgreg_t);
sz->parm_size += sizeof(gpointer);
} else {
ainfo->reg = *gr;
}
(*gr) ++;
ainfo->regtype = type;
ainfo->size = size;
ainfo->vtsize = size;
sz->parm_size += size;
}
/*========================= End of Function ========================*/
/**
*
* @brief Add a floating point register parameter
*
* @param[in] @fr - Address of current register number
* @param[in] @sz - Stack size data
* @param[in] @ainfo - Parameter information
* @param[in] @isDouble - Precision of parameter
*
* Assign a parameter to a FP register or spill it onto the stack
*/
static void inline
add_float (guint *fr, size_data *sz, ArgInfo *ainfo, gboolean isDouble)
{
if ((*fr) <= S390_LAST_FPARG_REG) {
if (isDouble)
ainfo->regtype = RegTypeFP;
else
ainfo->regtype = RegTypeFPR4;
ainfo->reg = *fr;
sz->code_size += 4;
(*fr) += 2;
}
else {
ainfo->offset = sz->stack_size;
ainfo->reg = STK_BASE;
sz->code_size += 4;
sz->stack_size += sizeof(double);
ainfo->regtype = RegTypeBase;
}
}
/*========================= End of Function ========================*/
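/*
 * Example (illustrative): since *fr advances by 2, FP arguments land
 * in f0, f2, f4, ... up to S390_LAST_FPARG_REG (f6 in the s390x
 * ABI); later FP arguments spill to the stack as 8-byte RegTypeBase
 * slots.
 */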
/**
*
* @brief Extract information about call parameters and stack use
*
* @param[in] @mp - Mono Memory Pool
* @param[in] @sig - Mono Method Signature
* @returns Information about the parameters and stack usage for a call
*
* Determine the amount of space required for code and stack. In addition
* determine starting points for stack-based parameters, and area for
* structures being returned on the stack.
*/
static CallInfo *
get_call_info (MonoMemPool *mp, MonoMethodSignature *sig)
{
guint i, fr, gr, size, pstart;
int nParm = sig->hasthis + sig->param_count;
MonoType *ret_type;
guint32 simpleType, align;
gboolean is_pinvoke = sig->pinvoke;
CallInfo *cinfo;
size_data *sz;
if (mp)
cinfo = (CallInfo *) mono_mempool_alloc0 (mp, sizeof (CallInfo) + sizeof (ArgInfo) * nParm);
else
cinfo = (CallInfo *) g_malloc0 (sizeof (CallInfo) + sizeof (ArgInfo) * nParm);
fr = 0;
gr = s390_r2;
nParm = 0;
cinfo->struct_ret = 0;
cinfo->sig = sig;
sz = &cinfo->sz;
sz->retStruct = 0;
sz->stack_size = S390_MINIMAL_STACK_SIZE;
sz->code_size = 0;
sz->parm_size = 0;
align = 0;
size = 0;
/*----------------------------------------------------------*/
/* We determine the size of the return code/stack in case we*/
/* need to reserve a register to be used to address a stack */
/* area that the callee will use. */
/*----------------------------------------------------------*/
ret_type = mini_get_underlying_type (sig->ret);
simpleType = ret_type->type;
enum_retvalue:
switch (simpleType) {
case MONO_TYPE_I1:
case MONO_TYPE_U1:
case MONO_TYPE_I2:
case MONO_TYPE_U2:
case MONO_TYPE_I4:
case MONO_TYPE_U4:
case MONO_TYPE_I:
case MONO_TYPE_U:
case MONO_TYPE_OBJECT:
case MONO_TYPE_PTR:
case MONO_TYPE_FNPTR:
case MONO_TYPE_CLASS:
case MONO_TYPE_SZARRAY:
case MONO_TYPE_ARRAY:
case MONO_TYPE_STRING:
cinfo->ret.reg = s390_r2;
sz->code_size += 4;
break;
case MONO_TYPE_R4:
case MONO_TYPE_R8:
cinfo->ret.reg = s390_f0;
sz->code_size += 4;
break;
case MONO_TYPE_I8:
case MONO_TYPE_U8:
cinfo->ret.reg = s390_r2;
sz->code_size += 4;
break;
case MONO_TYPE_GENERICINST:
if (!mono_type_generic_inst_is_valuetype (sig->ret)) {
cinfo->ret.reg = s390_r2;
sz->code_size += 4;
break;
}
/* Fall through */
case MONO_TYPE_VALUETYPE: {
MonoClass *klass = mono_class_from_mono_type_internal (sig->ret);
if (m_class_is_enumtype (klass)) {
simpleType = mono_class_enum_basetype_internal (klass)->type;
goto enum_retvalue;
}
size = mini_type_stack_size_full (m_class_get_byval_arg (klass), NULL, sig->pinvoke && !sig->marshalling_disabled);
cinfo->struct_ret = 1;
cinfo->ret.size = size;
cinfo->ret.vtsize = size;
break;
}
case MONO_TYPE_TYPEDBYREF: {
MonoClass *klass = mono_class_from_mono_type_internal (sig->ret);
size = mini_type_stack_size_full (m_class_get_byval_arg (klass), NULL, sig->pinvoke && !sig->marshalling_disabled);
cinfo->struct_ret = 1;
cinfo->ret.size = size;
cinfo->ret.vtsize = size;
}
break;
case MONO_TYPE_VOID:
break;
default:
g_error ("Can't handle as return value 0x%x", sig->ret->type);
}
pstart = 0;
/*
* To simplify get_this_arg_reg () and LLVM integration, emit the vret arg after
 * the first argument, allowing 'this' to always be passed in the first arg reg.
* Also do this if the first argument is a reference type, since virtual calls
* are sometimes made using calli without sig->hasthis set, like in the delegate
* invoke wrappers.
*/
if (cinfo->struct_ret && !is_pinvoke &&
(sig->hasthis ||
(sig->param_count > 0 &&
MONO_TYPE_IS_REFERENCE (mini_get_underlying_type (sig->params [0]))))) {
if (sig->hasthis) {
cinfo->args[nParm].size = sizeof (target_mgreg_t);
add_general (&gr, sz, cinfo->args + nParm);
} else {
cinfo->args[nParm].size = sizeof (target_mgreg_t);
add_general (&gr, sz, &cinfo->args [sig->hasthis + nParm]);
pstart = 1;
}
nParm ++;
cinfo->vret_arg_index = 1;
cinfo->ret.reg = gr;
gr ++;
} else {
/* this */
if (sig->hasthis) {
cinfo->args[nParm].size = sizeof (target_mgreg_t);
add_general (&gr, sz, cinfo->args + nParm);
nParm ++;
}
if (cinfo->struct_ret) {
cinfo->ret.reg = gr;
gr++;
}
}
if ((sig->call_convention == MONO_CALL_VARARG) && (sig->param_count == 0)) {
gr = S390_LAST_ARG_REG + 1;
fr = S390_LAST_FPARG_REG + 1;
/* Emit the signature cookie just before the implicit arguments */
add_general (&gr, sz, &cinfo->sigCookie);
}
/*----------------------------------------------------------*/
/* We determine the size of the parameter code and stack */
/* requirements by checking the types and sizes of the */
/* parameters. */
/*----------------------------------------------------------*/
for (i = pstart; i < sig->param_count; ++i) {
MonoType *ptype;
/*--------------------------------------------------*/
/* Handle vararg type calls. All args are put on */
/* the stack. */
/*--------------------------------------------------*/
if ((sig->call_convention == MONO_CALL_VARARG) &&
(i == sig->sentinelpos)) {
gr = S390_LAST_ARG_REG + 1;
fr = S390_LAST_FPARG_REG + 1;
add_general (&gr, sz, &cinfo->sigCookie);
}
if (m_type_is_byref (sig->params [i])) {
add_general (&gr, sz, cinfo->args+nParm);
cinfo->args[nParm].size = sizeof(gpointer);
nParm++;
continue;
}
ptype = mini_get_underlying_type (sig->params [i]);
simpleType = ptype->type;
cinfo->args[nParm].type = simpleType;
switch (simpleType) {
case MONO_TYPE_I1:
case MONO_TYPE_U1:
cinfo->args[nParm].size = sizeof(char);
add_general (&gr, sz, cinfo->args+nParm);
nParm++;
break;
case MONO_TYPE_I2:
case MONO_TYPE_U2:
cinfo->args[nParm].size = sizeof(short);
add_general (&gr, sz, cinfo->args+nParm);
nParm++;
break;
case MONO_TYPE_I4:
case MONO_TYPE_U4:
cinfo->args[nParm].size = sizeof(int);
add_general (&gr, sz, cinfo->args+nParm);
nParm++;
break;
case MONO_TYPE_I:
case MONO_TYPE_U:
case MONO_TYPE_PTR:
case MONO_TYPE_FNPTR:
case MONO_TYPE_OBJECT:
case MONO_TYPE_STRING:
case MONO_TYPE_SZARRAY:
case MONO_TYPE_ARRAY:
cinfo->args[nParm].size = sizeof(gpointer);
add_general (&gr, sz, cinfo->args+nParm);
nParm++;
break;
case MONO_TYPE_I8:
case MONO_TYPE_U8:
cinfo->args[nParm].size = sizeof(long long);
add_general (&gr, sz, cinfo->args+nParm);
nParm++;
break;
case MONO_TYPE_R4:
cinfo->args[nParm].size = sizeof(float);
add_float (&fr, sz, cinfo->args+nParm, FALSE);
nParm++;
break;
case MONO_TYPE_R8:
cinfo->args[nParm].size = sizeof(double);
add_float (&fr, sz, cinfo->args+nParm, TRUE);
nParm++;
break;
case MONO_TYPE_GENERICINST:
if (!mono_type_generic_inst_is_valuetype (ptype)) {
cinfo->args[nParm].size = sizeof(gpointer);
add_general (&gr, sz, cinfo->args+nParm);
nParm++;
break;
}
/* Fall through */
case MONO_TYPE_VALUETYPE: {
MonoMarshalType *info;
MonoClass *klass = mono_class_from_mono_type_internal (ptype);
if (sig->pinvoke && !sig->marshalling_disabled)
size = mono_class_native_size(klass, NULL);
else
size = mono_class_value_size(klass, NULL);
if (simpleType != MONO_TYPE_GENERICINST) {
info = mono_marshal_load_type_info(klass);
if ((info->native_size == sizeof(float)) &&
(info->num_fields == 1) &&
(info->fields[0].field->type->type == MONO_TYPE_R4)) {
cinfo->args[nParm].size = sizeof(float);
add_float(&fr, sz, cinfo->args+nParm, FALSE);
nParm ++;
break;
}
if ((info->native_size == sizeof(double)) &&
(info->num_fields == 1) &&
(info->fields[0].field->type->type == MONO_TYPE_R8)) {
cinfo->args[nParm].size = sizeof(double);
add_float(&fr, sz, cinfo->args+nParm, TRUE);
nParm ++;
break;
}
}
cinfo->args[nParm].vtsize = 0;
cinfo->args[nParm].size = 0;
switch (size) {
/*----------------------------------*/
/* On S/390, structures of size 1, */
/* 2, 4, and 8 bytes are passed in */
/* (a) register(s). */
/*----------------------------------*/
case 0:
case 1:
case 2:
case 4:
case 8:
add_general(&gr, sz, cinfo->args+nParm);
cinfo->args[nParm].size = size;
cinfo->args[nParm].regtype = RegTypeStructByVal;
nParm++;
break;
default:
add_stackParm(&gr, sz, cinfo->args+nParm, size, RegTypeStructByVal);
nParm++;
}
}
break;
case MONO_TYPE_TYPEDBYREF: {
add_stackParm(&gr, sz, cinfo->args+nParm, sizeof(uintptr_t), RegTypeStructByAddr);
nParm++;
}
break;
default:
g_error ("Can't trampoline 0x%x", ptype);
}
}
/*----------------------------------------------------------*/
/* Handle the case where there are no implicit arguments */
/*----------------------------------------------------------*/
if ((sig->call_convention == MONO_CALL_VARARG) &&
(nParm > 0) &&
(!sig->pinvoke) &&
(sig->param_count == sig->sentinelpos)) {
gr = S390_LAST_ARG_REG + 1;
fr = S390_LAST_FPARG_REG + 1;
add_general (&gr, sz, &cinfo->sigCookie);
}
/*
* If we are passing a structure back then we make room at
* the end of the parameters that may have been placed on
* the stack
*/
if (cinfo->struct_ret) {
cinfo->ret.offset = sz->stack_size;
sz->stack_size += S390_ALIGN(cinfo->ret.size, align);
}
cinfo->lastgr = gr;
sz->stack_size = sz->stack_size + sz->parm_size;
sz->stack_size = S390_ALIGN(sz->stack_size, sizeof(long));
return (cinfo);
}
/*========================= End of Function ========================*/
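/*
 * Worked example (illustrative): for a managed signature
 *   double f (object this, int a, long b, double c)
 * get_call_info assigns this -> r2, a -> r3, b -> r4 (RegTypeGeneral)
 * and c -> f0 (RegTypeFP), returns the value in f0, and leaves
 * stack_usage at S390_MINIMAL_STACK_SIZE since nothing spills.
 */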
/**
*
* @brief Architecture-specific allocation of variables
*
* @param[in] @cfg - Compile control block
*
* Set var information according to the calling convention for s390x.
*
*/
void
mono_arch_allocate_vars (MonoCompile *cfg)
{
MonoMethodSignature *sig;
MonoMethodHeader *header;
MonoInst *inst;
CallInfo *cinfo;
int iParm, iVar, offset, align, size, curinst;
int frame_reg = STK_BASE;
int sArg, eArg;
header = cfg->header;
cfg->flags |= MONO_CFG_HAS_SPILLUP;
/*---------------------------------------------------------*/
/* We use the frame register also for any method that has */
/* filter clauses. This way, when the handlers are called, */
/* the code will reference local variables using the frame */
/* reg instead of the stack pointer: if we had to restore */
/* the stack pointer, we'd corrupt the method frames that */
/* are already on the stack (since filters get called */
/* before stack unwinding happens) when the filter code */
/* would call any method. */
/*---------------------------------------------------------*/
if ((cfg->flags & MONO_CFG_HAS_ALLOCA) || header->num_clauses)
frame_reg = s390_r11;
cfg->frame_reg = frame_reg;
cfg->arch.bkchain_reg = -1;
if (frame_reg != STK_BASE)
cfg->used_int_regs |= (1LL << frame_reg);
sig = mono_method_signature_internal (cfg->method);
if (!cfg->arch.cinfo)
cfg->arch.cinfo = get_call_info (cfg->mempool, sig);
cinfo = cfg->arch.cinfo;
/*--------------------------------------------------------------*/
/* local vars are at a positive offset from the stack pointer */
/* also note that if the function uses alloca, we use s390_r11 */
/* to point at the local variables. */
/* add parameter area size for called functions */
/*--------------------------------------------------------------*/
if (cfg->param_area == 0)
offset = S390_MINIMAL_STACK_SIZE;
else
offset = cfg->param_area;
cfg->sig_cookie = 0;
if (MONO_TYPE_ISSTRUCT(sig->ret)) {
cfg->ret->opcode = OP_REGVAR;
cfg->ret->inst_c0 = cfg->ret->dreg = cinfo->ret.reg;
} else {
switch (mini_get_underlying_type (sig->ret)->type) {
case MONO_TYPE_VOID:
break;
default:
cfg->ret->opcode = OP_REGVAR;
cfg->ret->inst_c0 = cfg->ret->dreg = cinfo->ret.reg;
}
}
if (sig->hasthis) {
inst = cfg->args [0];
if (inst->opcode != OP_REGVAR) {
inst->opcode = OP_REGOFFSET;
inst->inst_basereg = frame_reg;
offset = S390_ALIGN(offset, sizeof(gpointer));
inst->inst_offset = offset;
offset += sizeof (target_mgreg_t);
}
curinst = sArg = 1;
} else {
curinst = sArg = 0;
}
eArg = sig->param_count + sArg;
if (sig->call_convention == MONO_CALL_VARARG)
cfg->sig_cookie += S390_MINIMAL_STACK_SIZE;
for (iParm = sArg; iParm < eArg; ++iParm) {
inst = cfg->args [curinst];
if (inst->opcode != OP_REGVAR) {
switch (cinfo->args[iParm].regtype) {
case RegTypeStructByAddr : {
MonoInst *indir;
size = sizeof (target_mgreg_t);
if (cinfo->args [iParm].reg == STK_BASE) {
/* Similar to the == STK_BASE case below */
cfg->arch.bkchain_reg = s390_r12;
cfg->used_int_regs |= 1 << cfg->arch.bkchain_reg;
inst->opcode = OP_REGOFFSET;
inst->dreg = mono_alloc_preg (cfg);
inst->inst_basereg = cfg->arch.bkchain_reg;
inst->inst_offset = cinfo->args [iParm].offset;
} else {
					inst->opcode = OP_REGOFFSET;
					inst->dreg = mono_alloc_preg (cfg);
					inst->inst_basereg = cfg->frame_reg;
					inst->inst_offset = offset;
}
/* Add a level of indirection */
MONO_INST_NEW (cfg, indir, 0);
*indir = *inst;
inst->opcode = OP_VTARG_ADDR;
inst->inst_left = indir;
}
break;
case RegTypeStructByVal : {
MonoInst *indir;
cfg->arch.bkchain_reg = s390_r12;
cfg->used_int_regs |= 1 << cfg->arch.bkchain_reg;
size = cinfo->args[iParm].size;
if (cinfo->args [iParm].reg == STK_BASE) {
int offStruct = 0;
switch(size) {
case 0: case 1: case 2: case 4: case 8:
offStruct = (size < 8 ? sizeof(uintptr_t) - size : 0);
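						/* fall through - small structs are right-justified in their stack slot */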
default:
inst->opcode = OP_REGOFFSET;
inst->dreg = mono_alloc_preg (cfg);
inst->inst_basereg = cfg->arch.bkchain_reg;
inst->inst_offset = cinfo->args [iParm].offset + offStruct;
}
} else {
offset = S390_ALIGN(offset, sizeof(uintptr_t));
inst->opcode = OP_REGOFFSET;
inst->inst_basereg = cfg->frame_reg;
inst->inst_offset = offset;
}
switch (size) {
case 0 : case 1 : case 2 : case 4 : case 8 :
break;
default :
/* Add a level of indirection */
MONO_INST_NEW (cfg, indir, 0);
*indir = *inst;
inst->opcode = OP_VTARG_ADDR;
inst->inst_left = indir;
}
}
break;
default :
if (cinfo->args [iParm].reg == STK_BASE) {
/*
* These arguments are in the previous frame, so we can't
* compute their offset from the current frame pointer right
* now, since cfg->stack_offset is not yet known, so dedicate a
* register holding the previous frame pointer.
*/
cfg->arch.bkchain_reg = s390_r12;
cfg->used_int_regs |= 1 << cfg->arch.bkchain_reg;
inst->opcode = OP_REGOFFSET;
inst->inst_basereg = cfg->arch.bkchain_reg;
size = (cinfo->args[iParm].size < 8
? 8 - cinfo->args[iParm].size
: 0);
inst->inst_offset = cinfo->args [iParm].offset + size;
size = sizeof (long);
} else {
inst->opcode = OP_REGOFFSET;
inst->inst_basereg = frame_reg;
size = (cinfo->args[iParm].size < 8
? sizeof(int)
: sizeof(long));
offset = S390_ALIGN(offset, size);
if (cfg->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE)
inst->inst_offset = offset;
else
inst->inst_offset = offset + (8 - size);
}
}
offset += MAX(size, 8);
}
curinst++;
}
cfg->locals_min_stack_offset = offset;
curinst = cfg->locals_start;
for (iVar = curinst; iVar < cfg->num_varinfo; ++iVar) {
inst = cfg->varinfo [iVar];
if ((inst->flags & MONO_INST_IS_DEAD) ||
(inst->opcode == OP_REGVAR))
continue;
/*--------------------------------------------------*/
/* inst->backend.is_pinvoke indicates native sized */
/* value types this is used by the pinvoke wrappers */
/* when they call functions returning structure */
/*--------------------------------------------------*/
if (inst->backend.is_pinvoke && MONO_TYPE_ISSTRUCT (inst->inst_vtype))
size = mono_class_native_size (mono_class_from_mono_type_internal (inst->inst_vtype),
(guint32 *) &align);
else
size = mono_type_size (inst->inst_vtype, &align);
offset = S390_ALIGN(offset, align);
inst->inst_offset = offset;
inst->opcode = OP_REGOFFSET;
inst->inst_basereg = frame_reg;
offset += size;
DEBUG (g_print("allocating local %d to %ld, size: %d\n",
iVar, inst->inst_offset, size));
}
offset = S390_ALIGN(offset, sizeof(uintptr_t));
cfg->locals_max_stack_offset = offset;
/*------------------------------------------------------*/
/* Reserve space to save LMF and caller saved registers */
/*------------------------------------------------------*/
if (cfg->method->save_lmf)
offset += sizeof (MonoLMF);
/*------------------------------------------------------*/
/* align the offset */
/*------------------------------------------------------*/
cfg->stack_offset = S390_ALIGN(offset, S390_STACK_ALIGNMENT);
/*------------------------------------------------------*/
/* Fix offsets for args whose value is in parent frame */
/*------------------------------------------------------*/
for (iParm = sArg; iParm < eArg; ++iParm) {
inst = cfg->args [iParm];
if (inst->opcode == OP_S390_STKARG) {
inst->opcode = OP_REGOFFSET;
inst->inst_offset += cfg->stack_offset;
}
}
}
/*========================= End of Function ========================*/
/**
*
* @brief Architecture-specific creation of variables
*
* @param[in] @cfg - Compile control block
*
* Create variables for the method.
*
*/
void
mono_arch_create_vars (MonoCompile *cfg)
{
MonoMethodSignature *sig = mono_method_signature_internal (cfg->method);
CallInfo *cinfo;
if (!cfg->arch.cinfo)
cfg->arch.cinfo = get_call_info (cfg->mempool, sig);
cinfo = cfg->arch.cinfo;
if (cinfo->struct_ret) {
cfg->vret_addr = mono_compile_create_var (cfg, mono_get_int_type (), OP_ARG);
if (G_UNLIKELY (cfg->verbose_level > 1)) {
printf ("vret_addr = ");
mono_print_ins (cfg->vret_addr);
}
}
if (cfg->gen_sdb_seq_points) {
MonoInst *ins;
ins = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL);
ins->flags |= MONO_INST_VOLATILE;
cfg->arch.ss_tramp_var = ins;
ins = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL);
ins->flags |= MONO_INST_VOLATILE;
cfg->arch.bp_tramp_var = ins;
}
}
/*========================= End of Function ========================*/
/**
*
* @brief Add a register to the call operation
*
* @param[in] @cfg - Compile control block
* @param[in] @call - Call Instruction
* @param[in] @storage - Register use type
* @param[in] @reg - Register number
* @param[in] @tree - Call arguments
*
* Add register use information to the call sequence
*/
static void
add_outarg_reg2 (MonoCompile *cfg, MonoCallInst *call, ArgStorage storage, int reg, MonoInst *tree)
{
MonoInst *ins;
switch (storage) {
case RegTypeGeneral:
MONO_INST_NEW (cfg, ins, OP_MOVE);
ins->dreg = mono_alloc_ireg (cfg);
ins->sreg1 = tree->dreg;
MONO_ADD_INS (cfg->cbb, ins);
mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, reg, FALSE);
break;
case RegTypeFP:
MONO_INST_NEW (cfg, ins, OP_FMOVE);
ins->dreg = mono_alloc_freg (cfg);
ins->sreg1 = tree->dreg;
MONO_ADD_INS (cfg->cbb, ins);
mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, reg, TRUE);
break;
case RegTypeFPR4:
MONO_INST_NEW (cfg, ins, OP_S390_SETF4RET);
ins->dreg = mono_alloc_freg (cfg);
ins->sreg1 = tree->dreg;
MONO_ADD_INS (cfg->cbb, ins);
mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, reg, TRUE);
break;
default:
g_assert_not_reached ();
}
}
/*========================= End of Function ========================*/
/**
*
 * @brief Emit a signature cookie
*
* @param[in] @cfg - Compile control block
* @param[in] @call - Call Instruction
* @param[in] @cinfo - Call Information
*
 * Emit the signature cookie as a parameter
*/
static void
emit_sig_cookie (MonoCompile *cfg, MonoCallInst *call, CallInfo *cinfo)
{
MonoMethodSignature *tmpSig;
MonoInst *sig_arg;
cfg->disable_aot = TRUE;
/*
* mono_ArgIterator_Setup assumes the signature cookie is
* passed first and all the arguments which were before it
* passed on the stack after the signature. So compensate
* by passing a different signature.
*/
tmpSig = mono_metadata_signature_dup (call->signature);
tmpSig->param_count -= call->signature->sentinelpos;
tmpSig->sentinelpos = 0;
if (tmpSig->param_count > 0)
memcpy (tmpSig->params,
call->signature->params + call->signature->sentinelpos,
tmpSig->param_count * sizeof(MonoType *));
MONO_INST_NEW (cfg, sig_arg, OP_ICONST);
sig_arg->dreg = mono_alloc_ireg (cfg);
sig_arg->inst_p0 = tmpSig;
MONO_ADD_INS (cfg->cbb, sig_arg);
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, STK_BASE,
cinfo->sigCookie.offset, sig_arg->dreg);
}
/*========================= End of Function ========================*/
/**
*
* @brief Architecture-specific emission of a call operation
*
* @param[in] @cfg - Compile control block
* @param[in] @call - Call Instruction
*
* Process all parameters for a call and generate the sequence of
* operations to perform the call according to the s390x ABI.
*/
void
mono_arch_emit_call (MonoCompile *cfg, MonoCallInst *call)
{
MonoInst *in;
MonoMethodSignature *sig;
MonoInst *ins;
int i, n, lParamArea;
CallInfo *cinfo;
ArgInfo *ainfo = NULL;
int stackSize;
sig = call->signature;
n = sig->param_count + sig->hasthis;
DEBUG (g_print ("Call requires: %d parameters\n",n));
cinfo = get_call_info (cfg->mempool, sig);
stackSize = cinfo->sz.stack_size + cinfo->sz.parm_size;
call->stack_usage = MAX(stackSize, call->stack_usage);
lParamArea = MAX((call->stack_usage-S390_MINIMAL_STACK_SIZE-cinfo->sz.parm_size), 0);
cfg->param_area = MAX(((signed) cfg->param_area), lParamArea); /* FIXME */
cfg->flags |= MONO_CFG_HAS_CALLS;
if (cinfo->struct_ret) {
MONO_INST_NEW (cfg, ins, OP_MOVE);
ins->sreg1 = call->vret_var->dreg;
ins->dreg = mono_alloc_preg (cfg);
MONO_ADD_INS (cfg->cbb, ins);
mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, cinfo->ret.reg, FALSE);
}
for (i = 0; i < n; ++i) {
MonoType *t;
ainfo = cinfo->args + i;
if (i >= sig->hasthis)
t = sig->params [i - sig->hasthis];
else
t = mono_get_int_type ();
t = mini_get_underlying_type (t);
in = call->args [i];
if ((sig->call_convention == MONO_CALL_VARARG) &&
(!sig->pinvoke) &&
(i == sig->sentinelpos)) {
emit_sig_cookie (cfg, call, cinfo);
}
switch (ainfo->regtype) {
case RegTypeGeneral :
add_outarg_reg2 (cfg, call, ainfo->regtype, ainfo->reg, in);
break;
case RegTypeFP :
case RegTypeFPR4 :
if (MONO_TYPE_ISSTRUCT (t)) {
/* Valuetype passed in one fp register */
ainfo->regtype = RegTypeStructByValInFP;
/* Fall through */
} else {
add_outarg_reg2 (cfg, call, ainfo->regtype, ainfo->reg, in);
break;
}
case RegTypeStructByVal :
case RegTypeStructByAddr : {
g_assert (in->klass);
MONO_INST_NEW (cfg, ins, OP_OUTARG_VT);
ins->sreg1 = in->dreg;
ins->klass = in->klass;
ins->backend.size = ainfo->size;
ins->inst_p0 = call;
ins->inst_p1 = mono_mempool_alloc (cfg->mempool, sizeof (ArgInfo));
memcpy (ins->inst_p1, ainfo, sizeof (ArgInfo));
MONO_ADD_INS (cfg->cbb, ins);
break;
}
case RegTypeBase :
if (!m_type_is_byref (t) && t->type == MONO_TYPE_R4) {
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG,
STK_BASE, ainfo->offset + 4,
in->dreg);
} else if (!m_type_is_byref (t) && (t->type == MONO_TYPE_R8)) {
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG,
STK_BASE, ainfo->offset,
in->dreg);
} else {
MONO_INST_NEW (cfg, ins, OP_STORE_MEMBASE_REG);
ins->inst_destbasereg = STK_BASE;
ins->inst_offset = ainfo->offset;
ins->sreg1 = in->dreg;
MONO_ADD_INS (cfg->cbb, ins);
}
break;
default:
g_assert_not_reached ();
break;
}
}
/*
* Handle the case where there are no implicit arguments
*/
if ((sig->call_convention == MONO_CALL_VARARG) &&
(!sig->pinvoke) &&
(i == sig->sentinelpos)) {
emit_sig_cookie (cfg, call, cinfo);
}
}
/*========================= End of Function ========================*/
/**
*
* @brief Architecture-specific Value Type parameter processing
*
* @param[in] @cfg - Compile control block
* @param[in] @call - Call Instruction
* @param[in] @src - Source parameter
*
* Process value type parameters for a call operation
*/
void
mono_arch_emit_outarg_vt (MonoCompile *cfg, MonoInst *ins, MonoInst *src)
{
MonoCallInst *call = (MonoCallInst*) ins->inst_p0;
ArgInfo *ainfo = (ArgInfo *) ins->inst_p1;
if (ainfo->regtype == RegTypeStructByVal) {
if (ainfo->reg != STK_BASE) {
emit_outarg_vtr (cfg, ins, src);
} else {
emit_outarg_vts (cfg, ins, src);
}
} else if (ainfo->regtype == RegTypeStructByValInFP) {
int dreg = mono_alloc_freg (cfg);
if (ainfo->size == 4) {
MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADR4_MEMBASE, dreg, src->dreg, 0);
MONO_EMIT_NEW_UNALU (cfg, OP_S390_SETF4RET, dreg, dreg);
} else {
g_assert (ainfo->size == 8);
MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADR8_MEMBASE, dreg, src->dreg, 0);
}
mono_call_inst_add_outarg_reg (cfg, call, dreg, ainfo->reg, TRUE);
} else {
ERROR_DECL (error);
MonoMethodHeader *header;
MonoInst *vtcopy = mono_compile_create_var (cfg, m_class_get_byval_arg (src->klass), OP_LOCAL);
MonoInst *load;
int ovf_size = ainfo->vtsize,
srcReg;
guint32 size;
/* FIXME: alignment? */
if (call->signature->pinvoke && !call->signature->marshalling_disabled) {
size = mono_type_native_stack_size (m_class_get_byval_arg (src->klass), NULL);
vtcopy->backend.is_pinvoke = 1;
} else {
size = mini_type_stack_size (m_class_get_byval_arg (src->klass), NULL);
}
if (size > 0)
g_assert (ovf_size > 0);
header = mono_method_get_header_checked (cfg->method, error);
mono_error_assert_ok (error); /* FIXME don't swallow the error */
if ((cfg->flags & MONO_CFG_HAS_ALLOCA) || header->num_clauses)
srcReg = s390_r11;
else
srcReg = STK_BASE;
EMIT_NEW_VARLOADA (cfg, load, vtcopy, vtcopy->inst_vtype);
mini_emit_memcpy (cfg, load->dreg, 0, src->dreg, 0, size, TARGET_SIZEOF_VOID_P);
if (ainfo->reg == STK_BASE) {
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, srcReg, ainfo->offset, load->dreg);
if (cfg->compute_gc_maps) {
MonoInst *def;
EMIT_NEW_GC_PARAM_SLOT_LIVENESS_DEF (cfg, def, ainfo->offset, m_class_get_byval_arg (ins->klass));
}
} else
mono_call_inst_add_outarg_reg (cfg, call, load->dreg, ainfo->reg, FALSE);
}
}
/*========================= End of Function ========================*/
/**
*
* @brief Architecture-specific call value return processing
*
* @param[in] @cfg - Compile control block
* @param[in] @method - Method
* @param[in] @val - Instruction representing the result returned to method
*
* Create the sequence to unload the value returned from a call
*/
void
mono_arch_emit_setret (MonoCompile *cfg, MonoMethod *method, MonoInst *val)
{
MonoType *ret = mini_get_underlying_type (mono_method_signature_internal (method)->ret);
if (!m_type_is_byref (ret)) {
if (ret->type == MONO_TYPE_R4) {
MONO_EMIT_NEW_UNALU (cfg, OP_S390_SETF4RET, s390_f0, val->dreg);
return;
} else if (ret->type == MONO_TYPE_R8) {
MONO_EMIT_NEW_UNALU (cfg, OP_FMOVE, s390_f0, val->dreg);
return;
}
}
MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
}
/*========================= End of Function ========================*/
/**
*
* @brief Replace compound compare/branch operations with single operation
*
* @param[in] @bb - Basic block
* @param[in] @ins - Current instruction
* @param[in] @cc - Condition code of branch
* @param[in] @logical - Whether comparison is signed or logical
*
 * Perform a peephole pass over the code, combining separate compare and
 * branch instructions into a single compare-and-branch operation.
*/
static void
compare_and_branch(MonoBasicBlock *bb, MonoInst *ins, int cc, gboolean logical)
{
MonoInst *last;
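
	/*
	 * The combined compare-and-branch instructions (CRJ/CGRJ/CIJ etc.)
	 * require the general-instructions-extension facility
	 */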
if (mono_hwcap_s390x_has_gie) {
last = mono_inst_prev (ins, FILTER_IL_SEQ_POINT);
ins->sreg1 = last->sreg1;
ins->sreg2 = last->sreg2;
ins->sreg3 = cc;
switch(last->opcode) {
case OP_ICOMPARE:
if (logical)
ins->opcode = OP_S390_CLRJ;
else
ins->opcode = OP_S390_CRJ;
MONO_DELETE_INS(bb, last);
break;
case OP_COMPARE:
case OP_LCOMPARE:
if (logical)
ins->opcode = OP_S390_CLGRJ;
else
ins->opcode = OP_S390_CGRJ;
MONO_DELETE_INS(bb, last);
break;
case OP_ICOMPARE_IMM:
ins->backend.data = (gpointer) last->inst_imm;
if (logical)
ins->opcode = OP_S390_CLIJ;
else
ins->opcode = OP_S390_CIJ;
MONO_DELETE_INS(bb, last);
break;
case OP_COMPARE_IMM:
case OP_LCOMPARE_IMM:
ins->backend.data = (gpointer) last->inst_imm;
if (logical)
ins->opcode = OP_S390_CLGIJ;
else
ins->opcode = OP_S390_CGIJ;
MONO_DELETE_INS(bb, last);
break;
}
}
}
/*========================= End of Function ========================*/
/**
*
 * @brief Architecture-specific peephole pass 1 processing
*
* @param[in] @cfg - Compile control block
* @param[in] @bb - Basic block
*
 * Perform a peephole pass over the code looking for compare-and-branch
 * optimizations.
*/
void
mono_arch_peephole_pass_1 (MonoCompile *cfg, MonoBasicBlock *bb)
{
MonoInst *ins, *n;
MONO_BB_FOR_EACH_INS_SAFE (bb, n, ins) {
switch (ins->opcode) {
case OP_IBEQ:
case OP_LBEQ:
compare_and_branch(bb, ins, S390_CC_EQ, FALSE);
break;
case OP_LBNE_UN:
case OP_IBNE_UN:
compare_and_branch(bb, ins, S390_CC_NE, TRUE);
break;
case OP_LBLT:
case OP_IBLT:
compare_and_branch(bb, ins, S390_CC_LT, FALSE);
break;
case OP_LBLT_UN:
case OP_IBLT_UN:
compare_and_branch(bb, ins, S390_CC_LT, TRUE);
break;
case OP_LBGT:
case OP_IBGT:
compare_and_branch(bb, ins, S390_CC_GT, FALSE);
break;
case OP_LBGT_UN:
case OP_IBGT_UN:
compare_and_branch(bb, ins, S390_CC_GT, TRUE);
break;
case OP_LBGE:
case OP_IBGE:
compare_and_branch(bb, ins, S390_CC_GE, FALSE);
break;
case OP_LBGE_UN:
case OP_IBGE_UN:
compare_and_branch(bb, ins, S390_CC_GE, TRUE);
break;
case OP_LBLE:
case OP_IBLE:
compare_and_branch(bb, ins, S390_CC_LE, FALSE);
break;
case OP_LBLE_UN:
case OP_IBLE_UN:
compare_and_branch(bb, ins, S390_CC_LE, TRUE);
break;
// default:
// mono_peephole_ins (bb, ins);
}
}
}
/*========================= End of Function ========================*/
/**
*
 * @brief Architecture-specific peephole pass 2 processing
*
* @param[in] @cfg - Compile control block
* @param[in] @bb - Basic block
*
 * Perform a peephole pass over the code looking for simple optimizations.
*/
void
mono_arch_peephole_pass_2 (MonoCompile *cfg, MonoBasicBlock *bb)
{
MonoInst *ins, *n, *last_ins = NULL;
MONO_BB_FOR_EACH_INS_SAFE (bb, n, ins) {
switch (ins->opcode) {
case OP_LOADU4_MEMBASE:
case OP_LOADI4_MEMBASE:
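			/*
			 * Replace a load that immediately follows a store to the
			 * same location with a sign- or zero-extending move from
			 * the stored register
			 */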
if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_REG) &&
ins->inst_basereg == last_ins->inst_destbasereg &&
ins->inst_offset == last_ins->inst_offset) {
ins->opcode = (ins->opcode == OP_LOADI4_MEMBASE) ? OP_ICONV_TO_I4 : OP_ICONV_TO_U4;
ins->sreg1 = last_ins->sreg1;
}
break;
}
mono_peephole_ins (bb, ins);
}
}
/*========================= End of Function ========================*/
/**
*
 * @brief Architecture-specific lowering pass processing
*
* @param[in] @cfg - Compile control block
* @param[in] @bb - Basic block
*
 * Perform a lowering pass over the code, decomposing immediate forms
 * that cannot be encoded directly.
*/
void
mono_arch_lowering_pass (MonoCompile *cfg, MonoBasicBlock *bb)
{
MonoInst *ins, *next;
MONO_BB_FOR_EACH_INS_SAFE (bb, next, ins) {
switch (ins->opcode) {
case OP_DIV_IMM:
case OP_REM_IMM:
case OP_IDIV_IMM:
case OP_IREM_IMM:
case OP_IDIV_UN_IMM:
case OP_IREM_UN_IMM:
case OP_LAND_IMM:
case OP_LOR_IMM:
case OP_LREM_IMM:
case OP_LXOR_IMM:
case OP_LOCALLOC_IMM:
mono_decompose_op_imm (cfg, bb, ins);
break;
case OP_LADD_IMM:
if (!s390_is_imm16 (ins->inst_imm))
/* This is created by the memcpy code which ignores is_inst_imm */
mono_decompose_op_imm (cfg, bb, ins);
break;
default:
break;
}
}
bb->max_vreg = cfg->next_vreg;
}
/*========================= End of Function ========================*/
/**
*
* @brief Emit float-to-int sequence
*
* @param[in] @cfg - Compile control block
* @param[in] @code - Current instruction area
* @param[in] @dreg - Destination general register
* @param[in] @sreg - Source floating point register
* @param[in] @size - Size of destination
* @param[in] @is_signed - Destination is signed/unsigned
* @returns Next instruction location
*
* Emit instructions to convert a single precision floating point value to an integer
*/
static guchar *
emit_float_to_int (MonoCompile *cfg, guchar *code, int dreg, int sreg, int size, gboolean is_signed)
{
/* sreg is a float, dreg is an integer reg. */
if (is_signed) {
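		/*
		 * cgebr with rounding method 5 (round toward zero) performs the
		 * signed conversion directly; byte and halfword destinations are
		 * then masked to width, re-creating the sign bit when negative.
		 */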
s390_cgebr (code, dreg, 5, sreg);
switch (size) {
case 1:
s390_ltgr (code, dreg, dreg);
s390_jnl (code, 4);
s390_oill (code, dreg, 0x80);
s390_lghi (code, s390_r0, 0xff);
s390_ngr (code, dreg, s390_r0);
break;
case 2:
s390_ltgr (code, dreg, dreg);
s390_jnl (code, 4);
s390_oill (code, dreg, 0x8000);
s390_llill(code, s390_r0, 0xffff);
s390_ngr (code, dreg, s390_r0);
break;
}
} else {
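		/*
		 * The hardware only converts to signed integers.  0x4f000000 is
		 * 2^31 and 0x4f800000 is 2^32 in IEEE single precision: values
		 * below 2^31 convert directly, while larger values are biased
		 * by -2^32 first so the signed result carries the unsigned bit
		 * pattern.
		 */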
short *o[1];
s390_lgdr (code, s390_r14, s390_f14);
s390_lgdr (code, s390_r13, s390_f15);
S390_SET (code, s390_r0, 0x4f000000u);
s390_ldgr (code, s390_f14, s390_r0);
s390_ler (code, s390_f15, sreg);
s390_cebr (code, s390_f15, s390_f14);
s390_jl (code, 0); CODEPTR (code, o[0]);
S390_SET (code, s390_r0, 0x4f800000u);
s390_ldgr (code, s390_f14, s390_r0);
s390_sebr (code, s390_f15, s390_f14);
s390_cfebr (code, dreg, 7, s390_f15);
s390_j (code, 4);
PTRSLOT (code, o[0]);
s390_cfebr (code, dreg, 5, sreg);
switch (size) {
case 1:
s390_lghi (code, s390_r0, 0xff);
s390_ngr (code, dreg, s390_r0);
break;
case 2:
s390_llill(code, s390_r0, 0xffff);
s390_ngr (code, dreg, s390_r0);
break;
}
s390_ldgr (code, s390_f14, s390_r14);
s390_ldgr (code, s390_f15, s390_r13);
}
return code;
}
/*========================= End of Function ========================*/
/**
*
* @brief Emit double-to-int sequence
*
* @param[in] @cfg - Compile control block
* @param[in] @code - Current instruction area
* @param[in] @dreg - Destination general register
* @param[in] @sreg - Source floating point register
* @param[in] @size - Size of destination
* @param[in] @is_signed - Destination is signed/unsigned
* @returns Next instruction location
*
 * Emit instructions to convert a double precision floating point value to an integer
*/
static guchar*
emit_double_to_int (MonoCompile *cfg, guchar *code, int dreg, int sreg, int size, gboolean is_signed)
{
/* sreg is a float, dreg is an integer reg. */
if (is_signed) {
s390_cgdbr (code, dreg, 5, sreg);
switch (size) {
case 1:
s390_ltgr (code, dreg, dreg);
s390_jnl (code, 4);
s390_oill (code, dreg, 0x80);
s390_lghi (code, s390_r0, 0xff);
s390_ngr (code, dreg, s390_r0);
break;
case 2:
s390_ltgr (code, dreg, dreg);
s390_jnl (code, 4);
s390_oill (code, dreg, 0x8000);
s390_llill(code, s390_r0, 0xffff);
s390_ngr (code, dreg, s390_r0);
break;
}
} else {
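		/*
		 * The hardware only converts to signed integers.
		 * 0x41e0000000000000 is 2^31 and 0x41f0000000000000 is 2^32 in
		 * IEEE double precision: values below 2^31 convert directly,
		 * while larger values are biased by -2^32 first so the signed
		 * result carries the unsigned bit pattern.
		 */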
short *o[1];
s390_lgdr (code, s390_r14, s390_f14);
s390_lgdr (code, s390_r13, s390_f15);
S390_SET (code, s390_r0, 0x41e0000000000000llu);
s390_ldgr (code, s390_f14, s390_r0);
s390_ldr (code, s390_f15, sreg);
s390_cdbr (code, s390_f15, s390_f14);
s390_jl (code, 0); CODEPTR (code, o[0]);
S390_SET (code, s390_r0, 0x41f0000000000000llu);
s390_ldgr (code, s390_f14, s390_r0);
s390_sdbr (code, s390_f15, s390_f14);
s390_cfdbr (code, dreg, 7, s390_f15);
s390_j (code, 4);
PTRSLOT (code, o[0]);
s390_cfdbr (code, dreg, 5, sreg);
switch (size) {
case 1:
s390_lghi (code, s390_r0, 0xff);
s390_ngr (code, dreg, s390_r0);
break;
case 2:
s390_llill(code, s390_r0, 0xffff);
s390_ngr (code, dreg, s390_r0);
break;
}
s390_ldgr (code, s390_f14, s390_r14);
s390_ldgr (code, s390_f15, s390_r13);
}
return code;
}
/*========================= End of Function ========================*/
/**
*
 * @brief Check if the next instruction treats its operands as unsigned
 *
 * @param[in] @next - Next instruction
 * @returns True if the instruction operates on unsigned values
 *
 * Determine if the next instruction is a branch, conditional exception,
 * or compare that treats its operands as unsigned.
*/
static gboolean
is_unsigned (MonoInst *next)
{
if ((next) &&
(((next->opcode >= OP_IBNE_UN) &&
(next->opcode <= OP_IBLT_UN)) ||
((next->opcode >= OP_LBNE_UN) &&
(next->opcode <= OP_LBLT_UN)) ||
((next->opcode >= OP_COND_EXC_NE_UN) &&
(next->opcode <= OP_COND_EXC_LT_UN)) ||
((next->opcode >= OP_COND_EXC_INE_UN) &&
(next->opcode <= OP_COND_EXC_ILT_UN)) ||
((next->opcode == OP_CLT_UN) ||
(next->opcode == OP_CGT_UN) ||
(next->opcode == OP_ICGE_UN) ||
(next->opcode == OP_ICLE_UN)) ||
((next->opcode == OP_ICLT_UN) ||
(next->opcode == OP_ICGT_UN) ||
(next->opcode == OP_LCLT_UN) ||
(next->opcode == OP_LCGT_UN))))
return TRUE;
else
return FALSE;
}
/*========================= End of Function ========================*/
/**
*
 * @brief Architecture-specific processing of a basic block
*
* @param[in] @cfg - Compile control block
* @param[in] @bb - Basic block
*
 * Process the instructions within a basic block, emitting s390x
 * instructions for each VM operation code.
*/
void
mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
{
MonoInst *ins;
MonoCallInst *call;
guint8 *code = cfg->native_code + cfg->code_len;
int src2;
/* we don't align basic blocks of loops on s390 */
if (cfg->verbose_level > 2)
g_print ("Basic block %d starting at offset 0x%x\n", bb->block_num, bb->native_offset);
MONO_BB_FOR_EACH_INS (bb, ins) {
const guint offset = code - cfg->native_code;
set_code_cursor (cfg, code);
int max_len = ins_get_size (ins->opcode);
code = realloc_code (cfg, max_len);
mono_debug_record_line_number (cfg, ins, offset);
switch (ins->opcode) {
case OP_STOREI1_MEMBASE_IMM: {
s390_lghi (code, s390_r0, ins->inst_imm);
S390_LONG (code, stcy, stc, s390_r0, 0,
ins->inst_destbasereg, ins->inst_offset);
}
break;
case OP_STOREI2_MEMBASE_IMM: {
s390_lghi (code, s390_r0, ins->inst_imm);
S390_LONG (code, sthy, sth, s390_r0, 0,
ins->inst_destbasereg, ins->inst_offset);
}
break;
case OP_STOREI4_MEMBASE_IMM: {
s390_lgfi (code, s390_r0, ins->inst_imm);
S390_LONG (code, sty, st, s390_r0, 0,
ins->inst_destbasereg, ins->inst_offset);
}
break;
case OP_STORE_MEMBASE_IMM:
case OP_STOREI8_MEMBASE_IMM: {
S390_SET (code, s390_r0, ins->inst_imm);
S390_LONG (code, stg, stg, s390_r0, 0,
ins->inst_destbasereg, ins->inst_offset);
}
break;
case OP_STOREI1_MEMBASE_REG: {
S390_LONG (code, stcy, stc, ins->sreg1, 0,
ins->inst_destbasereg, ins->inst_offset);
}
break;
case OP_STOREI2_MEMBASE_REG: {
S390_LONG (code, sthy, sth, ins->sreg1, 0,
ins->inst_destbasereg, ins->inst_offset);
}
break;
case OP_STOREI4_MEMBASE_REG: {
S390_LONG (code, sty, st, ins->sreg1, 0,
ins->inst_destbasereg, ins->inst_offset);
}
break;
case OP_STORE_MEMBASE_REG:
case OP_STOREI8_MEMBASE_REG: {
S390_LONG (code, stg, stg, ins->sreg1, 0,
ins->inst_destbasereg, ins->inst_offset);
}
break;
case OP_LOADU4_MEM:
g_assert_not_reached ();
break;
case OP_LOAD_MEMBASE:
case OP_LOADI8_MEMBASE: {
S390_LONG (code, lg, lg, ins->dreg, 0,
ins->inst_basereg, ins->inst_offset);
}
break;
case OP_LOADI4_MEMBASE: {
S390_LONG (code, lgf, lgf, ins->dreg, 0,
ins->inst_basereg, ins->inst_offset);
}
break;
case OP_LOADU4_MEMBASE: {
S390_LONG (code, llgf, llgf, ins->dreg, 0,
ins->inst_basereg, ins->inst_offset);
}
break;
case OP_LOADU1_MEMBASE: {
S390_LONG (code, llgc, llgc, ins->dreg, 0,
ins->inst_basereg, ins->inst_offset);
}
break;
case OP_LOADI1_MEMBASE: {
S390_LONG (code, lgb, lgb, ins->dreg, 0,
ins->inst_basereg, ins->inst_offset);
}
break;
case OP_LOADU2_MEMBASE: {
S390_LONG (code, llgh, llgh, ins->dreg, 0,
ins->inst_basereg, ins->inst_offset);
}
break;
case OP_LOADI2_MEMBASE: {
S390_LONG (code, lgh, lgh, ins->dreg, 0,
ins->inst_basereg, ins->inst_offset);
}
break;
case OP_LCONV_TO_I1: {
s390_lgbr (code, ins->dreg, ins->sreg1);
}
break;
case OP_LCONV_TO_I2: {
s390_lghr (code, ins->dreg, ins->sreg1);
}
break;
case OP_LCONV_TO_U1: {
s390_llgcr (code, ins->dreg, ins->sreg1);
}
break;
case OP_LCONV_TO_U2: {
s390_llghr (code, ins->dreg, ins->sreg1);
}
break;
case OP_ICONV_TO_I1: {
s390_lgbr (code, ins->dreg, ins->sreg1);
}
break;
case OP_ICONV_TO_I2: {
s390_lghr (code, ins->dreg, ins->sreg1);
}
break;
case OP_ICONV_TO_U1: {
s390_llgcr (code, ins->dreg, ins->sreg1);
}
break;
case OP_ICONV_TO_U2: {
s390_llghr (code, ins->dreg, ins->sreg1);
}
break;
case OP_ICONV_TO_U4: {
s390_llgfr (code, ins->dreg, ins->sreg1);
}
break;
case OP_ICONV_TO_I4: {
s390_lgfr (code, ins->dreg, ins->sreg1);
}
break;
case OP_COMPARE:
case OP_LCOMPARE: {
if (is_unsigned (ins->next))
s390_clgr (code, ins->sreg1, ins->sreg2);
else
s390_cgr (code, ins->sreg1, ins->sreg2);
}
break;
case OP_ICOMPARE: {
if (is_unsigned (ins->next))
s390_clr (code, ins->sreg1, ins->sreg2);
else
s390_cr (code, ins->sreg1, ins->sreg2);
}
break;
case OP_COMPARE_IMM:
case OP_LCOMPARE_IMM: {
gboolean branchUn = is_unsigned (ins->next);
if ((ins->inst_imm == 0) && (!branchUn)) {
s390_ltgr (code, ins->sreg1, ins->sreg1);
} else {
S390_SET (code, s390_r0, ins->inst_imm);
if (branchUn)
s390_clgr (code, ins->sreg1, s390_r0);
else
s390_cgr (code, ins->sreg1, s390_r0);
}
}
break;
case OP_ICOMPARE_IMM: {
gboolean branchUn = is_unsigned (ins->next);
if ((ins->inst_imm == 0) && (!branchUn)) {
s390_ltr (code, ins->sreg1, ins->sreg1);
} else {
S390_SET (code, s390_r0, ins->inst_imm);
if (branchUn)
s390_clr (code, ins->sreg1, s390_r0);
else
s390_cr (code, ins->sreg1, s390_r0);
}
}
break;
case OP_BREAK: {
code = emit_call (cfg, code, MONO_PATCH_INFO_JIT_ICALL_ID,
GUINT_TO_POINTER (MONO_JIT_ICALL_mono_break));
}
break;
case OP_ADDCC: {
if (mono_hwcap_s390x_has_mlt) {
s390_agrk (code, ins->dreg, ins->sreg1, ins->sreg2);
} else {
CHECK_SRCDST_COM;
s390_agr (code, ins->dreg, src2);
}
}
break;
case OP_LADD: {
if (mono_hwcap_s390x_has_mlt) {
s390_agrk (code, ins->dreg, ins->sreg1, ins->sreg2);
} else {
CHECK_SRCDST_COM;
s390_agr (code, ins->dreg, src2);
}
}
break;
case OP_ADC: {
CHECK_SRCDST_COM;
s390_alcgr (code, ins->dreg, src2);
}
break;
case OP_ADD_IMM: {
if (mono_hwcap_s390x_has_mlt) {
if (s390_is_imm16 (ins->inst_imm)) {
s390_aghik(code, ins->dreg, ins->sreg1, ins->inst_imm);
} else {
S390_SET (code, s390_r0, ins->inst_imm);
s390_agrk (code, ins->dreg, ins->sreg1, s390_r0);
}
} else {
if (ins->dreg != ins->sreg1) {
s390_lgr (code, ins->dreg, ins->sreg1);
}
if (s390_is_imm16 (ins->inst_imm)) {
s390_aghi (code, ins->dreg, ins->inst_imm);
} else if (s390_is_imm32 (ins->inst_imm)) {
s390_agfi (code, ins->dreg, ins->inst_imm);
} else {
S390_SET (code, s390_r0, ins->inst_imm);
s390_agr (code, ins->dreg, s390_r0);
}
}
}
break;
case OP_LADD_IMM: {
if (mono_hwcap_s390x_has_mlt) {
if (s390_is_imm16 (ins->inst_imm)) {
s390_aghik(code, ins->dreg, ins->sreg1, ins->inst_imm);
} else {
S390_SET (code, s390_r0, ins->inst_imm);
s390_agrk (code, ins->dreg, ins->sreg1, s390_r0);
}
} else {
if (ins->dreg != ins->sreg1) {
s390_lgr (code, ins->dreg, ins->sreg1);
}
if (s390_is_imm32 (ins->inst_imm)) {
s390_agfi (code, ins->dreg, ins->inst_imm);
} else {
S390_SET (code, s390_r0, ins->inst_imm);
s390_agr (code, ins->dreg, s390_r0);
}
}
}
break;
case OP_ADC_IMM: {
if (ins->dreg != ins->sreg1) {
s390_lgr (code, ins->dreg, ins->sreg1);
}
if (s390_is_imm16 (ins->inst_imm)) {
s390_lghi (code, s390_r0, ins->inst_imm);
s390_alcgr (code, ins->dreg, s390_r0);
} else {
S390_SET (code, s390_r0, ins->inst_imm);
s390_alcgr (code, ins->dreg, s390_r0);
}
}
break;
case OP_IADD_OVF:
case OP_S390_IADD_OVF: {
CHECK_SRCDST_COM;
s390_ar (code, ins->dreg, src2);
EMIT_COND_SYSTEM_EXCEPTION (S390_CC_OV, "OverflowException");
s390_lgfr (code, ins->dreg, ins->dreg);
}
break;
case OP_IADD_OVF_UN:
case OP_S390_IADD_OVF_UN: {
CHECK_SRCDST_COM;
s390_alr (code, ins->dreg, src2);
EMIT_COND_SYSTEM_EXCEPTION (S390_CC_CY, "OverflowException");
s390_llgfr (code, ins->dreg, ins->dreg);
}
break;
case OP_ADD_OVF_CARRY: {
CHECK_SRCDST_COM;
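			/* alcgr of two zeroes materializes the incoming carry flag in r0 */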
s390_lghi (code, s390_r0, 0);
s390_lgr (code, s390_r1, s390_r0);
s390_alcgr (code, s390_r0, s390_r1);
s390_agr (code, ins->dreg, src2);
EMIT_COND_SYSTEM_EXCEPTION (S390_CC_OV, "OverflowException");
s390_agr (code, ins->dreg, s390_r0);
EMIT_COND_SYSTEM_EXCEPTION (S390_CC_OV, "OverflowException");
}
break;
case OP_ADD_OVF_UN_CARRY: {
CHECK_SRCDST_COM;
s390_alcgr (code, ins->dreg, src2);
EMIT_COND_SYSTEM_EXCEPTION (S390_CC_CY, "OverflowException");
}
break;
case OP_SUBCC: {
if (mono_hwcap_s390x_has_mlt) {
s390_sgrk (code, ins->dreg, ins->sreg1, ins->sreg2);
} else {
CHECK_SRCDST_NCOM;
s390_sgr (code, ins->dreg, src2);
}
}
break;
case OP_LSUB: {
if (mono_hwcap_s390x_has_mlt) {
s390_sgrk (code, ins->dreg, ins->sreg1, ins->sreg2);
} else {
CHECK_SRCDST_NCOM;
s390_sgr (code, ins->dreg, src2);
}
}
break;
case OP_SBB: {
CHECK_SRCDST_NCOM;
s390_slbgr(code, ins->dreg, src2);
}
break;
case OP_SUB_IMM: {
if (ins->dreg != ins->sreg1) {
s390_lgr (code, ins->dreg, ins->sreg1);
}
if (s390_is_imm16 (-ins->inst_imm)) {
s390_aghi (code, ins->dreg, -ins->inst_imm);
} else if (s390_is_imm32 (-ins->inst_imm)) {
s390_slgfi (code, ins->dreg, ins->inst_imm);
} else {
S390_SET (code, s390_r0, ins->inst_imm);
s390_slgr (code, ins->dreg, s390_r0);
}
}
break;
case OP_LSUB_IMM: {
if (ins->dreg != ins->sreg1) {
s390_lgr (code, ins->dreg, ins->sreg1);
}
if (s390_is_imm16 (-ins->inst_imm)) {
s390_aghi (code, ins->dreg, -ins->inst_imm);
} else if (s390_is_imm32 (-ins->inst_imm)) {
s390_slgfi (code, ins->dreg, ins->inst_imm);
} else {
S390_SET (code, s390_r0, ins->inst_imm);
s390_slgr (code, ins->dreg, s390_r0);
}
}
break;
case OP_SBB_IMM: {
if (ins->dreg != ins->sreg1) {
s390_lgr (code, ins->dreg, ins->sreg1);
}
if (s390_is_imm16 (-ins->inst_imm)) {
s390_lghi (code, s390_r0, ins->inst_imm);
s390_slbgr (code, ins->dreg, s390_r0);
} else {
S390_SET (code, s390_r0, ins->inst_imm);
s390_slbgr(code, ins->dreg, s390_r0);
}
}
break;
case OP_SUB_OVF_CARRY: {
CHECK_SRCDST_NCOM;
s390_lghi (code, s390_r0, 0);
s390_lgr (code, s390_r1, s390_r0);
s390_slbgr (code, s390_r0, s390_r1);
s390_sgr (code, ins->dreg, src2);
EMIT_COND_SYSTEM_EXCEPTION (S390_CC_OV, "OverflowException");
s390_agr (code, ins->dreg, s390_r0);
EMIT_COND_SYSTEM_EXCEPTION (S390_CC_OV, "OverflowException");
}
break;
case OP_SUB_OVF_UN_CARRY: {
CHECK_SRCDST_NCOM;
s390_slbgr (code, ins->dreg, src2);
EMIT_COND_SYSTEM_EXCEPTION (S390_CC_NC, "OverflowException");
}
break;
case OP_LAND: {
if (mono_hwcap_s390x_has_mlt) {
s390_ngrk (code, ins->dreg, ins->sreg1, ins->sreg2);
} else {
if (ins->sreg1 == ins->dreg) {
s390_ngr (code, ins->dreg, ins->sreg2);
} else {
if (ins->sreg2 == ins->dreg) {
s390_ngr (code, ins->dreg, ins->sreg1);
} else {
s390_lgr (code, ins->dreg, ins->sreg1);
s390_ngr (code, ins->dreg, ins->sreg2);
}
}
}
}
break;
case OP_AND_IMM: {
S390_SET_MASK (code, s390_r0, ins->inst_imm);
if (mono_hwcap_s390x_has_mlt) {
s390_ngrk (code, ins->dreg, ins->sreg1, s390_r0);
} else {
if (ins->dreg != ins->sreg1) {
s390_lgr (code, ins->dreg, ins->sreg1);
}
s390_ngr (code, ins->dreg, s390_r0);
}
}
break;
case OP_LDIV: {
s390_lgr (code, s390_r1, ins->sreg1);
s390_dsgr (code, s390_r0, ins->sreg2);
s390_lgr (code, ins->dreg, s390_r1);
}
break;
case OP_LDIV_UN: {
s390_lgr (code, s390_r1, ins->sreg1);
s390_lghi (code, s390_r0, 0);
s390_dlgr (code, s390_r0, ins->sreg2);
s390_lgr (code, ins->dreg, s390_r1);
}
break;
case OP_LREM: {
s390_lgr (code, s390_r1, ins->sreg1);
s390_dsgr (code, s390_r0, ins->sreg2);
s390_lgr (code, ins->dreg, s390_r0);
break;
}
case OP_LREM_IMM: {
if (s390_is_imm16 (ins->inst_imm)) {
s390_lghi (code, s390_r13, ins->inst_imm);
} else {
s390_lgfi (code, s390_r13, ins->inst_imm);
}
				/* dsgr takes the dividend from the odd register of the even/odd pair */
				s390_lgr (code, s390_r1, ins->sreg1);
				s390_dsgr (code, s390_r0, s390_r13);
				s390_lgr (code, ins->dreg, s390_r0);
}
break;
case OP_LREM_UN: {
s390_lgr (code, s390_r1, ins->sreg1);
s390_lghi (code, s390_r0, 0);
s390_dlgr (code, s390_r0, ins->sreg2);
s390_lgr (code, ins->dreg, s390_r0);
}
break;
case OP_LOR: {
if (mono_hwcap_s390x_has_mlt) {
s390_ogrk (code, ins->dreg, ins->sreg1, ins->sreg2);
} else {
if (ins->sreg1 == ins->dreg) {
s390_ogr (code, ins->dreg, ins->sreg2);
} else {
if (ins->sreg2 == ins->dreg) {
s390_ogr (code, ins->dreg, ins->sreg1);
} else {
s390_lgr (code, ins->dreg, ins->sreg1);
s390_ogr (code, ins->dreg, ins->sreg2);
}
}
}
}
break;
case OP_OR_IMM: {
S390_SET_MASK(code, s390_r0, ins->inst_imm);
if (mono_hwcap_s390x_has_mlt) {
s390_ogrk (code, ins->dreg, ins->sreg1, s390_r0);
} else {
if (ins->dreg != ins->sreg1) {
s390_lgr (code, ins->dreg, ins->sreg1);
}
s390_ogr (code, ins->dreg, s390_r0);
}
}
break;
case OP_LXOR: {
if (mono_hwcap_s390x_has_mlt) {
s390_xgrk (code, ins->dreg, ins->sreg1, ins->sreg2);
} else {
if (ins->sreg1 == ins->dreg) {
s390_xgr (code, ins->dreg, ins->sreg2);
}
else {
if (ins->sreg2 == ins->dreg) {
s390_xgr (code, ins->dreg, ins->sreg1);
}
else {
s390_lgr (code, ins->dreg, ins->sreg1);
s390_xgr (code, ins->dreg, ins->sreg2);
}
}
}
}
break;
case OP_XOR_IMM: {
S390_SET_MASK(code, s390_r0, ins->inst_imm);
if (mono_hwcap_s390x_has_mlt) {
s390_xgrk (code, ins->dreg, ins->sreg1, s390_r0);
} else {
if (ins->dreg != ins->sreg1) {
s390_lgr (code, ins->dreg, ins->sreg1);
}
s390_xgr (code, ins->dreg, s390_r0);
}
}
break;
case OP_LSHL: {
CHECK_SRCDST_NCOM;
s390_sllg (code, ins->dreg, ins->dreg, src2, 0);
}
break;
case OP_SHL_IMM:
case OP_LSHL_IMM: {
if (ins->sreg1 != ins->dreg) {
s390_lgr (code, ins->dreg, ins->sreg1);
}
s390_sllg (code, ins->dreg, ins->dreg, 0, (ins->inst_imm & 0x3f));
}
break;
case OP_LSHR: {
CHECK_SRCDST_NCOM;
s390_srag (code, ins->dreg, ins->dreg, src2, 0);
}
break;
case OP_SHR_IMM:
case OP_LSHR_IMM: {
if (ins->sreg1 != ins->dreg) {
s390_lgr (code, ins->dreg, ins->sreg1);
}
s390_srag (code, ins->dreg, ins->dreg, 0, (ins->inst_imm & 0x3f));
}
break;
case OP_SHR_UN_IMM:
case OP_LSHR_UN_IMM: {
if (ins->sreg1 != ins->dreg) {
s390_lgr (code, ins->dreg, ins->sreg1);
}
s390_srlg (code, ins->dreg, ins->dreg, 0, (ins->inst_imm & 0x3f));
}
break;
case OP_LSHR_UN: {
CHECK_SRCDST_NCOM;
s390_srlg (code, ins->dreg, ins->dreg, src2, 0);
}
break;
case OP_LNOT: {
if (ins->sreg1 != ins->dreg) {
s390_lgr (code, ins->dreg, ins->sreg1);
}
s390_lghi (code, s390_r0, -1);
s390_xgr (code, ins->dreg, s390_r0);
}
break;
case OP_LNEG: {
s390_lcgr (code, ins->dreg, ins->sreg1);
}
break;
case OP_LMUL: {
CHECK_SRCDST_COM;
s390_msgr (code, ins->dreg, src2);
}
break;
case OP_MUL_IMM:
case OP_LMUL_IMM: {
if (ins->dreg != ins->sreg1) {
s390_lgr (code, ins->dreg, ins->sreg1);
}
if ((mono_hwcap_s390x_has_gie) &&
(s390_is_imm32 (ins->inst_imm))) {
s390_msgfi (code, ins->dreg, ins->inst_imm);
} else {
if (s390_is_imm16 (ins->inst_imm)) {
s390_lghi (code, s390_r13, ins->inst_imm);
} else if (s390_is_imm32 (ins->inst_imm)) {
s390_lgfi (code, s390_r13, ins->inst_imm);
} else {
S390_SET (code, s390_r13, ins->inst_imm);
}
s390_msgr (code, ins->dreg, s390_r13);
}
}
break;
case OP_LMUL_OVF: {
short int *o[2];
if (mono_hwcap_s390x_has_mie2) {
s390_msgrkc (code, ins->dreg, ins->sreg1, ins->sreg2);
EMIT_COND_SYSTEM_EXCEPTION (S390_CC_OV, "OverflowException");
} else {
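			/*
			 * Without msgrkc (which sets the condition code on overflow)
			 * compute the product and trap when its sign disagrees with
			 * the XOR of the operand signs
			 */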
s390_ltgr (code, s390_r1, ins->sreg1);
s390_jz (code, 0); CODEPTR(code, o[0]);
s390_ltgr (code, s390_r0, ins->sreg2);
s390_jnz (code, 6);
s390_lghi (code, s390_r1, 0);
s390_j (code, 0); CODEPTR(code, o[1]);
s390_xgr (code, s390_r0, s390_r1);
s390_msgr (code, s390_r1, ins->sreg2);
s390_xgr (code, s390_r0, s390_r1);
s390_srlg (code, s390_r0, s390_r0, 0, 63);
s390_ltgr (code, s390_r0, s390_r0);
EMIT_COND_SYSTEM_EXCEPTION (S390_CC_NZ, "OverflowException");
PTRSLOT (code, o[0]);
PTRSLOT (code, o[1]);
s390_lgr (code, ins->dreg, s390_r1);
}
}
break;
case OP_LMUL_OVF_UN: {
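			/* mlgr yields a 128-bit product in r0:r1; any bits in the high half mean unsigned overflow */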
s390_lghi (code, s390_r0, 0);
s390_lgr (code, s390_r1, ins->sreg1);
s390_mlgr (code, s390_r0, ins->sreg2);
s390_ltgr (code, s390_r0, s390_r0);
EMIT_COND_SYSTEM_EXCEPTION (S390_CC_NZ, "OverflowException");
s390_lgr (code, ins->dreg, s390_r1);
}
break;
case OP_IADDCC: {
g_assert_not_reached ();
CHECK_SRCDST_COM_I;
s390_algr (code, ins->dreg, src2);
}
break;
case OP_IADD: {
CHECK_SRCDST_COM_I;
s390_agr (code, ins->dreg, src2);
}
break;
case OP_IADC: {
g_assert_not_reached ();
CHECK_SRCDST_COM_I;
s390_alcgr (code, ins->dreg, src2);
}
break;
case OP_IADD_IMM: {
if (ins->dreg != ins->sreg1) {
s390_lgfr (code, ins->dreg, ins->sreg1);
}
if (s390_is_imm16 (ins->inst_imm)) {
s390_aghi (code, ins->dreg, ins->inst_imm);
} else {
s390_afi (code, ins->dreg, ins->inst_imm);
}
}
break;
case OP_IADC_IMM: {
if (ins->dreg != ins->sreg1) {
s390_lgfr (code, ins->dreg, ins->sreg1);
}
if (s390_is_imm16 (ins->inst_imm)) {
s390_lghi (code, s390_r0, ins->inst_imm);
s390_alcgr (code, ins->dreg, s390_r0);
} else {
S390_SET (code, s390_r0, ins->inst_imm);
s390_alcgr (code, ins->dreg, s390_r0);
}
}
break;
case OP_LADD_OVF:
case OP_S390_LADD_OVF: {
if (mono_hwcap_s390x_has_mlt) {
s390_agrk (code, ins->dreg, ins->sreg1, ins->sreg2);
} else {
CHECK_SRCDST_COM;
s390_agr (code, ins->dreg, src2);
}
EMIT_COND_SYSTEM_EXCEPTION (S390_CC_OV, "OverflowException");
}
break;
case OP_LADD_OVF_UN:
case OP_S390_LADD_OVF_UN: {
if (mono_hwcap_s390x_has_mlt) {
s390_algrk (code, ins->dreg, ins->sreg1, ins->sreg2);
} else {
CHECK_SRCDST_COM;
s390_algr (code, ins->dreg, src2);
}
EMIT_COND_SYSTEM_EXCEPTION (S390_CC_CY, "OverflowException");
}
break;
case OP_ISUBCC: {
if (mono_hwcap_s390x_has_mlt) {
s390_slgrk (code, ins->dreg, ins->sreg1, ins->sreg2);
} else {
CHECK_SRCDST_NCOM_I;
s390_slgr (code, ins->dreg, src2);
}
}
break;
case OP_ISUB: {
if (mono_hwcap_s390x_has_mlt) {
s390_sgrk (code, ins->dreg, ins->sreg1, ins->sreg2);
} else {
CHECK_SRCDST_NCOM_I;
s390_sgr (code, ins->dreg, src2);
}
}
break;
case OP_ISBB: {
CHECK_SRCDST_NCOM_I;
s390_slbgr (code, ins->dreg, src2);
}
break;
case OP_ISUB_IMM: {
if (ins->dreg != ins->sreg1) {
s390_lgfr (code, ins->dreg, ins->sreg1);
}
if (s390_is_imm16 (-ins->inst_imm)) {
s390_aghi (code, ins->dreg, -ins->inst_imm);
} else {
s390_agfi (code, ins->dreg, -ins->inst_imm);
}
}
break;
case OP_ISBB_IMM: {
S390_SET (code, s390_r0, ins->inst_imm);
s390_slgfr (code, ins->dreg, s390_r0);
}
break;
case OP_ISUB_OVF:
case OP_S390_ISUB_OVF: {
if (mono_hwcap_s390x_has_mlt) {
s390_srk (code, ins->dreg, ins->sreg1, ins->sreg2);
EMIT_COND_SYSTEM_EXCEPTION (S390_CC_OV, "OverflowException");
} else {
CHECK_SRCDST_NCOM;
s390_sr (code, ins->dreg, src2);
EMIT_COND_SYSTEM_EXCEPTION (S390_CC_OV, "OverflowException");
s390_lgfr (code, ins->dreg, ins->dreg);
}
}
break;
case OP_ISUB_OVF_UN:
case OP_S390_ISUB_OVF_UN: {
if (mono_hwcap_s390x_has_mlt) {
s390_slrk (code, ins->dreg, ins->sreg1, ins->sreg2);
} else {
CHECK_SRCDST_NCOM;
s390_slr (code, ins->dreg, src2);
}
EMIT_COND_SYSTEM_EXCEPTION (S390_CC_NC, "OverflowException");
s390_llgfr(code, ins->dreg, ins->dreg);
}
break;
case OP_LSUB_OVF:
case OP_S390_LSUB_OVF: {
if (mono_hwcap_s390x_has_mlt) {
s390_sgrk (code, ins->dreg, ins->sreg1, ins->sreg2);
} else {
CHECK_SRCDST_NCOM;
s390_sgr (code, ins->dreg, src2);
}
EMIT_COND_SYSTEM_EXCEPTION (S390_CC_OV, "OverflowException");
}
break;
case OP_LSUB_OVF_UN:
case OP_S390_LSUB_OVF_UN: {
CHECK_SRCDST_NCOM;
s390_slgr (code, ins->dreg, src2);
EMIT_COND_SYSTEM_EXCEPTION (S390_CC_NC, "OverflowException");
}
break;
case OP_IAND: {
if (mono_hwcap_s390x_has_mlt) {
s390_ngrk (code, ins->dreg, ins->sreg1, ins->sreg2);
} else {
CHECK_SRCDST_NCOM_I;
s390_ngr (code, ins->dreg, src2);
}
}
break;
case OP_IAND_IMM: {
S390_SET_MASK (code, s390_r0, ins->inst_imm);
if (mono_hwcap_s390x_has_mlt) {
s390_ngrk (code, ins->dreg, ins->sreg1, s390_r0);
} else {
if (ins->dreg != ins->sreg1) {
s390_lgfr (code, ins->dreg, ins->sreg1);
}
s390_ngr (code, ins->dreg, s390_r0);
}
}
break;
case OP_IDIV: {
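			/* dr divides the 64-bit dividend in the r0:r1 pair; srda sign-extends the 32-bit dividend across it */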
s390_lgfr (code, s390_r0, ins->sreg1);
s390_srda (code, s390_r0, 0, 32);
s390_dr (code, s390_r0, ins->sreg2);
s390_lgfr (code, ins->dreg, s390_r1);
}
break;
case OP_IDIV_UN: {
s390_lgfr (code, s390_r0, ins->sreg1);
s390_srdl (code, s390_r0, 0, 32);
s390_dlr (code, s390_r0, ins->sreg2);
s390_lgfr (code, ins->dreg, s390_r1);
}
break;
case OP_IDIV_IMM: {
if (s390_is_imm16 (ins->inst_imm)) {
s390_lghi (code, s390_r13, ins->inst_imm);
} else {
s390_lgfi (code, s390_r13, ins->inst_imm);
}
s390_lgfr (code, s390_r0, ins->sreg1);
s390_srda (code, s390_r0, 0, 32);
				s390_dr (code, s390_r0, s390_r13);
s390_lgfr (code, ins->dreg, s390_r1);
}
break;
			case OP_IREM: {
				s390_lgfr (code, s390_r0, ins->sreg1);
				s390_srda (code, s390_r0, 0, 32);
				s390_dr (code, s390_r0, ins->sreg2);
				s390_lgfr (code, ins->dreg, s390_r0);
			}
				break;
			case OP_IREM_UN: {
				s390_lgfr (code, s390_r0, ins->sreg1);
				s390_srdl (code, s390_r0, 0, 32);
				s390_dlr (code, s390_r0, ins->sreg2);
				s390_lgfr (code, ins->dreg, s390_r0);
			}
				break;
case OP_IREM_IMM: {
if (s390_is_imm16 (ins->inst_imm)) {
s390_lghi (code, s390_r13, ins->inst_imm);
} else {
s390_lgfi (code, s390_r13, ins->inst_imm);
}
s390_lgfr (code, s390_r0, ins->sreg1);
s390_srda (code, s390_r0, 0, 32);
				s390_dr (code, s390_r0, s390_r13);
s390_lgfr (code, ins->dreg, s390_r0);
}
break;
case OP_IOR: {
if (mono_hwcap_s390x_has_mlt) {
s390_ogrk (code, ins->dreg, ins->sreg1, ins->sreg2);
} else {
CHECK_SRCDST_COM_I;
s390_ogr (code, ins->dreg, src2);
}
}
break;
case OP_IOR_IMM: {
S390_SET_MASK (code, s390_r0, ins->inst_imm);
if (mono_hwcap_s390x_has_mlt) {
s390_ogrk (code, ins->dreg, ins->sreg1, s390_r0);
} else {
if (ins->dreg != ins->sreg1) {
s390_lgfr (code, ins->dreg, ins->sreg1);
}
s390_ogr (code, ins->dreg, s390_r0);
}
}
break;
case OP_IXOR: {
if (mono_hwcap_s390x_has_mlt) {
s390_xgrk (code, ins->dreg, ins->sreg1, ins->sreg2);
} else {
CHECK_SRCDST_COM_I;
s390_xgr (code, ins->dreg, src2);
}
}
break;
case OP_IXOR_IMM: {
S390_SET_MASK (code, s390_r0, ins->inst_imm);
if (mono_hwcap_s390x_has_mlt) {
s390_xgrk (code, ins->dreg, ins->sreg1, s390_r0);
} else {
if (ins->dreg != ins->sreg1) {
s390_lgfr (code, ins->dreg, ins->sreg1);
}
s390_xgr (code, ins->dreg, s390_r0);
}
}
break;
case OP_ISHL: {
CHECK_SRCDST_NCOM;
s390_sll (code, ins->dreg, src2, 0);
}
break;
case OP_ISHL_IMM: {
if (ins->sreg1 != ins->dreg) {
s390_lgfr (code, ins->dreg, ins->sreg1);
}
s390_sll (code, ins->dreg, 0, (ins->inst_imm & 0x1f));
}
break;
case OP_ISHR: {
CHECK_SRCDST_NCOM;
s390_sra (code, ins->dreg, src2, 0);
}
break;
case OP_ISHR_IMM: {
if (ins->sreg1 != ins->dreg) {
s390_lgfr (code, ins->dreg, ins->sreg1);
}
s390_sra (code, ins->dreg, 0, (ins->inst_imm & 0x1f));
}
break;
case OP_ISHR_UN_IMM: {
if (ins->sreg1 != ins->dreg) {
s390_lgfr (code, ins->dreg, ins->sreg1);
}
s390_srl (code, ins->dreg, 0, (ins->inst_imm & 0x1f));
}
break;
case OP_ISHR_UN: {
CHECK_SRCDST_NCOM;
s390_srl (code, ins->dreg, src2, 0);
}
break;
case OP_INOT: {
if (ins->sreg1 != ins->dreg) {
s390_lgfr (code, ins->dreg, ins->sreg1);
}
s390_lghi (code, s390_r0, -1);
s390_xgr (code, ins->dreg, s390_r0);
}
break;
case OP_INEG: {
s390_lcgr (code, ins->dreg, ins->sreg1);
}
break;
case OP_IMUL: {
CHECK_SRCDST_COM_I;
s390_msr (code, ins->dreg, src2);
}
break;
case OP_IMUL_IMM: {
if (ins->dreg != ins->sreg1) {
s390_lgfr (code, ins->dreg, ins->sreg1);
}
if (s390_is_imm16 (ins->inst_imm)) {
s390_lghi (code, s390_r0, ins->inst_imm);
} else {
s390_lgfi (code, s390_r0, ins->inst_imm);
}
s390_msr (code, ins->dreg, s390_r0);
}
break;
case OP_IMUL_OVF: {
short int *o[2];
if (mono_hwcap_s390x_has_mie2) {
s390_msrkc (code, ins->dreg, ins->sreg1, ins->sreg2);
EMIT_COND_SYSTEM_EXCEPTION (S390_CC_OV, "OverflowException");
s390_lgfr (code, ins->dreg, ins->dreg);
} else {
s390_ltr (code, s390_r1, ins->sreg1);
s390_jz (code, 0); CODEPTR(code, o[0]);
s390_ltr (code, s390_r0, ins->sreg2);
s390_jnz (code, 6);
s390_lhi (code, s390_r1, 0);
s390_j (code, 0); CODEPTR(code, o[1]);
s390_xr (code, s390_r0, s390_r1);
s390_msr (code, s390_r1, ins->sreg2);
s390_xr (code, s390_r0, s390_r1);
s390_srl (code, s390_r0, 0, 31);
s390_ltr (code, s390_r0, s390_r0);
EMIT_COND_SYSTEM_EXCEPTION (S390_CC_NZ, "OverflowException");
PTRSLOT (code, o[0]);
PTRSLOT (code, o[1]);
s390_lgfr (code, ins->dreg, s390_r1);
}
}
break;
case OP_IMUL_OVF_UN: {
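			/* mlr yields a 64-bit product in r0:r1; a nonzero high word means unsigned overflow */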
s390_lhi (code, s390_r0, 0);
s390_lr (code, s390_r1, ins->sreg1);
s390_mlr (code, s390_r0, ins->sreg2);
s390_ltr (code, s390_r0, s390_r0);
EMIT_COND_SYSTEM_EXCEPTION (S390_CC_NZ, "OverflowException");
s390_lgfr (code, ins->dreg, s390_r1);
}
break;
case OP_ICONST:
case OP_I8CONST: {
S390_SET (code, ins->dreg, ins->inst_c0);
}
break;
case OP_AOTCONST: {
mono_add_patch_info (cfg, code - cfg->native_code,
(MonoJumpInfoType)ins->inst_i1, ins->inst_p0);
S390_LOAD_TEMPLATE (code, ins->dreg);
}
break;
case OP_JUMP_TABLE: {
mono_add_patch_info (cfg, code - cfg->native_code,
(MonoJumpInfoType)ins->inst_i1, ins->inst_p0);
S390_LOAD_TEMPLATE (code, ins->dreg);
}
break;
case OP_MOVE:
if (ins->dreg != ins->sreg1) {
s390_lgr (code, ins->dreg, ins->sreg1);
}
break;
case OP_LCONV_TO_I:
case OP_LCONV_TO_I8:
case OP_SEXT_I4:
s390_lgfr (code, ins->dreg, ins->sreg1);
break;
case OP_LCONV_TO_I4:
s390_lgfr (code, ins->dreg, ins->sreg1);
break;
case OP_LCONV_TO_U:
case OP_LCONV_TO_U8:
case OP_LCONV_TO_U4:
case OP_ZEXT_I4:
s390_llgfr (code, ins->dreg, ins->sreg1);
break;
case OP_LCONV_TO_OVF_U4:
S390_SET (code, s390_r0, 4294967295);
s390_clgr (code, ins->sreg1, s390_r0);
EMIT_COND_SYSTEM_EXCEPTION (S390_CC_GT, "OverflowException");
s390_ltgr (code, ins->sreg1, ins->sreg1);
EMIT_COND_SYSTEM_EXCEPTION (S390_CC_LT, "OverflowException");
s390_llgfr(code, ins->dreg, ins->sreg1);
break;
case OP_LCONV_TO_OVF_I4_UN:
S390_SET (code, s390_r0, 2147483647);
s390_cgr (code, ins->sreg1, s390_r0);
EMIT_COND_SYSTEM_EXCEPTION (S390_CC_GT, "OverflowException");
s390_ltgr (code, ins->sreg1, ins->sreg1);
EMIT_COND_SYSTEM_EXCEPTION (S390_CC_LT, "OverflowException");
s390_lgfr (code, ins->dreg, ins->sreg1);
break;
case OP_RCONV_TO_R4:
if (ins->dreg != ins->sreg1)
s390_ler (code, ins->dreg, ins->sreg1);
break;
case OP_RCONV_TO_R8:
s390_ldebr (code, ins->dreg, ins->sreg1);
break;
case OP_FMOVE:
if (ins->dreg != ins->sreg1)
s390_ldr (code, ins->dreg, ins->sreg1);
break;
case OP_RMOVE:
if (ins->dreg != ins->sreg1)
s390_ldr (code, ins->dreg, ins->sreg1);
break;
case OP_MOVE_F_TO_I8:
s390_lgdr (code, ins->dreg, ins->sreg1);
break;
case OP_MOVE_I8_TO_F:
s390_ldgr (code, ins->dreg, ins->sreg1);
break;
case OP_MOVE_F_TO_I4:
s390_lgdr (code, ins->dreg, ins->sreg1);
s390_srag (code, ins->dreg, ins->dreg, 0, 32);
break;
case OP_MOVE_I4_TO_F:
s390_slag (code, s390_r0, ins->sreg1, 0, 32);
s390_ldgr (code, ins->dreg, s390_r0);
break;
case OP_FCONV_TO_R4:
s390_ledbr (code, ins->dreg, ins->sreg1);
break;
case OP_S390_SETF4RET:
s390_ldr (code, ins->dreg, ins->sreg1);
break;
case OP_TLS_GET: {
if (s390_is_imm16 (ins->inst_offset)) {
s390_lghi (code, s390_r13, ins->inst_offset);
} else if (s390_is_imm32 (ins->inst_offset)) {
s390_lgfi (code, s390_r13, ins->inst_offset);
} else {
S390_SET (code, s390_r13, ins->inst_offset);
}
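			/*
			 * ear only writes bits 32-63 of the register, so access
			 * register a0 (high half of the thread pointer) is shifted
			 * up before a1 (low half) is inserted
			 */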
s390_ear (code, s390_r1, 0);
s390_sllg(code, s390_r1, s390_r1, 0, 32);
s390_ear (code, s390_r1, 1);
s390_lg (code, ins->dreg, s390_r13, s390_r1, 0);
}
break;
case OP_TLS_SET: {
if (s390_is_imm16 (ins->inst_offset)) {
s390_lghi (code, s390_r13, ins->inst_offset);
} else if (s390_is_imm32 (ins->inst_offset)) {
s390_lgfi (code, s390_r13, ins->inst_offset);
} else {
S390_SET (code, s390_r13, ins->inst_offset);
}
s390_ear (code, s390_r1, 0);
s390_sllg(code, s390_r1, s390_r1, 0, 32);
s390_ear (code, s390_r1, 1);
s390_stg (code, ins->sreg1, s390_r13, s390_r1, 0);
}
break;
case OP_TAILCALL_PARAMETER :
// This opcode helps compute sizes, i.e.
// of the subsequent OP_TAILCALL, but contributes no code.
g_assert (ins->next);
break;
case OP_TAILCALL :
case OP_TAILCALL_REG :
case OP_TAILCALL_MEMBASE : {
call = (MonoCallInst *) ins;
/*
* Restore SP to caller's SP
*/
code = backUpStackPtr(cfg, code);
/*
* If the destination is specified as a register or membase then
* save destination so it doesn't get overwritten by the restores
*/
if (ins->opcode != OP_TAILCALL)
s390_lgr (code, s390_r1, ins->sreg1);
/*
* We have to restore R6, so it cannot be used as argument register.
* This is ensured by mono_arch_tailcall_supported, but verify here.
*/
g_assert (!(call->used_iregs & (1 << S390_LAST_ARG_REG)));
/*
* Likewise for the IMT/RGCTX register
*/
g_assert (!(call->used_iregs & (1 << MONO_ARCH_RGCTX_REG)));
g_assert (!(call->rgctx_reg));
/*
* Restore all general registers
*/
s390_lmg (code, s390_r6, s390_r14, STK_BASE, S390_REG_SAVE_OFFSET);
/*
* Restore any FP registers that have been altered
*/
if (cfg->arch.fpSize != 0) {
int fpOffset = -cfg->arch.fpSize;
for (int i = 8; i < 16; i++) {
if (cfg->arch.used_fp_regs & (1 << i)) {
s390_ldy (code, i, 0, STK_BASE, fpOffset);
fpOffset += sizeof(double);
}
}
}
if (ins->opcode == OP_TAILCALL_REG) {
s390_br (code, s390_r1);
} else {
if (ins->opcode == OP_TAILCALL_MEMBASE) {
if (mono_hwcap_s390x_has_mie2) {
s390_bi (code, 0, s390_r1, ins->inst_offset);
} else {
s390_lg (code, s390_r1, 0, s390_r1, ins->inst_offset);
s390_br (code, s390_r1);
}
} else {
mono_add_patch_info_rel (cfg, code - cfg->native_code,
MONO_PATCH_INFO_METHOD_JUMP,
call->method, MONO_R_S390_THUNKED);
S390_BR_TEMPLATE (code, s390_r1);
cfg->thunk_area += THUNK_SIZE;
}
}
}
break;
case OP_CHECK_THIS: {
/* ensure ins->sreg1 is not NULL */
s390_lg (code, s390_r0, 0, ins->sreg1, 0);
s390_ltgr (code, s390_r0, s390_r0);
}
break;
case OP_ARGLIST: {
const int offset = cfg->sig_cookie + cfg->stack_usage;
S390_SET (code, s390_r0, offset);
s390_agr (code, s390_r0, cfg->frame_reg);
s390_stg (code, s390_r0, 0, ins->sreg1, 0);
}
break;
case OP_FCALL: {
call = (MonoCallInst *) ins;
const MonoJumpInfoTarget patch = mono_call_to_patch (call);
code = emit_call (cfg, code, patch.type, patch.target);
}
break;
case OP_RCALL: {
call = (MonoCallInst *) ins;
const MonoJumpInfoTarget patch = mono_call_to_patch (call);
code = emit_call (cfg, code, patch.type, patch.target);
if (ins->dreg != s390_f0)
s390_ldr (code, ins->dreg, s390_f0);
break;
}
case OP_LCALL:
case OP_VCALL:
case OP_VCALL2:
case OP_VOIDCALL:
case OP_CALL: {
call = (MonoCallInst *) ins;
const MonoJumpInfoTarget patch = mono_call_to_patch (call);
code = emit_call (cfg, code, patch.type, patch.target);
}
break;
case OP_FCALL_REG:
call = (MonoCallInst*)ins;
s390_lgr (code, s390_r1, ins->sreg1);
s390_basr (code, s390_r14, s390_r1);
break;
case OP_RCALL_REG:
call = (MonoCallInst*)ins;
s390_lgr (code, s390_r1, ins->sreg1);
s390_basr (code, s390_r14, s390_r1);
if (ins->dreg != s390_f0)
s390_ldr (code, ins->dreg, s390_f0);
break;
case OP_LCALL_REG:
case OP_VCALL_REG:
case OP_VCALL2_REG:
case OP_VOIDCALL_REG:
case OP_CALL_REG: {
s390_lgr (code, s390_r1, ins->sreg1);
s390_basr (code, s390_r14, s390_r1);
}
break;
case OP_FCALL_MEMBASE:
call = (MonoCallInst*)ins;
s390_lg (code, s390_r1, 0, ins->sreg1, ins->inst_offset);
s390_basr (code, s390_r14, s390_r1);
break;
case OP_RCALL_MEMBASE:
call = (MonoCallInst*)ins;
s390_lg (code, s390_r1, 0, ins->sreg1, ins->inst_offset);
s390_basr (code, s390_r14, s390_r1);
if (ins->dreg != s390_f0)
s390_ldr (code, ins->dreg, s390_f0);
break;
case OP_LCALL_MEMBASE:
case OP_VCALL_MEMBASE:
case OP_VCALL2_MEMBASE:
case OP_VOIDCALL_MEMBASE:
case OP_CALL_MEMBASE: {
s390_lg (code, s390_r1, 0, ins->sreg1, ins->inst_offset);
s390_basr (code, s390_r14, s390_r1);
}
break;
case OP_LOCALLOC: {
int area_offset;
if (cfg->param_area == 0)
area_offset = S390_MINIMAL_STACK_SIZE;
else
area_offset = cfg->param_area;
area_offset = S390_ALIGN(area_offset, S390_STACK_ALIGNMENT);
/* Get current backchain pointer */
s390_lg (code, s390_r13, 0, STK_BASE, 0);
/*
* Round object size to doubleword
*/
s390_lgr (code, s390_r1, ins->sreg1);
s390_aghi (code, s390_r1, 7);
s390_srlg (code, s390_r1, s390_r1, 0, 3);
s390_sllg (code, s390_r1, s390_r1, 0, 3);
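			/*
			 * Grow the stack one 4K page at a time, storing to each new
			 * page so that guard pages are touched in order
			 */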
if (mono_hwcap_s390x_has_gie) {
if (ins->flags & MONO_INST_INIT)
s390_lgr (code, s390_r0, s390_r1);
s390_risbg (code, ins->dreg, s390_r1, 0, 0xb3, 0);
s390_sgrk (code, ins->dreg, STK_BASE, ins->dreg);
s390_cgr (code, STK_BASE, ins->dreg); /* L0: */
s390_je (code, 9); /* je L1 */
s390_aghi (code, STK_BASE, -4096);
s390_mvghi (code, s390_r15, 0, 0);
s390_j (code, -9); /* j L0 */
s390_risbg (code, ins->dreg, s390_r1, 0x34, 0xbf, 0); /* L1: */
s390_ltgr (code, ins->dreg, ins->dreg);
s390_jz (code, 13); /* jz L2: */
s390_sgr (code, STK_BASE, ins->dreg);
s390_risbg (code, s390_r1, s390_r1, 0x34, 0xbf, 0);
s390_lay (code, s390_r1, s390_r1, STK_BASE, -8);
s390_mvghi (code, s390_r1, 0, 0);
/* L2: */
} else {
s390_lgr (code, ins->dreg, s390_r1);
s390_nill (code, ins->dreg, 0xf000);
s390_lgr (code, s390_r0, STK_BASE);
s390_sgr (code, s390_r0, ins->dreg);
s390_lgr (code, ins->dreg, s390_r0);
s390_cgr (code, STK_BASE, ins->dreg); /* L0: */
s390_je (code, 11); /* je L1 */
s390_aghi (code, STK_BASE, -4096);
s390_lghi (code, s390_r0, 0);
s390_stg (code, s390_r0, 0, STK_BASE, 4088);
s390_j (code, -11); /* j L0 */
s390_lghi (code, ins->dreg, 4095); /* L1: */
s390_ngr (code, ins->dreg, s390_r1);
s390_ltgr (code, ins->dreg, ins->dreg);
s390_jz (code, 7); /* jz L2 */
s390_sgr (code, STK_BASE, ins->dreg);
s390_stg (code, ins->dreg, s390_r1, STK_BASE, -8);
/* L2: */
if (ins->flags & MONO_INST_INIT)
s390_lgr (code, s390_r0, s390_r1);
}
/*
* Compute address of localloc'd object
*/
s390_lgr (code, s390_r1, STK_BASE);
if (s390_is_imm16(area_offset))
s390_aghi (code, s390_r1, area_offset);
else
s390_agfi (code, s390_r1, area_offset);
s390_aghi (code, s390_r1, 7);
s390_srlg (code, s390_r1, s390_r1, 0, 3);
s390_sllg (code, s390_r1, s390_r1, 0, 3);
s390_lgr (code, ins->dreg, s390_r1);
/* Save backchain pointer */
s390_stg (code, s390_r13, 0, STK_BASE, 0);
/*
* If we need to zero the area then clear from localloc start
* using the length we saved earlier
*/
if (ins->flags & MONO_INST_INIT) {
s390_lgr (code, s390_r1, s390_r0);
s390_lgr (code, s390_r0, ins->dreg);
s390_lgr (code, s390_r14, s390_r12);
s390_lghi (code, s390_r13, 0);
s390_mvcle(code, s390_r0, s390_r12, 0, 0);
s390_jo (code, -2);
s390_lgr (code, s390_r12, s390_r14);
}
/*
* If we have an LMF then we have to adjust its BP
*/
if (cfg->method->save_lmf) {
int lmfOffset = cfg->stack_usage - sizeof(MonoLMF);
if (s390_is_imm16(lmfOffset)) {
s390_lghi (code, s390_r13, lmfOffset);
} else if (s390_is_imm32(lmfOffset)) {
s390_lgfi (code, s390_r13, lmfOffset);
} else {
S390_SET (code, s390_r13, lmfOffset);
}
s390_stg (code, s390_r15, s390_r13, cfg->frame_reg,
MONO_STRUCT_OFFSET(MonoLMF, ebp));
}
}
break;
case OP_THROW: {
s390_lgr (code, s390_r2, ins->sreg1);
code = emit_call (cfg, code, MONO_PATCH_INFO_JIT_ICALL_ID,
GUINT_TO_POINTER (MONO_JIT_ICALL_mono_arch_throw_exception));
}
break;
case OP_RETHROW: {
s390_lgr (code, s390_r2, ins->sreg1);
code = emit_call (cfg, code, MONO_PATCH_INFO_JIT_ICALL_ID,
GUINT_TO_POINTER (MONO_JIT_ICALL_mono_arch_rethrow_exception));
}
break;
case OP_START_HANDLER: {
MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
S390_LONG (code, stg, stg, s390_r14, 0,
spvar->inst_basereg,
spvar->inst_offset);
}
break;
case OP_ENDFILTER: {
MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
if (ins->sreg1 != s390_r2)
s390_lgr(code, s390_r2, ins->sreg1);
S390_LONG (code, lg, lg, s390_r14, 0,
spvar->inst_basereg,
spvar->inst_offset);
s390_br (code, s390_r14);
}
break;
case OP_ENDFINALLY: {
MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
S390_LONG (code, lg, lg, s390_r14, 0,
spvar->inst_basereg,
spvar->inst_offset);
s390_br (code, s390_r14);
}
break;
case OP_CALL_HANDLER: {
mono_add_patch_info_rel (cfg, code-cfg->native_code,
MONO_PATCH_INFO_BB, ins->inst_target_bb,
MONO_R_S390_DIRECT);
s390_brasl (code, s390_r14, 0);
for (GList *tmp = ins->inst_eh_blocks; tmp != bb->clause_holes; tmp = tmp->prev)
mono_cfg_add_try_hole (cfg, ((MonoLeaveClause *) tmp->data)->clause, code, bb);
}
break;
case OP_LABEL: {
ins->inst_c0 = code - cfg->native_code;
}
break;
case OP_RELAXED_NOP:
case OP_NOP:
case OP_DUMMY_USE:
case OP_DUMMY_ICONST:
case OP_DUMMY_I8CONST:
case OP_DUMMY_R8CONST:
case OP_DUMMY_R4CONST:
case OP_NOT_REACHED:
case OP_NOT_NULL: {
}
break;
case OP_IL_SEQ_POINT:
mono_add_seq_point (cfg, bb, ins, code - cfg->native_code);
break;
case OP_SEQ_POINT: {
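		/*
		 * Single-step check: load the ss trampoline pointer and call it
		 * only when it is non-NULL; the breakpoint site below follows
		 * the same pattern with the bp trampoline
		 */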
MonoInst *var;
RI_Format *o[2];
guint16 displace;
if (cfg->compile_aot)
NOT_IMPLEMENTED;
if (ins->flags & MONO_INST_SINGLE_STEP_LOC) {
var = cfg->arch.ss_tramp_var;
s390_lg (code, s390_r1, 0, var->inst_basereg, var->inst_offset);
if (mono_hwcap_s390x_has_eif) {
s390_ltg (code, s390_r14, 0, s390_r1, 0);
} else {
s390_lg (code, s390_r14, 0, s390_r1, 0);
s390_ltgr (code, s390_r14, s390_r14);
}
o[0] = (RI_Format *) code;
s390_jz (code, 4);
s390_lgr (code, s390_r1, cfg->frame_reg);
s390_basr (code, s390_r14, s390_r14);
displace = ((uintptr_t) code - (uintptr_t) o[0]) / 2;
o[0]->i2 = displace;
}
		/*
		 * This is the address which is saved in seq points.
		 */
mono_add_seq_point (cfg, bb, ins, code - cfg->native_code);
var = cfg->arch.bp_tramp_var;
s390_lghi (code, s390_r1, 0);
s390_ltgr (code, s390_r1, s390_r1);
o[0] = (RI_Format *) code;
s390_jz (code, 0);
s390_lg (code, s390_r1, 0, var->inst_basereg, var->inst_offset);
if (mono_hwcap_s390x_has_eif) {
s390_ltg (code, s390_r14, 0, s390_r1, 0);
} else {
s390_lg (code, s390_r1, 0, s390_r1, 0);
s390_ltgr (code, s390_r14, s390_r1);
}
o[1] = (RI_Format *) code;
s390_jz (code, 4);
s390_lgr (code, s390_r1, cfg->frame_reg);
s390_basr (code, s390_r14, s390_r14);
displace = ((uintptr_t) code - (uintptr_t) o[0]) / 2;
o[0]->i2 = displace;
displace = ((uintptr_t) code - (uintptr_t) o[1]) / 2;
o[1]->i2 = displace;
/*
* Add an additional nop so skipping the bp doesn't cause the ip to point
* to another IL offset.
*/
s390_nop (code);
break;
}
case OP_GENERIC_CLASS_INIT: {
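		/* Test the vtable's initialized bit; call the init icall only when it is still clear */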
static int byte_offset = -1;
static guint8 bitmask;
short int *jump;
g_assert (ins->sreg1 == S390_FIRST_ARG_REG);
if (byte_offset < 0)
mono_marshal_find_bitfield_offset (MonoVTable, initialized, &byte_offset, &bitmask);
s390_tm (code, ins->sreg1, byte_offset, bitmask);
s390_jo (code, 0); CODEPTR(code, jump);
code = emit_call (cfg, code, MONO_PATCH_INFO_JIT_ICALL_ID,
GUINT_TO_POINTER (MONO_JIT_ICALL_mono_generic_class_init));
PTRSLOT (code, jump);
ins->flags |= MONO_INST_GC_CALLSITE;
ins->backend.pc_offset = code - cfg->native_code;
break;
}
case OP_BR:
EMIT_UNCOND_BRANCH(ins);
break;
case OP_BR_REG: {
s390_br (code, ins->sreg1);
}
break;
case OP_CEQ:
case OP_ICEQ:
case OP_LCEQ: {
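		/* Materialize the condition code as a boolean: preload 1, then branch past the clear when the condition holds */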
s390_lghi(code, ins->dreg, 1);
s390_jz (code, 4);
s390_lghi(code, ins->dreg, 0);
}
break;
case OP_CLT:
case OP_ICLT:
case OP_LCLT: {
s390_lghi(code, ins->dreg, 1);
s390_jl (code, 4);
s390_lghi(code, ins->dreg, 0);
}
break;
case OP_CLT_UN:
case OP_ICLT_UN:
case OP_LCLT_UN: {
s390_lghi(code, ins->dreg, 1);
s390_jlo (code, 4);
s390_lghi(code, ins->dreg, 0);
}
break;
case OP_CGT:
case OP_ICGT:
case OP_LCGT: {
s390_lghi(code, ins->dreg, 1);
s390_jh (code, 4);
s390_lghi(code, ins->dreg, 0);
}
break;
case OP_CGT_UN:
case OP_ICGT_UN:
case OP_LCGT_UN: {
s390_lghi(code, ins->dreg, 1);
s390_jho (code, 4);
s390_lghi(code, ins->dreg, 0);
}
break;
case OP_ICNEQ: {
s390_lghi(code, ins->dreg, 1);
s390_jne (code, 4);
s390_lghi(code, ins->dreg, 0);
}
break;
case OP_ICGE: {
s390_lghi(code, ins->dreg, 1);
s390_jhe (code, 4);
s390_lghi(code, ins->dreg, 0);
}
break;
case OP_ICLE: {
s390_lghi(code, ins->dreg, 1);
s390_jle (code, 4);
s390_lghi(code, ins->dreg, 0);
}
break;
case OP_ICGE_UN: {
s390_lghi(code, ins->dreg, 1);
s390_jhe (code, 4);
s390_lghi(code, ins->dreg, 0);
}
break;
case OP_ICLE_UN: {
s390_lghi(code, ins->dreg, 1);
s390_jle (code, 4);
s390_lghi(code, ins->dreg, 0);
}
break;
case OP_COND_EXC_EQ:
case OP_COND_EXC_IEQ:
EMIT_COND_SYSTEM_EXCEPTION (S390_CC_EQ, ins->inst_p1);
break;
case OP_COND_EXC_NE_UN:
case OP_COND_EXC_INE_UN:
EMIT_COND_SYSTEM_EXCEPTION (S390_CC_NE, ins->inst_p1);
break;
case OP_COND_EXC_LT:
case OP_COND_EXC_ILT:
case OP_COND_EXC_LT_UN:
case OP_COND_EXC_ILT_UN:
EMIT_COND_SYSTEM_EXCEPTION (S390_CC_LT, ins->inst_p1);
break;
case OP_COND_EXC_GT:
case OP_COND_EXC_IGT:
case OP_COND_EXC_GT_UN:
case OP_COND_EXC_IGT_UN:
EMIT_COND_SYSTEM_EXCEPTION (S390_CC_GT, ins->inst_p1);
break;
case OP_COND_EXC_GE:
case OP_COND_EXC_IGE:
case OP_COND_EXC_GE_UN:
case OP_COND_EXC_IGE_UN:
EMIT_COND_SYSTEM_EXCEPTION (S390_CC_GE, ins->inst_p1);
break;
case OP_COND_EXC_LE:
case OP_COND_EXC_ILE:
case OP_COND_EXC_LE_UN:
case OP_COND_EXC_ILE_UN:
EMIT_COND_SYSTEM_EXCEPTION (S390_CC_LE, ins->inst_p1);
break;
case OP_COND_EXC_OV:
case OP_COND_EXC_IOV:
EMIT_COND_SYSTEM_EXCEPTION (S390_CC_OV, ins->inst_p1);
break;
case OP_COND_EXC_NO:
case OP_COND_EXC_INO:
EMIT_COND_SYSTEM_EXCEPTION (S390_CC_NO, ins->inst_p1);
break;
case OP_COND_EXC_C:
case OP_COND_EXC_IC:
EMIT_COND_SYSTEM_EXCEPTION (S390_CC_CY, ins->inst_p1);
break;
case OP_COND_EXC_NC:
case OP_COND_EXC_INC:
EMIT_COND_SYSTEM_EXCEPTION (S390_CC_NC, ins->inst_p1);
break;
case OP_LBEQ:
case OP_IBEQ:
EMIT_COND_BRANCH (ins, S390_CC_EQ);
break;
case OP_LBNE_UN:
case OP_IBNE_UN:
EMIT_COND_BRANCH (ins, S390_CC_NE);
break;
case OP_LBLT:
case OP_LBLT_UN:
case OP_IBLT:
case OP_IBLT_UN:
EMIT_COND_BRANCH (ins, S390_CC_LT);
break;
case OP_LBGT:
case OP_LBGT_UN:
case OP_IBGT:
case OP_IBGT_UN:
EMIT_COND_BRANCH (ins, S390_CC_GT);
break;
case OP_LBGE:
case OP_LBGE_UN:
case OP_IBGE:
case OP_IBGE_UN:
EMIT_COND_BRANCH (ins, S390_CC_GE);
break;
case OP_LBLE:
case OP_LBLE_UN:
case OP_IBLE:
case OP_IBLE_UN:
EMIT_COND_BRANCH (ins, S390_CC_LE);
break;
case OP_S390_CRJ:
EMIT_COMP_AND_BRANCH(ins, crj, cr);
break;
case OP_S390_CLRJ:
EMIT_COMP_AND_BRANCH(ins, clrj, clr);
break;
case OP_S390_CGRJ:
EMIT_COMP_AND_BRANCH(ins, cgrj, cgr);
break;
case OP_S390_CLGRJ:
EMIT_COMP_AND_BRANCH(ins, clgrj, clgr);
break;
case OP_S390_CIJ:
EMIT_COMP_AND_BRANCH_IMM(ins, crj, cr, ltr, FALSE);
break;
case OP_S390_CLIJ:
EMIT_COMP_AND_BRANCH_IMM(ins, clrj, clr, ltr, TRUE);
break;
case OP_S390_CGIJ:
EMIT_COMP_AND_BRANCH_IMM(ins, cgrj, cgr, ltgr, FALSE);
break;
case OP_S390_CLGIJ:
EMIT_COMP_AND_BRANCH_IMM(ins, clgrj, clgr, ltgr, TRUE);
break;
/* floating point opcodes */
case OP_R8CONST: {
double d = *(double *) ins->inst_p0;
if (d == 0) {
s390_lzdr (code, ins->dreg);
if (mono_signbit (d) != 0)
s390_lndbr (code, ins->dreg, ins->dreg);
} else {
S390_SET (code, s390_r13, ins->inst_p0);
s390_ld (code, ins->dreg, 0, s390_r13, 0);
}
}
break;
case OP_R4CONST: {
float f = *(float *) ins->inst_p0;
if (f == 0) {
s390_lzer (code, ins->dreg);
if (mono_signbit (f) != 0)
s390_lnebr (code, ins->dreg, ins->dreg);
} else {
S390_SET (code, s390_r13, ins->inst_p0);
			s390_le (code, ins->dreg, 0, s390_r13, 0);
}
}
break;
case OP_STORER8_MEMBASE_REG: {
S390_LONG (code, stdy, std, ins->sreg1, 0,
ins->inst_destbasereg, ins->inst_offset);
}
break;
case OP_LOADR8_MEMBASE: {
S390_LONG (code, ldy, ld, ins->dreg, 0,
ins->inst_basereg, ins->inst_offset);
}
break;
case OP_STORER4_MEMBASE_REG: {
S390_LONG (code, stey, ste, ins->sreg1, 0,
ins->inst_destbasereg, ins->inst_offset);
}
break;
case OP_LOADR4_MEMBASE: {
S390_LONG (code, ley, le, ins->dreg, 0,
ins->inst_basereg, ins->inst_offset);
}
break;
case OP_ICONV_TO_R_UN: {
if (mono_hwcap_s390x_has_fpe) {
s390_cdlfbr (code, ins->dreg, 5, ins->sreg1, 0);
} else {
s390_llgfr (code, s390_r0, ins->sreg1);
s390_cdgbr (code, ins->dreg, s390_r0);
}
}
break;
case OP_LCONV_TO_R_UN: {
if (mono_hwcap_s390x_has_fpe) {
s390_cdlgbr (code, ins->dreg, 6, ins->sreg1, 0);
} else {
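			/*
			 * No FP-extension facility: convert as signed via extended
			 * precision, then add 2^64 (the 0x403f... constant) when the
			 * input's sign bit was set to obtain the unsigned value
			 */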
short int *jump;
s390_lgdr (code, s390_r0, s390_r15);
s390_lgdr (code, s390_r1, s390_r13);
s390_lgdr (code, s390_r14, s390_r12);
s390_cxgbr (code, s390_f12, ins->sreg1);
s390_ltgr (code, ins->sreg1, ins->sreg1);
s390_jnl (code, 0); CODEPTR(code, jump);
S390_SET (code, s390_r13, 0x403f000000000000llu);
s390_lgdr (code, s390_f13, s390_r13);
s390_lzdr (code, s390_f15);
s390_axbr (code, s390_f12, s390_f13);
PTRSLOT(code, jump);
s390_ldxbr (code, s390_f13, s390_f12);
s390_ldr (code, ins->dreg, s390_f13);
s390_ldgr (code, s390_f12, s390_r14);
s390_ldgr (code, s390_f13, s390_r1);
s390_ldgr (code, s390_f15, s390_r0);
}
}
break;
case OP_ICONV_TO_R4:
s390_cefbr (code, ins->dreg, ins->sreg1);
break;
case OP_LCONV_TO_R4:
s390_cegbr (code, ins->dreg, ins->sreg1);
break;
case OP_ICONV_TO_R8:
s390_cdfbr (code, ins->dreg, ins->sreg1);
break;
case OP_LCONV_TO_R8:
s390_cdgbr (code, ins->dreg, ins->sreg1);
break;
case OP_FCONV_TO_I1:
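		/* Convert rounding toward zero, force the low byte's sign bit for negative results, then mask to 8 bits */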
s390_cgdbr (code, ins->dreg, 5, ins->sreg1);
s390_ltgr (code, ins->dreg, ins->dreg);
s390_jnl (code, 4);
s390_oill (code, ins->dreg, 0x80);
s390_lghi (code, s390_r0, 0xff);
s390_ngr (code, ins->dreg, s390_r0);
break;
case OP_FCONV_TO_U1:
if (mono_hwcap_s390x_has_fpe) {
s390_clgdbr (code, ins->dreg, 5, ins->sreg1, 0);
s390_lghi (code, s390_r0, 0xff);
s390_ngr (code, ins->dreg, s390_r0);
} else {
code = emit_double_to_int (cfg, code, ins->dreg, ins->sreg1, 1, FALSE);
}
break;
case OP_FCONV_TO_I2:
s390_cgdbr (code, ins->dreg, 5, ins->sreg1);
s390_ltgr (code, ins->dreg, ins->dreg);
s390_jnl (code, 4);
s390_oill (code, ins->dreg, 0x8000);
s390_llill (code, s390_r0, 0xffff);
s390_ngr (code, ins->dreg, s390_r0);
break;
case OP_FCONV_TO_U2:
if (mono_hwcap_s390x_has_fpe) {
s390_clgdbr (code, ins->dreg, 5, ins->sreg1, 0);
s390_llill (code, s390_r0, 0xffff);
s390_ngr (code, ins->dreg, s390_r0);
} else {
code = emit_double_to_int (cfg, code, ins->dreg, ins->sreg1, 2, FALSE);
}
break;
case OP_FCONV_TO_I4:
s390_cfdbr (code, ins->dreg, 5, ins->sreg1);
break;
case OP_FCONV_TO_U4:
if (mono_hwcap_s390x_has_fpe) {
s390_clgdbr (code, ins->dreg, 5, ins->sreg1, 0);
} else {
code = emit_double_to_int (cfg, code, ins->dreg, ins->sreg1, 4, FALSE);
}
break;
case OP_FCONV_TO_I8:
case OP_FCONV_TO_I:
s390_cgdbr (code, ins->dreg, 5, ins->sreg1);
break;
case OP_FCONV_TO_U8:
if (mono_hwcap_s390x_has_fpe) {
s390_clgdbr (code, ins->dreg, 5, ins->sreg1, 0);
} else {
code = emit_double_to_int (cfg, code, ins->dreg, ins->sreg1, 8, FALSE);
}
break;
case OP_RCONV_TO_I1:
s390_cgebr (code, ins->dreg, 5, ins->sreg1);
s390_ltgr (code, ins->dreg, ins->dreg);
s390_jnl (code, 4);
s390_oill (code, ins->dreg, 0x80);
s390_lghi (code, s390_r0, 0xff);
s390_ngr (code, ins->dreg, s390_r0);
break;
case OP_RCONV_TO_U1:
if (mono_hwcap_s390x_has_fpe) {
s390_clgebr (code, ins->dreg, 5, ins->sreg1, 0);
s390_lghi (code, s390_r0, 0xff);
s390_ngr (code, ins->dreg, s390_r0);
} else {
code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 1, FALSE);
}
break;
case OP_RCONV_TO_I2:
s390_cgebr (code, ins->dreg, 5, ins->sreg1);
s390_ltgr (code, ins->dreg, ins->dreg);
s390_jnl (code, 4);
s390_oill (code, ins->dreg, 0x8000);
s390_llill (code, s390_r0, 0xffff);
s390_ngr (code, ins->dreg, s390_r0);
break;
case OP_RCONV_TO_U2:
if (mono_hwcap_s390x_has_fpe) {
s390_clgebr (code, ins->dreg, 5, ins->sreg1, 0);
s390_llill (code, s390_r0, 0xffff);
s390_ngr (code, ins->dreg, s390_r0);
} else {
code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 2, FALSE);
}
break;
case OP_RCONV_TO_I4:
s390_cfebr (code, ins->dreg, 5, ins->sreg1);
break;
case OP_RCONV_TO_U4:
if (mono_hwcap_s390x_has_fpe) {
s390_clfebr (code, ins->dreg, 5, ins->sreg1, 0);
} else {
code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 4, FALSE);
}
break;
case OP_RCONV_TO_I8:
case OP_RCONV_TO_I:
s390_cgebr (code, ins->dreg, 5, ins->sreg1);
break;
case OP_RCONV_TO_U8:
if (mono_hwcap_s390x_has_fpe) {
s390_clgebr (code, ins->dreg, 5, ins->sreg1, 0);
} else {
code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 8, FALSE);
}
break;
case OP_LCONV_TO_OVF_I: {
		/* Valid ints: 0xffffffff:80000000 to 00000000:0x7fffffff */
short int *o[5];
s390_ltgr (code, ins->sreg2, ins->sreg2);
s390_jnl (code, 0); CODEPTR(code, o[0]);
s390_ltgr (code, ins->sreg1, ins->sreg1);
s390_jnl (code, 0); CODEPTR(code, o[1]);
s390_lhi (code, s390_r13, -1);
s390_cgr (code, ins->sreg1, s390_r13);
s390_jnz (code, 0); CODEPTR(code, o[2]);
if (ins->dreg != ins->sreg2)
s390_lgr (code, ins->dreg, ins->sreg2);
s390_j (code, 0); CODEPTR(code, o[3]);
PTRSLOT(code, o[0]);
s390_jz (code, 0); CODEPTR(code, o[4]);
PTRSLOT(code, o[1]);
PTRSLOT(code, o[2]);
mono_add_patch_info (cfg, code - cfg->native_code,
MONO_PATCH_INFO_EXC, "OverflowException");
s390_brasl (code, s390_r14, 0);
PTRSLOT(code, o[3]);
PTRSLOT(code, o[4]);
}
break;
case OP_ABS:
s390_lpdbr (code, ins->dreg, ins->sreg1);
break;
case OP_ABSF:
s390_lpebr (code, ins->dreg, ins->sreg1);
break;
case OP_CEIL:
s390_fidbra (code, ins->dreg, 6, ins->sreg1, 4);
break;
case OP_CEILF:
s390_fiebra (code, ins->dreg, 6, ins->sreg1, 4);
break;
case OP_FLOOR:
s390_fidbra (code, ins->dreg, 7, ins->sreg1, 4);
break;
case OP_FLOORF:
s390_fiebra (code, ins->dreg, 7, ins->sreg1, 4);
break;
case OP_FCOPYSIGN:
s390_cpsdr (code, ins->dreg, ins->sreg2, ins->sreg1);
break;
case OP_ROUND:
s390_fidbra (code, ins->dreg, 4, ins->sreg1, 4);
break;
case OP_SQRT:
s390_sqdbr (code, ins->dreg, ins->sreg1);
break;
case OP_SQRTF:
s390_sqebr (code, ins->dreg, ins->sreg1);
break;
case OP_TRUNC:
s390_fidbra (code, ins->dreg, 5, ins->sreg1, 4);
break;
case OP_TRUNCF:
s390_fiebra (code, ins->dreg, 5, ins->sreg1, 4);
break;
case OP_FADD: {
CHECK_SRCDST_COM_F;
s390_adbr (code, ins->dreg, src2);
}
break;
case OP_RADD: {
CHECK_SRCDST_COM_F;
s390_aebr (code, ins->dreg, src2);
}
break;
case OP_FSUB: {
CHECK_SRCDST_NCOM_F(sdbr);
}
break;
case OP_RSUB: {
CHECK_SRCDST_NCOM_F(sebr);
}
break;
case OP_FMUL: {
CHECK_SRCDST_COM_F;
s390_mdbr (code, ins->dreg, src2);
}
break;
case OP_RMUL: {
CHECK_SRCDST_COM_F;
s390_meer (code, ins->dreg, src2);
}
break;
case OP_FDIV: {
CHECK_SRCDST_NCOM_F(ddbr);
}
break;
case OP_RDIV: {
CHECK_SRCDST_NCOM_F(debr);
}
break;
case OP_FNEG: {
s390_lcdbr (code, ins->dreg, ins->sreg1);
}
break;
case OP_RNEG: {
s390_lcebr (code, ins->dreg, ins->sreg1);
}
break;
case OP_FREM: {
CHECK_SRCDST_NCOM_FR(didbr, 5);
}
break;
case OP_RREM: {
CHECK_SRCDST_NCOM_FR(diebr, 5);
}
break;
case OP_FCOMPARE: {
s390_cdbr (code, ins->sreg1, ins->sreg2);
}
break;
case OP_RCOMPARE: {
s390_cebr (code, ins->sreg1, ins->sreg2);
}
break;
case OP_FCEQ: {
s390_cdbr (code, ins->sreg1, ins->sreg2);
s390_lghi (code, ins->dreg, 1);
s390_je (code, 4);
s390_lghi (code, ins->dreg, 0);
}
break;
case OP_FCLT: {
s390_cdbr (code, ins->sreg1, ins->sreg2);
s390_lghi (code, ins->dreg, 1);
s390_jl (code, 4);
s390_lghi (code, ins->dreg, 0);
}
break;
case OP_FCLT_UN: {
s390_cdbr (code, ins->sreg1, ins->sreg2);
s390_lghi (code, ins->dreg, 1);
s390_jlo (code, 4);
s390_lghi (code, ins->dreg, 0);
}
break;
case OP_FCGT: {
s390_cdbr (code, ins->sreg1, ins->sreg2);
s390_lghi (code, ins->dreg, 1);
s390_jh (code, 4);
s390_lghi (code, ins->dreg, 0);
}
break;
case OP_FCGT_UN: {
s390_cdbr (code, ins->sreg1, ins->sreg2);
s390_lghi (code, ins->dreg, 1);
s390_jho (code, 4);
s390_lghi (code, ins->dreg, 0);
}
break;
case OP_FCNEQ: {
s390_cdbr (code, ins->sreg1, ins->sreg2);
s390_lghi (code, ins->dreg, 1);
s390_jne (code, 4);
s390_lghi (code, ins->dreg, 0);
}
break;
case OP_FCGE: {
s390_cdbr (code, ins->sreg1, ins->sreg2);
s390_lghi (code, ins->dreg, 1);
s390_jhe (code, 4);
s390_lghi (code, ins->dreg, 0);
}
break;
case OP_FCLE: {
s390_cdbr (code, ins->sreg1, ins->sreg2);
s390_lghi (code, ins->dreg, 1);
s390_jle (code, 4);
s390_lghi (code, ins->dreg, 0);
}
break;
case OP_RCEQ: {
s390_cebr (code, ins->sreg1, ins->sreg2);
s390_lghi (code, ins->dreg, 1);
s390_je (code, 4);
s390_lghi (code, ins->dreg, 0);
}
break;
case OP_RCLT: {
s390_cebr (code, ins->sreg1, ins->sreg2);
s390_lghi (code, ins->dreg, 1);
s390_jl (code, 4);
s390_lghi (code, ins->dreg, 0);
}
break;
case OP_RCLT_UN: {
s390_cebr (code, ins->sreg1, ins->sreg2);
s390_lghi (code, ins->dreg, 1);
s390_jlo (code, 4);
s390_lghi (code, ins->dreg, 0);
}
break;
case OP_RCGT: {
s390_cebr (code, ins->sreg1, ins->sreg2);
s390_lghi (code, ins->dreg, 1);
s390_jh (code, 4);
s390_lghi (code, ins->dreg, 0);
}
break;
case OP_RCGT_UN: {
s390_cebr (code, ins->sreg1, ins->sreg2);
s390_lghi (code, ins->dreg, 1);
s390_jho (code, 4);
s390_lghi (code, ins->dreg, 0);
}
break;
case OP_RCNEQ: {
s390_cebr (code, ins->sreg1, ins->sreg2);
s390_lghi (code, ins->dreg, 1);
s390_jne (code, 4);
s390_lghi (code, ins->dreg, 0);
}
break;
case OP_RCGE: {
s390_cebr (code, ins->sreg1, ins->sreg2);
s390_lghi (code, ins->dreg, 1);
s390_jhe (code, 4);
s390_lghi (code, ins->dreg, 0);
}
break;
case OP_RCLE: {
s390_cebr (code, ins->sreg1, ins->sreg2);
s390_lghi (code, ins->dreg, 1);
s390_jle (code, 4);
s390_lghi (code, ins->dreg, 0);
}
break;
case OP_FBEQ: {
short *o;
s390_jo (code, 0); CODEPTR(code, o);
EMIT_COND_BRANCH (ins, S390_CC_EQ);
PTRSLOT (code, o);
}
break;
case OP_FBNE_UN:
EMIT_COND_BRANCH (ins, S390_CC_NE|S390_CC_OV);
break;
case OP_FBLT: {
short *o;
s390_jo (code, 0); CODEPTR(code, o);
EMIT_COND_BRANCH (ins, S390_CC_LT);
PTRSLOT (code, o);
}
break;
case OP_FBLT_UN:
EMIT_COND_BRANCH (ins, S390_CC_LT|S390_CC_OV);
break;
case OP_FBGT: {
short *o;
s390_jo (code, 0); CODEPTR(code, o);
EMIT_COND_BRANCH (ins, S390_CC_GT);
PTRSLOT (code, o);
}
break;
case OP_FBGT_UN:
EMIT_COND_BRANCH (ins, S390_CC_GT|S390_CC_OV);
break;
case OP_FBGE: {
short *o;
s390_jo (code, 0); CODEPTR(code, o);
EMIT_COND_BRANCH (ins, S390_CC_GE);
PTRSLOT (code, o);
}
break;
case OP_FBGE_UN:
EMIT_COND_BRANCH (ins, S390_CC_GE|S390_CC_OV);
break;
case OP_FBLE: {
short *o;
s390_jo (code, 0); CODEPTR(code, o);
EMIT_COND_BRANCH (ins, S390_CC_LE);
PTRSLOT (code, o);
}
break;
case OP_FBLE_UN:
EMIT_COND_BRANCH (ins, S390_CC_LE|S390_CC_OV);
break;
case OP_CKFINITE: {
short *o;
s390_lhi (code, s390_r13, 0x7f);
s390_tcdb (code, ins->sreg1, 0, s390_r13, 0);
s390_jz (code, 0); CODEPTR(code, o);
mono_add_patch_info (cfg, code - cfg->native_code,
MONO_PATCH_INFO_EXC, "OverflowException");
s390_brasl (code, s390_r14,0);
PTRSLOT(code, o);
}
break;
case OP_S390_MOVE: {
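		/* MVC handles block moves up to 256 bytes; anything larger goes through an MVCLE loop */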
if (ins->backend.size > 0) {
if (ins->backend.size <= 256) {
s390_mvc (code, ins->backend.size, ins->sreg2,
ins->inst_offset, ins->sreg1, ins->inst_imm);
} else {
s390_lgr (code, s390_r0, ins->sreg2);
if (ins->inst_offset > 0) {
if (s390_is_imm16 (ins->inst_offset)) {
s390_aghi (code, s390_r0, ins->inst_offset);
} else if (s390_is_imm32 (ins->inst_offset)) {
s390_agfi (code, s390_r0, ins->inst_offset);
} else {
S390_SET (code, s390_r13, ins->inst_offset);
s390_agr (code, s390_r0, s390_r13);
}
}
s390_lgr (code, s390_r12, ins->sreg1);
if (ins->inst_imm > 0) {
if (s390_is_imm16 (ins->inst_imm)) {
s390_aghi (code, s390_r12, ins->inst_imm);
} else if (s390_is_imm32 (ins->inst_imm)) {
s390_agfi (code, s390_r12, ins->inst_imm);
} else {
S390_SET (code, s390_r13, ins->inst_imm);
s390_agr (code, s390_r12, s390_r13);
}
}
				if (s390_is_imm16 (ins->backend.size)) {
					s390_lghi (code, s390_r1, ins->backend.size);
				} else if (s390_is_imm32 (ins->backend.size)) {
					s390_lgfi (code, s390_r1, ins->backend.size);
				} else {
					S390_SET (code, s390_r13, ins->backend.size);
					s390_lgr (code, s390_r1, s390_r13);
				}
s390_lgr (code, s390_r13, s390_r1);
s390_mvcle(code, s390_r0, s390_r12, 0, 0);
s390_jo (code, -2);
}
}
}
break;
case OP_ATOMIC_ADD_I8: {
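		/* LAAG is a single interlocked fetch-and-add; without it, loop on CSG until the compare-and-swap succeeds */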
if (mono_hwcap_s390x_has_ia) {
s390_laag(code, s390_r0, ins->sreg2, ins->inst_basereg, ins->inst_offset);
if (mono_hwcap_s390x_has_mlt) {
s390_agrk(code, ins->dreg, s390_r0, ins->sreg2);
} else {
s390_agr (code, s390_r0, ins->sreg2);
s390_lgr (code, ins->dreg, s390_r0);
}
} else {
s390_lgr (code, s390_r1, ins->sreg2);
s390_lg (code, s390_r0, 0, ins->inst_basereg, ins->inst_offset);
s390_agr (code, s390_r1, s390_r0);
s390_csg (code, s390_r0, s390_r1, ins->inst_basereg, ins->inst_offset);
s390_jnz (code, -10);
s390_lgr (code, ins->dreg, s390_r1);
}
}
break;
case OP_ATOMIC_EXCHANGE_I8: {
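		/* Exchange via a CSG retry loop: reload and retry until the swap lands */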
s390_lg (code, s390_r0, 0, ins->inst_basereg, ins->inst_offset);
s390_csg (code, s390_r0, ins->sreg2, ins->inst_basereg, ins->inst_offset);
s390_jnz (code, -6);
s390_lgr (code, ins->dreg, s390_r0);
}
break;
case OP_ATOMIC_ADD_I4: {
if (mono_hwcap_s390x_has_ia) {
s390_laa (code, s390_r0, ins->sreg2, ins->inst_basereg, ins->inst_offset);
s390_ar (code, s390_r0, ins->sreg2);
s390_lgfr(code, ins->dreg, s390_r0);
} else {
s390_lgfr(code, s390_r1, ins->sreg2);
s390_lgf (code, s390_r0, 0, ins->inst_basereg, ins->inst_offset);
s390_agr (code, s390_r1, s390_r0);
s390_cs (code, s390_r0, s390_r1, ins->inst_basereg, ins->inst_offset);
s390_jnz (code, -9);
s390_lgfr(code, ins->dreg, s390_r1);
}
}
break;
case OP_ATOMIC_EXCHANGE_I4: {
s390_l (code, s390_r0, 0, ins->inst_basereg, ins->inst_offset);
s390_cs (code, s390_r0, ins->sreg2, ins->inst_basereg, ins->inst_offset);
s390_jnz (code, -4);
s390_lgfr(code, ins->dreg, s390_r0);
}
break;
case OP_S390_BKCHAIN: {
s390_lgr (code, ins->dreg, ins->sreg1);
if (s390_is_imm16 (cfg->stack_offset)) {
s390_aghi (code, ins->dreg, cfg->stack_offset);
} else if (s390_is_imm32 (cfg->stack_offset)) {
s390_agfi (code, ins->dreg, cfg->stack_offset);
} else {
S390_SET (code, s390_r13, cfg->stack_offset);
s390_agr (code, ins->dreg, s390_r13);
}
}
break;
case OP_MEMORY_BARRIER:
s390_mem (code);
break;
case OP_POPCNT32:
s390_llgfr (code, s390_r1, ins->sreg1);
if (mono_hwcap_s390x_has_mie3) {
s390_popcnt (code, ins->dreg, 0x80, s390_r1);
} else {
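			/* Pre-MIE3 POPCNT produces a count per byte; fold the bytes with shifted adds and keep the total in the low byte */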
s390_popcnt (code, s390_r0, 0, s390_r1);
s390_ahhlr (code, s390_r0, s390_r0, s390_r0);
s390_sllg (code, s390_r1, s390_r0, 0, 16);
s390_algr (code, s390_r0, s390_r1);
s390_sllg (code, s390_r1, s390_r0, 0, 8);
s390_algr (code, s390_r0, s390_r1);
s390_srlg (code, ins->dreg, s390_r0, 0, 56);
}
break;
case OP_POPCNT64:
if (mono_hwcap_s390x_has_mie3) {
s390_popcnt (code, ins->dreg, 0x80, ins->sreg1);
		} else {
			s390_popcnt (code, s390_r0, 0, ins->sreg1);
			s390_ahhlr (code, s390_r0, s390_r0, s390_r0);
s390_sllg (code, s390_r1, s390_r0, 0, 16);
s390_algr (code, s390_r0, s390_r1);
s390_sllg (code, s390_r1, s390_r0, 0, 8);
s390_algr (code, s390_r0, s390_r1);
s390_srlg (code, ins->dreg, s390_r0, 0, 56);
}
break;
case OP_LIVERANGE_START: {
if (cfg->verbose_level > 1)
printf ("R%d START=0x%x\n", MONO_VARINFO (cfg, ins->inst_c0)->vreg, (int)(code - cfg->native_code));
MONO_VARINFO (cfg, ins->inst_c0)->live_range_start = code - cfg->native_code;
break;
}
case OP_LIVERANGE_END: {
if (cfg->verbose_level > 1)
printf ("R%d END=0x%x\n", MONO_VARINFO (cfg, ins->inst_c0)->vreg, (int)(code - cfg->native_code));
MONO_VARINFO (cfg, ins->inst_c0)->live_range_end = code - cfg->native_code;
break;
}
case OP_GC_SAFE_POINT: {
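		/* Poll the thread-state flag and call mono_threads_state_poll only when it is set */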
short *br;
s390_ltg (code, s390_r0, 0, ins->sreg1, 0);
s390_jz (code, 0); CODEPTR(code, br);
code = emit_call (cfg, code, MONO_PATCH_INFO_JIT_ICALL_ID,
GUINT_TO_POINTER (MONO_JIT_ICALL_mono_threads_state_poll));
PTRSLOT (code, br);
break;
}
case OP_GC_LIVENESS_DEF:
case OP_GC_LIVENESS_USE:
case OP_GC_PARAM_SLOT_LIVENESS_DEF:
ins->backend.pc_offset = code - cfg->native_code;
break;
case OP_GC_SPILL_SLOT_LIVENESS_DEF:
ins->backend.pc_offset = code - cfg->native_code;
bb->spill_slot_defs = g_slist_prepend_mempool (cfg->mempool, bb->spill_slot_defs, ins);
break;
#ifdef MONO_ARCH_SIMD_INTRINSICS
case OP_ADDPS:
s390x_addps (code, ins->sreg1, ins->sreg2);
break;
case OP_DIVPS:
s390x_divps (code, ins->sreg1, ins->sreg2);
break;
case OP_MULPS:
s390x_mulps (code, ins->sreg1, ins->sreg2);
break;
case OP_SUBPS:
s390x_subps (code, ins->sreg1, ins->sreg2);
break;
case OP_MAXPS:
s390x_maxps (code, ins->sreg1, ins->sreg2);
break;
case OP_MINPS:
s390x_minps (code, ins->sreg1, ins->sreg2);
break;
case OP_COMPPS:
g_assert (ins->inst_c0 >= 0 && ins->inst_c0 <= 7);
s390x_cmpps_imm (code, ins->sreg1, ins->sreg2, ins->inst_c0);
break;
case OP_ANDPS:
s390x_andps (code, ins->sreg1, ins->sreg2);
break;
case OP_ANDNPS:
s390x_andnps (code, ins->sreg1, ins->sreg2);
break;
case OP_ORPS:
s390x_orps (code, ins->sreg1, ins->sreg2);
break;
case OP_XORPS:
s390x_xorps (code, ins->sreg1, ins->sreg2);
break;
case OP_SQRTPS:
s390x_sqrtps (code, ins->dreg, ins->sreg1);
break;
case OP_RSQRTPS:
s390x_rsqrtps (code, ins->dreg, ins->sreg1);
break;
case OP_RCPPS:
s390x_rcpps (code, ins->dreg, ins->sreg1);
break;
case OP_ADDSUBPS:
s390x_addsubps (code, ins->sreg1, ins->sreg2);
break;
case OP_HADDPS:
s390x_haddps (code, ins->sreg1, ins->sreg2);
break;
case OP_HSUBPS:
s390x_hsubps (code, ins->sreg1, ins->sreg2);
break;
case OP_DUPPS_HIGH:
s390x_movshdup (code, ins->dreg, ins->sreg1);
break;
case OP_DUPPS_LOW:
s390x_movsldup (code, ins->dreg, ins->sreg1);
break;
case OP_PSHUFLEW_HIGH:
g_assert (ins->inst_c0 >= 0 && ins->inst_c0 <= 0xFF);
s390x_pshufhw_imm (code, ins->dreg, ins->sreg1, ins->inst_c0);
break;
case OP_PSHUFLEW_LOW:
g_assert (ins->inst_c0 >= 0 && ins->inst_c0 <= 0xFF);
s390x_pshuflw_imm (code, ins->dreg, ins->sreg1, ins->inst_c0);
break;
case OP_PSHUFLED:
g_assert (ins->inst_c0 >= 0 && ins->inst_c0 <= 0xFF);
s390x_pshufd_imm (code, ins->dreg, ins->sreg1, ins->inst_c0);
break;
case OP_SHUFPS:
g_assert (ins->inst_c0 >= 0 && ins->inst_c0 <= 0xFF);
s390x_shufps_imm (code, ins->sreg1, ins->sreg2, ins->inst_c0);
break;
case OP_SHUFPD:
g_assert (ins->inst_c0 >= 0 && ins->inst_c0 <= 0x3);
s390x_shufpd_imm (code, ins->sreg1, ins->sreg2, ins->inst_c0);
break;
case OP_ADDPD:
s390x_addpd (code, ins->sreg1, ins->sreg2);
break;
case OP_DIVPD:
s390x_divpd (code, ins->sreg1, ins->sreg2);
break;
case OP_MULPD:
s390x_mulpd (code, ins->sreg1, ins->sreg2);
break;
case OP_SUBPD:
s390x_subpd (code, ins->sreg1, ins->sreg2);
break;
case OP_MAXPD:
s390x_maxpd (code, ins->sreg1, ins->sreg2);
break;
case OP_MINPD:
s390x_minpd (code, ins->sreg1, ins->sreg2);
break;
case OP_COMPPD:
g_assert (ins->inst_c0 >= 0 && ins->inst_c0 <= 7);
s390x_cmppd_imm (code, ins->sreg1, ins->sreg2, ins->inst_c0);
break;
case OP_ANDPD:
s390x_andpd (code, ins->sreg1, ins->sreg2);
break;
case OP_ANDNPD:
s390x_andnpd (code, ins->sreg1, ins->sreg2);
break;
case OP_ORPD:
s390x_orpd (code, ins->sreg1, ins->sreg2);
break;
case OP_XORPD:
s390x_xorpd (code, ins->sreg1, ins->sreg2);
break;
case OP_SQRTPD:
s390x_sqrtpd (code, ins->dreg, ins->sreg1);
break;
case OP_ADDSUBPD:
s390x_addsubpd (code, ins->sreg1, ins->sreg2);
break;
case OP_HADDPD:
s390x_haddpd (code, ins->sreg1, ins->sreg2);
break;
case OP_HSUBPD:
s390x_hsubpd (code, ins->sreg1, ins->sreg2);
break;
case OP_DUPPD:
s390x_movddup (code, ins->dreg, ins->sreg1);
break;
case OP_EXTRACT_MASK:
s390x_pmovmskb (code, ins->dreg, ins->sreg1);
break;
case OP_PAND:
s390x_pand (code, ins->sreg1, ins->sreg2);
break;
case OP_POR:
s390x_por (code, ins->sreg1, ins->sreg2);
break;
case OP_PXOR:
s390x_pxor (code, ins->sreg1, ins->sreg2);
break;
case OP_PADDB:
s390x_paddb (code, ins->sreg1, ins->sreg2);
break;
case OP_PADDW:
s390x_paddw (code, ins->sreg1, ins->sreg2);
break;
case OP_PADDD:
s390x_paddd (code, ins->sreg1, ins->sreg2);
break;
case OP_PADDQ:
s390x_paddq (code, ins->sreg1, ins->sreg2);
break;
case OP_PSUBB:
s390x_psubb (code, ins->sreg1, ins->sreg2);
break;
case OP_PSUBW:
s390x_psubw (code, ins->sreg1, ins->sreg2);
break;
case OP_PSUBD:
s390x_psubd (code, ins->sreg1, ins->sreg2);
break;
case OP_PSUBQ:
s390x_psubq (code, ins->sreg1, ins->sreg2);
break;
case OP_PMAXB_UN:
s390x_pmaxub (code, ins->sreg1, ins->sreg2);
break;
case OP_PMAXW_UN:
s390x_pmaxuw (code, ins->sreg1, ins->sreg2);
break;
case OP_PMAXD_UN:
s390x_pmaxud (code, ins->sreg1, ins->sreg2);
break;
case OP_PMAXB:
s390x_pmaxsb (code, ins->sreg1, ins->sreg2);
break;
case OP_PMAXW:
s390x_pmaxsw (code, ins->sreg1, ins->sreg2);
break;
case OP_PMAXD:
s390x_pmaxsd (code, ins->sreg1, ins->sreg2);
break;
case OP_PAVGB_UN:
s390x_pavgb (code, ins->sreg1, ins->sreg2);
break;
case OP_PAVGW_UN:
s390x_pavgw (code, ins->sreg1, ins->sreg2);
break;
case OP_PMINB_UN:
s390x_pminub (code, ins->sreg1, ins->sreg2);
break;
case OP_PMINW_UN:
s390x_pminuw (code, ins->sreg1, ins->sreg2);
break;
case OP_PMIND_UN:
s390x_pminud (code, ins->sreg1, ins->sreg2);
break;
case OP_PMINB:
s390x_pminsb (code, ins->sreg1, ins->sreg2);
break;
case OP_PMINW:
s390x_pminsw (code, ins->sreg1, ins->sreg2);
break;
case OP_PMIND:
s390x_pminsd (code, ins->sreg1, ins->sreg2);
break;
case OP_PCMPEQB:
s390x_pcmpeqb (code, ins->sreg1, ins->sreg2);
break;
case OP_PCMPEQW:
s390x_pcmpeqw (code, ins->sreg1, ins->sreg2);
break;
case OP_PCMPEQD:
s390x_pcmpeqd (code, ins->sreg1, ins->sreg2);
break;
case OP_PCMPEQQ:
s390x_pcmpeqq (code, ins->sreg1, ins->sreg2);
break;
case OP_PCMPGTB:
s390x_pcmpgtb (code, ins->sreg1, ins->sreg2);
break;
case OP_PCMPGTW:
s390x_pcmpgtw (code, ins->sreg1, ins->sreg2);
break;
case OP_PCMPGTD:
s390x_pcmpgtd (code, ins->sreg1, ins->sreg2);
break;
case OP_PCMPGTQ:
s390x_pcmpgtq (code, ins->sreg1, ins->sreg2);
break;
case OP_PSUM_ABS_DIFF:
s390x_psadbw (code, ins->sreg1, ins->sreg2);
break;
case OP_UNPACK_LOWB:
s390x_punpcklbw (code, ins->sreg1, ins->sreg2);
break;
case OP_UNPACK_LOWW:
s390x_punpcklwd (code, ins->sreg1, ins->sreg2);
break;
case OP_UNPACK_LOWD:
s390x_punpckldq (code, ins->sreg1, ins->sreg2);
break;
case OP_UNPACK_LOWQ:
s390x_punpcklqdq (code, ins->sreg1, ins->sreg2);
break;
case OP_UNPACK_LOWPS:
s390x_unpcklps (code, ins->sreg1, ins->sreg2);
break;
case OP_UNPACK_LOWPD:
s390x_unpcklpd (code, ins->sreg1, ins->sreg2);
break;
case OP_UNPACK_HIGHB:
s390x_punpckhbw (code, ins->sreg1, ins->sreg2);
break;
case OP_UNPACK_HIGHW:
s390x_punpckhwd (code, ins->sreg1, ins->sreg2);
break;
case OP_UNPACK_HIGHD:
s390x_punpckhdq (code, ins->sreg1, ins->sreg2);
break;
case OP_UNPACK_HIGHQ:
s390x_punpckhqdq (code, ins->sreg1, ins->sreg2);
break;
case OP_UNPACK_HIGHPS:
s390x_unpckhps (code, ins->sreg1, ins->sreg2);
break;
case OP_UNPACK_HIGHPD:
s390x_unpckhpd (code, ins->sreg1, ins->sreg2);
break;
case OP_PACKW:
s390x_packsswb (code, ins->sreg1, ins->sreg2);
break;
case OP_PACKD:
s390x_packssdw (code, ins->sreg1, ins->sreg2);
break;
case OP_PACKW_UN:
s390x_packuswb (code, ins->sreg1, ins->sreg2);
break;
case OP_PACKD_UN:
s390x_packusdw (code, ins->sreg1, ins->sreg2);
break;
case OP_PADDB_SAT_UN:
s390x_paddusb (code, ins->sreg1, ins->sreg2);
break;
case OP_PSUBB_SAT_UN:
s390x_psubusb (code, ins->sreg1, ins->sreg2);
break;
case OP_PADDW_SAT_UN:
s390x_paddusw (code, ins->sreg1, ins->sreg2);
break;
case OP_PSUBW_SAT_UN:
s390x_psubusw (code, ins->sreg1, ins->sreg2);
break;
case OP_PADDB_SAT:
s390x_paddsb (code, ins->sreg1, ins->sreg2);
break;
case OP_PSUBB_SAT:
s390x_psubsb (code, ins->sreg1, ins->sreg2);
break;
case OP_PADDW_SAT:
s390x_paddsw (code, ins->sreg1, ins->sreg2);
break;
case OP_PSUBW_SAT:
s390x_psubsw (code, ins->sreg1, ins->sreg2);
break;
case OP_PMULW:
s390x_pmullw (code, ins->sreg1, ins->sreg2);
break;
case OP_PMULD:
s390x_pmulld (code, ins->sreg1, ins->sreg2);
break;
case OP_PMULQ:
s390x_pmuludq (code, ins->sreg1, ins->sreg2);
break;
case OP_PMULW_HIGH_UN:
s390x_pmulhuw (code, ins->sreg1, ins->sreg2);
break;
case OP_PMULW_HIGH:
s390x_pmulhw (code, ins->sreg1, ins->sreg2);
break;
case OP_PSHRW:
s390x_psrlw_reg_imm (code, ins->dreg, ins->inst_imm);
break;
case OP_PSHRW_REG:
s390x_psrlw (code, ins->dreg, ins->sreg2);
break;
case OP_PSARW:
s390x_psraw_reg_imm (code, ins->dreg, ins->inst_imm);
break;
case OP_PSARW_REG:
s390x_psraw (code, ins->dreg, ins->sreg2);
break;
case OP_PSHLW:
s390x_psllw_reg_imm (code, ins->dreg, ins->inst_imm);
break;
case OP_PSHLW_REG:
s390x_psllw (code, ins->dreg, ins->sreg2);
break;
case OP_PSHRD:
s390x_psrld_reg_imm (code, ins->dreg, ins->inst_imm);
break;
case OP_PSHRD_REG:
s390x_psrld (code, ins->dreg, ins->sreg2);
break;
case OP_PSARD:
s390x_psrad_reg_imm (code, ins->dreg, ins->inst_imm);
break;
case OP_PSARD_REG:
s390x_psrad (code, ins->dreg, ins->sreg2);
break;
case OP_PSHLD:
s390x_pslld_reg_imm (code, ins->dreg, ins->inst_imm);
break;
case OP_PSHLD_REG:
s390x_pslld (code, ins->dreg, ins->sreg2);
break;
case OP_PSHRQ:
s390x_psrlq_reg_imm (code, ins->dreg, ins->inst_imm);
break;
case OP_PSHRQ_REG:
s390x_psrlq (code, ins->dreg, ins->sreg2);
break;
	/* TODO: This is part of the SSE spec but not yet added
case OP_PSARQ:
s390x_psraq_reg_imm (code, ins->dreg, ins->inst_imm);
break;
case OP_PSARQ_REG:
s390x_psraq (code, ins->dreg, ins->sreg2);
break;
*/
case OP_PSHLQ:
s390x_psllq_reg_imm (code, ins->dreg, ins->inst_imm);
break;
case OP_PSHLQ_REG:
s390x_psllq (code, ins->dreg, ins->sreg2);
break;
case OP_CVTDQ2PD:
s390x_cvtdq2pd (code, ins->dreg, ins->sreg1);
break;
case OP_CVTDQ2PS:
s390x_cvtdq2ps (code, ins->dreg, ins->sreg1);
break;
case OP_CVTPD2DQ:
s390x_cvtpd2dq (code, ins->dreg, ins->sreg1);
break;
case OP_CVTPD2PS:
s390x_cvtpd2ps (code, ins->dreg, ins->sreg1);
break;
case OP_CVTPS2DQ:
s390x_cvtps2dq (code, ins->dreg, ins->sreg1);
break;
case OP_CVTPS2PD:
s390x_cvtps2pd (code, ins->dreg, ins->sreg1);
break;
case OP_CVTTPD2DQ:
s390x_cvttpd2dq (code, ins->dreg, ins->sreg1);
break;
case OP_CVTTPS2DQ:
s390x_cvttps2dq (code, ins->dreg, ins->sreg1);
break;
case OP_ICONV_TO_X:
amd64_movd_xreg_reg_size (code, ins->dreg, ins->sreg1, 4);
break;
case OP_EXTRACT_I4:
amd64_movd_reg_xreg_size (code, ins->dreg, ins->sreg1, 4);
break;
case OP_EXTRACT_I8:
if (ins->inst_c0) {
amd64_movhlps (code, MONO_ARCH_FP_SCRATCH_REG, ins->sreg1);
amd64_movd_reg_xreg_size (code, ins->dreg, MONO_ARCH_FP_SCRATCH_REG, 8);
} else {
amd64_movd_reg_xreg_size (code, ins->dreg, ins->sreg1, 8);
}
break;
case OP_EXTRACT_I1:
case OP_EXTRACT_U1:
amd64_movd_reg_xreg_size (code, ins->dreg, ins->sreg1, 4);
if (ins->inst_c0)
amd64_shift_reg_imm (code, X86_SHR, ins->dreg, ins->inst_c0 * 8);
amd64_widen_reg (code, ins->dreg, ins->dreg, ins->inst_c1 == OP_EXTRACT_I1, FALSE);
break;
case OP_EXTRACT_I2:
case OP_EXTRACT_U2:
/*amd64_movd_reg_xreg_size (code, ins->dreg, ins->sreg1, 4);
if (ins->inst_c0)
amd64_shift_reg_imm_size (code, X86_SHR, ins->dreg, 16, 4);*/
s390x_pextrw_imm (code, ins->dreg, ins->sreg1, ins->inst_c0);
amd64_widen_reg_size (code, ins->dreg, ins->dreg, ins->inst_c1 == OP_EXTRACT_I2, TRUE, 4);
break;
case OP_EXTRACT_R8:
if (ins->inst_c0)
amd64_movhlps (code, ins->dreg, ins->sreg1);
else
s390x_movsd (code, ins->dreg, ins->sreg1);
break;
case OP_INSERT_I2:
s390x_pinsrw_imm (code, ins->sreg1, ins->sreg2, ins->inst_c0);
break;
case OP_EXTRACTX_U2:
s390x_pextrw_imm (code, ins->dreg, ins->sreg1, ins->inst_c0);
break;
case OP_INSERTX_U1_SLOW:
		/* sreg1 is the extracted ireg (scratch)
		 * sreg2 is the to-be-inserted ireg (scratch)
		 * dreg is the xreg to receive the value */
/*clear the bits from the extracted word*/
amd64_alu_reg_imm (code, X86_AND, ins->sreg1, ins->inst_c0 & 1 ? 0x00FF : 0xFF00);
/*shift the value to insert if needed*/
if (ins->inst_c0 & 1)
amd64_shift_reg_imm_size (code, X86_SHL, ins->sreg2, 8, 4);
/*join them together*/
amd64_alu (code, X86_OR, ins->sreg1, ins->sreg2);
s390x_pinsrw_imm (code, ins->dreg, ins->sreg1, ins->inst_c0 / 2);
break;
case OP_INSERTX_I4_SLOW:
s390x_pinsrw_imm (code, ins->dreg, ins->sreg2, ins->inst_c0 * 2);
amd64_shift_reg_imm (code, X86_SHR, ins->sreg2, 16);
s390x_pinsrw_imm (code, ins->dreg, ins->sreg2, ins->inst_c0 * 2 + 1);
break;
case OP_INSERTX_I8_SLOW:
amd64_movd_xreg_reg_size(code, MONO_ARCH_FP_SCRATCH_REG, ins->sreg2, 8);
if (ins->inst_c0)
amd64_movlhps (code, ins->dreg, MONO_ARCH_FP_SCRATCH_REG);
else
s390x_movsd (code, ins->dreg, MONO_ARCH_FP_SCRATCH_REG);
break;
case OP_INSERTX_R4_SLOW:
switch (ins->inst_c0) {
case 0:
s390x_movss (code, ins->dreg, ins->sreg2);
break;
case 1:
s390x_pshufd_imm (code, ins->dreg, ins->dreg, mono_simd_shuffle_mask(1, 0, 2, 3));
s390x_movss (code, ins->dreg, ins->sreg2);
s390x_pshufd_imm (code, ins->dreg, ins->dreg, mono_simd_shuffle_mask(1, 0, 2, 3));
break;
case 2:
s390x_pshufd_imm (code, ins->dreg, ins->dreg, mono_simd_shuffle_mask(2, 1, 0, 3));
s390x_movss (code, ins->dreg, ins->sreg2);
s390x_pshufd_imm (code, ins->dreg, ins->dreg, mono_simd_shuffle_mask(2, 1, 0, 3));
break;
case 3:
s390x_pshufd_imm (code, ins->dreg, ins->dreg, mono_simd_shuffle_mask(3, 1, 2, 0));
s390x_movss (code, ins->dreg, ins->sreg2);
s390x_pshufd_imm (code, ins->dreg, ins->dreg, mono_simd_shuffle_mask(3, 1, 2, 0));
break;
}
break;
case OP_INSERTX_R8_SLOW:
if (ins->inst_c0)
amd64_movlhps (code, ins->dreg, ins->sreg2);
else
s390x_movsd (code, ins->dreg, ins->sreg2);
break;
case OP_STOREX_MEMBASE_REG:
case OP_STOREX_MEMBASE:
s390x_movups_membase_reg (code, ins->dreg, ins->inst_offset, ins->sreg1);
break;
case OP_LOADX_MEMBASE:
s390x_movups_reg_membase (code, ins->dreg, ins->sreg1, ins->inst_offset);
break;
case OP_LOADX_ALIGNED_MEMBASE:
s390x_movaps_reg_membase (code, ins->dreg, ins->sreg1, ins->inst_offset);
break;
case OP_STOREX_ALIGNED_MEMBASE_REG:
s390x_movaps_membase_reg (code, ins->dreg, ins->inst_offset, ins->sreg1);
break;
case OP_STOREX_NTA_MEMBASE_REG:
s390x_movntps_reg_membase (code, ins->dreg, ins->sreg1, ins->inst_offset);
break;
case OP_PREFETCH_MEMBASE:
s390x_prefetch_reg_membase (code, ins->backend.arg_info, ins->sreg1, ins->inst_offset);
break;
case OP_XMOVE:
/*FIXME the peephole pass should have killed this*/
if (ins->dreg != ins->sreg1)
s390x_movaps (code, ins->dreg, ins->sreg1);
break;
case OP_XZERO:
s390x_pxor (code, ins->dreg, ins->dreg);
break;
case OP_ICONV_TO_R4_RAW:
amd64_movd_xreg_reg_size (code, ins->dreg, ins->sreg1, 4);
break;
case OP_FCONV_TO_R8_X:
s390x_movsd (code, ins->dreg, ins->sreg1);
break;
case OP_XCONV_R8_TO_I4:
s390x_cvttsd2si_reg_xreg_size (code, ins->dreg, ins->sreg1, 4);
switch (ins->backend.source_opcode) {
case OP_FCONV_TO_I1:
amd64_widen_reg (code, ins->dreg, ins->dreg, TRUE, FALSE);
break;
case OP_FCONV_TO_U1:
amd64_widen_reg (code, ins->dreg, ins->dreg, FALSE, FALSE);
break;
case OP_FCONV_TO_I2:
amd64_widen_reg (code, ins->dreg, ins->dreg, TRUE, TRUE);
break;
case OP_FCONV_TO_U2:
amd64_widen_reg (code, ins->dreg, ins->dreg, FALSE, TRUE);
break;
}
break;
case OP_EXPAND_I2:
s390x_pinsrw_imm (code, ins->dreg, ins->sreg1, 0);
s390x_pinsrw_imm (code, ins->dreg, ins->sreg1, 1);
s390x_pshufd_imm (code, ins->dreg, ins->dreg, 0);
break;
case OP_EXPAND_I4:
amd64_movd_xreg_reg_size (code, ins->dreg, ins->sreg1, 4);
s390x_pshufd_imm (code, ins->dreg, ins->dreg, 0);
break;
case OP_EXPAND_I8:
amd64_movd_xreg_reg_size (code, ins->dreg, ins->sreg1, 8);
s390x_pshufd_imm (code, ins->dreg, ins->dreg, 0x44);
break;
case OP_EXPAND_R4:
s390x_movsd (code, ins->dreg, ins->sreg1);
s390x_pshufd_imm (code, ins->dreg, ins->dreg, 0);
break;
case OP_EXPAND_R8:
s390x_movsd (code, ins->dreg, ins->sreg1);
s390x_pshufd_imm (code, ins->dreg, ins->dreg, 0x44);
break;
#endif
default:
g_warning ("unknown opcode %s in %s()\n", mono_inst_name (ins->opcode), __FUNCTION__);
g_assert_not_reached ();
}
if ((cfg->opt & MONO_OPT_BRANCH) && ((code - cfg->native_code - offset) > max_len)) {
g_warning ("wrong maximal instruction length of instruction %s (expected %d, got %ld)",
mono_inst_name (ins->opcode), max_len, code - cfg->native_code - offset);
g_assert_not_reached ();
}
}
set_code_cursor (cfg, code);
}
/*========================= End of Function ========================*/
/**
*
* @brief Architecture-specific registration of lowlevel calls
*
 * Register any routines that implement optimized lowlevel operations
*/
void
mono_arch_register_lowlevel_calls (void)
{
}
/*========================= End of Function ========================*/
/**
*
* @brief Architecture-specific patching
* @param[in] @cfg - Compilation control block
* @param[in] @code - Start of code
* @param[in] @target - Target of patch
* @param[in] @relo - Relocation type
*
* Perform patching action
*/
static void
emit_patch_full (MonoCompile *cfg, MonoJumpInfo *ji, guint8 *code,
gpointer target, int relo)
{
guint8 *ip = ji->ip.i + code;
switch (relo) {
case MONO_R_S390_RELINS :
target = S390_RELATIVE(target, ip);
ip += 2;
s390_patch_rel (ip, (guint64) target);
break;
case MONO_R_S390_THUNKED :
if (cfg)
create_thunk(cfg, ip, code, target);
else
update_thunk(cfg, code, target);
break;
case MONO_R_S390_DIRECT :
S390_EMIT_CALL (ip, target);
break;
case MONO_R_S390_ADDR :
s390_patch_addr (ip, (guint64) target);
break;
case MONO_R_S390_SWITCH :
S390_EMIT_LOAD (ip, target);
break;
case MONO_R_S390_REL :
target = S390_RELATIVE(target, ip);
s390_patch_rel (ip, (guint64) target);
break;
default :
g_assert_not_reached();
}
}
/*========================= End of Function ========================*/
/**
*
* @brief Architecture-specific patching of instructions and data
*
* @param[in] @cfg - Compile control block
* @param[in] @method - Current method
* @param[in] @code - Current code block
* @param[in] @ji - Jump information
* @param[in] @target - Target of patch
*
* Process the patch data created during the instruction build process.
* This resolves jumps, calls, variables etc.
*/
void
mono_arch_patch_code_new (MonoCompile *cfg, guint8 *code, MonoJumpInfo *ji, gpointer target)
{
switch (ji->type) {
case MONO_PATCH_INFO_IP:
case MONO_PATCH_INFO_LDSTR:
case MONO_PATCH_INFO_TYPE_FROM_HANDLE:
case MONO_PATCH_INFO_LDTOKEN:
case MONO_PATCH_INFO_EXC:
emit_patch_full (cfg, ji, code, target, MONO_R_S390_ADDR);
break;
case MONO_PATCH_INFO_BB:
case MONO_PATCH_INFO_JIT_ICALL_ADDR:
case MONO_PATCH_INFO_JIT_ICALL_ID:
case MONO_PATCH_INFO_METHOD:
emit_patch_full (cfg, ji, code, target, ji->relocation);
break;
case MONO_PATCH_INFO_METHOD_JUMP:
case MONO_PATCH_INFO_RGCTX_FETCH:
case MONO_PATCH_INFO_SPECIFIC_TRAMPOLINE_LAZY_FETCH_ADDR:
case MONO_PATCH_INFO_ABS:
emit_patch_full (cfg, ji, code, target, MONO_R_S390_THUNKED);
break;
case MONO_PATCH_INFO_SWITCH:
emit_patch_full(cfg, ji, code, target, MONO_R_S390_SWITCH);
break;
case MONO_PATCH_INFO_METHODCONST:
case MONO_PATCH_INFO_CLASS:
case MONO_PATCH_INFO_IMAGE:
case MONO_PATCH_INFO_FIELD:
case MONO_PATCH_INFO_IID:
case MONO_PATCH_INFO_EXC_NAME:
emit_patch_full(cfg, ji, code, target, MONO_R_S390_REL);
break;
case MONO_PATCH_INFO_NONE:
break;
default:
emit_patch_full (cfg, ji, code, target, MONO_R_S390_RELINS);
}
}
/*========================= End of Function ========================*/
/**
*
* @brief Architecture-specific prolog generation
*
* @param[in] @cfg - Compile control block
 * @returns Location of the code generated
*
* Create the instruction sequence for entry into a method:
* - Determine stack size
* - Save preserved registers
* - Unload parameters
* - Determine if LMF needs saving and generate that sequence
*/
guint8 *
mono_arch_emit_prolog (MonoCompile *cfg)
{
MonoMethod *method = cfg->method;
MonoBasicBlock *bb;
MonoMethodSignature *sig;
MonoInst *inst;
long alloc_size, pos, max_offset, i, cfa_offset = 0;
guint8 *code;
guint32 size;
CallInfo *cinfo;
int argsClobbered = 0,
lmfOffset,
fpOffset = 0;
cfg->code_size = 512;
if (method->save_lmf)
cfg->code_size += 200;
cfg->native_code = code = (guint8 *) g_malloc (cfg->code_size);
/**
* Create unwind information
*/
mono_emit_unwind_op_def_cfa (cfg, code, STK_BASE, S390_CFA_OFFSET);
s390_stmg (code, s390_r6, s390_r15, STK_BASE, S390_REG_SAVE_OFFSET);
emit_unwind_regs(cfg, code, s390_r6, s390_r15, S390_REG_SAVE_OFFSET - S390_CFA_OFFSET);
if (cfg->arch.bkchain_reg != -1)
s390_lgr (code, cfg->arch.bkchain_reg, STK_BASE);
/*
* If there are local allocations the R11 becomes the frame register
*/
if (cfg->flags & MONO_CFG_HAS_ALLOCA) {
cfg->used_int_regs |= 1 << s390_r11;
}
/*
* Check if FP registers need preserving
*/
if ((cfg->arch.used_fp_regs & S390_FP_SAVE_MASK) != 0) {
for (int i = s390_f8; i <= s390_f15; i++) {
if (cfg->arch.used_fp_regs & (1 << i))
fpOffset += sizeof(double);
}
fpOffset = S390_ALIGN(fpOffset, sizeof(double));
}
cfg->arch.fpSize = fpOffset;
/*
* Calculate stack requirements
*/
alloc_size = cfg->stack_offset + fpOffset;
cfg->stack_usage = cfa_offset = alloc_size;
s390_lgr (code, s390_r11, STK_BASE);
if (s390_is_imm16 (alloc_size)) {
s390_aghi (code, STK_BASE, -alloc_size);
} else if (s390_is_imm32 (alloc_size)) {
s390_agfi (code, STK_BASE, -alloc_size);
} else {
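		/* Frames larger than a signed 32-bit immediate are carved out in INT_MAX-sized chunks */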
int stackSize = alloc_size;
while (stackSize > INT_MAX) {
s390_agfi (code, STK_BASE, -INT_MAX);
stackSize -= INT_MAX;
}
s390_agfi (code, STK_BASE, -stackSize);
}
mono_emit_unwind_op_def_cfa_offset (cfg, code, alloc_size + S390_CFA_OFFSET);
s390_stg (code, s390_r11, 0, STK_BASE, 0);
if (fpOffset > 0) {
int stkOffset = 0;
s390_lgr (code, s390_r1, s390_r11);
s390_aghi (code, s390_r1, -fpOffset);
for (int i = s390_f8; i <= s390_f15; i++) {
if (cfg->arch.used_fp_regs & (1 << i)) {
s390_std (code, i, 0, s390_r1, stkOffset);
emit_unwind_regs(cfg, code, 16+i, 16+i, stkOffset+fpOffset - S390_CFA_OFFSET);
stkOffset += sizeof(double);
}
}
}
if (cfg->frame_reg != STK_BASE) {
s390_lgr (code, s390_r11, STK_BASE);
mono_emit_unwind_op_def_cfa_reg (cfg, code, cfg->frame_reg);
}
/* store runtime generic context */
if (cfg->rgctx_var) {
g_assert (cfg->rgctx_var->opcode == OP_REGOFFSET);
s390_stg (code, MONO_ARCH_RGCTX_REG, 0,
cfg->rgctx_var->inst_basereg,
cfg->rgctx_var->inst_offset);
}
#if 0
char *methodName = getenv("MONO_TRACE_METHOD");
if (methodName != NULL) {
printf("ns: %s k: %s m: %s\n",method->klass->name_space,method->klass->name,method->name);fflush(stdout);
// Tests:set_ip
//if ((strcmp(method->klass->name_space,"") == 0) &&
// (strcmp(method->klass->name,"Tests") == 0) &&
// (strcmp(method->name, "set_ip") == 0)) {
// (strcmp("CancellationToken,TaskCreationOptions,TaskContinuationOptions,TaskScheduler",mono_signature_get_desc(method->signature, FALSE)) != 0)) {
if ((strcmp(method->name, methodName) == 0)) {
printf("SIGNATURE: %s\n",mono_signature_get_desc(method->signature, FALSE)); fflush(stdout);
s390_j (code, 0);
}
}
#endif
/* compute max_offset in order to use short forward jumps
* we always do it on s390 because the immediate displacement
* for jumps is too small
*/
max_offset = 0;
for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
MonoInst *ins;
bb->max_offset = max_offset;
MONO_BB_FOR_EACH_INS (bb, ins)
max_offset += ins_get_size (ins->opcode);
}
/* load arguments allocated to register from the stack */
sig = mono_method_signature_internal (method);
pos = 0;
cinfo = cfg->arch.cinfo;
if (cinfo->struct_ret) {
ArgInfo *ainfo = &cinfo->ret;
inst = cfg->vret_addr;
inst->backend.size = ainfo->vtsize;
if (inst->opcode == OP_REGVAR)
s390_lgr (code, inst->dreg, ainfo->reg);
else
s390_stg (code, ainfo->reg, 0, inst->inst_basereg, inst->inst_offset);
}
/**
* Process the arguments passed to the method
*/
for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
ArgInfo *ainfo = cinfo->args + i;
inst = cfg->args [pos];
if (inst->opcode == OP_VTARG_ADDR)
inst = inst->inst_left;
if (inst->opcode == OP_REGVAR) {
if (ainfo->regtype == RegTypeGeneral)
s390_lgr (code, inst->dreg, ainfo->reg);
else if (ainfo->regtype == RegTypeFP) {
if (inst->dreg != ainfo->reg) {
s390_ldr (code, inst->dreg, ainfo->reg);
}
} else if (ainfo->regtype == RegTypeFPR4) {
} else if (ainfo->regtype == RegTypeBase) {
s390_lgr (code, s390_r13, STK_BASE);
s390_aghi (code, s390_r13, alloc_size);
s390_lg (code, inst->dreg, 0, s390_r13, ainfo->offset);
} else
g_assert_not_reached ();
if (cfg->verbose_level > 2)
g_print ("Argument %d assigned to register %s\n",
pos, mono_arch_regname (inst->dreg));
} else {
if (ainfo->regtype == RegTypeGeneral) {
if (!((ainfo->reg >= 2) && (ainfo->reg <= 6)))
g_assert_not_reached();
switch (ainfo->size) {
case 1:
s390_stc (code, ainfo->reg, 0, inst->inst_basereg, inst->inst_offset);
break;
case 2:
s390_sth (code, ainfo->reg, 0, inst->inst_basereg, inst->inst_offset);
break;
case 4:
s390_st (code, ainfo->reg, 0, inst->inst_basereg, inst->inst_offset);
break;
case 8:
s390_stg (code, ainfo->reg, 0, inst->inst_basereg, inst->inst_offset);
break;
}
} else if (ainfo->regtype == RegTypeBase) {
} else if (ainfo->regtype == RegTypeFP) {
s390_std (code, ainfo->reg, 0, inst->inst_basereg, inst->inst_offset);
} else if (ainfo->regtype == RegTypeFPR4) {
s390_ste (code, ainfo->reg, 0, inst->inst_basereg, inst->inst_offset);
} else if (ainfo->regtype == RegTypeStructByVal) {
int doffset = inst->inst_offset;
size = (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE && sig->pinvoke && !sig->marshalling_disabled
? mono_class_native_size(mono_class_from_mono_type_internal (inst->inst_vtype), NULL)
: ainfo->size);
switch (size) {
case 1:
if (ainfo->reg != STK_BASE)
s390_stc (code, ainfo->reg, 0, inst->inst_basereg, doffset);
break;
case 2:
if (ainfo->reg != STK_BASE)
s390_sth (code, ainfo->reg, 0, inst->inst_basereg, doffset);
break;
case 4:
if (ainfo->reg != STK_BASE)
s390_st (code, ainfo->reg, 0, inst->inst_basereg, doffset);
break;
case 8:
if (ainfo->reg != STK_BASE)
s390_stg (code, ainfo->reg, 0, inst->inst_basereg, doffset);
break;
default:
if (ainfo->reg != STK_BASE)
s390_stg (code, ainfo->reg, 0, STK_BASE, doffset);
}
} else if (ainfo->regtype == RegTypeStructByAddr) {
s390_stg (code, ainfo->reg, 0, inst->inst_basereg, inst->inst_offset);
} else
g_assert_not_reached ();
}
pos++;
}
if (method->save_lmf) {
/**
* Build the MonoLMF structure on the stack - see mini-s390x.h
*/
lmfOffset = alloc_size - sizeof(MonoLMF);
s390_lgr (code, s390_r13, cfg->frame_reg);
s390_aghi (code, s390_r13, lmfOffset);
/*
* Preserve the parameter registers while we fix up the lmf
*/
s390_stmg (code, s390_r2, s390_r6, s390_r13,
MONO_STRUCT_OFFSET(MonoLMF, pregs));
for (i = 0; i < 5; i++)
mini_gc_set_slot_type_from_fp (cfg, lmfOffset + MONO_STRUCT_OFFSET (MonoLMF, pregs) + i * sizeof(gulong), SLOT_NOREF);
		/*
		 * On return from this call r2 has the address of the lmf
		 */
code = emit_call (cfg, code, MONO_PATCH_INFO_JIT_ICALL_ID,
GUINT_TO_POINTER (MONO_JIT_ICALL_mono_tls_get_lmf_addr_extern));
/*
* Set lmf.lmf_addr = jit_tls->lmf
*/
s390_stg (code, s390_r2, 0, s390_r13,
MONO_STRUCT_OFFSET(MonoLMF, lmf_addr));
mini_gc_set_slot_type_from_fp (cfg, lmfOffset + MONO_STRUCT_OFFSET (MonoLMF, lmf_addr), SLOT_NOREF);
/*
* Get current lmf
*/
s390_lg (code, s390_r0, 0, s390_r2, 0);
/*
* Set our lmf as the current lmf
*/
s390_stg (code, s390_r13, 0, s390_r2, 0);
/*
* Have our lmf.previous_lmf point to the last lmf
*/
s390_stg (code, s390_r0, 0, s390_r13,
MONO_STRUCT_OFFSET(MonoLMF, previous_lmf));
mini_gc_set_slot_type_from_fp (cfg, lmfOffset + MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), SLOT_NOREF);
/*
* Save method info
*/
S390_SET (code, s390_r1, method);
s390_stg (code, s390_r1, 0, s390_r13,
MONO_STRUCT_OFFSET(MonoLMF, method));
mini_gc_set_slot_type_from_fp (cfg, lmfOffset + MONO_STRUCT_OFFSET (MonoLMF, method), SLOT_NOREF);
/*
* Save the current IP
*/
s390_stg (code, STK_BASE, 0, s390_r13, MONO_STRUCT_OFFSET(MonoLMF, ebp));
s390_basr (code, s390_r1, 0);
s390_stg (code, s390_r1, 0, s390_r13, MONO_STRUCT_OFFSET(MonoLMF, eip));
mini_gc_set_slot_type_from_fp (cfg, lmfOffset + MONO_STRUCT_OFFSET (MonoLMF, ebp), SLOT_NOREF);
mini_gc_set_slot_type_from_fp (cfg, lmfOffset + MONO_STRUCT_OFFSET (MonoLMF, eip), SLOT_NOREF);
/*
* Save general and floating point registers
*/
s390_stmg (code, s390_r2, s390_r12, s390_r13,
MONO_STRUCT_OFFSET(MonoLMF, gregs) + 2 * sizeof(gulong));
for (i = 0; i < 11; i++)
mini_gc_set_slot_type_from_fp (cfg, lmfOffset + MONO_STRUCT_OFFSET (MonoLMF, gregs) + i * sizeof(gulong), SLOT_NOREF);
fpOffset = lmfOffset + MONO_STRUCT_OFFSET (MonoLMF, fregs);
for (i = 0; i < 16; i++) {
s390_std (code, i, 0, s390_r13,
MONO_STRUCT_OFFSET(MonoLMF, fregs) + i * sizeof(gulong));
mini_gc_set_slot_type_from_fp (cfg, fpOffset, SLOT_NOREF);
fpOffset += sizeof(double);
}
/*
* Restore the parameter registers now that we've set up the lmf
*/
s390_lmg (code, s390_r2, s390_r6, s390_r13,
MONO_STRUCT_OFFSET(MonoLMF, pregs));
}
if (cfg->method->save_lmf)
argsClobbered = TRUE;
/*
* Optimize the common case of the first bblock making a call with the same
* arguments as the method. This works because the arguments are still in their
* original argument registers.
*/
if (!argsClobbered) {
MonoBasicBlock *first_bb = cfg->bb_entry;
MonoInst *next;
int filter = FILTER_IL_SEQ_POINT;
next = mono_bb_first_inst (first_bb, filter);
if (!next && first_bb->next_bb) {
first_bb = first_bb->next_bb;
next = mono_bb_first_inst (first_bb, filter);
}
if (first_bb->in_count > 1)
next = NULL;
for (i = 0; next && i < sig->param_count + sig->hasthis; ++i) {
ArgInfo *ainfo = cinfo->args + i;
gboolean match = FALSE;
inst = cfg->args [i];
if (inst->opcode != OP_REGVAR) {
switch (ainfo->regtype) {
case RegTypeGeneral: {
if (((next->opcode == OP_LOAD_MEMBASE) ||
(next->opcode == OP_LOADI4_MEMBASE)) &&
next->inst_basereg == inst->inst_basereg &&
next->inst_offset == inst->inst_offset) {
if (next->dreg == ainfo->reg) {
NULLIFY_INS (next);
match = TRUE;
} else {
next->opcode = OP_MOVE;
next->sreg1 = ainfo->reg;
/* Only continue if the instruction doesn't change argument regs */
if (next->dreg == ainfo->reg)
match = TRUE;
}
}
break;
}
default:
break;
}
} else {
/* Argument allocated to (non-volatile) register */
switch (ainfo->regtype) {
case RegTypeGeneral:
if (next->opcode == OP_MOVE &&
next->sreg1 == inst->dreg &&
next->dreg == ainfo->reg) {
NULLIFY_INS (next);
match = TRUE;
}
break;
default:
break;
}
}
if (match) {
next = mono_inst_next (next, filter);
if (!next)
break;
}
}
}
if (cfg->gen_sdb_seq_points) {
MonoInst *seq;
/* Initialize ss_tramp_var */
seq = cfg->arch.ss_tramp_var;
g_assert (seq->opcode == OP_REGOFFSET);
S390_SET (code, s390_r1, (guint64) &ss_trampoline);
s390_stg (code, s390_r1, 0, seq->inst_basereg, seq->inst_offset);
/* Initialize bp_tramp_var */
seq = cfg->arch.bp_tramp_var;
g_assert (seq->opcode == OP_REGOFFSET);
S390_SET (code, s390_r1, (guint64) &bp_trampoline);
s390_stg (code, s390_r1, 0, seq->inst_basereg, seq->inst_offset);
}
set_code_cursor (cfg, code);
return code;
}
/*========================= End of Function ========================*/
/**
*
 * @brief Architecture-specific epilog generation
*
* @param[in] @cfg - Compile control block
*
* Create the instruction sequence for exit from a method
*/
void
mono_arch_emit_epilog (MonoCompile *cfg)
{
MonoMethod *method = cfg->method;
guint8 *code;
int max_epilog_size = 96, i;
int fpOffset = 0;
if (cfg->method->save_lmf)
max_epilog_size += 128;
code = realloc_code (cfg, max_epilog_size);
cfg->has_unwind_info_for_epilog = TRUE;
/* Mark the start of the epilog */
mono_emit_unwind_op_mark_loc (cfg, code, 0);
/* Save the uwind state which is needed by the out-of-line code */
mono_emit_unwind_op_remember_state (cfg, code);
if (method->save_lmf)
restoreLMF(code, cfg->frame_reg, cfg->stack_usage);
code = backUpStackPtr(cfg, code);
mono_emit_unwind_op_def_cfa (cfg, code, STK_BASE, S390_CFA_OFFSET);
mono_emit_unwind_op_same_value (cfg, code, STK_BASE);
if (cfg->arch.fpSize != 0) {
fpOffset = -cfg->arch.fpSize;
for (int i=8; i<16; i++) {
if (cfg->arch.used_fp_regs & (1 << i)) {
s390_ldy (code, i, 0, STK_BASE, fpOffset);
mono_emit_unwind_op_same_value (cfg, code, 16+i);
fpOffset += sizeof(double);
}
}
}
s390_lmg (code, s390_r6, s390_r14, STK_BASE, S390_REG_SAVE_OFFSET);
for (i = s390_r6; i < s390_r15; i++)
mono_emit_unwind_op_same_value (cfg, code, i);
s390_br (code, s390_r14);
/* Restore the unwind state to be the same as before the epilog */
mono_emit_unwind_op_restore_state (cfg, code);
/* Round up to an 8-byte boundary for the start of any thunk entries */
code = (guint8 *) ((((uintptr_t) code + 7) >> 3) << 3);
set_code_cursor (cfg, code);
}
/*========================= End of Function ========================*/
/**
*
* @brief Architecture-specific exception emission
*
* @param[in] @cfg - Compile control block
*
* Create the instruction sequence for exception handling
*/
void
mono_arch_emit_exceptions (MonoCompile *cfg)
{
MonoJumpInfo *patch_info;
guint8 *code;
int nThrows = 0,
exc_count = 0,
iExc;
guint32 code_size;
MonoClass *exc_classes [MAX_EXC];
guint8 *exc_throw_start [MAX_EXC];
for (patch_info = cfg->patch_info;
patch_info;
patch_info = patch_info->next) {
if (patch_info->type == MONO_PATCH_INFO_EXC)
exc_count++;
}
code_size = exc_count * 48;
code = realloc_code (cfg, code_size);
/*
* Add code to raise exceptions
*/
for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
switch (patch_info->type) {
case MONO_PATCH_INFO_EXC: {
guint8 *ip = patch_info->ip.i + cfg->native_code;
MonoClass *exc_class;
/*
* Patch the branch in epilog to come here
*/
s390_patch_rel (ip + 2, (guint64) S390_RELATIVE(code,ip));
exc_class = mono_class_load_from_name (mono_defaults.corlib,
"System",
patch_info->data.name);
for (iExc = 0; iExc < nThrows; ++iExc)
if (exc_classes [iExc] == exc_class)
break;
if (iExc < nThrows) {
s390_jcl (code, S390_CC_UN,
(guint64) exc_throw_start [iExc]);
patch_info->type = MONO_PATCH_INFO_NONE;
} else {
if (nThrows < MAX_EXC) {
exc_classes [nThrows] = exc_class;
exc_throw_start [nThrows] = code;
}
/*
* Patch the parameter passed to the handler
*/
S390_SET (code, s390_r2, m_class_get_type_token (exc_class));
/*
* Load return address & parameter register
*/
s390_larl (code, s390_r14, (guint64)S390_RELATIVE((patch_info->ip.i +
cfg->native_code + 8), code));
/*
* Reuse the current patch to set the jump
*/
patch_info->type = MONO_PATCH_INFO_JIT_ICALL_ID;
patch_info->data.jit_icall_id = MONO_JIT_ICALL_mono_arch_throw_corlib_exception;
patch_info->ip.i = code - cfg->native_code;
patch_info->relocation = MONO_R_S390_THUNKED;
S390_BR_TEMPLATE (code, s390_r1);
cfg->thunk_area += THUNK_SIZE;
}
break;
}
default:
/* do nothing */
break;
}
}
/* Round up to an 8-byte boundary for the start of any thunk entries */
code = (guint8 *) ((((uintptr_t) code + 7) >> 3) << 3);
set_code_cursor (cfg, code);
}
/*========================= End of Function ========================*/
/**
*
* @brief Architecture-specific finishing of initialization
*
* Perform any architectural-specific operations at the conclusion of
* the initialization phase
*/
void
mono_arch_finish_init (void)
{
}
/*========================= End of Function ========================*/
/**
*
* @brief Architecture-specific instruction emission for method
*
* @param[in] @cfg - Compile Control block
* @param[in] @cmethod - Current method
* @param[in] @fsig - Method signature
* @param[in] @args - Arguments to method
* @returns Instruction(s) required for architecture
*
* Provide any architectural shortcuts for specific methods.
*/
MonoInst *
mono_arch_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
{
MonoInst *ins = NULL;
int opcode = 0;
MonoStackType stack_type = STACK_R8;
if (cmethod->klass == mono_class_try_get_math_class ()) {
// unary double
if (fsig->param_count == 1 && fsig->params [0]->type == MONO_TYPE_R8) {
if (strcmp (cmethod->name, "Abs") == 0) {
opcode = OP_ABS;
} else if (strcmp (cmethod->name, "Ceiling") == 0) {
opcode = OP_CEIL;
} else if (strcmp (cmethod->name, "Floor") == 0) {
opcode = OP_FLOOR;
} else if (strcmp (cmethod->name, "Round") == 0) {
opcode = OP_ROUND;
} else if (strcmp (cmethod->name, "Sqrt") == 0) {
opcode = OP_SQRT;
} else if (strcmp (cmethod->name, "Truncate") == 0) {
opcode = OP_TRUNC;
}
}
// unary float (overloaded)
else if (fsig->param_count == 1 && fsig->params [0]->type == MONO_TYPE_R4) {
if (strcmp (cmethod->name, "Abs") == 0) {
opcode = OP_ABSF;
stack_type = STACK_R4;
}
}
// binary double
else if (fsig->param_count == 2 && fsig->params [0]->type == MONO_TYPE_R8 && fsig->params [1]->type == MONO_TYPE_R8) {
if (strcmp (cmethod->name, "CopySign") == 0) {
opcode = OP_FCOPYSIGN;
}
}
} else if (cmethod->klass == mono_class_try_get_mathf_class ()) {
if (fsig->param_count == 1) {
stack_type = STACK_R4;
if (strcmp (cmethod->name, "Abs") == 0) {
opcode = OP_ABSF;
stack_type = STACK_R4;
} else if (strcmp (cmethod->name, "Ceiling") == 0) {
opcode = OP_CEILF;
stack_type = STACK_R4;
} else if (strcmp (cmethod->name, "Floor") == 0) {
opcode = OP_FLOORF;
stack_type = STACK_R4;
} else if (strcmp (cmethod->name, "Sqrt") == 0) {
opcode = OP_SQRTF;
stack_type = STACK_R4;
} else if (strcmp (cmethod->name, "Truncate") == 0) {
opcode = OP_TRUNCF;
stack_type = STACK_R4;
}
}
}
if (opcode) {
MONO_INST_NEW (cfg, ins, opcode);
ins->type = stack_type;
ins->dreg = mono_alloc_freg (cfg);
ins->sreg1 = args [0]->dreg;
if (fsig->param_count > 1) {
ins->sreg2 = args [1]->dreg;
}
g_assert (fsig->param_count <= 2);
MONO_ADD_INS (cfg->cbb, ins);
}
return ins;
}
/*========================= End of Function ========================*/
/**
*
* @brief Decompose opcode into a System z operation
*
* @param[in] @cfg - Compile Control block
* @param[in] @ins - Mono Instruction
*
* Substitute a System z instruction for a Mono operation.
*/
void
mono_arch_decompose_opts (MonoCompile *cfg, MonoInst *ins)
{
/*
* Have to rename these to avoid being decomposed normally, since the normal
* decomposition does not work on S390.
*/
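/*
 * For example, OP_IADD_OVF is renamed to OP_S390_IADD_OVF so that the
 * s390x emitter can generate the add together with its overflow check,
 * instead of relying on the generic decomposition.
 */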
switch (ins->opcode) {
case OP_ISUB_OVF:
ins->opcode = OP_S390_ISUB_OVF;
break;
case OP_ISUB_OVF_UN:
ins->opcode = OP_S390_ISUB_OVF_UN;
break;
case OP_IADD_OVF:
ins->opcode = OP_S390_IADD_OVF;
break;
case OP_IADD_OVF_UN:
ins->opcode = OP_S390_IADD_OVF_UN;
break;
case OP_LADD_OVF:
ins->opcode = OP_S390_LADD_OVF;
break;
case OP_LADD_OVF_UN:
ins->opcode = OP_S390_LADD_OVF_UN;
break;
case OP_LSUB_OVF:
ins->opcode = OP_S390_LSUB_OVF;
break;
case OP_LSUB_OVF_UN:
ins->opcode = OP_S390_LSUB_OVF_UN;
break;
default:
break;
}
}
/*========================= End of Function ========================*/
/**
*
* @brief Determine the cost of allocating a variable
*
* @param[in] @cfg - Compile Control block
* @param[in] @vmv - Mono Method Variable
* @returns Cost (hardcoded on s390x to 2)
*
* Determine the cost, in the number of memory references, of the action
* of allocating the variable VMV into a register during global register
* allocation.
*
*/
guint32
mono_arch_regalloc_cost (MonoCompile *cfg, MonoMethodVar *vmv)
{
/* FIXME: */
return 2;
}
/*========================= End of Function ========================*/
/**
*
* @brief Architecture-specific register window flushing
*
* Not applicable for s390x so we just do nothing
*
*/
void
mono_arch_flush_register_windows (void)
{
}
/*========================= End of Function ========================*/
/**
*
* @brief Architecture-specific check if a value may be used as an immediate
*
* @param[in] @opcode - Operation code
* @param[in] @imm_opcode - Immediate operation code
* @param[in] @imm - Value to be examined
* @returns True if it is a valid immediate value
*
* Determine if the operand qualifies as an immediate value. For s390x
* this is a value in the signed 32-bit range -2**31 to 2**31-1.
*
*/
gboolean
mono_arch_is_inst_imm (int opcode, int imm_opcode, gint64 imm)
{
return s390_is_imm32 (imm);
}
/*========================= End of Function ========================*/
/**
*
* @brief Architecture-specific patch offset value for AOT
*
* @param[in] @code - Location of code to check
* @returns Offset
*
* Dummy entry point if/when s390x supports AOT.
*/
guint32
mono_arch_get_patch_offset (guint8 *code)
{
return 0;
}
/*========================= End of Function ========================*/
/**
*
* @brief Architecture-specific retrieval of a register from the context
*
* @param[in] @ctx - Mono context
* @param[in] @reg - Register number to be returned
* @returns Contents of the register from the context
*
* Return a register from the context.
*/
host_mgreg_t
mono_arch_context_get_int_reg (MonoContext *ctx, int reg)
{
return ctx->uc_mcontext.gregs[reg];
}
host_mgreg_t*
mono_arch_context_get_int_reg_address (MonoContext *ctx, int reg)
{
return &ctx->uc_mcontext.gregs[reg];
}
/*========================= End of Function ========================*/
/**
*
* @brief Architecture-specific setting of a register in the context
*
* @param[in] @ctx - Mono context
* @param[in] @reg - Register number to be returned
* @param[in] @val - Value to be set
*
* Set the specified register in the context with the value passed
*/
void
mono_arch_context_set_int_reg (MonoContext *ctx, int reg, host_mgreg_t val)
{
ctx->uc_mcontext.gregs[reg] = val;
}
/*========================= End of Function ========================*/
/**
*
* @brief Architecture-specific retrieval of the "this" value from the context
*
* @param[in] @regs - Context registers
* @param[in] @code - Current location
* @returns Pointer to the "this" object
*
* Extract register 2 from the context since, for s390x, this is where
* the "this" parameter is passed
*/
gpointer
mono_arch_get_this_arg_from_call (host_mgreg_t *regs, guint8 *code)
{
return (gpointer) regs [s390_r2];
}
/*========================= End of Function ========================*/
/**
*
* @brief Delegation trampoline processing
*
* @param[in] @info - Trampoline information
* @param[in] @has_target - Use target from delegation
* @param[in] @sig - Method signature
* @param[in] @aot - AOT indicator
* @returns Next instruction location
*
* Process the delegation trampolines
*/
static guint8 *
get_delegate_invoke_impl (MonoTrampInfo **info, gboolean has_target, MonoMethodSignature *sig, gboolean aot)
{
guint8 *code, *start;
if (has_target) {
int size = 32;
start = code = (guint8 *) mono_global_codeman_reserve (size);
/* Replace the this argument with the target */
s390_lg (code, s390_r1, 0, s390_r2, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr));
s390_lg (code, s390_r2, 0, s390_r2, MONO_STRUCT_OFFSET (MonoDelegate, target));
s390_br (code, s390_r1);
g_assert ((code - start) <= size);
mono_arch_flush_icache (start, size);
MONO_PROFILER_RAISE (jit_code_buffer, (start, code - start, MONO_PROFILER_CODE_BUFFER_DELEGATE_INVOKE, NULL));
} else {
int size, i, offset = S390_MINIMAL_STACK_SIZE, iReg = s390_r2;
CallInfo *cinfo = get_call_info (NULL, sig);
size = 32 + sig->param_count * 8;
start = code = (guint8 *) mono_global_codeman_reserve (size);
s390_lg (code, s390_r1, 0, s390_r2, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr));
/* slide down the arguments */
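/*
 * e.g. with two parameters: r2 (the delegate) is replaced by r3, r3 by
 * r4, and any stack-based arguments are copied up by one slot.
 */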
for (i = 0; i < sig->param_count; ++i) {
switch(cinfo->args[i].regtype) {
case RegTypeGeneral :
if (iReg < S390_LAST_ARG_REG) {
s390_lgr (code, iReg, (iReg + 1));
} else {
s390_lg (code, iReg, 0, STK_BASE, offset);
}
iReg++;
break;
default :
s390_mvc (code, sizeof(uintptr_t), STK_BASE, offset, STK_BASE, offset+sizeof(uintptr_t));
offset += sizeof(uintptr_t);
}
}
s390_br (code, s390_r1);
g_free (cinfo);
g_assert ((code - start) <= size);
mono_arch_flush_icache (start, size);
MONO_PROFILER_RAISE (jit_code_buffer, (start, code - start, MONO_PROFILER_CODE_BUFFER_DELEGATE_INVOKE, NULL));
}
if (has_target) {
*info = mono_tramp_info_create ("delegate_invoke_impl_has_target", start, code - start, NULL, NULL);
} else {
char *name = g_strdup_printf ("delegate_invoke_impl_target_%d", sig->param_count);
*info = mono_tramp_info_create (name, start, code - start, NULL, NULL);
g_free (name);
}
return start;
}
/*========================= End of Function ========================*/
/**
*
* @brief Architecture-specific delegation trampolines processing
*
* @returns List of trampolines
*
* Return a list of MonoTrampInfo structures for the delegate invoke impl trampolines.
*/
GSList*
mono_arch_get_delegate_invoke_impls (void)
{
GSList *res = NULL;
MonoTrampInfo *info;
get_delegate_invoke_impl (&info, TRUE, NULL, TRUE);
res = g_slist_prepend (res, info);
#if 0
for (int i = 0; i <= MAX_ARCH_DELEGATE_PARAMS; ++i) {
get_delegate_invoke_impl (&info, FALSE, NULL, TRUE);
res = g_slist_prepend (res, info);
}
#endif
return res;
}
/*========================= End of Function ========================*/
/**
*
* @brief Architecture-specific delegation trampoline processing
*
* @param[in] @sig - Method signature
* @param[in] @has_target - Whether delegation contains a target
* @returns Trampoline
*
* Return a pointer to a delegation trampoline
*/
gpointer
mono_arch_get_delegate_invoke_impl (MonoMethodSignature *sig, gboolean has_target)
{
guint8 *code, *start;
if (sig->param_count > MAX_ARCH_DELEGATE_PARAMS)
return NULL;
/* FIXME: Support more cases */
if (MONO_TYPE_ISSTRUCT (mini_get_underlying_type (sig->ret)))
return NULL;
if (has_target) {
static guint8* cached = NULL;
if (cached)
return cached;
if (mono_ee_features.use_aot_trampolines) {
start = (guint8 *) mono_aot_get_trampoline ("delegate_invoke_impl_has_target");
} else {
MonoTrampInfo *info;
start = get_delegate_invoke_impl (&info, TRUE, sig, FALSE);
mono_tramp_info_register (info, NULL);
}
mono_memory_barrier ();
cached = start;
} else {
static guint8* cache [MAX_ARCH_DELEGATE_PARAMS + 1] = {NULL};
int i;
if (sig->param_count > MAX_ARCH_DELEGATE_PARAMS)
return NULL;
for (i = 0; i < sig->param_count; ++i)
if (!mono_is_regsize_var (sig->params [i]))
return NULL;
code = cache [sig->param_count];
if (code)
return code;
if (mono_ee_features.use_aot_trampolines) {
char *name = g_strdup_printf ("delegate_invoke_impl_target_%d", sig->param_count);
start = (guint8 *) mono_aot_get_trampoline (name);
g_free (name);
} else {
MonoTrampInfo *info;
start = get_delegate_invoke_impl (&info, FALSE, sig, FALSE);
mono_tramp_info_register (info, NULL);
}
mono_memory_barrier ();
cache [sig->param_count] = start;
}
return start;
}
/*========================= End of Function ========================*/
/**
*
* @brief Architecture-specific delegation virtual trampoline processing
*
* @param[in] @sig - Method signature
* @param[in] @method - Method
* @param[in] @offset - Offset into vtable
* @param[in] @load_imt_reg - Whether to load the IMT register
* @returns Trampoline
*
* Return a pointer to a delegation virtual trampoline
*/
gpointer
mono_arch_get_delegate_virtual_invoke_impl (MonoMethodSignature *sig, MonoMethod *method,
int offset, gboolean load_imt_reg)
{
guint8 *code, *start;
int size = 40;
start = code = (guint8 *) mono_global_codeman_reserve (size);
/*
* Replace the "this" argument with the target
*/
s390_lgr (code, s390_r1, s390_r2);
s390_lg (code, s390_r2, 0, s390_r1, MONO_STRUCT_OFFSET(MonoDelegate, target));
/*
* Load the IMT register, if needed
*/
if (load_imt_reg) {
s390_lg (code, MONO_ARCH_IMT_REG, 0, s390_r1, MONO_STRUCT_OFFSET(MonoDelegate, method));
}
/*
* Load the vTable
*/
s390_lg (code, s390_r1, 0, s390_r2, MONO_STRUCT_OFFSET(MonoObject, vtable));
if (offset != 0) {
s390_agfi(code, s390_r1, offset);
}
s390_lg (code, s390_r1, 0, s390_r1, 0);
s390_br (code, s390_r1);
mono_arch_flush_icache (start, code - start);
MONO_PROFILER_RAISE (jit_code_buffer, (start, code - start, MONO_PROFILER_CODE_BUFFER_DELEGATE_INVOKE, NULL));
return(start);
}
/*========================= End of Function ========================*/
/**
*
* @brief Architecture-specific build of IMT trampoline
*
* @param[in] @vtable - Mono VTable
* @param[in] @imt_entries - List of IMT check items
* @param[in] @count - Count of items
* @param[in] @fail_tramp - Pointer to a failure trampoline
* @returns Trampoline
*
* Return a pointer to an IMT trampoline
*/
gpointer
mono_arch_build_imt_trampoline (MonoVTable *vtable,
MonoIMTCheckItem **imt_entries, int count,
gpointer fail_tramp)
{
int i;
int size = 0;
guchar *code, *start;
MonoMemoryManager *mem_manager = m_class_get_mem_manager (vtable->klass);
for (i = 0; i < count; ++i) {
MonoIMTCheckItem *item = imt_entries [i];
if (item->is_equals) {
if (item->check_target_idx) {
if (!item->compare_done)
item->chunk_size += CMP_SIZE + JUMP_SIZE;
if (item->has_target_code)
item->chunk_size += BR_SIZE + JUMP_SIZE + LOADCON_SIZE;
else
item->chunk_size += BR_SIZE + JUMP_SIZE + LOADCON_SIZE +
LOAD_SIZE;
} else {
if (fail_tramp) {
item->chunk_size += CMP_SIZE + 2 * BR_SIZE + JUMP_SIZE +
2 * LOADCON_SIZE;
if (!item->has_target_code)
item->chunk_size += LOAD_SIZE;
} else {
item->chunk_size += LOADCON_SIZE + LOAD_SIZE + BR_SIZE;
#if ENABLE_WRONG_METHOD_CHECK
item->chunk_size += CMP_SIZE + JUMP_SIZE;
#endif
}
}
} else {
item->chunk_size += CMP_SIZE + JUMP_SIZE;
imt_entries [item->check_target_idx]->compare_done = TRUE;
}
size += item->chunk_size;
}
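/*
 * The first pass (above) only computed the per-item code sizes; the
 * second pass below emits the actual compare-and-branch chains into the
 * reserved buffer.
 */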
if (fail_tramp) {
code = (guint8 *)mini_alloc_generic_virtual_trampoline (vtable, size);
} else {
code = mono_mem_manager_code_reserve (mem_manager, size);
}
start = code;
for (i = 0; i < count; ++i) {
MonoIMTCheckItem *item = imt_entries [i];
item->code_target = (guint8 *) code;
if (item->is_equals) {
if (item->check_target_idx) {
if (!item->compare_done) {
S390_SET (code, s390_r0, item->key);
s390_cgr (code, s390_r0, MONO_ARCH_IMT_REG);
}
item->jmp_code = (guint8*) code;
s390_jcl (code, S390_CC_NE, 0);
if (item->has_target_code) {
S390_SET (code, s390_r1, item->value.target_code);
} else {
S390_SET (code, s390_r1, (&(vtable->vtable [item->value.vtable_slot])));
s390_lg (code, s390_r1, 0, s390_r1, 0);
}
s390_br (code, s390_r1);
} else {
if (fail_tramp) {
gint64 target;
S390_SET (code, s390_r0, item->key);
s390_cgr (code, s390_r0, MONO_ARCH_IMT_REG);
item->jmp_code = (guint8*) code;
s390_jcl (code, S390_CC_NE, 0);
if (item->has_target_code) {
S390_SET (code, s390_r1, item->value.target_code);
} else {
g_assert (vtable);
S390_SET (code, s390_r1,
(&(vtable->vtable [item->value.vtable_slot])));
s390_lg (code, s390_r1, 0, s390_r1, 0);
}
s390_br (code, s390_r1);
target = (gint64) S390_RELATIVE(code, item->jmp_code);
s390_patch_rel(item->jmp_code+2, target);
S390_SET (code, s390_r1, fail_tramp);
s390_br (code, s390_r1);
item->jmp_code = NULL;
} else {
/* enable the commented code to assert on wrong method */
#if ENABLE_WRONG_METHOD_CHECK
g_assert_not_reached ();
#endif
S390_SET (code, s390_r1, (&(vtable->vtable [item->value.vtable_slot])));
s390_lg (code, s390_r1, 0, s390_r1, 0);
s390_br (code, s390_r1);
}
}
} else {
S390_SET (code, s390_r0, item->key);
s390_cgr (code, MONO_ARCH_IMT_REG, s390_r0);
item->jmp_code = (guint8 *) code;
s390_jcl (code, S390_CC_GE, 0);
}
}
/*
* patch the branches to get to the target items
*/
for (i = 0; i < count; ++i) {
MonoIMTCheckItem *item = imt_entries [i];
if (item->jmp_code) {
if (item->check_target_idx) {
gint64 offset;
offset = (gint64) S390_RELATIVE(imt_entries [item->check_target_idx]->code_target,
item->jmp_code);
s390_patch_rel ((guchar *) item->jmp_code + 2, (guint64) offset);
}
}
}
mono_arch_flush_icache ((guint8*)start, (code - start));
MONO_PROFILER_RAISE (jit_code_buffer, (start, code - start, MONO_PROFILER_CODE_BUFFER_IMT_TRAMPOLINE, NULL));
if (!fail_tramp)
UnlockedAdd (&mono_stats.imt_trampolines_size, code - start);
g_assert (code - start <= size);
mono_tramp_info_register (mono_tramp_info_create (NULL, start, code - start, NULL, NULL), mem_manager);
return (start);
}
/*========================= End of Function ========================*/
/**
*
* @brief Architecture-specific return of pointer to IMT method
*
* @param[in] @regs - Context registers
* @param[in] @code - Current location
* @returns Pointer to IMT method
*
* Extract the value of the IMT register from the context
*/
MonoMethod*
mono_arch_find_imt_method (host_mgreg_t *regs, guint8 *code)
{
return ((MonoMethod *) regs [MONO_ARCH_IMT_REG]);
}
/*========================= End of Function ========================*/
/**
*
* @brief Architecture-specific return of pointer to the static call vtable.
*
* @param[in] @regs - Context registers
* @param[in] @code - Current location
* @returns Pointer to static call vtable
*
* Extract the value of the RGCTX register from the context which
* points to the static call vtable.
*/
MonoVTable*
mono_arch_find_static_call_vtable (host_mgreg_t *regs, guint8 *code)
{
return (MonoVTable*)(gsize) regs [MONO_ARCH_RGCTX_REG];
}
/*========================= End of Function ========================*/
/**
*
* @brief Architecture-specific return of unwind bytecode for DWARF CIE
*
* @returns Unwind byte code
*
* Returns the unwind bytecode for DWARF CIE
*/
GSList*
mono_arch_get_cie_program (void)
{
GSList *l = NULL;
mono_add_unwind_op_def_cfa (l, 0, 0, STK_BASE, S390_CFA_OFFSET);
return(l);
}
/*========================= End of Function ========================*/
#ifdef MONO_ARCH_SOFT_DEBUG_SUPPORTED
/**
*
* @brief Architecture-specific setting of a breakpoint
*
* @param[in] @ji - Mono JIT Information
* @param[in] @ip - Instruction pointer
*
* Set a breakpoint at the native code corresponding to JI at NATIVE_OFFSET.
* The location should contain code emitted by OP_SEQ_POINT.
*/
void
mono_arch_set_breakpoint (MonoJitInfo *ji, guint8 *ip)
{
guint8 *bp = ip;
/* IP should point to a LGHI R1,0 */
g_assert (bp[0] == 0xa7);
/* Replace it with a LGHI R1,1 */
s390_lghi (bp, s390_r1, 1);
}
/*========================= End of Function ========================*/
/**
*
* @brief Architecture-specific clearing of a breakpoint
*
* @param[in] @ji - Mono JIT Information
* @param[in] @ip - Instruction pointer
*
* Replace the breakpoint with a no-operation.
*/
void
mono_arch_clear_breakpoint (MonoJitInfo *ji, guint8 *ip)
{
guint8 *bp = ip;
/* IP should point to a LGHI R1,1 */
g_assert (bp[0] == 0xa7);
/* Replace it with a LGHI R1,0 */
s390_lghi (bp, s390_r1, 0);
}
/*========================= End of Function ========================*/
/**
*
* @brief Architecture-specific check if this is a breakpoint event
*
* @param[in] @info - Signal information
* @param[in] @sigctx - Signal context
* @returns True if this is a breakpoint event
*
* We use soft breakpoints so always return FALSE
*/
gboolean
mono_arch_is_breakpoint_event (void *info, void *sigctx)
{
/* We use soft breakpoints on s390x */
return FALSE;
}
/*========================= End of Function ========================*/
/**
*
* @brief Architecture-specific skip of a breakpoint
*
* @param[in] @ctx - Mono Context
* @param[in] @ji - Mono JIT information
*
* We use soft breakpoints, so this should never be reached
*/
void
mono_arch_skip_breakpoint (MonoContext *ctx, MonoJitInfo *ji)
{
g_assert_not_reached ();
}
/*========================= End of Function ========================*/
/**
*
* @brief Architecture-specific start of single stepping
*
* Enable single stepping by installing the single-step trampoline
*/
void
mono_arch_start_single_stepping (void)
{
ss_trampoline = mini_get_single_step_trampoline();
}
/*========================= End of Function ========================*/
/**
*
* @brief Architecture-specific stop of single stepping
*
* Disable single stepping by clearing the single-step trampoline
*/
void
mono_arch_stop_single_stepping (void)
{
ss_trampoline = NULL;
}
/*========================= End of Function ========================*/
/**
*
* @brief Architecture-specific check if single stepping event
*
* @param[in] @info - Signal information
* @param[in] @sigctx - Signal context
* @returns True if this is a single stepping event
*
* Return whether the machine state in sigctx corresponds to a single step event.
* On s390x we use soft breakpoints so return FALSE
*/
gboolean
mono_arch_is_single_step_event (void *info, void *sigctx)
{
/* We use soft breakpoints on s390x */
return FALSE;
}
/*========================= End of Function ========================*/
/**
*
* @brief Architecture-specific skip of a single stepping event
*
* @param[in] @ctx - Mono Context
*
* Modify the ctx so the IP is placed after the single step trigger
* instruction, so that the instruction is not executed again.
* On s390x we use soft breakpoints so we shouldn't get here
*/
void
mono_arch_skip_single_step (MonoContext *ctx)
{
g_assert_not_reached();
}
/*========================= End of Function ========================*/
/**
*
* @brief Architecture-specific creation of sequence point information
*
* @param[in] @code - Current location pointer
* @returns Sequence Point Information
*
* Return a pointer to a data structure which is used by the sequence
* point implementation in AOTed code.
*/
SeqPointInfo *
mono_arch_get_seq_point_info (guint8 *code)
{
SeqPointInfo *info;
MonoJitInfo *ji;
MonoJitMemoryManager *jit_mm;
jit_mm = get_default_jit_mm ();
jit_mm_lock (jit_mm);
info = (SeqPointInfo *)g_hash_table_lookup (jit_mm->arch_seq_points, code);
jit_mm_unlock (jit_mm);
if (!info) {
ji = mini_jit_info_table_find (code);
g_assert (ji);
// FIXME: Optimize the size
info = (SeqPointInfo *)g_malloc0 (sizeof (SeqPointInfo) + (ji->code_size * sizeof (gpointer)));
info->ss_tramp_addr = &ss_trampoline;
jit_mm_lock (jit_mm);
g_hash_table_insert (jit_mm->arch_seq_points, code, info);
jit_mm_unlock (jit_mm);
}
return info;
}
/*========================= End of Function ========================*/
#endif
/**
*
* @brief Architecture-specific check of supported operation codes
*
* @param[in] @opcode - Operation code to be checked
* @returns True if operation code is supported
*
* Check if a mono operation is supported in hardware.
*/
gboolean
mono_arch_opcode_supported (int opcode)
{
switch (opcode) {
case OP_ATOMIC_ADD_I4:
case OP_ATOMIC_ADD_I8:
case OP_ATOMIC_EXCHANGE_I4:
case OP_ATOMIC_EXCHANGE_I8:
return TRUE;
default:
return FALSE;
}
}
/*========================= End of Function ========================*/
#ifndef DISABLE_JIT
/**
*
* @brief Architecture-specific check of tailcall support
*
* @param[in] @cfg - Mono Compile control block
* @param[in] @caller_sig - Signature of caller
* @param[in] @callee_sig - Signature of callee
* @param[in] @virtual_ - Whether this a virtual call
* @returns True if the tailcall operation is supported
*
* Check if a tailcall may be made from caller to callee based on a
* number of conditions including parameter types and stack sizes
*/
gboolean
mono_arch_tailcall_supported (MonoCompile *cfg, MonoMethodSignature *caller_sig, MonoMethodSignature *callee_sig, gboolean virtual_)
{
g_assert (caller_sig);
g_assert (callee_sig);
CallInfo *caller_info = get_call_info (NULL, caller_sig);
CallInfo *callee_info = get_call_info (NULL, callee_sig);
gboolean res = IS_SUPPORTED_TAILCALL (callee_info->stack_usage <= caller_info->stack_usage);
// Any call that would result in parameters being placed on the stack cannot be "tailed" as it may
// result in the caller's parameter variables being overwritten.
ArgInfo const * const ainfo = callee_info->args + callee_sig->hasthis;
for (int i = 0; res && i < callee_sig->param_count; ++i) {
switch(ainfo[i].regtype) {
case RegTypeGeneral :
// R6 is both used as argument register and call-saved
// This means we cannot use a tail call if R6 is needed
if (ainfo[i].reg == S390_LAST_ARG_REG)
res = FALSE;
else
res = TRUE;
break;
case RegTypeFP :
case RegTypeFPR4 :
case RegTypeStructByValInFP :
res = TRUE;
break;
case RegTypeBase :
res = FALSE;
break;
case RegTypeStructByAddr :
if (ainfo[i].reg == STK_BASE || ainfo[i].reg == S390_LAST_ARG_REG)
res = FALSE;
else
res = TRUE;
break;
case RegTypeStructByVal :
if (ainfo[i].reg == STK_BASE || ainfo[i].reg == S390_LAST_ARG_REG)
res = FALSE;
else {
switch(ainfo[i].size) {
case 0: case 1: case 2: case 4: case 8:
res = TRUE;
break;
default:
res = FALSE;
}
}
break;
}
}
g_free (caller_info);
g_free (callee_info);
return(res);
}
/*========================= End of Function ========================*/
#endif
/**
*
* @brief Architecture-specific load function
*
* @param[in] @jit_icall_id - JIT icall identifier
* @returns Pointer to load function trampoline
*
* A no-operation on s390x unless/until it supports AOT.
*/
gpointer
mono_arch_load_function (MonoJitICallId jit_icall_id)
{
return NULL;
}
/*========================= End of Function ========================*/
/**
*
* @brief Emit call to thunked code
*
* @param[in] @cfg - configuration data
* @param[inout] @code - where to emit call
* @param[in] @type - jump info type for the call target
* @param[in] @target - call target
* @returns Pointer to next code area
*
*/
static __inline__ guint8*
emit_call (MonoCompile *cfg, guint8 *code, MonoJumpInfoType type, gconstpointer target)
{
mono_add_patch_info_rel (cfg, code-cfg->native_code, type,
target, MONO_R_S390_THUNKED);
S390_CALL_TEMPLATE (code, s390_r14);
cfg->thunk_area += THUNK_SIZE;
return code;
}
/*========================= End of Function ========================*/
/**
*
* @brief Emit thunk for an indirect call
*
* @param[inout] @code - where to emit thunk
* @param[in] @target - thunk target
* @returns Pointer to next code area
*
*/
static guint8*
emit_thunk (guint8 *code, gconstpointer target)
{
*(guint64*)code = (guint64)target;
code += sizeof (guint64);
return code;
}
/*========================= End of Function ========================*/
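/*
 * Note: a thunk is simply the 8-byte absolute address of its target;
 * the call site reaches it through the branch template emitted by
 * S390_CALL_TEMPLATE/S390_EMIT_CALL (template details assumed, not
 * shown here).
 */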
/**
*
* @brief Create thunk
*
* @param[in] @cfg - Compiler configuration
* @param[in] @ip - Call site to be patched to branch to the thunk
* @param[inout] @code - Current code position
* @param[in] @target - thunk target
*
* Create a new thunk
*
*/
static void
create_thunk (MonoCompile *cfg, guint8 *ip, guint8 *code, gpointer target)
{
guint8 *thunks;
int thunks_size;
/*
* This can be called multiple times during JITting,
* save the current position in cfg->arch to avoid
* doing a O(n^2) search.
*/
if (!cfg->arch.thunks) {
cfg->arch.thunks = cfg->thunks;
cfg->arch.thunks_size = cfg->thunk_area;
}
thunks = (guint8 *) cfg->arch.thunks;
thunks_size = cfg->arch.thunks_size;
if (!thunks_size) {
g_print ("thunk failed %p->%p, thunk space=%d method %s", code, target, thunks_size, mono_method_full_name (cfg->method, TRUE));
g_assert_not_reached ();
}
g_assert (*(guint64 *)thunks == 0);
emit_thunk (thunks, target);
cfg->arch.thunks += THUNK_SIZE;
cfg->arch.thunks_size -= THUNK_SIZE;
S390_EMIT_CALL(ip, thunks);
}
/*========================= End of Function ========================*/
/**
*
* @brief Update thunk
*
* @param[in] @cfg - Compiler configuration
* @param[in] @code - Call site whose thunk is to be updated
* @param[in] @target - thunk target
*
* Update an existing thunk
*
*/
static void
update_thunk (MonoCompile *cfg, guint8 *code, gpointer target)
{
MonoJitInfo *ji;
MonoThunkJitInfo *info;
guint8 *thunks;
guint8 *orig_target;
guint8 *target_thunk;
int thunks_size;
ji = mini_jit_info_table_find ((char*)code);
g_assert (ji);
info = mono_jit_info_get_thunk_info (ji);
g_assert (info);
thunks = (guint8*)ji->code_start + info->thunks_offset;
thunks_size = info->thunks_size;
/*
* We're pointing at the start of jump to thunk,
* but mono_arch_get_call_target expects we're pointing
* after the branch so we adjust
*/
orig_target = mono_arch_get_call_target (code + 6);
target_thunk = NULL;
if (orig_target >= thunks && orig_target < thunks + thunks_size) {
/* The call already points to a thunk, because of trampolines etc. */
target_thunk = orig_target;
} else {
g_print ("thunk failed %p->%p, thunk space=%d method %s",
code, target, thunks_size,
cfg ? mono_method_full_name (cfg->method, TRUE)
: mono_method_full_name (jinfo_get_method (ji), TRUE));
g_assert_not_reached ();
}
emit_thunk (target_thunk, target);
}
/*========================= End of Function ========================*/
| /**
* @file
* @author - Neale Ferguson ([email protected])
*
* @section description
* Function - S/390 backend for the Mono code generator.
*
* Date - January, 2004
*
* Derivation - From mini-x86 & mini-ppc by -
* Paolo Molaro ([email protected])
* Dietmar Maurer ([email protected])
*
*/
/*------------------------------------------------------------------*/
/* D e f i n e s */
/*------------------------------------------------------------------*/
#define MAX_ARCH_DELEGATE_PARAMS 10
#define EMIT_COND_BRANCH(ins,cond) \
{ \
if (ins->inst_true_bb->native_offset) { \
int displace; \
displace = ((cfg->native_code + \
ins->inst_true_bb->native_offset) - code) / 2; \
if (s390_is_imm16(displace)) { \
s390_brc (code, cond, displace); \
} else { \
s390_jcl (code, cond, displace); \
} \
} else { \
mono_add_patch_info_rel (cfg, code - cfg->native_code, \
MONO_PATCH_INFO_BB, ins->inst_true_bb, \
MONO_R_S390_RELINS); \
s390_jcl (code, cond, 0); \
} \
}
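/*
 * Branch displacements are counted in halfwords (hence the division by
 * 2); brc takes a signed 16-bit displacement, while jcl (brcl) takes a
 * signed 32-bit one.
 */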
#define EMIT_UNCOND_BRANCH(ins) \
{ \
if (ins->inst_target_bb->native_offset) { \
int displace; \
displace = ((cfg->native_code + \
ins->inst_target_bb->native_offset) - code) / 2; \
if (s390_is_imm16(displace)) { \
s390_brc (code, S390_CC_UN, displace); \
} else { \
s390_jcl (code, S390_CC_UN, displace); \
} \
} else { \
mono_add_patch_info_rel (cfg, code - cfg->native_code, \
MONO_PATCH_INFO_BB, ins->inst_target_bb, \
MONO_R_S390_RELINS); \
s390_jcl (code, S390_CC_UN, 0); \
} \
}
#define EMIT_COND_SYSTEM_EXCEPTION(cond,exc_name) \
do { \
mono_add_patch_info (cfg, code - cfg->native_code, \
MONO_PATCH_INFO_EXC, exc_name); \
s390_jcl (code, cond, 0); \
} while (0);
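/*
 * The conditional jump emitted here is patched later by
 * mono_arch_emit_exceptions () to reach code that raises the named
 * corlib exception.
 */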
#define EMIT_COMP_AND_BRANCH(ins, cab, cmp) \
{ \
if (ins->inst_true_bb->native_offset) { \
int displace; \
displace = ((cfg->native_code + \
ins->inst_true_bb->native_offset) - code) / 2; \
if (s390_is_imm16(displace)) { \
s390_##cab (code, ins->sreg1, ins->sreg2, \
ins->sreg3, displace); \
} else { \
s390_##cmp (code, ins->sreg1, ins->sreg2); \
displace = ((cfg->native_code + \
ins->inst_true_bb->native_offset) - code) / 2; \
s390_jcl (code, ins->sreg3, displace); \
} \
} else { \
s390_##cmp (code, ins->sreg1, ins->sreg2); \
mono_add_patch_info_rel (cfg, code - cfg->native_code, \
MONO_PATCH_INFO_BB, ins->inst_true_bb, \
MONO_R_S390_RELINS); \
s390_jcl (code, ins->sreg3, 0); \
} \
}
#define EMIT_COMP_AND_BRANCH_IMM(ins, cab, cmp, lat, logical) \
{ \
if (ins->inst_true_bb->native_offset) { \
int displace; \
if ((ins->backend.data == 0) && (!logical)) { \
s390_##lat (code, ins->sreg1, ins->sreg1); \
displace = ((cfg->native_code + \
ins->inst_true_bb->native_offset) - code) / 2; \
if (s390_is_imm16(displace)) { \
s390_brc (code, ins->sreg3, displace); \
} else { \
s390_jcl (code, ins->sreg3, displace); \
} \
} else { \
S390_SET (code, s390_r0, ins->backend.data); \
displace = ((cfg->native_code + \
ins->inst_true_bb->native_offset) - code) / 2; \
if (s390_is_imm16(displace)) { \
s390_##cab (code, ins->sreg1, s390_r0, \
ins->sreg3, displace); \
} else { \
s390_##cmp (code, ins->sreg1, s390_r0); \
displace = ((cfg->native_code + \
ins->inst_true_bb->native_offset) - code) / 2; \
s390_jcl (code, ins->sreg3, displace); \
} \
} \
} else { \
if ((ins->backend.data == 0) && (!logical)) { \
s390_##lat (code, ins->sreg1, ins->sreg1); \
} else { \
S390_SET (code, s390_r0, ins->backend.data); \
s390_##cmp (code, ins->sreg1, s390_r0); \
} \
mono_add_patch_info_rel (cfg, code - cfg->native_code, \
MONO_PATCH_INFO_BB, ins->inst_true_bb, \
MONO_R_S390_RELINS); \
s390_jcl (code, ins->sreg3, 0); \
} \
}
#define CHECK_SRCDST_COM \
if (ins->dreg == ins->sreg2) { \
src2 = ins->sreg1; \
} else { \
src2 = ins->sreg2; \
if (ins->dreg != ins->sreg1) { \
s390_lgr (code, ins->dreg, ins->sreg1); \
} \
}
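/*
 * Example: for a commutative two-address op such as AGR, CHECK_SRCDST_COM
 * arranges for dreg to hold sreg1 (copying if necessary) and leaves the
 * remaining operand in src2, so the emitter can issue "op dreg, src2".
 * The NCOM variants below handle non-commutative ops by saving sreg2 in
 * r13 (or f15 for FP) when it would otherwise be clobbered.
 */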
#define CHECK_SRCDST_NCOM \
if (ins->dreg == ins->sreg2) { \
src2 = s390_r13; \
s390_lgr (code, s390_r13, ins->sreg2); \
} else { \
src2 = ins->sreg2; \
} \
if (ins->dreg != ins->sreg1) { \
s390_lgr (code, ins->dreg, ins->sreg1); \
}
#define CHECK_SRCDST_COM_I \
if (ins->dreg == ins->sreg2) { \
src2 = ins->sreg1; \
} else { \
src2 = ins->sreg2; \
if (ins->dreg != ins->sreg1) { \
s390_lgfr (code, ins->dreg, ins->sreg1); \
} \
}
#define CHECK_SRCDST_NCOM_I \
if (ins->dreg == ins->sreg2) { \
src2 = s390_r13; \
s390_lgfr (code, s390_r13, ins->sreg2); \
} else { \
src2 = ins->sreg2; \
} \
if (ins->dreg != ins->sreg1) { \
s390_lgfr (code, ins->dreg, ins->sreg1); \
}
#define CHECK_SRCDST_COM_F \
if (ins->dreg == ins->sreg2) { \
src2 = ins->sreg1; \
} else { \
src2 = ins->sreg2; \
if (ins->dreg != ins->sreg1) { \
s390_ldr (code, ins->dreg, ins->sreg1); \
} \
}
#define CHECK_SRCDST_NCOM_F(op) \
if (ins->dreg == ins->sreg2) { \
s390_lgdr (code, s390_r0, s390_f15); \
s390_ldr (code, s390_f15, ins->sreg2); \
if (ins->dreg != ins->sreg1) { \
s390_ldr (code, ins->dreg, ins->sreg1); \
} \
s390_ ## op (code, ins->dreg, s390_f15); \
s390_ldgr (code, s390_f15, s390_r0); \
} else { \
if (ins->dreg != ins->sreg1) { \
s390_ldr (code, ins->dreg, ins->sreg1); \
} \
s390_ ## op (code, ins->dreg, ins->sreg2); \
}
#define CHECK_SRCDST_NCOM_FR(op, m) \
s390_lgdr (code, s390_r1, s390_f14); \
if (ins->dreg == ins->sreg2) { \
s390_lgdr (code, s390_r0, s390_f15); \
s390_ldr (code, s390_f15, ins->sreg2); \
if (ins->dreg != ins->sreg1) { \
s390_ldr (code, ins->dreg, ins->sreg1); \
} \
s390_ ## op (code, ins->dreg, s390_f15, m, s390_f14); \
s390_ldgr (code, s390_f15, s390_r0); \
} else { \
if (ins->dreg != ins->sreg1) { \
s390_ldr (code, ins->dreg, ins->sreg1); \
} \
s390_ ## op (code, ins->dreg, ins->sreg2, m, s390_f14); \
} \
s390_ldgr (code, s390_f14, s390_r1);
#undef DEBUG
#define DEBUG(a) if (cfg->verbose_level > 1) a
#define MAX_EXC 16
#define S390_TRACE_STACK_SIZE (5*sizeof(gpointer)+4*sizeof(gdouble))
#define MAX(a, b) ((a) > (b) ? (a) : (b))
/*
* imt trampoline size values
*/
#define CMP_SIZE 24
#define LOADCON_SIZE 20
#define LOAD_SIZE 6
#define BR_SIZE 2
#define JUMP_SIZE 6
#define ENABLE_WRONG_METHOD_CHECK 0
/*========================= End of Defines =========================*/
/*------------------------------------------------------------------*/
/* I n c l u d e s */
/*------------------------------------------------------------------*/
#include "mini.h"
#include <string.h>
#include <sys/types.h>
#include <unistd.h>
#include <mono/metadata/abi-details.h>
#include <mono/metadata/appdomain.h>
#include <mono/metadata/debug-helpers.h>
#include <mono/metadata/profiler-private.h>
#include <mono/utils/mono-error.h>
#include <mono/utils/mono-error-internals.h>
#include <mono/utils/mono-math.h>
#include <mono/utils/mono-mmap.h>
#include <mono/utils/mono-hwcap.h>
#include <mono/utils/mono-threads.h>
#include <mono/utils/unlocked.h>
#include "mini-s390x.h"
#include "cpu-s390x.h"
#include "jit-icalls.h"
#include "ir-emit.h"
#include "mini-gc.h"
#include "aot-runtime.h"
#include "mini-runtime.h"
/*========================= End of Includes ========================*/
/*------------------------------------------------------------------*/
/* T y p e d e f s */
/*------------------------------------------------------------------*/
/**
* Track stack use
*/
typedef struct {
guint stack_size,
code_size,
parm_size,
retStruct;
} size_data;
/**
* ABI - register use in calls etc.
*/
typedef enum {
RegTypeGeneral,
RegTypeBase,
RegTypeFP,
RegTypeFPR4,
RegTypeStructByVal,
RegTypeStructByValInFP,
RegTypeStructByAddr
} ArgStorage;
/**
* Track method arguments
*/
typedef struct {
gint32 offset; /* offset from caller's stack */
guint16 vtsize; /* in param area */
guint8 reg;
ArgStorage regtype;
guint32 size; /* Size of structure used by RegTypeStructByVal */
gint32 type; /* Data type of argument */
} ArgInfo;
/**
* Call information - parameters and stack use for s390x ABI
*/
struct CallInfo {
int nargs;
int lastgr;
guint32 stack_usage;
guint32 struct_ret;
ArgInfo ret;
ArgInfo sigCookie;
size_data sz;
int vret_arg_index;
MonoMethodSignature *sig;
ArgInfo args [1];
};
/**
* Registers used in parameter passing
*/
typedef struct {
gint64 gr[5]; /* R2-R6 */
gdouble fp[3]; /* F0-F2 */
} __attribute__ ((__packed__)) RegParm;
/*========================= End of Typedefs ========================*/
/*------------------------------------------------------------------*/
/* P r o t o t y p e s */
/*------------------------------------------------------------------*/
static guint8 * backUpStackPtr(MonoCompile *, guint8 *);
static void add_general (guint *, size_data *, ArgInfo *);
static void add_stackParm (guint *, size_data *, ArgInfo *, gint, ArgStorage);
static void add_float (guint *, size_data *, ArgInfo *, gboolean);
static CallInfo * get_call_info (MonoMemPool *, MonoMethodSignature *);
static guchar * emit_float_to_int (MonoCompile *, guchar *, int, int, int, gboolean);
static __inline__ void emit_unwind_regs(MonoCompile *, guint8 *, int, int, long);
static void compare_and_branch(MonoBasicBlock *, MonoInst *, int, gboolean);
static __inline__ guint8 * emit_call(MonoCompile *, guint8 *, MonoJumpInfoType, gconstpointer);
static guint8 * emit_thunk(guint8 *, gconstpointer);
static void create_thunk(MonoCompile *, guint8 *, guint8 *, gpointer);
static void update_thunk(MonoCompile *, guint8 *, gpointer);
static void emit_patch_full (MonoCompile *, MonoJumpInfo *, guint8 *, gpointer, int);
/*========================= End of Prototypes ======================*/
/*------------------------------------------------------------------*/
/* G l o b a l V a r i a b l e s */
/*------------------------------------------------------------------*/
/**
* The single-step trampoline
*/
static gpointer ss_trampoline;
/**
* The breakpoint trampoline
*/
static gpointer bp_trampoline;
/**
* Constants used in debugging - map general register names
*/
static const char * grNames[] = {
"s390_r0", "s390_sp", "s390_r2", "s390_r3", "s390_r4",
"s390_r5", "s390_r6", "s390_r7", "s390_r8", "s390_r9",
"s390_r10", "s390_r11", "s390_r12", "s390_r13", "s390_r14",
"s390_r15"
};
/**
* Constants used in debugging - map floating point register names
*/
static const char * fpNames[] = {
"s390_f0", "s390_f1", "s390_f2", "s390_f3", "s390_f4",
"s390_f5", "s390_f6", "s390_f7", "s390_f8", "s390_f9",
"s390_f10", "s390_f11", "s390_f12", "s390_f13", "s390_f14",
"s390_f15"
};
/**
* Constants used in debugging - map vector register names
*/
static const char * vrNames[] = {
"vr0", "vr1", "vr2", "vr3", "vr4", "vr5", "vr6", "vr7",
"vr8", "vr9", "vr10", "vr11", "vr12", "vr13", "vr14", "vr15",
"vr16", "vr17", "vr18", "vr19", "vr20", "vr21", "vr22", "vr23",
"vr24", "vr25", "vr26", "vr27", "vr28", "vr29", "vr30", "vr31"
};
#if 0
/**
* Constants used in debugging - ABI register types
*/
static const char *typeParm[] = { "General", "Base", "FPR8", "FPR4", "StructByVal",
"StructByValInFP", "ByAddr"};
#endif
/*====================== End of Global Variables ===================*/
static GENERATE_TRY_GET_CLASS_WITH_CACHE (math, "System", "Math")
static GENERATE_TRY_GET_CLASS_WITH_CACHE (mathf, "System", "MathF")
/**
*
* @brief Return general register name
*
* @param[in] register number
* @returns Name of register
*
* Returns the name of the general register specified by the input parameter.
*/
const char*
mono_arch_regname (int reg)
{
if (reg >= 0 && reg < 16)
return grNames [reg];
else
return "unknown";
}
/*========================= End of Function ========================*/
/**
*
* @brief Return floating point register name
*
* @param[in] register number
* @returns Name of register
*
* Returns the name of the FP register specified by the input parameter.
*/
const char*
mono_arch_fregname (int reg)
{
if (reg >= 0 && reg < 16)
return fpNames [reg];
else
return "unknown";
}
/*========================= End of Function ========================*/
/**
*
* @brief Return vector register name
*
* @param[in] register number
* @returns Name of register
*
* Returns the name of the vector register specified by the input parameter.
*/
const char *
mono_arch_xregname (int reg)
{
if (reg >= 0 && reg < s390_VR_NREG)
return vrNames [reg];
else
return "unknown";
}
/*========================= End of Function ========================*/
/**
*
* @brief Architecture-specific return argument information
*
* @param[in] @csig - Method signature
* @param[in] @param_count - Number of parameters to consider
* @param[out] @arg_info - An array in which to store results
* @returns Size of the activation frame
*
* Gathers information on parameters such as size, alignment, and padding.
* arg_info should be large enough to hold param_count + 1 entries.
*/
int
mono_arch_get_argument_info (MonoMethodSignature *csig,
int param_count,
MonoJitArgumentInfo *arg_info)
{
int k, frame_size = 0;
int size, align, pad;
int offset = 8;
if (MONO_TYPE_ISSTRUCT (csig->ret)) {
frame_size += sizeof (target_mgreg_t);
offset += 8;
}
arg_info [0].offset = offset;
if (csig->hasthis) {
frame_size += sizeof (target_mgreg_t);
offset += 8;
}
arg_info [0].size = frame_size;
for (k = 0; k < param_count; k++) {
if (csig->pinvoke && !csig->marshalling_disabled)
size = mono_type_native_stack_size (csig->params [k], (guint32 *) &align);
else
size = mini_type_stack_size (csig->params [k], &align);
frame_size += pad = (align - (frame_size & (align - 1))) & (align - 1);
arg_info [k].pad = pad;
frame_size += size;
arg_info [k + 1].pad = 0;
arg_info [k + 1].size = size;
offset += pad;
arg_info [k + 1].offset = offset;
offset += size;
}
align = MONO_ARCH_FRAME_ALIGNMENT;
frame_size += pad = (align - (frame_size & (align - 1))) & (align - 1);
arg_info [k].pad = pad;
return frame_size;
}
/*========================= End of Function ========================*/
/**
*
* @brief Emit an s390x move operation
*
* @param[in] @cfg - MonoCompile control block
* @param[in] @dr - Destination register
* @param[in] @ins - Current instruction
* @param[in] @src - Instruction representing the source of move
*
* Emit a move instruction for VT parameters
*/
static void __inline__
emit_new_move(MonoCompile *cfg, int dr, MonoInst *ins, MonoInst *src)
{
MonoCallInst *call = (MonoCallInst *) ins->inst_p0;
ArgInfo *ainfo = (ArgInfo *) ins->inst_p1;
MonoInst *vtcopy = mono_compile_create_var (cfg, m_class_get_byval_arg (src->klass), OP_LOCAL);
MonoInst *load;
MonoInst *move;
int size;
if (call->signature->pinvoke && !call->signature->marshalling_disabled) {
size = mono_type_native_stack_size (m_class_get_byval_arg (src->klass), NULL);
vtcopy->backend.is_pinvoke = 1;
} else {
size = ins->backend.size;
}
EMIT_NEW_VARLOADA (cfg, load, vtcopy, vtcopy->inst_vtype);
MONO_INST_NEW (cfg, move, OP_S390_MOVE);
move->sreg2 = load->dreg;
move->inst_offset = 0;
move->sreg1 = src->dreg;
move->inst_imm = 0;
move->backend.size = size;
MONO_ADD_INS (cfg->cbb, move);
if (dr != 0)
MONO_EMIT_NEW_UNALU(cfg, OP_MOVE, dr, load->dreg);
else
MONO_EMIT_NEW_STORE_MEMBASE(cfg, OP_STORE_MEMBASE_REG,
ainfo->reg, ainfo->offset, load->dreg);
}
/*========================= End of Function ========================*/
/**
*
* @brief Generate output sequence for VT register parameters
*
* @param[in] @cfg - MonoCompile control block
* @param[in] @dr - Destination register
* @param[in] @ins - Current instruction
* @param[in] @src - Instruction representing the source
*
* Emit the output of structures for calls whose address is placed in a register.
*/
static void __inline__
emit_outarg_vtr(MonoCompile *cfg, MonoInst *ins, MonoInst *src)
{
MonoCallInst *call = (MonoCallInst *) ins->inst_p0;
ArgInfo *ainfo = (ArgInfo *) ins->inst_p1;
int reg = mono_alloc_preg (cfg);
switch (ins->backend.size) {
case 0:
MONO_EMIT_NEW_ICONST(cfg, reg, 0);
break;
case 1:
MONO_EMIT_NEW_LOAD_MEMBASE_OP(cfg, OP_LOADU1_MEMBASE,
reg, src->dreg, 0);
break;
case 2:
MONO_EMIT_NEW_LOAD_MEMBASE_OP(cfg, OP_LOADU2_MEMBASE,
reg, src->dreg, 0);
break;
case 4:
MONO_EMIT_NEW_LOAD_MEMBASE_OP(cfg, OP_LOADI4_MEMBASE,
reg, src->dreg, 0);
break;
case 8:
MONO_EMIT_NEW_LOAD_MEMBASE_OP(cfg, OP_LOADI8_MEMBASE,
reg, src->dreg, 0);
break;
default:
emit_new_move (cfg, reg, ins, src);
}
mono_call_inst_add_outarg_reg(cfg, call, reg, ainfo->reg, FALSE);
}
/*========================= End of Function ========================*/
/**
*
* @brief Generate output sequence for VT stack parameters
*
* @param[in] @cfg - MonoCompile control block
* @param[in] @dr - Destination register
* @param[in] @ins - Current instruction
* @param[in] @src - Instruction representing the source
*
* Emit the output of structures for calls whose address is placed on the stack
*/
static void __inline__
emit_outarg_vts(MonoCompile *cfg, MonoInst *ins, MonoInst *src)
{
ArgInfo *ainfo = (ArgInfo *) ins->inst_p1;
int tmpr = mono_alloc_preg (cfg);
switch (ins->backend.size) {
case 0:
MONO_EMIT_NEW_ICONST(cfg, tmpr, 0);
MONO_EMIT_NEW_STORE_MEMBASE(cfg, OP_STORE_MEMBASE_REG,
ainfo->reg, ainfo->offset, tmpr);
break;
case 1:
MONO_EMIT_NEW_LOAD_MEMBASE_OP(cfg, OP_LOADU1_MEMBASE,
tmpr, src->dreg, 0);
MONO_EMIT_NEW_STORE_MEMBASE(cfg, OP_STORE_MEMBASE_REG,
ainfo->reg, ainfo->offset, tmpr);
break;
case 2:
MONO_EMIT_NEW_LOAD_MEMBASE_OP(cfg, OP_LOADU2_MEMBASE,
tmpr, src->dreg, 0);
MONO_EMIT_NEW_STORE_MEMBASE(cfg, OP_STORE_MEMBASE_REG,
ainfo->reg, ainfo->offset, tmpr);
break;
case 4:
MONO_EMIT_NEW_LOAD_MEMBASE_OP(cfg, OP_LOADI4_MEMBASE,
tmpr, src->dreg, 0);
MONO_EMIT_NEW_STORE_MEMBASE(cfg, OP_STORE_MEMBASE_REG,
ainfo->reg, ainfo->offset, tmpr);
break;
case 8:
MONO_EMIT_NEW_LOAD_MEMBASE_OP(cfg, OP_LOADI8_MEMBASE,
tmpr, src->dreg, 0);
MONO_EMIT_NEW_STORE_MEMBASE(cfg, OP_STORE_MEMBASE_REG,
ainfo->reg, ainfo->offset, tmpr);
break;
default: {
emit_new_move (cfg, 0, ins, src);
}
}
}
/*========================= End of Function ========================*/
/**
*
* @brief Generate unwind information for range of registers
*
* @param[in] @cfg - MonoCompile control block
* @param[in] @code - Location of code
* @param[in] @start - Starting register
* @param[in] @end - Ending register
* @param[in] @offset - Offset in stack
*
* Emit unwind information for a range of registers.
*/
static void __inline__
emit_unwind_regs(MonoCompile *cfg, guint8 *code, int start, int end, long offset)
{
int i;
for (i = start; i <= end; i++) {
mono_emit_unwind_op_offset (cfg, code, i, offset);
mini_gc_set_slot_type_from_cfa (cfg, offset, SLOT_NOREF);
offset += sizeof(gulong);
}
}
/*========================= End of Function ========================*/
/**
*
* @brief Get previous stack frame pointer
*
* @param[in] @cfg - MonoCompile control block
* @param[in] @code - Location of code
* @returns Previous stack pointer
*
* Retrieve the stack pointer of the previous frame
*/
static guint8 *
backUpStackPtr(MonoCompile *cfg, guint8 *code)
{
int stackSize = cfg->stack_usage;
if (cfg->flags & MONO_CFG_HAS_ALLOCA) {
s390_lg (code, STK_BASE, 0, STK_BASE, 0);
} else {
if (cfg->frame_reg != STK_BASE)
s390_lgr (code, STK_BASE, cfg->frame_reg);
if (s390_is_imm16 (stackSize)) {
s390_aghi (code, STK_BASE, stackSize);
} else if (s390_is_imm32 (stackSize)) {
s390_agfi (code, STK_BASE, stackSize);
} else {
/* aghi only takes a 16-bit immediate, so add the INT_MAX-sized chunks with agfi */
while (stackSize > INT_MAX) {
s390_agfi (code, STK_BASE, INT_MAX);
stackSize -= INT_MAX;
}
s390_agfi (code, STK_BASE, stackSize);
}
}
return (code);
}
/*========================= End of Function ========================*/
/**
*
* @brief Architecture-specific CPU initialization
*
* Perform CPU specific initialization to execute managed code.
*/
void
mono_arch_cpu_init (void)
{
}
/*========================= End of Function ========================*/
/**
*
* @brief Architecture-specific initialization
*
*
* Initialize architecture-specific code:
* - Disable partial generic sharing
* - Obtain the breakpoint trampoline
*/
void
mono_arch_init (void)
{
mono_set_partial_sharing_supported (FALSE);
if (!mono_aot_only)
bp_trampoline = mini_get_breakpoint_trampoline();
}
/*========================= End of Function ========================*/
/**
*
* @brief Architecture-specific cleanup code
*
*
* Clean up before termination. Nothing architecture-specific is required
* on s390x.
*/
void
mono_arch_cleanup (void)
{
}
/*========================= End of Function ========================*/
/**
*
* @brief Architecture-specific check for fast TLS access
*
* @returns True
*
* Returns whether we use fast, inlined access to thread-local storage
* from managed code, instead of falling back to native helpers.
*/
gboolean
mono_arch_have_fast_tls (void)
{
return TRUE;
}
/*========================= End of Function ========================*/
/**
*
* @brief Architecture-specific check of mono optimizations
*
* @param[out] @exclude_mask - Optimization exclusion mask
* @returns Optimizations supported on this CPU
*
* Returns the optimizations supported on this CPU
*/
guint32
mono_arch_cpu_optimizations (guint32 *exclude_mask)
{
guint32 opts = 0;
/*
* No s390-specific optimizations yet
*/
*exclude_mask = 0;
return opts;
}
/*========================= End of Function ========================*/
/**
*
* @brief Architecture-specific allocation of integer variables
*
* @param[in] @cfg - MonoCompile control block
* @returns A list of integer variables
*
* Returns a list of allocatable integer variables
*/
GList *
mono_arch_get_allocatable_int_vars (MonoCompile *cfg)
{
GList *vars = NULL;
int i;
for (i = 0; i < cfg->num_varinfo; i++) {
MonoInst *ins = cfg->varinfo [i];
MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
/* unused vars */
if (vmv->range.first_use.abs_pos >= vmv->range.last_use.abs_pos)
continue;
if (ins->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT) ||
(ins->opcode != OP_LOCAL && ins->opcode != OP_ARG))
continue;
/* we can only allocate register-sized values */
if (mono_is_regsize_var(ins->inst_vtype)) {
g_assert (MONO_VARINFO (cfg, i)->reg == -1);
g_assert (i == vmv->idx);
vars = mono_varlist_insert_sorted (cfg, vars, vmv, FALSE);
}
}
return vars;
}
/*========================= End of Function ========================*/
/**
*
* @brief Architecture-specific determination of usable integer registers
*
* @param[in] @cfg - MonoCompile control block
* @returns A list of allocatable registers
*
* Returns a list of usable integer registers
*/
GList *
mono_arch_get_global_int_regs (MonoCompile *cfg)
{
GList *regs = NULL;
MonoMethodHeader *header;
int i, top = 13;
header = cfg->header;
if ((cfg->flags & MONO_CFG_HAS_ALLOCA) || header->num_clauses)
cfg->frame_reg = s390_r11;
/* FIXME: s390_r12 is reserved for bkchain_reg. Only reserve it if needed */
top = 12;
for (i = 8; i < top; ++i) {
if ((cfg->frame_reg != i) &&
//!((cfg->uses_rgctx_reg) && (i == MONO_ARCH_IMT_REG)))
(i != MONO_ARCH_IMT_REG))
regs = g_list_prepend (regs, GUINT_TO_POINTER (i));
}
return regs;
}
/*========================= End of Function ========================*/
/**
*
* @brief Architecture-specific flush of instruction cache
*
* @param[in] @code - Start of code
* @param[in] @size - Amount to be flushed
*
* Flush the CPU icache.
*/
void
mono_arch_flush_icache (guint8 *code, gint size)
{
}
/*========================= End of Function ========================*/
/**
*
* @brief Add an integer register parameter
*
* @param[in] @gr - Address of current register number
* @param[in] @sz - Stack size data
* @param[in] @ainfo - Parameter information
*
* Assign a parameter to a general register or spill it onto the stack
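* (r2-r6 are the s390x integer argument registers.)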
*/
static void inline
add_general (guint *gr, size_data *sz, ArgInfo *ainfo)
{
if (*gr > S390_LAST_ARG_REG) {
sz->stack_size = S390_ALIGN(sz->stack_size, sizeof(long));
ainfo->offset = sz->stack_size;
ainfo->reg = STK_BASE;
ainfo->regtype = RegTypeBase;
sz->stack_size += sizeof(long);
sz->code_size += 12;
} else {
ainfo->reg = *gr;
ainfo->regtype = RegTypeGeneral;
sz->code_size += 8;
}
(*gr) ++;
}
/*========================= End of Function ========================*/
/**
*
* @brief Add a structure variable to parameter list
*
* @param[in] @gr - Address of current register number
* @param[in] @sz - Stack size data
* @param[in] @ainfo - Parameter information
* @param[in] @size - Size of parameter
* @param[in] @type - Type of stack parameter (reference or value)
*
* Assign a structure address to a register or spill it onto the stack
*/
static void inline
add_stackParm (guint *gr, size_data *sz, ArgInfo *ainfo, gint size, ArgStorage type)
{
if (*gr > S390_LAST_ARG_REG) {
sz->stack_size = S390_ALIGN(sz->stack_size, sizeof(long));
ainfo->reg = STK_BASE;
ainfo->offset = sz->stack_size;
sz->stack_size += sizeof (target_mgreg_t);
sz->parm_size += sizeof(gpointer);
} else {
ainfo->reg = *gr;
}
(*gr) ++;
ainfo->regtype = type;
ainfo->size = size;
ainfo->vtsize = size;
sz->parm_size += size;
}
/*========================= End of Function ========================*/
/**
*
* @brief Add a floating point register parameter
*
* @param[in] @fr - Address of current register number
* @param[in] @sz - Stack size data
* @param[in] @ainfo - Parameter information
* @param[in] @isDouble - Precision of parameter
*
* Assign a parameter to a FP register or spill it onto the stack
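* (Per the s390x ABI the FP argument registers are f0, f2, f4 and f6,
* which is why the register number advances by two.)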
*/
static void inline
add_float (guint *fr, size_data *sz, ArgInfo *ainfo, gboolean isDouble)
{
if ((*fr) <= S390_LAST_FPARG_REG) {
if (isDouble)
ainfo->regtype = RegTypeFP;
else
ainfo->regtype = RegTypeFPR4;
ainfo->reg = *fr;
sz->code_size += 4;
(*fr) += 2;
}
else {
ainfo->offset = sz->stack_size;
ainfo->reg = STK_BASE;
sz->code_size += 4;
sz->stack_size += sizeof(double);
ainfo->regtype = RegTypeBase;
}
}
/*========================= End of Function ========================*/
/**
*
* @brief Extract information about call parameters and stack use
*
* @param[in] @mp - Mono Memory Pool
* @param[in] @sig - Mono Method Signature
* @returns Information about the parameters and stack usage for a call
*
* Determine the amount of space required for code and stack. In addition
* determine starting points for stack-based parameters, and area for
* structures being returned on the stack.
*/
static CallInfo *
get_call_info (MonoMemPool *mp, MonoMethodSignature *sig)
{
guint i, fr, gr, size, pstart;
int nParm = sig->hasthis + sig->param_count;
MonoType *ret_type;
guint32 simpleType, align;
gboolean is_pinvoke = sig->pinvoke;
CallInfo *cinfo;
size_data *sz;
if (mp)
cinfo = (CallInfo *) mono_mempool_alloc0 (mp, sizeof (CallInfo) + sizeof (ArgInfo) * nParm);
else
cinfo = (CallInfo *) g_malloc0 (sizeof (CallInfo) + sizeof (ArgInfo) * nParm);
fr = 0;
gr = s390_r2;
nParm = 0;
cinfo->struct_ret = 0;
cinfo->sig = sig;
sz = &cinfo->sz;
sz->retStruct = 0;
sz->stack_size = S390_MINIMAL_STACK_SIZE;
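/* Every s390x frame provides the ABI-mandated register save area
   (S390_MINIMAL_STACK_SIZE, 160 bytes on 64-bit) */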
sz->code_size = 0;
sz->parm_size = 0;
align = 0;
size = 0;
/*----------------------------------------------------------*/
/* We determine the size of the return code/stack in case we*/
/* need to reserve a register to be used to address a stack */
/* area that the callee will use. */
/*----------------------------------------------------------*/
ret_type = mini_get_underlying_type (sig->ret);
simpleType = ret_type->type;
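/* Enum return types re-enter the switch below with their underlying base type */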
enum_retvalue:
switch (simpleType) {
case MONO_TYPE_I1:
case MONO_TYPE_U1:
case MONO_TYPE_I2:
case MONO_TYPE_U2:
case MONO_TYPE_I4:
case MONO_TYPE_U4:
case MONO_TYPE_I:
case MONO_TYPE_U:
case MONO_TYPE_OBJECT:
case MONO_TYPE_PTR:
case MONO_TYPE_FNPTR:
case MONO_TYPE_CLASS:
case MONO_TYPE_SZARRAY:
case MONO_TYPE_ARRAY:
case MONO_TYPE_STRING:
cinfo->ret.reg = s390_r2;
sz->code_size += 4;
break;
case MONO_TYPE_R4:
case MONO_TYPE_R8:
cinfo->ret.reg = s390_f0;
sz->code_size += 4;
break;
case MONO_TYPE_I8:
case MONO_TYPE_U8:
cinfo->ret.reg = s390_r2;
sz->code_size += 4;
break;
case MONO_TYPE_GENERICINST:
if (!mono_type_generic_inst_is_valuetype (sig->ret)) {
cinfo->ret.reg = s390_r2;
sz->code_size += 4;
break;
}
/* Fall through */
case MONO_TYPE_VALUETYPE: {
MonoClass *klass = mono_class_from_mono_type_internal (sig->ret);
if (m_class_is_enumtype (klass)) {
simpleType = mono_class_enum_basetype_internal (klass)->type;
goto enum_retvalue;
}
size = mini_type_stack_size_full (m_class_get_byval_arg (klass), NULL, sig->pinvoke && !sig->marshalling_disabled);
cinfo->struct_ret = 1;
cinfo->ret.size = size;
cinfo->ret.vtsize = size;
break;
}
case MONO_TYPE_TYPEDBYREF: {
MonoClass *klass = mono_class_from_mono_type_internal (sig->ret);
size = mini_type_stack_size_full (m_class_get_byval_arg (klass), NULL, sig->pinvoke && !sig->marshalling_disabled);
cinfo->struct_ret = 1;
cinfo->ret.size = size;
cinfo->ret.vtsize = size;
}
break;
case MONO_TYPE_VOID:
break;
default:
g_error ("Can't handle as return value 0x%x", sig->ret->type);
}
pstart = 0;
/*
* To simplify get_this_arg_reg () and LLVM integration, emit the vret arg after
* the first argument, allowing 'this' to be always passed in the first arg reg.
* Also do this if the first argument is a reference type, since virtual calls
* are sometimes made using calli without sig->hasthis set, like in the delegate
* invoke wrappers.
*/
if (cinfo->struct_ret && !is_pinvoke &&
(sig->hasthis ||
(sig->param_count > 0 &&
MONO_TYPE_IS_REFERENCE (mini_get_underlying_type (sig->params [0]))))) {
if (sig->hasthis) {
cinfo->args[nParm].size = sizeof (target_mgreg_t);
add_general (&gr, sz, cinfo->args + nParm);
} else {
cinfo->args[nParm].size = sizeof (target_mgreg_t);
add_general (&gr, sz, &cinfo->args [sig->hasthis + nParm]);
pstart = 1;
}
nParm ++;
cinfo->vret_arg_index = 1;
cinfo->ret.reg = gr;
gr ++;
} else {
/* this */
if (sig->hasthis) {
cinfo->args[nParm].size = sizeof (target_mgreg_t);
add_general (&gr, sz, cinfo->args + nParm);
nParm ++;
}
if (cinfo->struct_ret) {
cinfo->ret.reg = gr;
gr++;
}
}
if ((sig->call_convention == MONO_CALL_VARARG) && (sig->param_count == 0)) {
gr = S390_LAST_ARG_REG + 1;
fr = S390_LAST_FPARG_REG + 1;
/* Emit the signature cookie just before the implicit arguments */
add_general (&gr, sz, &cinfo->sigCookie);
}
/*----------------------------------------------------------*/
/* We determine the size of the parameter code and stack */
/* requirements by checking the types and sizes of the */
/* parameters. */
/*----------------------------------------------------------*/
for (i = pstart; i < sig->param_count; ++i) {
MonoType *ptype;
/*--------------------------------------------------*/
/* Handle vararg type calls. All args are put on */
/* the stack. */
/*--------------------------------------------------*/
if ((sig->call_convention == MONO_CALL_VARARG) &&
(i == sig->sentinelpos)) {
gr = S390_LAST_ARG_REG + 1;
fr = S390_LAST_FPARG_REG + 1;
add_general (&gr, sz, &cinfo->sigCookie);
}
if (m_type_is_byref (sig->params [i])) {
add_general (&gr, sz, cinfo->args+nParm);
cinfo->args[nParm].size = sizeof(gpointer);
nParm++;
continue;
}
ptype = mini_get_underlying_type (sig->params [i]);
simpleType = ptype->type;
cinfo->args[nParm].type = simpleType;
switch (simpleType) {
case MONO_TYPE_I1:
case MONO_TYPE_U1:
cinfo->args[nParm].size = sizeof(char);
add_general (&gr, sz, cinfo->args+nParm);
nParm++;
break;
case MONO_TYPE_I2:
case MONO_TYPE_U2:
cinfo->args[nParm].size = sizeof(short);
add_general (&gr, sz, cinfo->args+nParm);
nParm++;
break;
case MONO_TYPE_I4:
case MONO_TYPE_U4:
cinfo->args[nParm].size = sizeof(int);
add_general (&gr, sz, cinfo->args+nParm);
nParm++;
break;
case MONO_TYPE_I:
case MONO_TYPE_U:
case MONO_TYPE_PTR:
case MONO_TYPE_FNPTR:
case MONO_TYPE_OBJECT:
case MONO_TYPE_STRING:
case MONO_TYPE_SZARRAY:
case MONO_TYPE_ARRAY:
cinfo->args[nParm].size = sizeof(gpointer);
add_general (&gr, sz, cinfo->args+nParm);
nParm++;
break;
case MONO_TYPE_I8:
case MONO_TYPE_U8:
cinfo->args[nParm].size = sizeof(long long);
add_general (&gr, sz, cinfo->args+nParm);
nParm++;
break;
case MONO_TYPE_R4:
cinfo->args[nParm].size = sizeof(float);
add_float (&fr, sz, cinfo->args+nParm, FALSE);
nParm++;
break;
case MONO_TYPE_R8:
cinfo->args[nParm].size = sizeof(double);
add_float (&fr, sz, cinfo->args+nParm, TRUE);
nParm++;
break;
case MONO_TYPE_GENERICINST:
if (!mono_type_generic_inst_is_valuetype (ptype)) {
cinfo->args[nParm].size = sizeof(gpointer);
add_general (&gr, sz, cinfo->args+nParm);
nParm++;
break;
}
/* Fall through */
case MONO_TYPE_VALUETYPE: {
MonoMarshalType *info;
MonoClass *klass = mono_class_from_mono_type_internal (ptype);
if (sig->pinvoke && !sig->marshalling_disabled)
size = mono_class_native_size(klass, NULL);
else
size = mono_class_value_size(klass, NULL);
if (simpleType != MONO_TYPE_GENERICINST) {
info = mono_marshal_load_type_info(klass);
if ((info->native_size == sizeof(float)) &&
(info->num_fields == 1) &&
(info->fields[0].field->type->type == MONO_TYPE_R4)) {
cinfo->args[nParm].size = sizeof(float);
add_float(&fr, sz, cinfo->args+nParm, FALSE);
nParm ++;
break;
}
if ((info->native_size == sizeof(double)) &&
(info->num_fields == 1) &&
(info->fields[0].field->type->type == MONO_TYPE_R8)) {
cinfo->args[nParm].size = sizeof(double);
add_float(&fr, sz, cinfo->args+nParm, TRUE);
nParm ++;
break;
}
}
cinfo->args[nParm].vtsize = 0;
cinfo->args[nParm].size = 0;
switch (size) {
/*----------------------------------*/
/* On S/390, structures of size 1, */
/* 2, 4, and 8 bytes are passed in */
/* (a) register(s). */
/*----------------------------------*/
case 0:
case 1:
case 2:
case 4:
case 8:
add_general(&gr, sz, cinfo->args+nParm);
cinfo->args[nParm].size = size;
cinfo->args[nParm].regtype = RegTypeStructByVal;
nParm++;
break;
default:
add_stackParm(&gr, sz, cinfo->args+nParm, size, RegTypeStructByVal);
nParm++;
}
}
break;
case MONO_TYPE_TYPEDBYREF: {
add_stackParm(&gr, sz, cinfo->args+nParm, sizeof(uintptr_t), RegTypeStructByAddr);
nParm++;
}
break;
default:
g_error ("Can't trampoline 0x%x", ptype);
}
}
/*----------------------------------------------------------*/
/* Handle the case where there are no implicit arguments */
/*----------------------------------------------------------*/
if ((sig->call_convention == MONO_CALL_VARARG) &&
(nParm > 0) &&
(!sig->pinvoke) &&
(sig->param_count == sig->sentinelpos)) {
gr = S390_LAST_ARG_REG + 1;
fr = S390_LAST_FPARG_REG + 1;
add_general (&gr, sz, &cinfo->sigCookie);
}
/*
* If we are passing a structure back then we make room at
* the end of the parameters that may have been placed on
* the stack
*/
if (cinfo->struct_ret) {
cinfo->ret.offset = sz->stack_size;
sz->stack_size += S390_ALIGN(cinfo->ret.size, align);
}
cinfo->lastgr = gr;
sz->stack_size = sz->stack_size + sz->parm_size;
sz->stack_size = S390_ALIGN(sz->stack_size, sizeof(long));
return (cinfo);
}
/*========================= End of Function ========================*/
/**
*
* @brief Architecture-specific allocation of variables
*
* @param[in] @cfg - Compile control block
*
* Set var information according to the calling convention for s390x.
*
*/
void
mono_arch_allocate_vars (MonoCompile *cfg)
{
MonoMethodSignature *sig;
MonoMethodHeader *header;
MonoInst *inst;
CallInfo *cinfo;
int iParm, iVar, offset, align, size, curinst;
int frame_reg = STK_BASE;
int sArg, eArg;
header = cfg->header;
cfg->flags |= MONO_CFG_HAS_SPILLUP;
/*---------------------------------------------------------*/
/* We use the frame register also for any method that has */
/* filter clauses. This way, when the handlers are called, */
/* the code will reference local variables using the frame */
/* reg instead of the stack pointer: if we had to restore */
/* the stack pointer, we'd corrupt the method frames that */
/* are already on the stack (since filters get called */
/* before stack unwinding happens) when the filter code */
/* would call any method. */
/*---------------------------------------------------------*/
if ((cfg->flags & MONO_CFG_HAS_ALLOCA) || header->num_clauses)
frame_reg = s390_r11;
cfg->frame_reg = frame_reg;
cfg->arch.bkchain_reg = -1;
if (frame_reg != STK_BASE)
cfg->used_int_regs |= (1LL << frame_reg);
sig = mono_method_signature_internal (cfg->method);
if (!cfg->arch.cinfo)
cfg->arch.cinfo = get_call_info (cfg->mempool, sig);
cinfo = cfg->arch.cinfo;
/*--------------------------------------------------------------*/
/* local vars are at a positive offset from the stack pointer */
/* also note that if the function uses alloca, we use s390_r11 */
/* to point at the local variables. */
/* add parameter area size for called functions */
/*--------------------------------------------------------------*/
if (cfg->param_area == 0)
offset = S390_MINIMAL_STACK_SIZE;
else
offset = cfg->param_area;
cfg->sig_cookie = 0;
if (MONO_TYPE_ISSTRUCT(sig->ret)) {
cfg->ret->opcode = OP_REGVAR;
cfg->ret->inst_c0 = cfg->ret->dreg = cinfo->ret.reg;
} else {
switch (mini_get_underlying_type (sig->ret)->type) {
case MONO_TYPE_VOID:
break;
default:
cfg->ret->opcode = OP_REGVAR;
cfg->ret->inst_c0 = cfg->ret->dreg = cinfo->ret.reg;
}
}
if (sig->hasthis) {
inst = cfg->args [0];
if (inst->opcode != OP_REGVAR) {
inst->opcode = OP_REGOFFSET;
inst->inst_basereg = frame_reg;
offset = S390_ALIGN(offset, sizeof(gpointer));
inst->inst_offset = offset;
offset += sizeof (target_mgreg_t);
}
curinst = sArg = 1;
} else {
curinst = sArg = 0;
}
eArg = sig->param_count + sArg;
if (sig->call_convention == MONO_CALL_VARARG)
cfg->sig_cookie += S390_MINIMAL_STACK_SIZE;
for (iParm = sArg; iParm < eArg; ++iParm) {
inst = cfg->args [curinst];
if (inst->opcode != OP_REGVAR) {
switch (cinfo->args[iParm].regtype) {
case RegTypeStructByAddr : {
MonoInst *indir;
size = sizeof (target_mgreg_t);
if (cinfo->args [iParm].reg == STK_BASE) {
/* Similar to the == STK_BASE case below */
cfg->arch.bkchain_reg = s390_r12;
cfg->used_int_regs |= 1 << cfg->arch.bkchain_reg;
inst->opcode = OP_REGOFFSET;
inst->dreg = mono_alloc_preg (cfg);
inst->inst_basereg = cfg->arch.bkchain_reg;
inst->inst_offset = cinfo->args [iParm].offset;
} else {
inst->opcode = OP_REGOFFSET;
inst->dreg = mono_alloc_preg (cfg);
inst->inst_basereg = cfg->frame_reg;
inst->inst_offset = offset;
}
/* Add a level of indirection */
MONO_INST_NEW (cfg, indir, 0);
*indir = *inst;
inst->opcode = OP_VTARG_ADDR;
inst->inst_left = indir;
}
break;
case RegTypeStructByVal : {
MonoInst *indir;
cfg->arch.bkchain_reg = s390_r12;
cfg->used_int_regs |= 1 << cfg->arch.bkchain_reg;
size = cinfo->args[iParm].size;
if (cinfo->args [iParm].reg == STK_BASE) {
int offStruct = 0;
switch(size) {
case 0: case 1: case 2: case 4: case 8:
offStruct = (size < 8 ? sizeof(uintptr_t) - size : 0);
/* fall through */
default:
inst->opcode = OP_REGOFFSET;
inst->dreg = mono_alloc_preg (cfg);
inst->inst_basereg = cfg->arch.bkchain_reg;
inst->inst_offset = cinfo->args [iParm].offset + offStruct;
}
} else {
offset = S390_ALIGN(offset, sizeof(uintptr_t));
inst->opcode = OP_REGOFFSET;
inst->inst_basereg = cfg->frame_reg;
inst->inst_offset = offset;
}
switch (size) {
case 0 : case 1 : case 2 : case 4 : case 8 :
break;
default :
/* Add a level of indirection */
MONO_INST_NEW (cfg, indir, 0);
*indir = *inst;
inst->opcode = OP_VTARG_ADDR;
inst->inst_left = indir;
}
}
break;
default :
if (cinfo->args [iParm].reg == STK_BASE) {
/*
* These arguments are in the previous frame, so we can't
* compute their offset from the current frame pointer right
* now, since cfg->stack_offset is not yet known, so dedicate a
* register holding the previous frame pointer.
*/
cfg->arch.bkchain_reg = s390_r12;
cfg->used_int_regs |= 1 << cfg->arch.bkchain_reg;
inst->opcode = OP_REGOFFSET;
inst->inst_basereg = cfg->arch.bkchain_reg;
size = (cinfo->args[iParm].size < 8
? 8 - cinfo->args[iParm].size
: 0);
inst->inst_offset = cinfo->args [iParm].offset + size;
size = sizeof (long);
} else {
inst->opcode = OP_REGOFFSET;
inst->inst_basereg = frame_reg;
size = (cinfo->args[iParm].size < 8
? sizeof(int)
: sizeof(long));
offset = S390_ALIGN(offset, size);
if (cfg->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE)
inst->inst_offset = offset;
else
inst->inst_offset = offset + (8 - size);
}
}
offset += MAX(size, 8);
}
curinst++;
}
cfg->locals_min_stack_offset = offset;
curinst = cfg->locals_start;
for (iVar = curinst; iVar < cfg->num_varinfo; ++iVar) {
inst = cfg->varinfo [iVar];
if ((inst->flags & MONO_INST_IS_DEAD) ||
(inst->opcode == OP_REGVAR))
continue;
/*--------------------------------------------------*/
/* inst->backend.is_pinvoke indicates native sized */
/* value types this is used by the pinvoke wrappers */
/* when they call functions returning structure */
/*--------------------------------------------------*/
if (inst->backend.is_pinvoke && MONO_TYPE_ISSTRUCT (inst->inst_vtype))
size = mono_class_native_size (mono_class_from_mono_type_internal (inst->inst_vtype),
(guint32 *) &align);
else
size = mono_type_size (inst->inst_vtype, &align);
offset = S390_ALIGN(offset, align);
inst->inst_offset = offset;
inst->opcode = OP_REGOFFSET;
inst->inst_basereg = frame_reg;
offset += size;
DEBUG (g_print("allocating local %d to %ld, size: %d\n",
iVar, inst->inst_offset, size));
}
offset = S390_ALIGN(offset, sizeof(uintptr_t));
cfg->locals_max_stack_offset = offset;
/*------------------------------------------------------*/
/* Reserve space to save LMF and caller saved registers */
/*------------------------------------------------------*/
if (cfg->method->save_lmf)
offset += sizeof (MonoLMF);
/*------------------------------------------------------*/
/* align the offset */
/*------------------------------------------------------*/
cfg->stack_offset = S390_ALIGN(offset, S390_STACK_ALIGNMENT);
/*------------------------------------------------------*/
/* Fix offsets for args whose value is in parent frame */
/*------------------------------------------------------*/
for (iParm = sArg; iParm < eArg; ++iParm) {
inst = cfg->args [iParm];
if (inst->opcode == OP_S390_STKARG) {
inst->opcode = OP_REGOFFSET;
inst->inst_offset += cfg->stack_offset;
}
}
}
/*========================= End of Function ========================*/
/**
*
* @brief Architecture-specific creation of variables
*
* @param[in] @cfg - Compile control block
*
* Create variables for the method.
*
*/
void
mono_arch_create_vars (MonoCompile *cfg)
{
MonoMethodSignature *sig = mono_method_signature_internal (cfg->method);
CallInfo *cinfo;
if (!cfg->arch.cinfo)
cfg->arch.cinfo = get_call_info (cfg->mempool, sig);
cinfo = cfg->arch.cinfo;
if (cinfo->struct_ret) {
cfg->vret_addr = mono_compile_create_var (cfg, mono_get_int_type (), OP_ARG);
if (G_UNLIKELY (cfg->verbose_level > 1)) {
printf ("vret_addr = ");
mono_print_ins (cfg->vret_addr);
}
}
if (cfg->gen_sdb_seq_points) {
MonoInst *ins;
ins = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL);
ins->flags |= MONO_INST_VOLATILE;
cfg->arch.ss_tramp_var = ins;
ins = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL);
ins->flags |= MONO_INST_VOLATILE;
cfg->arch.bp_tramp_var = ins;
}
}
/*========================= End of Function ========================*/
/**
*
* @brief Add a register to the call operation
*
* @param[in] @cfg - Compile control block
* @param[in] @call - Call Instruction
* @param[in] @storage - Register use type
* @param[in] @reg - Register number
* @param[in] @tree - Call arguments
*
* Add register use information to the call sequence
*/
static void
add_outarg_reg2 (MonoCompile *cfg, MonoCallInst *call, ArgStorage storage, int reg, MonoInst *tree)
{
MonoInst *ins;
switch (storage) {
case RegTypeGeneral:
MONO_INST_NEW (cfg, ins, OP_MOVE);
ins->dreg = mono_alloc_ireg (cfg);
ins->sreg1 = tree->dreg;
MONO_ADD_INS (cfg->cbb, ins);
mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, reg, FALSE);
break;
case RegTypeFP:
MONO_INST_NEW (cfg, ins, OP_FMOVE);
ins->dreg = mono_alloc_freg (cfg);
ins->sreg1 = tree->dreg;
MONO_ADD_INS (cfg->cbb, ins);
mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, reg, TRUE);
break;
case RegTypeFPR4:
MONO_INST_NEW (cfg, ins, OP_S390_SETF4RET);
ins->dreg = mono_alloc_freg (cfg);
ins->sreg1 = tree->dreg;
MONO_ADD_INS (cfg->cbb, ins);
mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, reg, TRUE);
break;
default:
g_assert_not_reached ();
}
}
/*========================= End of Function ========================*/
/**
*
* @brief Emit a signature cookie
*
* @param[in] @cfg - Compile control block
* @param[in] @call - Call Instruction
* @param[in] @cinfo - Call Information
*
* Emit the signature cookie as a parameter
*/
static void
emit_sig_cookie (MonoCompile *cfg, MonoCallInst *call, CallInfo *cinfo)
{
MonoMethodSignature *tmpSig;
MonoInst *sig_arg;
cfg->disable_aot = TRUE;
/*
* mono_ArgIterator_Setup assumes the signature cookie is
* passed first and all the arguments which were before it
* passed on the stack after the signature. So compensate
* by passing a different signature.
*/
tmpSig = mono_metadata_signature_dup (call->signature);
tmpSig->param_count -= call->signature->sentinelpos;
tmpSig->sentinelpos = 0;
if (tmpSig->param_count > 0)
memcpy (tmpSig->params,
call->signature->params + call->signature->sentinelpos,
tmpSig->param_count * sizeof(MonoType *));
MONO_INST_NEW (cfg, sig_arg, OP_ICONST);
sig_arg->dreg = mono_alloc_ireg (cfg);
sig_arg->inst_p0 = tmpSig;
MONO_ADD_INS (cfg->cbb, sig_arg);
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, STK_BASE,
cinfo->sigCookie.offset, sig_arg->dreg);
}
/*========================= End of Function ========================*/
/**
*
* @brief Architecture-specific emission of a call operation
*
* @param[in] @cfg - Compile control block
* @param[in] @call - Call Instruction
*
* Process all parameters for a call and generate the sequence of
* operations to perform the call according to the s390x ABI.
*/
void
mono_arch_emit_call (MonoCompile *cfg, MonoCallInst *call)
{
MonoInst *in;
MonoMethodSignature *sig;
MonoInst *ins;
int i, n, lParamArea;
CallInfo *cinfo;
ArgInfo *ainfo = NULL;
int stackSize;
sig = call->signature;
n = sig->param_count + sig->hasthis;
DEBUG (g_print ("Call requires: %d parameters\n",n));
cinfo = get_call_info (cfg->mempool, sig);
stackSize = cinfo->sz.stack_size + cinfo->sz.parm_size;
call->stack_usage = MAX(stackSize, call->stack_usage);
lParamArea = MAX((call->stack_usage-S390_MINIMAL_STACK_SIZE-cinfo->sz.parm_size), 0);
cfg->param_area = MAX(((signed) cfg->param_area), lParamArea); /* FIXME */
cfg->flags |= MONO_CFG_HAS_CALLS;
if (cinfo->struct_ret) {
MONO_INST_NEW (cfg, ins, OP_MOVE);
ins->sreg1 = call->vret_var->dreg;
ins->dreg = mono_alloc_preg (cfg);
MONO_ADD_INS (cfg->cbb, ins);
mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, cinfo->ret.reg, FALSE);
}
for (i = 0; i < n; ++i) {
MonoType *t;
ainfo = cinfo->args + i;
if (i >= sig->hasthis)
t = sig->params [i - sig->hasthis];
else
t = mono_get_int_type ();
t = mini_get_underlying_type (t);
in = call->args [i];
if ((sig->call_convention == MONO_CALL_VARARG) &&
(!sig->pinvoke) &&
(i == sig->sentinelpos)) {
emit_sig_cookie (cfg, call, cinfo);
}
switch (ainfo->regtype) {
case RegTypeGeneral :
add_outarg_reg2 (cfg, call, ainfo->regtype, ainfo->reg, in);
break;
case RegTypeFP :
case RegTypeFPR4 :
if (MONO_TYPE_ISSTRUCT (t)) {
/* Valuetype passed in one fp register */
ainfo->regtype = RegTypeStructByValInFP;
/* Fall through */
} else {
add_outarg_reg2 (cfg, call, ainfo->regtype, ainfo->reg, in);
break;
}
case RegTypeStructByVal :
case RegTypeStructByAddr : {
g_assert (in->klass);
MONO_INST_NEW (cfg, ins, OP_OUTARG_VT);
ins->sreg1 = in->dreg;
ins->klass = in->klass;
ins->backend.size = ainfo->size;
ins->inst_p0 = call;
ins->inst_p1 = mono_mempool_alloc (cfg->mempool, sizeof (ArgInfo));
memcpy (ins->inst_p1, ainfo, sizeof (ArgInfo));
MONO_ADD_INS (cfg->cbb, ins);
break;
}
case RegTypeBase :
if (!m_type_is_byref (t) && t->type == MONO_TYPE_R4) {
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG,
STK_BASE, ainfo->offset + 4,
in->dreg);
} else if (!m_type_is_byref (t) && (t->type == MONO_TYPE_R8)) {
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG,
STK_BASE, ainfo->offset,
in->dreg);
} else {
MONO_INST_NEW (cfg, ins, OP_STORE_MEMBASE_REG);
ins->inst_destbasereg = STK_BASE;
ins->inst_offset = ainfo->offset;
ins->sreg1 = in->dreg;
MONO_ADD_INS (cfg->cbb, ins);
}
break;
default:
g_assert_not_reached ();
break;
}
}
/*
* Handle the case where there are no implicit arguments
*/
if ((sig->call_convention == MONO_CALL_VARARG) &&
(!sig->pinvoke) &&
(i == sig->sentinelpos)) {
emit_sig_cookie (cfg, call, cinfo);
}
}
/*========================= End of Function ========================*/
/**
*
* @brief Architecture-specific Value Type parameter processing
*
* @param[in] @cfg - Compile control block
* @param[in] @ins - OP_OUTARG_VT instruction describing the argument
* @param[in] @src - Source parameter
*
* Process value type parameters for a call operation
*/
void
mono_arch_emit_outarg_vt (MonoCompile *cfg, MonoInst *ins, MonoInst *src)
{
MonoCallInst *call = (MonoCallInst*) ins->inst_p0;
ArgInfo *ainfo = (ArgInfo *) ins->inst_p1;
if (ainfo->regtype == RegTypeStructByVal) {
if (ainfo->reg != STK_BASE) {
emit_outarg_vtr (cfg, ins, src);
} else {
emit_outarg_vts (cfg, ins, src);
}
} else if (ainfo->regtype == RegTypeStructByValInFP) {
int dreg = mono_alloc_freg (cfg);
if (ainfo->size == 4) {
MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADR4_MEMBASE, dreg, src->dreg, 0);
MONO_EMIT_NEW_UNALU (cfg, OP_S390_SETF4RET, dreg, dreg);
} else {
g_assert (ainfo->size == 8);
MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADR8_MEMBASE, dreg, src->dreg, 0);
}
mono_call_inst_add_outarg_reg (cfg, call, dreg, ainfo->reg, TRUE);
} else {
ERROR_DECL (error);
MonoMethodHeader *header;
MonoInst *vtcopy = mono_compile_create_var (cfg, m_class_get_byval_arg (src->klass), OP_LOCAL);
MonoInst *load;
int ovf_size = ainfo->vtsize,
srcReg;
guint32 size;
/* FIXME: alignment? */
if (call->signature->pinvoke && !call->signature->marshalling_disabled) {
size = mono_type_native_stack_size (m_class_get_byval_arg (src->klass), NULL);
vtcopy->backend.is_pinvoke = 1;
} else {
size = mini_type_stack_size (m_class_get_byval_arg (src->klass), NULL);
}
if (size > 0)
g_assert (ovf_size > 0);
header = mono_method_get_header_checked (cfg->method, error);
mono_error_assert_ok (error); /* FIXME don't swallow the error */
if ((cfg->flags & MONO_CFG_HAS_ALLOCA) || header->num_clauses)
srcReg = s390_r11;
else
srcReg = STK_BASE;
EMIT_NEW_VARLOADA (cfg, load, vtcopy, vtcopy->inst_vtype);
mini_emit_memcpy (cfg, load->dreg, 0, src->dreg, 0, size, TARGET_SIZEOF_VOID_P);
if (ainfo->reg == STK_BASE) {
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, srcReg, ainfo->offset, load->dreg);
if (cfg->compute_gc_maps) {
MonoInst *def;
EMIT_NEW_GC_PARAM_SLOT_LIVENESS_DEF (cfg, def, ainfo->offset, m_class_get_byval_arg (ins->klass));
}
} else
mono_call_inst_add_outarg_reg (cfg, call, load->dreg, ainfo->reg, FALSE);
}
}
/*========================= End of Function ========================*/
/**
*
* @brief Architecture-specific call value return processing
*
* @param[in] @cfg - Compile control block
* @param[in] @method - Method
* @param[in] @val - Instruction representing the result returned to method
*
* Create the sequence to unload the value returned from a call
*/
void
mono_arch_emit_setret (MonoCompile *cfg, MonoMethod *method, MonoInst *val)
{
MonoType *ret = mini_get_underlying_type (mono_method_signature_internal (method)->ret);
if (!m_type_is_byref (ret)) {
if (ret->type == MONO_TYPE_R4) {
MONO_EMIT_NEW_UNALU (cfg, OP_S390_SETF4RET, s390_f0, val->dreg);
return;
} else if (ret->type == MONO_TYPE_R8) {
MONO_EMIT_NEW_UNALU (cfg, OP_FMOVE, s390_f0, val->dreg);
return;
}
}
MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
}
/*========================= End of Function ========================*/
/**
*
* @brief Replace compound compare/branch operations with single operation
*
* @param[in] @bb - Basic block
* @param[in] @ins - Current instruction
* @param[in] @cc - Condition code of branch
* @param[in] @logical - Whether comparison is signed or logical
*
* Perform a peephole pass over the code, looking for simple optimizations
* that combine compare/branch instructions into a single operation.
*/
static void
compare_and_branch(MonoBasicBlock *bb, MonoInst *ins, int cc, gboolean logical)
{
MonoInst *last;
if (mono_hwcap_s390x_has_gie) {
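/*
 * The general-instruction-extension facility supplies fused
 * compare-and-branch instructions (CRJ/CGRJ/CLRJ/CLGRJ and their
 * immediate forms), so the separate compare matched below can be
 * folded into the branch itself.
 */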
last = mono_inst_prev (ins, FILTER_IL_SEQ_POINT);
ins->sreg1 = last->sreg1;
ins->sreg2 = last->sreg2;
ins->sreg3 = cc;
switch(last->opcode) {
case OP_ICOMPARE:
if (logical)
ins->opcode = OP_S390_CLRJ;
else
ins->opcode = OP_S390_CRJ;
MONO_DELETE_INS(bb, last);
break;
case OP_COMPARE:
case OP_LCOMPARE:
if (logical)
ins->opcode = OP_S390_CLGRJ;
else
ins->opcode = OP_S390_CGRJ;
MONO_DELETE_INS(bb, last);
break;
case OP_ICOMPARE_IMM:
ins->backend.data = (gpointer) last->inst_imm;
if (logical)
ins->opcode = OP_S390_CLIJ;
else
ins->opcode = OP_S390_CIJ;
MONO_DELETE_INS(bb, last);
break;
case OP_COMPARE_IMM:
case OP_LCOMPARE_IMM:
ins->backend.data = (gpointer) last->inst_imm;
if (logical)
ins->opcode = OP_S390_CLGIJ;
else
ins->opcode = OP_S390_CGIJ;
MONO_DELETE_INS(bb, last);
break;
}
}
}
/*========================= End of Function ========================*/
/**
*
* @brief Architecture-specific peephole pass 1 processing
*
* @param[in] @cfg - Compile control block
* @param[in] @bb - Basic block
*
* Perform a peephole pass over the code looking for compare-and-branch
* optimizations.
*/
void
mono_arch_peephole_pass_1 (MonoCompile *cfg, MonoBasicBlock *bb)
{
MonoInst *ins, *n;
MONO_BB_FOR_EACH_INS_SAFE (bb, n, ins) {
switch (ins->opcode) {
case OP_IBEQ:
case OP_LBEQ:
compare_and_branch(bb, ins, S390_CC_EQ, FALSE);
break;
case OP_LBNE_UN:
case OP_IBNE_UN:
compare_and_branch(bb, ins, S390_CC_NE, TRUE);
break;
case OP_LBLT:
case OP_IBLT:
compare_and_branch(bb, ins, S390_CC_LT, FALSE);
break;
case OP_LBLT_UN:
case OP_IBLT_UN:
compare_and_branch(bb, ins, S390_CC_LT, TRUE);
break;
case OP_LBGT:
case OP_IBGT:
compare_and_branch(bb, ins, S390_CC_GT, FALSE);
break;
case OP_LBGT_UN:
case OP_IBGT_UN:
compare_and_branch(bb, ins, S390_CC_GT, TRUE);
break;
case OP_LBGE:
case OP_IBGE:
compare_and_branch(bb, ins, S390_CC_GE, FALSE);
break;
case OP_LBGE_UN:
case OP_IBGE_UN:
compare_and_branch(bb, ins, S390_CC_GE, TRUE);
break;
case OP_LBLE:
case OP_IBLE:
compare_and_branch(bb, ins, S390_CC_LE, FALSE);
break;
case OP_LBLE_UN:
case OP_IBLE_UN:
compare_and_branch(bb, ins, S390_CC_LE, TRUE);
break;
// default:
// mono_peephole_ins (bb, ins);
}
}
}
/*========================= End of Function ========================*/
/**
*
* @brief Architecture-specific peephole pass 2 processing
*
* @param[in] @cfg - Compile control block
* @param[in] @bb - Basic block
*
* Perform a peephole pass over the code looking for simple optimizations.
*/
void
mono_arch_peephole_pass_2 (MonoCompile *cfg, MonoBasicBlock *bb)
{
MonoInst *ins, *n, *last_ins = NULL;
MONO_BB_FOR_EACH_INS_SAFE (bb, n, ins) {
switch (ins->opcode) {
case OP_LOADU4_MEMBASE:
case OP_LOADI4_MEMBASE:
if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_REG) &&
ins->inst_basereg == last_ins->inst_destbasereg &&
ins->inst_offset == last_ins->inst_offset) {
ins->opcode = (ins->opcode == OP_LOADI4_MEMBASE) ? OP_ICONV_TO_I4 : OP_ICONV_TO_U4;
ins->sreg1 = last_ins->sreg1;
}
break;
}
mono_peephole_ins (bb, ins);
}
}
/*========================= End of Function ========================*/
/**
*
* @brief Architecture-specific lowering pass processing
*
* @param[in] @cfg - Compile control block
* @param[in] @bb - Basic block
*
* Perform a lowering pass over the code, decomposing immediate-form
* operations that cannot be encoded directly.
*/
void
mono_arch_lowering_pass (MonoCompile *cfg, MonoBasicBlock *bb)
{
MonoInst *ins, *next;
MONO_BB_FOR_EACH_INS_SAFE (bb, next, ins) {
switch (ins->opcode) {
case OP_DIV_IMM:
case OP_REM_IMM:
case OP_IDIV_IMM:
case OP_IREM_IMM:
case OP_IDIV_UN_IMM:
case OP_IREM_UN_IMM:
case OP_LAND_IMM:
case OP_LOR_IMM:
case OP_LREM_IMM:
case OP_LXOR_IMM:
case OP_LOCALLOC_IMM:
mono_decompose_op_imm (cfg, bb, ins);
break;
case OP_LADD_IMM:
if (!s390_is_imm16 (ins->inst_imm))
/* This is created by the memcpy code which ignores is_inst_imm */
mono_decompose_op_imm (cfg, bb, ins);
break;
default:
break;
}
}
bb->max_vreg = cfg->next_vreg;
}
/*========================= End of Function ========================*/
/**
*
* @brief Emit float-to-int sequence
*
* @param[in] @cfg - Compile control block
* @param[in] @code - Current instruction area
* @param[in] @dreg - Destination general register
* @param[in] @sreg - Source floating point register
* @param[in] @size - Size of destination
* @param[in] @is_signed - Destination is signed/unsigned
* @returns Next instruction location
*
* Emit instructions to convert a single precision floating point value to an integer
*/
static guchar *
emit_float_to_int (MonoCompile *cfg, guchar *code, int dreg, int sreg, int size, gboolean is_signed)
{
/* sreg is a float, dreg is an integer reg. */
if (is_signed) {
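/* Conversion mask 5 selects round-toward-zero, matching C truncation semantics */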
s390_cgebr (code, dreg, 5, sreg);
switch (size) {
case 1:
s390_ltgr (code, dreg, dreg);
s390_jnl (code, 4);
s390_oill (code, dreg, 0x80);
s390_lghi (code, s390_r0, 0xff);
s390_ngr (code, dreg, s390_r0);
break;
case 2:
s390_ltgr (code, dreg, dreg);
s390_jnl (code, 4);
s390_oill (code, dreg, 0x8000);
s390_llill(code, s390_r0, 0xffff);
s390_ngr (code, dreg, s390_r0);
break;
}
} else {
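/*
 * Unsigned conversion: 0x4f000000 is 2^31 as a float and 0x4f800000
 * is 2^32.  Values below 2^31 convert directly; anything larger has
 * 2^32 subtracted first so that the signed conversion produces the
 * correct unsigned bit pattern.
 */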
short *o[1];
s390_lgdr (code, s390_r14, s390_f14);
s390_lgdr (code, s390_r13, s390_f15);
S390_SET (code, s390_r0, 0x4f000000u);
s390_ldgr (code, s390_f14, s390_r0);
s390_ler (code, s390_f15, sreg);
s390_cebr (code, s390_f15, s390_f14);
s390_jl (code, 0); CODEPTR (code, o[0]);
S390_SET (code, s390_r0, 0x4f800000u);
s390_ldgr (code, s390_f14, s390_r0);
s390_sebr (code, s390_f15, s390_f14);
s390_cfebr (code, dreg, 7, s390_f15);
s390_j (code, 4);
PTRSLOT (code, o[0]);
s390_cfebr (code, dreg, 5, sreg);
switch (size) {
case 1:
s390_lghi (code, s390_r0, 0xff);
s390_ngr (code, dreg, s390_r0);
break;
case 2:
s390_llill(code, s390_r0, 0xffff);
s390_ngr (code, dreg, s390_r0);
break;
}
s390_ldgr (code, s390_f14, s390_r14);
s390_ldgr (code, s390_f15, s390_r13);
}
return code;
}
/*========================= End of Function ========================*/
/**
*
* @brief Emit double-to-int sequence
*
* @param[in] @cfg - Compile control block
* @param[in] @code - Current instruction area
* @param[in] @dreg - Destination general register
* @param[in] @sreg - Source floating point register
* @param[in] @size - Size of destination
* @param[in] @is_signed - Destination is signed/unsigned
* @returns Next instruction location
*
* Emit instructions to convert a double precision floating point value to an integer
*/
static guchar*
emit_double_to_int (MonoCompile *cfg, guchar *code, int dreg, int sreg, int size, gboolean is_signed)
{
/* sreg is a float, dreg is an integer reg. */
if (is_signed) {
s390_cgdbr (code, dreg, 5, sreg);
switch (size) {
case 1:
s390_ltgr (code, dreg, dreg);
s390_jnl (code, 4);
s390_oill (code, dreg, 0x80);
s390_lghi (code, s390_r0, 0xff);
s390_ngr (code, dreg, s390_r0);
break;
case 2:
s390_ltgr (code, dreg, dreg);
s390_jnl (code, 4);
s390_oill (code, dreg, 0x8000);
s390_llill(code, s390_r0, 0xffff);
s390_ngr (code, dreg, s390_r0);
break;
}
} else {
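/*
 * Same scheme as the float path: 0x41e0000000000000 is 2^31 as a
 * double and 0x41f0000000000000 is 2^32; subtracting 2^32 before the
 * signed conversion reproduces the unsigned bit pattern.
 */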
short *o[1];
s390_lgdr (code, s390_r14, s390_f14);
s390_lgdr (code, s390_r13, s390_f15);
S390_SET (code, s390_r0, 0x41e0000000000000llu);
s390_ldgr (code, s390_f14, s390_r0);
s390_ldr (code, s390_f15, sreg);
s390_cdbr (code, s390_f15, s390_f14);
s390_jl (code, 0); CODEPTR (code, o[0]);
S390_SET (code, s390_r0, 0x41f0000000000000llu);
s390_ldgr (code, s390_f14, s390_r0);
s390_sdbr (code, s390_f15, s390_f14);
s390_cfdbr (code, dreg, 7, s390_f15);
s390_j (code, 4);
PTRSLOT (code, o[0]);
s390_cfdbr (code, dreg, 5, sreg);
switch (size) {
case 1:
s390_lghi (code, s390_r0, 0xff);
s390_ngr (code, dreg, s390_r0);
break;
case 2:
s390_llill(code, s390_r0, 0xffff);
s390_ngr (code, dreg, s390_r0);
break;
}
s390_ldgr (code, s390_f14, s390_r14);
s390_ldgr (code, s390_f15, s390_r13);
}
return code;
}
/*========================= End of Function ========================*/
/**
*
* @brief Check if branch is for unsigned comparison
*
* @param[in] @next - Next instruction
* @returns True if the branch is for an unsigned comparison
*
* Determine if next instruction is a branch for an unsigned comparison
*/
static gboolean
is_unsigned (MonoInst *next)
{
if ((next) &&
(((next->opcode >= OP_IBNE_UN) &&
(next->opcode <= OP_IBLT_UN)) ||
((next->opcode >= OP_LBNE_UN) &&
(next->opcode <= OP_LBLT_UN)) ||
((next->opcode >= OP_COND_EXC_NE_UN) &&
(next->opcode <= OP_COND_EXC_LT_UN)) ||
((next->opcode >= OP_COND_EXC_INE_UN) &&
(next->opcode <= OP_COND_EXC_ILT_UN)) ||
((next->opcode == OP_CLT_UN) ||
(next->opcode == OP_CGT_UN) ||
(next->opcode == OP_ICGE_UN) ||
(next->opcode == OP_ICLE_UN)) ||
((next->opcode == OP_ICLT_UN) ||
(next->opcode == OP_ICGT_UN) ||
(next->opcode == OP_LCLT_UN) ||
(next->opcode == OP_LCGT_UN))))
return TRUE;
else
return FALSE;
}
/*========================= End of Function ========================*/
/**
*
* @brief Architecture-specific processing of a basic block
*
* @param[in] @cfg - Compile control block
* @param[in] @bb - Basic block
*
* Process instructions within basic block emitting s390x instructions
* based on the VM operation codes
*/
void
mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
{
MonoInst *ins;
MonoCallInst *call;
guint8 *code = cfg->native_code + cfg->code_len;
int src2;
/* we don't align basic blocks of loops on s390 */
if (cfg->verbose_level > 2)
g_print ("Basic block %d starting at offset 0x%x\n", bb->block_num, bb->native_offset);
MONO_BB_FOR_EACH_INS (bb, ins) {
const guint offset = code - cfg->native_code;
set_code_cursor (cfg, code);
int max_len = ins_get_size (ins->opcode);
code = realloc_code (cfg, max_len);
mono_debug_record_line_number (cfg, ins, offset);
switch (ins->opcode) {
case OP_STOREI1_MEMBASE_IMM: {
s390_lghi (code, s390_r0, ins->inst_imm);
S390_LONG (code, stcy, stc, s390_r0, 0,
ins->inst_destbasereg, ins->inst_offset);
}
break;
case OP_STOREI2_MEMBASE_IMM: {
s390_lghi (code, s390_r0, ins->inst_imm);
S390_LONG (code, sthy, sth, s390_r0, 0,
ins->inst_destbasereg, ins->inst_offset);
}
break;
case OP_STOREI4_MEMBASE_IMM: {
s390_lgfi (code, s390_r0, ins->inst_imm);
S390_LONG (code, sty, st, s390_r0, 0,
ins->inst_destbasereg, ins->inst_offset);
}
break;
case OP_STORE_MEMBASE_IMM:
case OP_STOREI8_MEMBASE_IMM: {
S390_SET (code, s390_r0, ins->inst_imm);
S390_LONG (code, stg, stg, s390_r0, 0,
ins->inst_destbasereg, ins->inst_offset);
}
break;
case OP_STOREI1_MEMBASE_REG: {
S390_LONG (code, stcy, stc, ins->sreg1, 0,
ins->inst_destbasereg, ins->inst_offset);
}
break;
case OP_STOREI2_MEMBASE_REG: {
S390_LONG (code, sthy, sth, ins->sreg1, 0,
ins->inst_destbasereg, ins->inst_offset);
}
break;
case OP_STOREI4_MEMBASE_REG: {
S390_LONG (code, sty, st, ins->sreg1, 0,
ins->inst_destbasereg, ins->inst_offset);
}
break;
case OP_STORE_MEMBASE_REG:
case OP_STOREI8_MEMBASE_REG: {
S390_LONG (code, stg, stg, ins->sreg1, 0,
ins->inst_destbasereg, ins->inst_offset);
}
break;
case OP_LOADU4_MEM:
g_assert_not_reached ();
break;
case OP_LOAD_MEMBASE:
case OP_LOADI8_MEMBASE: {
S390_LONG (code, lg, lg, ins->dreg, 0,
ins->inst_basereg, ins->inst_offset);
}
break;
case OP_LOADI4_MEMBASE: {
S390_LONG (code, lgf, lgf, ins->dreg, 0,
ins->inst_basereg, ins->inst_offset);
}
break;
case OP_LOADU4_MEMBASE: {
S390_LONG (code, llgf, llgf, ins->dreg, 0,
ins->inst_basereg, ins->inst_offset);
}
break;
case OP_LOADU1_MEMBASE: {
S390_LONG (code, llgc, llgc, ins->dreg, 0,
ins->inst_basereg, ins->inst_offset);
}
break;
case OP_LOADI1_MEMBASE: {
S390_LONG (code, lgb, lgb, ins->dreg, 0,
ins->inst_basereg, ins->inst_offset);
}
break;
case OP_LOADU2_MEMBASE: {
S390_LONG (code, llgh, llgh, ins->dreg, 0,
ins->inst_basereg, ins->inst_offset);
}
break;
case OP_LOADI2_MEMBASE: {
S390_LONG (code, lgh, lgh, ins->dreg, 0,
ins->inst_basereg, ins->inst_offset);
}
break;
case OP_LCONV_TO_I1: {
s390_lgbr (code, ins->dreg, ins->sreg1);
}
break;
case OP_LCONV_TO_I2: {
s390_lghr (code, ins->dreg, ins->sreg1);
}
break;
case OP_LCONV_TO_U1: {
s390_llgcr (code, ins->dreg, ins->sreg1);
}
break;
case OP_LCONV_TO_U2: {
s390_llghr (code, ins->dreg, ins->sreg1);
}
break;
case OP_ICONV_TO_I1: {
s390_lgbr (code, ins->dreg, ins->sreg1);
}
break;
case OP_ICONV_TO_I2: {
s390_lghr (code, ins->dreg, ins->sreg1);
}
break;
case OP_ICONV_TO_U1: {
s390_llgcr (code, ins->dreg, ins->sreg1);
}
break;
case OP_ICONV_TO_U2: {
s390_llghr (code, ins->dreg, ins->sreg1);
}
break;
case OP_ICONV_TO_U4: {
s390_llgfr (code, ins->dreg, ins->sreg1);
}
break;
case OP_ICONV_TO_I4: {
s390_lgfr (code, ins->dreg, ins->sreg1);
}
break;
case OP_COMPARE:
case OP_LCOMPARE: {
if (is_unsigned (ins->next))
s390_clgr (code, ins->sreg1, ins->sreg2);
else
s390_cgr (code, ins->sreg1, ins->sreg2);
}
break;
case OP_ICOMPARE: {
if (is_unsigned (ins->next))
s390_clr (code, ins->sreg1, ins->sreg2);
else
s390_cr (code, ins->sreg1, ins->sreg2);
}
break;
case OP_COMPARE_IMM:
case OP_LCOMPARE_IMM: {
gboolean branchUn = is_unsigned (ins->next);
if ((ins->inst_imm == 0) && (!branchUn)) {
s390_ltgr (code, ins->sreg1, ins->sreg1);
} else {
S390_SET (code, s390_r0, ins->inst_imm);
if (branchUn)
s390_clgr (code, ins->sreg1, s390_r0);
else
s390_cgr (code, ins->sreg1, s390_r0);
}
}
break;
case OP_ICOMPARE_IMM: {
gboolean branchUn = is_unsigned (ins->next);
if ((ins->inst_imm == 0) && (!branchUn)) {
s390_ltr (code, ins->sreg1, ins->sreg1);
} else {
S390_SET (code, s390_r0, ins->inst_imm);
if (branchUn)
s390_clr (code, ins->sreg1, s390_r0);
else
s390_cr (code, ins->sreg1, s390_r0);
}
}
break;
case OP_BREAK: {
code = emit_call (cfg, code, MONO_PATCH_INFO_JIT_ICALL_ID,
GUINT_TO_POINTER (MONO_JIT_ICALL_mono_break));
}
break;
case OP_ADDCC: {
if (mono_hwcap_s390x_has_mlt) {
s390_agrk (code, ins->dreg, ins->sreg1, ins->sreg2);
} else {
CHECK_SRCDST_COM;
s390_agr (code, ins->dreg, src2);
}
}
break;
case OP_LADD: {
if (mono_hwcap_s390x_has_mlt) {
s390_agrk (code, ins->dreg, ins->sreg1, ins->sreg2);
} else {
CHECK_SRCDST_COM;
s390_agr (code, ins->dreg, src2);
}
}
break;
case OP_ADC: {
CHECK_SRCDST_COM;
s390_alcgr (code, ins->dreg, src2);
}
break;
case OP_ADD_IMM: {
if (mono_hwcap_s390x_has_mlt) {
if (s390_is_imm16 (ins->inst_imm)) {
s390_aghik(code, ins->dreg, ins->sreg1, ins->inst_imm);
} else {
S390_SET (code, s390_r0, ins->inst_imm);
s390_agrk (code, ins->dreg, ins->sreg1, s390_r0);
}
} else {
if (ins->dreg != ins->sreg1) {
s390_lgr (code, ins->dreg, ins->sreg1);
}
if (s390_is_imm16 (ins->inst_imm)) {
s390_aghi (code, ins->dreg, ins->inst_imm);
} else if (s390_is_imm32 (ins->inst_imm)) {
s390_agfi (code, ins->dreg, ins->inst_imm);
} else {
S390_SET (code, s390_r0, ins->inst_imm);
s390_agr (code, ins->dreg, s390_r0);
}
}
}
break;
case OP_LADD_IMM: {
if (mono_hwcap_s390x_has_mlt) {
if (s390_is_imm16 (ins->inst_imm)) {
s390_aghik(code, ins->dreg, ins->sreg1, ins->inst_imm);
} else {
S390_SET (code, s390_r0, ins->inst_imm);
s390_agrk (code, ins->dreg, ins->sreg1, s390_r0);
}
} else {
if (ins->dreg != ins->sreg1) {
s390_lgr (code, ins->dreg, ins->sreg1);
}
if (s390_is_imm32 (ins->inst_imm)) {
s390_agfi (code, ins->dreg, ins->inst_imm);
} else {
S390_SET (code, s390_r0, ins->inst_imm);
s390_agr (code, ins->dreg, s390_r0);
}
}
}
break;
case OP_ADC_IMM: {
if (ins->dreg != ins->sreg1) {
s390_lgr (code, ins->dreg, ins->sreg1);
}
if (s390_is_imm16 (ins->inst_imm)) {
s390_lghi (code, s390_r0, ins->inst_imm);
s390_alcgr (code, ins->dreg, s390_r0);
} else {
S390_SET (code, s390_r0, ins->inst_imm);
s390_alcgr (code, ins->dreg, s390_r0);
}
}
break;
case OP_IADD_OVF:
case OP_S390_IADD_OVF: {
CHECK_SRCDST_COM;
s390_ar (code, ins->dreg, src2);
EMIT_COND_SYSTEM_EXCEPTION (S390_CC_OV, "OverflowException");
s390_lgfr (code, ins->dreg, ins->dreg);
}
break;
case OP_IADD_OVF_UN:
case OP_S390_IADD_OVF_UN: {
CHECK_SRCDST_COM;
s390_alr (code, ins->dreg, src2);
EMIT_COND_SYSTEM_EXCEPTION (S390_CC_CY, "OverflowException");
s390_llgfr (code, ins->dreg, ins->dreg);
}
break;
case OP_ADD_OVF_CARRY: {
CHECK_SRCDST_COM;
s390_lghi (code, s390_r0, 0);
s390_lgr (code, s390_r1, s390_r0);
s390_alcgr (code, s390_r0, s390_r1);
s390_agr (code, ins->dreg, src2);
EMIT_COND_SYSTEM_EXCEPTION (S390_CC_OV, "OverflowException");
s390_agr (code, ins->dreg, s390_r0);
EMIT_COND_SYSTEM_EXCEPTION (S390_CC_OV, "OverflowException");
}
break;
case OP_ADD_OVF_UN_CARRY: {
CHECK_SRCDST_COM;
s390_alcgr (code, ins->dreg, src2);
EMIT_COND_SYSTEM_EXCEPTION (S390_CC_CY, "OverflowException");
}
break;
case OP_SUBCC: {
if (mono_hwcap_s390x_has_mlt) {
s390_sgrk (code, ins->dreg, ins->sreg1, ins->sreg2);
} else {
CHECK_SRCDST_NCOM;
s390_sgr (code, ins->dreg, src2);
}
}
break;
case OP_LSUB: {
if (mono_hwcap_s390x_has_mlt) {
s390_sgrk (code, ins->dreg, ins->sreg1, ins->sreg2);
} else {
CHECK_SRCDST_NCOM;
s390_sgr (code, ins->dreg, src2);
}
}
break;
case OP_SBB: {
CHECK_SRCDST_NCOM;
s390_slbgr(code, ins->dreg, src2);
}
break;
case OP_SUB_IMM: {
if (ins->dreg != ins->sreg1) {
s390_lgr (code, ins->dreg, ins->sreg1);
}
if (s390_is_imm16 (-ins->inst_imm)) {
s390_aghi (code, ins->dreg, -ins->inst_imm);
} else if (s390_is_imm32 (-ins->inst_imm)) {
s390_slgfi (code, ins->dreg, ins->inst_imm);
} else {
S390_SET (code, s390_r0, ins->inst_imm);
s390_slgr (code, ins->dreg, s390_r0);
}
}
break;
case OP_LSUB_IMM: {
if (ins->dreg != ins->sreg1) {
s390_lgr (code, ins->dreg, ins->sreg1);
}
if (s390_is_imm16 (-ins->inst_imm)) {
s390_aghi (code, ins->dreg, -ins->inst_imm);
} else if (s390_is_imm32 (-ins->inst_imm)) {
s390_slgfi (code, ins->dreg, ins->inst_imm);
} else {
S390_SET (code, s390_r0, ins->inst_imm);
s390_slgr (code, ins->dreg, s390_r0);
}
}
break;
case OP_SBB_IMM: {
if (ins->dreg != ins->sreg1) {
s390_lgr (code, ins->dreg, ins->sreg1);
}
if (s390_is_imm16 (-ins->inst_imm)) {
s390_lghi (code, s390_r0, ins->inst_imm);
s390_slbgr (code, ins->dreg, s390_r0);
} else {
S390_SET (code, s390_r0, ins->inst_imm);
s390_slbgr(code, ins->dreg, s390_r0);
}
}
break;
case OP_SUB_OVF_CARRY: {
CHECK_SRCDST_NCOM;
s390_lghi (code, s390_r0, 0);
s390_lgr (code, s390_r1, s390_r0);
s390_slbgr (code, s390_r0, s390_r1);
s390_sgr (code, ins->dreg, src2);
EMIT_COND_SYSTEM_EXCEPTION (S390_CC_OV, "OverflowException");
s390_agr (code, ins->dreg, s390_r0);
EMIT_COND_SYSTEM_EXCEPTION (S390_CC_OV, "OverflowException");
}
break;
case OP_SUB_OVF_UN_CARRY: {
CHECK_SRCDST_NCOM;
s390_slbgr (code, ins->dreg, src2);
EMIT_COND_SYSTEM_EXCEPTION (S390_CC_NC, "OverflowException");
}
break;
case OP_LAND: {
if (mono_hwcap_s390x_has_mlt) {
s390_ngrk (code, ins->dreg, ins->sreg1, ins->sreg2);
} else {
if (ins->sreg1 == ins->dreg) {
s390_ngr (code, ins->dreg, ins->sreg2);
} else {
if (ins->sreg2 == ins->dreg) {
s390_ngr (code, ins->dreg, ins->sreg1);
} else {
s390_lgr (code, ins->dreg, ins->sreg1);
s390_ngr (code, ins->dreg, ins->sreg2);
}
}
}
}
break;
case OP_AND_IMM: {
S390_SET_MASK (code, s390_r0, ins->inst_imm);
if (mono_hwcap_s390x_has_mlt) {
s390_ngrk (code, ins->dreg, ins->sreg1, s390_r0);
} else {
if (ins->dreg != ins->sreg1) {
s390_lgr (code, ins->dreg, ins->sreg1);
}
s390_ngr (code, ins->dreg, s390_r0);
}
}
break;
case OP_LDIV: {
s390_lgr (code, s390_r1, ins->sreg1);
s390_dsgr (code, s390_r0, ins->sreg2);
s390_lgr (code, ins->dreg, s390_r1);
}
break;
case OP_LDIV_UN: {
s390_lgr (code, s390_r1, ins->sreg1);
s390_lghi (code, s390_r0, 0);
s390_dlgr (code, s390_r0, ins->sreg2);
s390_lgr (code, ins->dreg, s390_r1);
}
break;
case OP_LREM: {
s390_lgr (code, s390_r1, ins->sreg1);
s390_dsgr (code, s390_r0, ins->sreg2);
s390_lgr (code, ins->dreg, s390_r0);
break;
}
case OP_LREM_IMM: {
if (s390_is_imm16 (ins->inst_imm)) {
s390_lghi (code, s390_r13, ins->inst_imm);
} else {
s390_lgfi (code, s390_r13, ins->inst_imm);
}
s390_lgr (code, s390_r1, ins->sreg1); /* dividend belongs in the odd register of the r0:r1 pair used by dsgr */
s390_dsgr (code, s390_r0, s390_r13);
s390_lgfr (code, ins->dreg, s390_r0);
}
break;
case OP_LREM_UN: {
s390_lgr (code, s390_r1, ins->sreg1);
s390_lghi (code, s390_r0, 0);
s390_dlgr (code, s390_r0, ins->sreg2);
s390_lgr (code, ins->dreg, s390_r0);
}
break;
case OP_LOR: {
if (mono_hwcap_s390x_has_mlt) {
s390_ogrk (code, ins->dreg, ins->sreg1, ins->sreg2);
} else {
if (ins->sreg1 == ins->dreg) {
s390_ogr (code, ins->dreg, ins->sreg2);
} else {
if (ins->sreg2 == ins->dreg) {
s390_ogr (code, ins->dreg, ins->sreg1);
} else {
s390_lgr (code, ins->dreg, ins->sreg1);
s390_ogr (code, ins->dreg, ins->sreg2);
}
}
}
}
break;
case OP_OR_IMM: {
S390_SET_MASK(code, s390_r0, ins->inst_imm);
if (mono_hwcap_s390x_has_mlt) {
s390_ogrk (code, ins->dreg, ins->sreg1, s390_r0);
} else {
if (ins->dreg != ins->sreg1) {
s390_lgr (code, ins->dreg, ins->sreg1);
}
s390_ogr (code, ins->dreg, s390_r0);
}
}
break;
case OP_LXOR: {
if (mono_hwcap_s390x_has_mlt) {
s390_xgrk (code, ins->dreg, ins->sreg1, ins->sreg2);
} else {
if (ins->sreg1 == ins->dreg) {
s390_xgr (code, ins->dreg, ins->sreg2);
}
else {
if (ins->sreg2 == ins->dreg) {
s390_xgr (code, ins->dreg, ins->sreg1);
}
else {
s390_lgr (code, ins->dreg, ins->sreg1);
s390_xgr (code, ins->dreg, ins->sreg2);
}
}
}
}
break;
case OP_XOR_IMM: {
S390_SET_MASK(code, s390_r0, ins->inst_imm);
if (mono_hwcap_s390x_has_mlt) {
s390_xgrk (code, ins->dreg, ins->sreg1, s390_r0);
} else {
if (ins->dreg != ins->sreg1) {
s390_lgr (code, ins->dreg, ins->sreg1);
}
s390_xgr (code, ins->dreg, s390_r0);
}
}
break;
case OP_LSHL: {
CHECK_SRCDST_NCOM;
s390_sllg (code, ins->dreg, ins->dreg, src2, 0);
}
break;
case OP_SHL_IMM:
case OP_LSHL_IMM: {
if (ins->sreg1 != ins->dreg) {
s390_lgr (code, ins->dreg, ins->sreg1);
}
s390_sllg (code, ins->dreg, ins->dreg, 0, (ins->inst_imm & 0x3f));
}
break;
case OP_LSHR: {
CHECK_SRCDST_NCOM;
s390_srag (code, ins->dreg, ins->dreg, src2, 0);
}
break;
case OP_SHR_IMM:
case OP_LSHR_IMM: {
if (ins->sreg1 != ins->dreg) {
s390_lgr (code, ins->dreg, ins->sreg1);
}
s390_srag (code, ins->dreg, ins->dreg, 0, (ins->inst_imm & 0x3f));
}
break;
case OP_SHR_UN_IMM:
case OP_LSHR_UN_IMM: {
if (ins->sreg1 != ins->dreg) {
s390_lgr (code, ins->dreg, ins->sreg1);
}
s390_srlg (code, ins->dreg, ins->dreg, 0, (ins->inst_imm & 0x3f));
}
break;
case OP_LSHR_UN: {
CHECK_SRCDST_NCOM;
s390_srlg (code, ins->dreg, ins->dreg, src2, 0);
}
break;
case OP_LNOT: {
if (ins->sreg1 != ins->dreg) {
s390_lgr (code, ins->dreg, ins->sreg1);
}
s390_lghi (code, s390_r0, -1);
s390_xgr (code, ins->dreg, s390_r0);
}
break;
case OP_LNEG: {
s390_lcgr (code, ins->dreg, ins->sreg1);
}
break;
case OP_LMUL: {
CHECK_SRCDST_COM;
s390_msgr (code, ins->dreg, src2);
}
break;
case OP_MUL_IMM:
case OP_LMUL_IMM: {
if (ins->dreg != ins->sreg1) {
s390_lgr (code, ins->dreg, ins->sreg1);
}
if ((mono_hwcap_s390x_has_gie) &&
(s390_is_imm32 (ins->inst_imm))) {
s390_msgfi (code, ins->dreg, ins->inst_imm);
} else {
if (s390_is_imm16 (ins->inst_imm)) {
s390_lghi (code, s390_r13, ins->inst_imm);
} else if (s390_is_imm32 (ins->inst_imm)) {
s390_lgfi (code, s390_r13, ins->inst_imm);
} else {
S390_SET (code, s390_r13, ins->inst_imm);
}
s390_msgr (code, ins->dreg, s390_r13);
}
}
break;
case OP_LMUL_OVF: {
short int *o[2];
if (mono_hwcap_s390x_has_mie2) {
s390_msgrkc (code, ins->dreg, ins->sreg1, ins->sreg2);
EMIT_COND_SYSTEM_EXCEPTION (S390_CC_OV, "OverflowException");
} else {
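/*
 * No multiply-with-overflow instruction available: compute the
 * product and check that its sign bit matches sign(sreg1) XOR
 * sign(sreg2); a mismatch in bit 63 signals overflow.  A zero
 * operand short-circuits to a zero result.
 */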
s390_ltgr (code, s390_r1, ins->sreg1);
s390_jz (code, 0); CODEPTR(code, o[0]);
s390_ltgr (code, s390_r0, ins->sreg2);
s390_jnz (code, 6);
s390_lghi (code, s390_r1, 0);
s390_j (code, 0); CODEPTR(code, o[1]);
s390_xgr (code, s390_r0, s390_r1);
s390_msgr (code, s390_r1, ins->sreg2);
s390_xgr (code, s390_r0, s390_r1);
s390_srlg (code, s390_r0, s390_r0, 0, 63);
s390_ltgr (code, s390_r0, s390_r0);
EMIT_COND_SYSTEM_EXCEPTION (S390_CC_NZ, "OverflowException");
PTRSLOT (code, o[0]);
PTRSLOT (code, o[1]);
s390_lgr (code, ins->dreg, s390_r1);
}
}
break;
case OP_LMUL_OVF_UN: {
s390_lghi (code, s390_r0, 0);
s390_lgr (code, s390_r1, ins->sreg1);
s390_mlgr (code, s390_r0, ins->sreg2);
s390_ltgr (code, s390_r0, s390_r0);
EMIT_COND_SYSTEM_EXCEPTION (S390_CC_NZ, "OverflowException");
s390_lgr (code, ins->dreg, s390_r1);
}
break;
case OP_IADDCC: {
g_assert_not_reached ();
CHECK_SRCDST_COM_I;
s390_algr (code, ins->dreg, src2);
}
break;
case OP_IADD: {
CHECK_SRCDST_COM_I;
s390_agr (code, ins->dreg, src2);
}
break;
case OP_IADC: {
g_assert_not_reached ();
CHECK_SRCDST_COM_I;
s390_alcgr (code, ins->dreg, src2);
}
break;
case OP_IADD_IMM: {
if (ins->dreg != ins->sreg1) {
s390_lgfr (code, ins->dreg, ins->sreg1);
}
if (s390_is_imm16 (ins->inst_imm)) {
s390_aghi (code, ins->dreg, ins->inst_imm);
} else {
s390_afi (code, ins->dreg, ins->inst_imm);
}
}
break;
case OP_IADC_IMM: {
if (ins->dreg != ins->sreg1) {
s390_lgfr (code, ins->dreg, ins->sreg1);
}
if (s390_is_imm16 (ins->inst_imm)) {
s390_lghi (code, s390_r0, ins->inst_imm);
s390_alcgr (code, ins->dreg, s390_r0);
} else {
S390_SET (code, s390_r0, ins->inst_imm);
s390_alcgr (code, ins->dreg, s390_r0);
}
}
break;
case OP_LADD_OVF:
case OP_S390_LADD_OVF: {
if (mono_hwcap_s390x_has_mlt) {
s390_agrk (code, ins->dreg, ins->sreg1, ins->sreg2);
} else {
CHECK_SRCDST_COM;
s390_agr (code, ins->dreg, src2);
}
EMIT_COND_SYSTEM_EXCEPTION (S390_CC_OV, "OverflowException");
}
break;
case OP_LADD_OVF_UN:
case OP_S390_LADD_OVF_UN: {
if (mono_hwcap_s390x_has_mlt) {
s390_algrk (code, ins->dreg, ins->sreg1, ins->sreg2);
} else {
CHECK_SRCDST_COM;
s390_algr (code, ins->dreg, src2);
}
EMIT_COND_SYSTEM_EXCEPTION (S390_CC_CY, "OverflowException");
}
break;
case OP_ISUBCC: {
if (mono_hwcap_s390x_has_mlt) {
s390_slgrk (code, ins->dreg, ins->sreg1, ins->sreg2);
} else {
CHECK_SRCDST_NCOM_I;
s390_slgr (code, ins->dreg, src2);
}
}
break;
case OP_ISUB: {
if (mono_hwcap_s390x_has_mlt) {
s390_sgrk (code, ins->dreg, ins->sreg1, ins->sreg2);
} else {
CHECK_SRCDST_NCOM_I;
s390_sgr (code, ins->dreg, src2);
}
}
break;
case OP_ISBB: {
CHECK_SRCDST_NCOM_I;
s390_slbgr (code, ins->dreg, src2);
}
break;
case OP_ISUB_IMM: {
if (ins->dreg != ins->sreg1) {
s390_lgfr (code, ins->dreg, ins->sreg1);
}
if (s390_is_imm16 (-ins->inst_imm)) {
s390_aghi (code, ins->dreg, -ins->inst_imm);
} else {
s390_agfi (code, ins->dreg, -ins->inst_imm);
}
}
break;
case OP_ISBB_IMM: {
S390_SET (code, s390_r0, ins->inst_imm);
s390_slgfr (code, ins->dreg, s390_r0);
}
break;
case OP_ISUB_OVF:
case OP_S390_ISUB_OVF: {
if (mono_hwcap_s390x_has_mlt) {
s390_srk (code, ins->dreg, ins->sreg1, ins->sreg2);
EMIT_COND_SYSTEM_EXCEPTION (S390_CC_OV, "OverflowException");
} else {
CHECK_SRCDST_NCOM;
s390_sr (code, ins->dreg, src2);
EMIT_COND_SYSTEM_EXCEPTION (S390_CC_OV, "OverflowException");
s390_lgfr (code, ins->dreg, ins->dreg);
}
}
break;
case OP_ISUB_OVF_UN:
case OP_S390_ISUB_OVF_UN: {
if (mono_hwcap_s390x_has_mlt) {
s390_slrk (code, ins->dreg, ins->sreg1, ins->sreg2);
} else {
CHECK_SRCDST_NCOM;
s390_slr (code, ins->dreg, src2);
}
EMIT_COND_SYSTEM_EXCEPTION (S390_CC_NC, "OverflowException");
s390_llgfr(code, ins->dreg, ins->dreg);
}
break;
case OP_LSUB_OVF:
case OP_S390_LSUB_OVF: {
if (mono_hwcap_s390x_has_mlt) {
s390_sgrk (code, ins->dreg, ins->sreg1, ins->sreg2);
} else {
CHECK_SRCDST_NCOM;
s390_sgr (code, ins->dreg, src2);
}
EMIT_COND_SYSTEM_EXCEPTION (S390_CC_OV, "OverflowException");
}
break;
case OP_LSUB_OVF_UN:
case OP_S390_LSUB_OVF_UN: {
CHECK_SRCDST_NCOM;
s390_slgr (code, ins->dreg, src2);
EMIT_COND_SYSTEM_EXCEPTION (S390_CC_NC, "OverflowException");
}
break;
case OP_IAND: {
if (mono_hwcap_s390x_has_mlt) {
s390_ngrk (code, ins->dreg, ins->sreg1, ins->sreg2);
} else {
CHECK_SRCDST_NCOM_I;
s390_ngr (code, ins->dreg, src2);
}
}
break;
case OP_IAND_IMM: {
S390_SET_MASK (code, s390_r0, ins->inst_imm);
if (mono_hwcap_s390x_has_mlt) {
s390_ngrk (code, ins->dreg, ins->sreg1, s390_r0);
} else {
if (ins->dreg != ins->sreg1) {
s390_lgfr (code, ins->dreg, ins->sreg1);
}
s390_ngr (code, ins->dreg, s390_r0);
}
}
break;
case OP_IDIV: {
s390_lgfr (code, s390_r0, ins->sreg1);
s390_srda (code, s390_r0, 0, 32);
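/* srda sign-extends the dividend through the even/odd r0:r1 pair that "dr" requires */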
s390_dr (code, s390_r0, ins->sreg2);
s390_lgfr (code, ins->dreg, s390_r1);
}
break;
case OP_IDIV_UN: {
s390_lgfr (code, s390_r0, ins->sreg1);
s390_srdl (code, s390_r0, 0, 32);
s390_dlr (code, s390_r0, ins->sreg2);
s390_lgfr (code, ins->dreg, s390_r1);
}
break;
case OP_IDIV_IMM: {
if (s390_is_imm16 (ins->inst_imm)) {
s390_lghi (code, s390_r13, ins->inst_imm);
} else {
s390_lgfi (code, s390_r13, ins->inst_imm);
}
s390_lgfr (code, s390_r0, ins->sreg1);
s390_srda (code, s390_r0, 0, 32);
s390_dr (code, s390_r0, s390_r13); /* divide by the immediate loaded into r13 */
s390_lgfr (code, ins->dreg, s390_r1);
}
break;
case OP_IREM: {
s390_lgfr (code, s390_r0, ins->sreg1);
s390_srda (code, s390_r0, 0, 32);
s390_dr (code, s390_r0, ins->sreg2);
s390_lgfr (code, ins->dreg, s390_r0);
}
break;
case OP_IREM_UN: {
s390_lgfr (code, s390_r0, ins->sreg1);
s390_srdl (code, s390_r0, 0, 32);
s390_dlr (code, s390_r0, ins->sreg2);
s390_lgfr (code, ins->dreg, s390_r0);
}
break;
case OP_IREM_IMM: {
if (s390_is_imm16 (ins->inst_imm)) {
s390_lghi (code, s390_r13, ins->inst_imm);
} else {
s390_lgfi (code, s390_r13, ins->inst_imm);
}
s390_lgfr (code, s390_r0, ins->sreg1);
s390_srda (code, s390_r0, 0, 32);
s390_dr (code, s390_r0, s390_r13); /* divide by the immediate loaded into r13 */
s390_lgfr (code, ins->dreg, s390_r0);
}
break;
case OP_IOR: {
if (mono_hwcap_s390x_has_mlt) {
s390_ogrk (code, ins->dreg, ins->sreg1, ins->sreg2);
} else {
CHECK_SRCDST_COM_I;
s390_ogr (code, ins->dreg, src2);
}
}
break;
case OP_IOR_IMM: {
S390_SET_MASK (code, s390_r0, ins->inst_imm);
if (mono_hwcap_s390x_has_mlt) {
s390_ogrk (code, ins->dreg, ins->sreg1, s390_r0);
} else {
if (ins->dreg != ins->sreg1) {
s390_lgfr (code, ins->dreg, ins->sreg1);
}
s390_ogr (code, ins->dreg, s390_r0);
}
}
break;
case OP_IXOR: {
if (mono_hwcap_s390x_has_mlt) {
s390_xgrk (code, ins->dreg, ins->sreg1, ins->sreg2);
} else {
CHECK_SRCDST_COM_I;
s390_xgr (code, ins->dreg, src2);
}
}
break;
case OP_IXOR_IMM: {
S390_SET_MASK (code, s390_r0, ins->inst_imm);
if (mono_hwcap_s390x_has_mlt) {
s390_xgrk (code, ins->dreg, ins->sreg1, s390_r0);
} else {
if (ins->dreg != ins->sreg1) {
s390_lgfr (code, ins->dreg, ins->sreg1);
}
s390_xgr (code, ins->dreg, s390_r0);
}
}
break;
case OP_ISHL: {
CHECK_SRCDST_NCOM;
s390_sll (code, ins->dreg, src2, 0);
}
break;
case OP_ISHL_IMM: {
if (ins->sreg1 != ins->dreg) {
s390_lgfr (code, ins->dreg, ins->sreg1);
}
s390_sll (code, ins->dreg, 0, (ins->inst_imm & 0x1f));
}
break;
case OP_ISHR: {
CHECK_SRCDST_NCOM;
s390_sra (code, ins->dreg, src2, 0);
}
break;
case OP_ISHR_IMM: {
if (ins->sreg1 != ins->dreg) {
s390_lgfr (code, ins->dreg, ins->sreg1);
}
s390_sra (code, ins->dreg, 0, (ins->inst_imm & 0x1f));
}
break;
case OP_ISHR_UN_IMM: {
if (ins->sreg1 != ins->dreg) {
s390_lgfr (code, ins->dreg, ins->sreg1);
}
s390_srl (code, ins->dreg, 0, (ins->inst_imm & 0x1f));
}
break;
case OP_ISHR_UN: {
CHECK_SRCDST_NCOM;
s390_srl (code, ins->dreg, src2, 0);
}
break;
case OP_INOT: {
if (ins->sreg1 != ins->dreg) {
s390_lgfr (code, ins->dreg, ins->sreg1);
}
s390_lghi (code, s390_r0, -1);
s390_xgr (code, ins->dreg, s390_r0);
}
break;
case OP_INEG: {
s390_lcgr (code, ins->dreg, ins->sreg1);
}
break;
case OP_IMUL: {
CHECK_SRCDST_COM_I;
s390_msr (code, ins->dreg, src2);
}
break;
case OP_IMUL_IMM: {
if (ins->dreg != ins->sreg1) {
s390_lgfr (code, ins->dreg, ins->sreg1);
}
if (s390_is_imm16 (ins->inst_imm)) {
s390_lghi (code, s390_r0, ins->inst_imm);
} else {
s390_lgfi (code, s390_r0, ins->inst_imm);
}
s390_msr (code, ins->dreg, s390_r0);
}
break;
case OP_IMUL_OVF: {
short int *o[2];
if (mono_hwcap_s390x_has_mie2) {
s390_msrkc (code, ins->dreg, ins->sreg1, ins->sreg2);
EMIT_COND_SYSTEM_EXCEPTION (S390_CC_OV, "OverflowException");
s390_lgfr (code, ins->dreg, ins->dreg);
} else {
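			/*
			 * No MSRKC available: if either operand is zero the
			 * result is zero; otherwise multiply and xor the
			 * product's sign with the xor of the operands' signs -
			 * a mismatch indicates 32-bit overflow and raises
			 * OverflowException.
			 */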
s390_ltr (code, s390_r1, ins->sreg1);
s390_jz (code, 0); CODEPTR(code, o[0]);
s390_ltr (code, s390_r0, ins->sreg2);
s390_jnz (code, 6);
s390_lhi (code, s390_r1, 0);
s390_j (code, 0); CODEPTR(code, o[1]);
s390_xr (code, s390_r0, s390_r1);
s390_msr (code, s390_r1, ins->sreg2);
s390_xr (code, s390_r0, s390_r1);
s390_srl (code, s390_r0, 0, 31);
s390_ltr (code, s390_r0, s390_r0);
EMIT_COND_SYSTEM_EXCEPTION (S390_CC_NZ, "OverflowException");
PTRSLOT (code, o[0]);
PTRSLOT (code, o[1]);
s390_lgfr (code, ins->dreg, s390_r1);
}
}
break;
case OP_IMUL_OVF_UN: {
s390_lhi (code, s390_r0, 0);
s390_lr (code, s390_r1, ins->sreg1);
s390_mlr (code, s390_r0, ins->sreg2);
s390_ltr (code, s390_r0, s390_r0);
EMIT_COND_SYSTEM_EXCEPTION (S390_CC_NZ, "OverflowException");
s390_lgfr (code, ins->dreg, s390_r1);
}
break;
case OP_ICONST:
case OP_I8CONST: {
S390_SET (code, ins->dreg, ins->inst_c0);
}
break;
case OP_AOTCONST: {
mono_add_patch_info (cfg, code - cfg->native_code,
(MonoJumpInfoType)ins->inst_i1, ins->inst_p0);
S390_LOAD_TEMPLATE (code, ins->dreg);
}
break;
case OP_JUMP_TABLE: {
mono_add_patch_info (cfg, code - cfg->native_code,
(MonoJumpInfoType)ins->inst_i1, ins->inst_p0);
S390_LOAD_TEMPLATE (code, ins->dreg);
}
break;
case OP_MOVE:
if (ins->dreg != ins->sreg1) {
s390_lgr (code, ins->dreg, ins->sreg1);
}
break;
case OP_LCONV_TO_I:
case OP_LCONV_TO_I8:
case OP_SEXT_I4:
s390_lgfr (code, ins->dreg, ins->sreg1);
break;
case OP_LCONV_TO_I4:
s390_lgfr (code, ins->dreg, ins->sreg1);
break;
case OP_LCONV_TO_U:
case OP_LCONV_TO_U8:
case OP_LCONV_TO_U4:
case OP_ZEXT_I4:
s390_llgfr (code, ins->dreg, ins->sreg1);
break;
case OP_LCONV_TO_OVF_U4:
S390_SET (code, s390_r0, 4294967295);
s390_clgr (code, ins->sreg1, s390_r0);
EMIT_COND_SYSTEM_EXCEPTION (S390_CC_GT, "OverflowException");
s390_ltgr (code, ins->sreg1, ins->sreg1);
EMIT_COND_SYSTEM_EXCEPTION (S390_CC_LT, "OverflowException");
s390_llgfr(code, ins->dreg, ins->sreg1);
break;
case OP_LCONV_TO_OVF_I4_UN:
S390_SET (code, s390_r0, 2147483647);
s390_cgr (code, ins->sreg1, s390_r0);
EMIT_COND_SYSTEM_EXCEPTION (S390_CC_GT, "OverflowException");
s390_ltgr (code, ins->sreg1, ins->sreg1);
EMIT_COND_SYSTEM_EXCEPTION (S390_CC_LT, "OverflowException");
s390_lgfr (code, ins->dreg, ins->sreg1);
break;
case OP_RCONV_TO_R4:
if (ins->dreg != ins->sreg1)
s390_ler (code, ins->dreg, ins->sreg1);
break;
case OP_RCONV_TO_R8:
s390_ldebr (code, ins->dreg, ins->sreg1);
break;
case OP_FMOVE:
if (ins->dreg != ins->sreg1)
s390_ldr (code, ins->dreg, ins->sreg1);
break;
case OP_RMOVE:
if (ins->dreg != ins->sreg1)
s390_ldr (code, ins->dreg, ins->sreg1);
break;
case OP_MOVE_F_TO_I8:
s390_lgdr (code, ins->dreg, ins->sreg1);
break;
case OP_MOVE_I8_TO_F:
s390_ldgr (code, ins->dreg, ins->sreg1);
break;
case OP_MOVE_F_TO_I4:
if (!cfg->r4fp) {
s390_ledbr (code, s390_f0, ins->sreg1);
s390_lgdr (code, ins->dreg, s390_f0);
} else {
s390_lgdr (code, ins->dreg, ins->sreg1);
}
s390_srag (code, ins->dreg, ins->dreg, 0, 32);
break;
case OP_MOVE_I4_TO_F:
s390_slag (code, s390_r0, ins->sreg1, 0, 32);
s390_ldgr (code, ins->dreg, s390_r0);
if (!cfg->r4fp)
s390_ldebr (code, ins->dreg, ins->dreg);
break;
case OP_FCONV_TO_R4:
s390_ledbr (code, ins->dreg, ins->sreg1);
if (!cfg->r4fp)
s390_ldebr (code, ins->dreg, ins->dreg);
break;
case OP_S390_SETF4RET:
if (!cfg->r4fp)
s390_ledbr (code, ins->dreg, ins->sreg1);
else
s390_ldr (code, ins->dreg, ins->sreg1);
break;
case OP_TLS_GET: {
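		/*
		 * The 64-bit thread pointer is assembled from access
		 * registers a0 (high word) and a1 (low word) via EAR; the
		 * TLS offset in r13 is then used as an index off that base.
		 */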
if (s390_is_imm16 (ins->inst_offset)) {
s390_lghi (code, s390_r13, ins->inst_offset);
} else if (s390_is_imm32 (ins->inst_offset)) {
s390_lgfi (code, s390_r13, ins->inst_offset);
} else {
S390_SET (code, s390_r13, ins->inst_offset);
}
s390_ear (code, s390_r1, 0);
s390_sllg(code, s390_r1, s390_r1, 0, 32);
s390_ear (code, s390_r1, 1);
s390_lg (code, ins->dreg, s390_r13, s390_r1, 0);
}
break;
case OP_TLS_SET: {
if (s390_is_imm16 (ins->inst_offset)) {
s390_lghi (code, s390_r13, ins->inst_offset);
} else if (s390_is_imm32 (ins->inst_offset)) {
s390_lgfi (code, s390_r13, ins->inst_offset);
} else {
S390_SET (code, s390_r13, ins->inst_offset);
}
s390_ear (code, s390_r1, 0);
s390_sllg(code, s390_r1, s390_r1, 0, 32);
s390_ear (code, s390_r1, 1);
s390_stg (code, ins->sreg1, s390_r13, s390_r1, 0);
}
break;
case OP_TAILCALL_PARAMETER :
		// This opcode only helps size the subsequent OP_TAILCALL;
		// it contributes no code of its own.
g_assert (ins->next);
break;
case OP_TAILCALL :
case OP_TAILCALL_REG :
case OP_TAILCALL_MEMBASE : {
call = (MonoCallInst *) ins;
/*
* Restore SP to caller's SP
*/
code = backUpStackPtr(cfg, code);
/*
* If the destination is specified as a register or membase then
* save destination so it doesn't get overwritten by the restores
*/
if (ins->opcode != OP_TAILCALL)
s390_lgr (code, s390_r1, ins->sreg1);
/*
* We have to restore R6, so it cannot be used as argument register.
* This is ensured by mono_arch_tailcall_supported, but verify here.
*/
g_assert (!(call->used_iregs & (1 << S390_LAST_ARG_REG)));
/*
* Likewise for the IMT/RGCTX register
*/
g_assert (!(call->used_iregs & (1 << MONO_ARCH_RGCTX_REG)));
g_assert (!(call->rgctx_reg));
/*
* Restore all general registers
*/
s390_lmg (code, s390_r6, s390_r14, STK_BASE, S390_REG_SAVE_OFFSET);
/*
* Restore any FP registers that have been altered
*/
if (cfg->arch.fpSize != 0) {
int fpOffset = -cfg->arch.fpSize;
for (int i = 8; i < 16; i++) {
if (cfg->arch.used_fp_regs & (1 << i)) {
s390_ldy (code, i, 0, STK_BASE, fpOffset);
fpOffset += sizeof(double);
}
}
}
if (ins->opcode == OP_TAILCALL_REG) {
s390_br (code, s390_r1);
} else {
if (ins->opcode == OP_TAILCALL_MEMBASE) {
if (mono_hwcap_s390x_has_mie2) {
s390_bi (code, 0, s390_r1, ins->inst_offset);
} else {
s390_lg (code, s390_r1, 0, s390_r1, ins->inst_offset);
s390_br (code, s390_r1);
}
} else {
mono_add_patch_info_rel (cfg, code - cfg->native_code,
MONO_PATCH_INFO_METHOD_JUMP,
call->method, MONO_R_S390_THUNKED);
S390_BR_TEMPLATE (code, s390_r1);
cfg->thunk_area += THUNK_SIZE;
}
}
}
break;
case OP_CHECK_THIS: {
/* ensure ins->sreg1 is not NULL */
s390_lg (code, s390_r0, 0, ins->sreg1, 0);
s390_ltgr (code, s390_r0, s390_r0);
}
break;
case OP_ARGLIST: {
const int offset = cfg->sig_cookie + cfg->stack_usage;
S390_SET (code, s390_r0, offset);
s390_agr (code, s390_r0, cfg->frame_reg);
s390_stg (code, s390_r0, 0, ins->sreg1, 0);
}
break;
case OP_FCALL: {
call = (MonoCallInst *) ins;
const MonoJumpInfoTarget patch = mono_call_to_patch (call);
code = emit_call (cfg, code, patch.type, patch.target);
if (!cfg->r4fp && call->signature->ret->type == MONO_TYPE_R4)
s390_ldebr (code, s390_f0, s390_f0);
}
break;
case OP_RCALL: {
call = (MonoCallInst *) ins;
const MonoJumpInfoTarget patch = mono_call_to_patch (call);
code = emit_call (cfg, code, patch.type, patch.target);
if (ins->dreg != s390_f0)
s390_ldr (code, ins->dreg, s390_f0);
break;
}
case OP_LCALL:
case OP_VCALL:
case OP_VCALL2:
case OP_VOIDCALL:
case OP_CALL: {
call = (MonoCallInst *) ins;
const MonoJumpInfoTarget patch = mono_call_to_patch (call);
code = emit_call (cfg, code, patch.type, patch.target);
}
break;
case OP_FCALL_REG:
call = (MonoCallInst*)ins;
s390_lgr (code, s390_r1, ins->sreg1);
s390_basr (code, s390_r14, s390_r1);
if (!cfg->r4fp && call->signature->ret->type == MONO_TYPE_R4)
s390_ldebr (code, s390_f0, s390_f0);
break;
case OP_RCALL_REG:
call = (MonoCallInst*)ins;
s390_lgr (code, s390_r1, ins->sreg1);
s390_basr (code, s390_r14, s390_r1);
if (ins->dreg != s390_f0)
s390_ldr (code, ins->dreg, s390_f0);
break;
case OP_LCALL_REG:
case OP_VCALL_REG:
case OP_VCALL2_REG:
case OP_VOIDCALL_REG:
case OP_CALL_REG: {
s390_lgr (code, s390_r1, ins->sreg1);
s390_basr (code, s390_r14, s390_r1);
}
break;
case OP_FCALL_MEMBASE:
call = (MonoCallInst*)ins;
s390_lg (code, s390_r1, 0, ins->sreg1, ins->inst_offset);
s390_basr (code, s390_r14, s390_r1);
if (!cfg->r4fp && call->signature->ret->type == MONO_TYPE_R4)
s390_ldebr (code, s390_f0, s390_f0);
break;
case OP_RCALL_MEMBASE:
call = (MonoCallInst*)ins;
s390_lg (code, s390_r1, 0, ins->sreg1, ins->inst_offset);
s390_basr (code, s390_r14, s390_r1);
if (ins->dreg != s390_f0)
s390_ldr (code, ins->dreg, s390_f0);
break;
case OP_LCALL_MEMBASE:
case OP_VCALL_MEMBASE:
case OP_VCALL2_MEMBASE:
case OP_VOIDCALL_MEMBASE:
case OP_CALL_MEMBASE: {
s390_lg (code, s390_r1, 0, ins->sreg1, ins->inst_offset);
s390_basr (code, s390_r14, s390_r1);
}
break;
case OP_LOCALLOC: {
int area_offset;
if (cfg->param_area == 0)
area_offset = S390_MINIMAL_STACK_SIZE;
else
area_offset = cfg->param_area;
area_offset = S390_ALIGN(area_offset, S390_STACK_ALIGNMENT);
/* Get current backchain pointer */
s390_lg (code, s390_r13, 0, STK_BASE, 0);
/*
* Round object size to doubleword
*/
s390_lgr (code, s390_r1, ins->sreg1);
s390_aghi (code, s390_r1, 7);
s390_srlg (code, s390_r1, s390_r1, 0, 3);
s390_sllg (code, s390_r1, s390_r1, 0, 3);
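		/*
		 * Grow the stack one 4K page at a time, storing into each
		 * new page so a kernel guard page can never be skipped over.
		 */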
if (mono_hwcap_s390x_has_gie) {
if (ins->flags & MONO_INST_INIT)
s390_lgr (code, s390_r0, s390_r1);
s390_risbg (code, ins->dreg, s390_r1, 0, 0xb3, 0);
s390_sgrk (code, ins->dreg, STK_BASE, ins->dreg);
s390_cgr (code, STK_BASE, ins->dreg); /* L0: */
s390_je (code, 9); /* je L1 */
s390_aghi (code, STK_BASE, -4096);
s390_mvghi (code, s390_r15, 0, 0);
s390_j (code, -9); /* j L0 */
s390_risbg (code, ins->dreg, s390_r1, 0x34, 0xbf, 0); /* L1: */
s390_ltgr (code, ins->dreg, ins->dreg);
s390_jz (code, 13); /* jz L2: */
s390_sgr (code, STK_BASE, ins->dreg);
s390_risbg (code, s390_r1, s390_r1, 0x34, 0xbf, 0);
s390_lay (code, s390_r1, s390_r1, STK_BASE, -8);
s390_mvghi (code, s390_r1, 0, 0);
/* L2: */
} else {
s390_lgr (code, ins->dreg, s390_r1);
s390_nill (code, ins->dreg, 0xf000);
s390_lgr (code, s390_r0, STK_BASE);
s390_sgr (code, s390_r0, ins->dreg);
s390_lgr (code, ins->dreg, s390_r0);
s390_cgr (code, STK_BASE, ins->dreg); /* L0: */
s390_je (code, 11); /* je L1 */
s390_aghi (code, STK_BASE, -4096);
s390_lghi (code, s390_r0, 0);
s390_stg (code, s390_r0, 0, STK_BASE, 4088);
s390_j (code, -11); /* j L0 */
s390_lghi (code, ins->dreg, 4095); /* L1: */
s390_ngr (code, ins->dreg, s390_r1);
s390_ltgr (code, ins->dreg, ins->dreg);
s390_jz (code, 7); /* jz L2 */
s390_sgr (code, STK_BASE, ins->dreg);
s390_stg (code, ins->dreg, s390_r1, STK_BASE, -8);
/* L2: */
if (ins->flags & MONO_INST_INIT)
s390_lgr (code, s390_r0, s390_r1);
}
/*
* Compute address of localloc'd object
*/
s390_lgr (code, s390_r1, STK_BASE);
if (s390_is_imm16(area_offset))
s390_aghi (code, s390_r1, area_offset);
else
s390_agfi (code, s390_r1, area_offset);
s390_aghi (code, s390_r1, 7);
s390_srlg (code, s390_r1, s390_r1, 0, 3);
s390_sllg (code, s390_r1, s390_r1, 0, 3);
s390_lgr (code, ins->dreg, s390_r1);
/* Save backchain pointer */
s390_stg (code, s390_r13, 0, STK_BASE, 0);
/*
* If we need to zero the area then clear from localloc start
* using the length we saved earlier
*/
if (ins->flags & MONO_INST_INIT) {
s390_lgr (code, s390_r1, s390_r0);
s390_lgr (code, s390_r0, ins->dreg);
s390_lgr (code, s390_r14, s390_r12);
s390_lghi (code, s390_r13, 0);
s390_mvcle(code, s390_r0, s390_r12, 0, 0);
s390_jo (code, -2);
s390_lgr (code, s390_r12, s390_r14);
}
/*
* If we have an LMF then we have to adjust its BP
*/
if (cfg->method->save_lmf) {
int lmfOffset = cfg->stack_usage - sizeof(MonoLMF);
if (s390_is_imm16(lmfOffset)) {
s390_lghi (code, s390_r13, lmfOffset);
} else if (s390_is_imm32(lmfOffset)) {
s390_lgfi (code, s390_r13, lmfOffset);
} else {
S390_SET (code, s390_r13, lmfOffset);
}
s390_stg (code, s390_r15, s390_r13, cfg->frame_reg,
MONO_STRUCT_OFFSET(MonoLMF, ebp));
}
}
break;
case OP_THROW: {
s390_lgr (code, s390_r2, ins->sreg1);
code = emit_call (cfg, code, MONO_PATCH_INFO_JIT_ICALL_ID,
GUINT_TO_POINTER (MONO_JIT_ICALL_mono_arch_throw_exception));
}
break;
case OP_RETHROW: {
s390_lgr (code, s390_r2, ins->sreg1);
code = emit_call (cfg, code, MONO_PATCH_INFO_JIT_ICALL_ID,
GUINT_TO_POINTER (MONO_JIT_ICALL_mono_arch_rethrow_exception));
}
break;
case OP_START_HANDLER: {
MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
S390_LONG (code, stg, stg, s390_r14, 0,
spvar->inst_basereg,
spvar->inst_offset);
}
break;
case OP_ENDFILTER: {
MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
if (ins->sreg1 != s390_r2)
s390_lgr(code, s390_r2, ins->sreg1);
S390_LONG (code, lg, lg, s390_r14, 0,
spvar->inst_basereg,
spvar->inst_offset);
s390_br (code, s390_r14);
}
break;
case OP_ENDFINALLY: {
MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
S390_LONG (code, lg, lg, s390_r14, 0,
spvar->inst_basereg,
spvar->inst_offset);
s390_br (code, s390_r14);
}
break;
case OP_CALL_HANDLER: {
mono_add_patch_info_rel (cfg, code-cfg->native_code,
MONO_PATCH_INFO_BB, ins->inst_target_bb,
MONO_R_S390_DIRECT);
s390_brasl (code, s390_r14, 0);
for (GList *tmp = ins->inst_eh_blocks; tmp != bb->clause_holes; tmp = tmp->prev)
mono_cfg_add_try_hole (cfg, ((MonoLeaveClause *) tmp->data)->clause, code, bb);
}
break;
case OP_LABEL: {
ins->inst_c0 = code - cfg->native_code;
}
break;
case OP_RELAXED_NOP:
case OP_NOP:
case OP_DUMMY_USE:
case OP_DUMMY_ICONST:
case OP_DUMMY_I8CONST:
case OP_DUMMY_R8CONST:
case OP_DUMMY_R4CONST:
case OP_NOT_REACHED:
case OP_NOT_NULL: {
}
break;
case OP_IL_SEQ_POINT:
mono_add_seq_point (cfg, bb, ins, code - cfg->native_code);
break;
case OP_SEQ_POINT: {
MonoInst *var;
RI_Format *o[2];
guint16 displace;
if (cfg->compile_aot)
NOT_IMPLEMENTED;
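		/*
		 * Single stepping: load the ss trampoline address from its
		 * variable and, if it is non-NULL, call it with the frame
		 * pointer in r1 so the debugger can intercept this point.
		 */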
if (ins->flags & MONO_INST_SINGLE_STEP_LOC) {
var = cfg->arch.ss_tramp_var;
s390_lg (code, s390_r1, 0, var->inst_basereg, var->inst_offset);
if (mono_hwcap_s390x_has_eif) {
s390_ltg (code, s390_r14, 0, s390_r1, 0);
} else {
s390_lg (code, s390_r14, 0, s390_r1, 0);
s390_ltgr (code, s390_r14, s390_r14);
}
o[0] = (RI_Format *) code;
s390_jz (code, 4);
s390_lgr (code, s390_r1, cfg->frame_reg);
s390_basr (code, s390_r14, s390_r14);
displace = ((uintptr_t) code - (uintptr_t) o[0]) / 2;
o[0]->i2 = displace;
}
/*
		 * This is the address which is saved in seq points.
*/
mono_add_seq_point (cfg, bb, ins, code - cfg->native_code);
var = cfg->arch.bp_tramp_var;
s390_lghi (code, s390_r1, 0);
s390_ltgr (code, s390_r1, s390_r1);
o[0] = (RI_Format *) code;
s390_jz (code, 0);
s390_lg (code, s390_r1, 0, var->inst_basereg, var->inst_offset);
if (mono_hwcap_s390x_has_eif) {
s390_ltg (code, s390_r14, 0, s390_r1, 0);
} else {
s390_lg (code, s390_r1, 0, s390_r1, 0);
s390_ltgr (code, s390_r14, s390_r1);
}
o[1] = (RI_Format *) code;
s390_jz (code, 4);
s390_lgr (code, s390_r1, cfg->frame_reg);
s390_basr (code, s390_r14, s390_r14);
displace = ((uintptr_t) code - (uintptr_t) o[0]) / 2;
o[0]->i2 = displace;
displace = ((uintptr_t) code - (uintptr_t) o[1]) / 2;
o[1]->i2 = displace;
/*
* Add an additional nop so skipping the bp doesn't cause the ip to point
* to another IL offset.
*/
s390_nop (code);
break;
}
case OP_GENERIC_CLASS_INIT: {
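		/*
		 * Test the 'initialized' bit of the MonoVTable with TM; if
		 * it is already set, branch around the initialization icall.
		 */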
static int byte_offset = -1;
static guint8 bitmask;
short int *jump;
g_assert (ins->sreg1 == S390_FIRST_ARG_REG);
if (byte_offset < 0)
mono_marshal_find_bitfield_offset (MonoVTable, initialized, &byte_offset, &bitmask);
s390_tm (code, ins->sreg1, byte_offset, bitmask);
s390_jo (code, 0); CODEPTR(code, jump);
code = emit_call (cfg, code, MONO_PATCH_INFO_JIT_ICALL_ID,
GUINT_TO_POINTER (MONO_JIT_ICALL_mono_generic_class_init));
PTRSLOT (code, jump);
ins->flags |= MONO_INST_GC_CALLSITE;
ins->backend.pc_offset = code - cfg->native_code;
break;
}
case OP_BR:
EMIT_UNCOND_BRANCH(ins);
break;
case OP_BR_REG: {
s390_br (code, ins->sreg1);
}
break;
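	/*
	 * Boolean materialization: the condition code was set by the
	 * preceding compare; load 1, then conditionally skip over the
	 * load of 0.
	 */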
case OP_CEQ:
case OP_ICEQ:
case OP_LCEQ: {
s390_lghi(code, ins->dreg, 1);
s390_jz (code, 4);
s390_lghi(code, ins->dreg, 0);
}
break;
case OP_CLT:
case OP_ICLT:
case OP_LCLT: {
s390_lghi(code, ins->dreg, 1);
s390_jl (code, 4);
s390_lghi(code, ins->dreg, 0);
}
break;
case OP_CLT_UN:
case OP_ICLT_UN:
case OP_LCLT_UN: {
s390_lghi(code, ins->dreg, 1);
s390_jlo (code, 4);
s390_lghi(code, ins->dreg, 0);
}
break;
case OP_CGT:
case OP_ICGT:
case OP_LCGT: {
s390_lghi(code, ins->dreg, 1);
s390_jh (code, 4);
s390_lghi(code, ins->dreg, 0);
}
break;
case OP_CGT_UN:
case OP_ICGT_UN:
case OP_LCGT_UN: {
s390_lghi(code, ins->dreg, 1);
s390_jho (code, 4);
s390_lghi(code, ins->dreg, 0);
}
break;
case OP_ICNEQ: {
s390_lghi(code, ins->dreg, 1);
s390_jne (code, 4);
s390_lghi(code, ins->dreg, 0);
}
break;
case OP_ICGE: {
s390_lghi(code, ins->dreg, 1);
s390_jhe (code, 4);
s390_lghi(code, ins->dreg, 0);
}
break;
case OP_ICLE: {
s390_lghi(code, ins->dreg, 1);
s390_jle (code, 4);
s390_lghi(code, ins->dreg, 0);
}
break;
case OP_ICGE_UN: {
s390_lghi(code, ins->dreg, 1);
s390_jhe (code, 4);
s390_lghi(code, ins->dreg, 0);
}
break;
case OP_ICLE_UN: {
s390_lghi(code, ins->dreg, 1);
s390_jle (code, 4);
s390_lghi(code, ins->dreg, 0);
}
break;
case OP_COND_EXC_EQ:
case OP_COND_EXC_IEQ:
EMIT_COND_SYSTEM_EXCEPTION (S390_CC_EQ, ins->inst_p1);
break;
case OP_COND_EXC_NE_UN:
case OP_COND_EXC_INE_UN:
EMIT_COND_SYSTEM_EXCEPTION (S390_CC_NE, ins->inst_p1);
break;
case OP_COND_EXC_LT:
case OP_COND_EXC_ILT:
case OP_COND_EXC_LT_UN:
case OP_COND_EXC_ILT_UN:
EMIT_COND_SYSTEM_EXCEPTION (S390_CC_LT, ins->inst_p1);
break;
case OP_COND_EXC_GT:
case OP_COND_EXC_IGT:
case OP_COND_EXC_GT_UN:
case OP_COND_EXC_IGT_UN:
EMIT_COND_SYSTEM_EXCEPTION (S390_CC_GT, ins->inst_p1);
break;
case OP_COND_EXC_GE:
case OP_COND_EXC_IGE:
case OP_COND_EXC_GE_UN:
case OP_COND_EXC_IGE_UN:
EMIT_COND_SYSTEM_EXCEPTION (S390_CC_GE, ins->inst_p1);
break;
case OP_COND_EXC_LE:
case OP_COND_EXC_ILE:
case OP_COND_EXC_LE_UN:
case OP_COND_EXC_ILE_UN:
EMIT_COND_SYSTEM_EXCEPTION (S390_CC_LE, ins->inst_p1);
break;
case OP_COND_EXC_OV:
case OP_COND_EXC_IOV:
EMIT_COND_SYSTEM_EXCEPTION (S390_CC_OV, ins->inst_p1);
break;
case OP_COND_EXC_NO:
case OP_COND_EXC_INO:
EMIT_COND_SYSTEM_EXCEPTION (S390_CC_NO, ins->inst_p1);
break;
case OP_COND_EXC_C:
case OP_COND_EXC_IC:
EMIT_COND_SYSTEM_EXCEPTION (S390_CC_CY, ins->inst_p1);
break;
case OP_COND_EXC_NC:
case OP_COND_EXC_INC:
EMIT_COND_SYSTEM_EXCEPTION (S390_CC_NC, ins->inst_p1);
break;
case OP_LBEQ:
case OP_IBEQ:
EMIT_COND_BRANCH (ins, S390_CC_EQ);
break;
case OP_LBNE_UN:
case OP_IBNE_UN:
EMIT_COND_BRANCH (ins, S390_CC_NE);
break;
case OP_LBLT:
case OP_LBLT_UN:
case OP_IBLT:
case OP_IBLT_UN:
EMIT_COND_BRANCH (ins, S390_CC_LT);
break;
case OP_LBGT:
case OP_LBGT_UN:
case OP_IBGT:
case OP_IBGT_UN:
EMIT_COND_BRANCH (ins, S390_CC_GT);
break;
case OP_LBGE:
case OP_LBGE_UN:
case OP_IBGE:
case OP_IBGE_UN:
EMIT_COND_BRANCH (ins, S390_CC_GE);
break;
case OP_LBLE:
case OP_LBLE_UN:
case OP_IBLE:
case OP_IBLE_UN:
EMIT_COND_BRANCH (ins, S390_CC_LE);
break;
case OP_S390_CRJ:
EMIT_COMP_AND_BRANCH(ins, crj, cr);
break;
case OP_S390_CLRJ:
EMIT_COMP_AND_BRANCH(ins, clrj, clr);
break;
case OP_S390_CGRJ:
EMIT_COMP_AND_BRANCH(ins, cgrj, cgr);
break;
case OP_S390_CLGRJ:
EMIT_COMP_AND_BRANCH(ins, clgrj, clgr);
break;
case OP_S390_CIJ:
EMIT_COMP_AND_BRANCH_IMM(ins, crj, cr, ltr, FALSE);
break;
case OP_S390_CLIJ:
EMIT_COMP_AND_BRANCH_IMM(ins, clrj, clr, ltr, TRUE);
break;
case OP_S390_CGIJ:
EMIT_COMP_AND_BRANCH_IMM(ins, cgrj, cgr, ltgr, FALSE);
break;
case OP_S390_CLGIJ:
EMIT_COMP_AND_BRANCH_IMM(ins, clgrj, clgr, ltgr, TRUE);
break;
/* floating point opcodes */
case OP_R8CONST: {
double d = *(double *) ins->inst_p0;
if (d == 0) {
s390_lzdr (code, ins->dreg);
if (mono_signbit (d) != 0)
s390_lndbr (code, ins->dreg, ins->dreg);
} else {
S390_SET (code, s390_r13, ins->inst_p0);
s390_ld (code, ins->dreg, 0, s390_r13, 0);
}
}
break;
case OP_R4CONST: {
float f = *(float *) ins->inst_p0;
if (f == 0) {
if (cfg->r4fp) {
s390_lzer (code, ins->dreg);
if (mono_signbit (f) != 0)
s390_lnebr (code, ins->dreg, ins->dreg);
} else {
s390_lzdr (code, ins->dreg);
if (mono_signbit (f) != 0)
s390_lndbr (code, ins->dreg, ins->dreg);
}
} else {
S390_SET (code, s390_r13, ins->inst_p0);
s390_le (code, ins->dreg, 0, s390_r13, 0);
			if (!cfg->r4fp)
				s390_ldebr (code, ins->dreg, ins->dreg);
}
}
break;
case OP_STORER8_MEMBASE_REG: {
S390_LONG (code, stdy, std, ins->sreg1, 0,
ins->inst_destbasereg, ins->inst_offset);
}
break;
case OP_LOADR8_MEMBASE: {
S390_LONG (code, ldy, ld, ins->dreg, 0,
ins->inst_basereg, ins->inst_offset);
}
break;
case OP_STORER4_MEMBASE_REG: {
if (cfg->r4fp) {
S390_LONG (code, stey, ste, ins->sreg1, 0,
ins->inst_destbasereg, ins->inst_offset);
} else {
s390_ledbr (code, ins->sreg1, ins->sreg1);
S390_LONG (code, stey, ste, ins->sreg1, 0,
ins->inst_destbasereg, ins->inst_offset);
s390_ldebr (code, ins->sreg1, ins->sreg1);
}
}
break;
case OP_LOADR4_MEMBASE: {
if (cfg->r4fp) {
S390_LONG (code, ley, le, ins->dreg, 0,
ins->inst_basereg, ins->inst_offset);
} else {
S390_LONG (code, ley, le, ins->dreg, 0,
ins->inst_basereg, ins->inst_offset);
s390_ldebr (code, ins->dreg, ins->dreg);
}
}
break;
case OP_ICONV_TO_R_UN: {
if (mono_hwcap_s390x_has_fpe) {
s390_cdlfbr (code, ins->dreg, 5, ins->sreg1, 0);
} else {
s390_llgfr (code, s390_r0, ins->sreg1);
s390_cdgbr (code, ins->dreg, s390_r0);
}
}
break;
case OP_LCONV_TO_R_UN: {
if (mono_hwcap_s390x_has_fpe) {
s390_cdlgbr (code, ins->dreg, 6, ins->sreg1, 0);
} else {
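			/*
			 * No convert-from-logical instruction: convert as signed
			 * to extended precision, and for negative inputs add
			 * 2^64 (built in the f13/f15 pair; 0x403f000000000000 is
			 * its top half in extended BFP) so the value reads as
			 * unsigned, then round the result back down to double.
			 */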
short int *jump;
s390_lgdr (code, s390_r0, s390_r15);
s390_lgdr (code, s390_r1, s390_r13);
s390_lgdr (code, s390_r14, s390_r12);
s390_cxgbr (code, s390_f12, ins->sreg1);
s390_ltgr (code, ins->sreg1, ins->sreg1);
s390_jnl (code, 0); CODEPTR(code, jump);
S390_SET (code, s390_r13, 0x403f000000000000llu);
s390_lgdr (code, s390_f13, s390_r13);
s390_lzdr (code, s390_f15);
s390_axbr (code, s390_f12, s390_f13);
PTRSLOT(code, jump);
s390_ldxbr (code, s390_f13, s390_f12);
s390_ldr (code, ins->dreg, s390_f13);
s390_ldgr (code, s390_f12, s390_r14);
s390_ldgr (code, s390_f13, s390_r1);
s390_ldgr (code, s390_f15, s390_r0);
}
}
break;
case OP_ICONV_TO_R4:
s390_cefbr (code, ins->dreg, ins->sreg1);
if (!cfg->r4fp)
s390_ldebr (code, ins->dreg, ins->dreg);
break;
case OP_LCONV_TO_R4:
s390_cegbr (code, ins->dreg, ins->sreg1);
if (!cfg->r4fp)
s390_ldebr (code, ins->dreg, ins->dreg);
break;
case OP_ICONV_TO_R8:
s390_cdfbr (code, ins->dreg, ins->sreg1);
break;
case OP_LCONV_TO_R8:
s390_cdgbr (code, ins->dreg, ins->sreg1);
break;
case OP_FCONV_TO_I1:
s390_cgdbr (code, ins->dreg, 5, ins->sreg1);
s390_ltgr (code, ins->dreg, ins->dreg);
s390_jnl (code, 4);
s390_oill (code, ins->dreg, 0x80);
s390_lghi (code, s390_r0, 0xff);
s390_ngr (code, ins->dreg, s390_r0);
break;
case OP_FCONV_TO_U1:
if (mono_hwcap_s390x_has_fpe) {
s390_clgdbr (code, ins->dreg, 5, ins->sreg1, 0);
s390_lghi (code, s390_r0, 0xff);
s390_ngr (code, ins->dreg, s390_r0);
} else {
code = emit_double_to_int (cfg, code, ins->dreg, ins->sreg1, 1, FALSE);
}
break;
case OP_FCONV_TO_I2:
s390_cgdbr (code, ins->dreg, 5, ins->sreg1);
s390_ltgr (code, ins->dreg, ins->dreg);
s390_jnl (code, 4);
s390_oill (code, ins->dreg, 0x8000);
s390_llill (code, s390_r0, 0xffff);
s390_ngr (code, ins->dreg, s390_r0);
break;
case OP_FCONV_TO_U2:
if (mono_hwcap_s390x_has_fpe) {
s390_clgdbr (code, ins->dreg, 5, ins->sreg1, 0);
s390_llill (code, s390_r0, 0xffff);
s390_ngr (code, ins->dreg, s390_r0);
} else {
code = emit_double_to_int (cfg, code, ins->dreg, ins->sreg1, 2, FALSE);
}
break;
case OP_FCONV_TO_I4:
s390_cfdbr (code, ins->dreg, 5, ins->sreg1);
break;
case OP_FCONV_TO_U4:
if (mono_hwcap_s390x_has_fpe) {
s390_clgdbr (code, ins->dreg, 5, ins->sreg1, 0);
} else {
code = emit_double_to_int (cfg, code, ins->dreg, ins->sreg1, 4, FALSE);
}
break;
case OP_FCONV_TO_I8:
case OP_FCONV_TO_I:
s390_cgdbr (code, ins->dreg, 5, ins->sreg1);
break;
case OP_FCONV_TO_U8:
if (mono_hwcap_s390x_has_fpe) {
s390_clgdbr (code, ins->dreg, 5, ins->sreg1, 0);
} else {
code = emit_double_to_int (cfg, code, ins->dreg, ins->sreg1, 8, FALSE);
}
break;
case OP_RCONV_TO_I1:
s390_cgebr (code, ins->dreg, 5, ins->sreg1);
s390_ltgr (code, ins->dreg, ins->dreg);
s390_jnl (code, 4);
s390_oill (code, ins->dreg, 0x80);
s390_lghi (code, s390_r0, 0xff);
s390_ngr (code, ins->dreg, s390_r0);
break;
case OP_RCONV_TO_U1:
if (mono_hwcap_s390x_has_fpe) {
s390_clgebr (code, ins->dreg, 5, ins->sreg1, 0);
s390_lghi (code, s390_r0, 0xff);
s390_ngr (code, ins->dreg, s390_r0);
} else {
code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 1, FALSE);
}
break;
case OP_RCONV_TO_I2:
s390_cgebr (code, ins->dreg, 5, ins->sreg1);
s390_ltgr (code, ins->dreg, ins->dreg);
s390_jnl (code, 4);
s390_oill (code, ins->dreg, 0x8000);
s390_llill (code, s390_r0, 0xffff);
s390_ngr (code, ins->dreg, s390_r0);
break;
case OP_RCONV_TO_U2:
if (mono_hwcap_s390x_has_fpe) {
s390_clgebr (code, ins->dreg, 5, ins->sreg1, 0);
s390_llill (code, s390_r0, 0xffff);
s390_ngr (code, ins->dreg, s390_r0);
} else {
code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 2, FALSE);
}
break;
case OP_RCONV_TO_I4:
s390_cfebr (code, ins->dreg, 5, ins->sreg1);
break;
case OP_RCONV_TO_U4:
if (mono_hwcap_s390x_has_fpe) {
s390_clfebr (code, ins->dreg, 5, ins->sreg1, 0);
} else {
code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 4, FALSE);
}
break;
case OP_RCONV_TO_I8:
case OP_RCONV_TO_I:
s390_cgebr (code, ins->dreg, 5, ins->sreg1);
break;
case OP_RCONV_TO_U8:
if (mono_hwcap_s390x_has_fpe) {
s390_clgebr (code, ins->dreg, 5, ins->sreg1, 0);
} else {
code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 8, FALSE);
}
break;
case OP_LCONV_TO_OVF_I: {
		/* Valid ints: 0xffffffff:80000000 to 0x00000000:7fffffff */
short int *o[5];
s390_ltgr (code, ins->sreg2, ins->sreg2);
s390_jnl (code, 0); CODEPTR(code, o[0]);
s390_ltgr (code, ins->sreg1, ins->sreg1);
s390_jnl (code, 0); CODEPTR(code, o[1]);
s390_lhi (code, s390_r13, -1);
s390_cgr (code, ins->sreg1, s390_r13);
s390_jnz (code, 0); CODEPTR(code, o[2]);
if (ins->dreg != ins->sreg2)
s390_lgr (code, ins->dreg, ins->sreg2);
s390_j (code, 0); CODEPTR(code, o[3]);
PTRSLOT(code, o[0]);
s390_jz (code, 0); CODEPTR(code, o[4]);
PTRSLOT(code, o[1]);
PTRSLOT(code, o[2]);
mono_add_patch_info (cfg, code - cfg->native_code,
MONO_PATCH_INFO_EXC, "OverflowException");
s390_brasl (code, s390_r14, 0);
PTRSLOT(code, o[3]);
PTRSLOT(code, o[4]);
}
break;
case OP_ABS:
s390_lpdbr (code, ins->dreg, ins->sreg1);
break;
case OP_ABSF:
s390_lpebr (code, ins->dreg, ins->sreg1);
break;
case OP_CEIL:
s390_fidbra (code, ins->dreg, 6, ins->sreg1, 4);
break;
case OP_CEILF:
s390_fiebra (code, ins->dreg, 6, ins->sreg1, 4);
break;
case OP_FLOOR:
s390_fidbra (code, ins->dreg, 7, ins->sreg1, 4);
break;
case OP_FLOORF:
s390_fiebra (code, ins->dreg, 7, ins->sreg1, 4);
break;
case OP_FCOPYSIGN:
s390_cpsdr (code, ins->dreg, ins->sreg2, ins->sreg1);
break;
case OP_ROUND:
s390_fidbra (code, ins->dreg, 4, ins->sreg1, 4);
break;
case OP_SQRT:
s390_sqdbr (code, ins->dreg, ins->sreg1);
break;
case OP_SQRTF:
s390_sqebr (code, ins->dreg, ins->sreg1);
break;
case OP_TRUNC:
s390_fidbra (code, ins->dreg, 5, ins->sreg1, 4);
break;
case OP_TRUNCF:
s390_fiebra (code, ins->dreg, 5, ins->sreg1, 4);
break;
case OP_FADD: {
CHECK_SRCDST_COM_F;
s390_adbr (code, ins->dreg, src2);
}
break;
case OP_RADD: {
CHECK_SRCDST_COM_F;
s390_aebr (code, ins->dreg, src2);
}
break;
case OP_FSUB: {
CHECK_SRCDST_NCOM_F(sdbr);
}
break;
case OP_RSUB: {
CHECK_SRCDST_NCOM_F(sebr);
}
break;
case OP_FMUL: {
CHECK_SRCDST_COM_F;
s390_mdbr (code, ins->dreg, src2);
}
break;
case OP_RMUL: {
CHECK_SRCDST_COM_F;
s390_meer (code, ins->dreg, src2);
}
break;
case OP_FDIV: {
CHECK_SRCDST_NCOM_F(ddbr);
}
break;
case OP_RDIV: {
CHECK_SRCDST_NCOM_F(debr);
}
break;
case OP_FNEG: {
s390_lcdbr (code, ins->dreg, ins->sreg1);
}
break;
case OP_RNEG: {
s390_lcebr (code, ins->dreg, ins->sreg1);
}
break;
case OP_FREM: {
CHECK_SRCDST_NCOM_FR(didbr, 5);
}
break;
case OP_RREM: {
CHECK_SRCDST_NCOM_FR(diebr, 5);
}
break;
case OP_FCOMPARE: {
s390_cdbr (code, ins->sreg1, ins->sreg2);
}
break;
case OP_RCOMPARE: {
s390_cebr (code, ins->sreg1, ins->sreg2);
}
break;
case OP_FCEQ: {
s390_cdbr (code, ins->sreg1, ins->sreg2);
s390_lghi (code, ins->dreg, 1);
s390_je (code, 4);
s390_lghi (code, ins->dreg, 0);
}
break;
case OP_FCLT: {
s390_cdbr (code, ins->sreg1, ins->sreg2);
s390_lghi (code, ins->dreg, 1);
s390_jl (code, 4);
s390_lghi (code, ins->dreg, 0);
}
break;
case OP_FCLT_UN: {
s390_cdbr (code, ins->sreg1, ins->sreg2);
s390_lghi (code, ins->dreg, 1);
s390_jlo (code, 4);
s390_lghi (code, ins->dreg, 0);
}
break;
case OP_FCGT: {
s390_cdbr (code, ins->sreg1, ins->sreg2);
s390_lghi (code, ins->dreg, 1);
s390_jh (code, 4);
s390_lghi (code, ins->dreg, 0);
}
break;
case OP_FCGT_UN: {
s390_cdbr (code, ins->sreg1, ins->sreg2);
s390_lghi (code, ins->dreg, 1);
s390_jho (code, 4);
s390_lghi (code, ins->dreg, 0);
}
break;
case OP_FCNEQ: {
s390_cdbr (code, ins->sreg1, ins->sreg2);
s390_lghi (code, ins->dreg, 1);
s390_jne (code, 4);
s390_lghi (code, ins->dreg, 0);
}
break;
case OP_FCGE: {
s390_cdbr (code, ins->sreg1, ins->sreg2);
s390_lghi (code, ins->dreg, 1);
s390_jhe (code, 4);
s390_lghi (code, ins->dreg, 0);
}
break;
case OP_FCLE: {
s390_cdbr (code, ins->sreg1, ins->sreg2);
s390_lghi (code, ins->dreg, 1);
s390_jle (code, 4);
s390_lghi (code, ins->dreg, 0);
}
break;
case OP_RCEQ: {
if (cfg->r4fp)
s390_cebr (code, ins->sreg1, ins->sreg2);
else
s390_cdbr (code, ins->sreg1, ins->sreg2);
s390_lghi (code, ins->dreg, 1);
s390_je (code, 4);
s390_lghi (code, ins->dreg, 0);
}
break;
case OP_RCLT: {
if (cfg->r4fp)
s390_cebr (code, ins->sreg1, ins->sreg2);
else
s390_cdbr (code, ins->sreg1, ins->sreg2);
s390_lghi (code, ins->dreg, 1);
s390_jl (code, 4);
s390_lghi (code, ins->dreg, 0);
}
break;
case OP_RCLT_UN: {
if (cfg->r4fp)
s390_cebr (code, ins->sreg1, ins->sreg2);
else
s390_cdbr (code, ins->sreg1, ins->sreg2);
s390_lghi (code, ins->dreg, 1);
s390_jlo (code, 4);
s390_lghi (code, ins->dreg, 0);
}
break;
case OP_RCGT: {
if (cfg->r4fp)
s390_cebr (code, ins->sreg1, ins->sreg2);
else
s390_cdbr (code, ins->sreg1, ins->sreg2);
s390_lghi (code, ins->dreg, 1);
s390_jh (code, 4);
s390_lghi (code, ins->dreg, 0);
}
break;
case OP_RCGT_UN: {
if (cfg->r4fp)
s390_cebr (code, ins->sreg1, ins->sreg2);
else
s390_cdbr (code, ins->sreg1, ins->sreg2);
s390_lghi (code, ins->dreg, 1);
s390_jho (code, 4);
s390_lghi (code, ins->dreg, 0);
}
break;
case OP_RCNEQ: {
if (cfg->r4fp)
s390_cebr (code, ins->sreg1, ins->sreg2);
else
s390_cdbr (code, ins->sreg1, ins->sreg2);
s390_lghi (code, ins->dreg, 1);
s390_jne (code, 4);
s390_lghi (code, ins->dreg, 0);
}
break;
case OP_RCGE: {
if (cfg->r4fp)
s390_cebr (code, ins->sreg1, ins->sreg2);
else
s390_cdbr (code, ins->sreg1, ins->sreg2);
s390_lghi (code, ins->dreg, 1);
s390_jhe (code, 4);
s390_lghi (code, ins->dreg, 0);
}
break;
case OP_RCLE: {
if (cfg->r4fp)
s390_cebr (code, ins->sreg1, ins->sreg2);
else
s390_cdbr (code, ins->sreg1, ins->sreg2);
s390_lghi (code, ins->dreg, 1);
s390_jle (code, 4);
s390_lghi (code, ins->dreg, 0);
}
break;
case OP_FBEQ: {
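		/*
		 * An unordered compare (NaN operand) sets CC 3; branch
		 * around the ordered branch so NaN never satisfies it.
		 */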
short *o;
s390_jo (code, 0); CODEPTR(code, o);
EMIT_COND_BRANCH (ins, S390_CC_EQ);
PTRSLOT (code, o);
}
break;
case OP_FBNE_UN:
EMIT_COND_BRANCH (ins, S390_CC_NE|S390_CC_OV);
break;
case OP_FBLT: {
short *o;
s390_jo (code, 0); CODEPTR(code, o);
EMIT_COND_BRANCH (ins, S390_CC_LT);
PTRSLOT (code, o);
}
break;
case OP_FBLT_UN:
EMIT_COND_BRANCH (ins, S390_CC_LT|S390_CC_OV);
break;
case OP_FBGT: {
short *o;
s390_jo (code, 0); CODEPTR(code, o);
EMIT_COND_BRANCH (ins, S390_CC_GT);
PTRSLOT (code, o);
}
break;
case OP_FBGT_UN:
EMIT_COND_BRANCH (ins, S390_CC_GT|S390_CC_OV);
break;
case OP_FBGE: {
short *o;
s390_jo (code, 0); CODEPTR(code, o);
EMIT_COND_BRANCH (ins, S390_CC_GE);
PTRSLOT (code, o);
}
break;
case OP_FBGE_UN:
EMIT_COND_BRANCH (ins, S390_CC_GE|S390_CC_OV);
break;
case OP_FBLE: {
short *o;
s390_jo (code, 0); CODEPTR(code, o);
EMIT_COND_BRANCH (ins, S390_CC_LE);
PTRSLOT (code, o);
}
break;
case OP_FBLE_UN:
EMIT_COND_BRANCH (ins, S390_CC_LE|S390_CC_OV);
break;
case OP_CKFINITE: {
short *o;
s390_lhi (code, s390_r13, 0x7f);
s390_tcdb (code, ins->sreg1, 0, s390_r13, 0);
s390_jz (code, 0); CODEPTR(code, o);
mono_add_patch_info (cfg, code - cfg->native_code,
MONO_PATCH_INFO_EXC, "OverflowException");
s390_brasl (code, s390_r14,0);
PTRSLOT(code, o);
}
break;
case OP_S390_MOVE: {
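		/*
		 * Block move: a single MVC handles up to 256 bytes; longer
		 * moves build source/destination/length register pairs and
		 * loop on MVCLE until it reports completion.
		 */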
if (ins->backend.size > 0) {
if (ins->backend.size <= 256) {
s390_mvc (code, ins->backend.size, ins->sreg2,
ins->inst_offset, ins->sreg1, ins->inst_imm);
} else {
s390_lgr (code, s390_r0, ins->sreg2);
if (ins->inst_offset > 0) {
if (s390_is_imm16 (ins->inst_offset)) {
s390_aghi (code, s390_r0, ins->inst_offset);
} else if (s390_is_imm32 (ins->inst_offset)) {
s390_agfi (code, s390_r0, ins->inst_offset);
} else {
S390_SET (code, s390_r13, ins->inst_offset);
s390_agr (code, s390_r0, s390_r13);
}
}
s390_lgr (code, s390_r12, ins->sreg1);
if (ins->inst_imm > 0) {
if (s390_is_imm16 (ins->inst_imm)) {
s390_aghi (code, s390_r12, ins->inst_imm);
} else if (s390_is_imm32 (ins->inst_imm)) {
s390_agfi (code, s390_r12, ins->inst_imm);
} else {
S390_SET (code, s390_r13, ins->inst_imm);
s390_agr (code, s390_r12, s390_r13);
}
}
if (s390_is_imm16 (ins->backend.size)) {
s390_lghi (code, s390_r1, ins->backend.size);
				} else if (s390_is_imm32 (ins->backend.size)) {
					s390_lgfi (code, s390_r1, ins->backend.size);
				} else {
					S390_SET (code, s390_r13, ins->backend.size);
					s390_lgr (code, s390_r1, s390_r13);
}
s390_lgr (code, s390_r13, s390_r1);
s390_mvcle(code, s390_r0, s390_r12, 0, 0);
s390_jo (code, -2);
}
}
}
break;
case OP_ATOMIC_ADD_I8: {
if (mono_hwcap_s390x_has_ia) {
s390_laag(code, s390_r0, ins->sreg2, ins->inst_basereg, ins->inst_offset);
if (mono_hwcap_s390x_has_mlt) {
s390_agrk(code, ins->dreg, s390_r0, ins->sreg2);
} else {
s390_agr (code, s390_r0, ins->sreg2);
s390_lgr (code, ins->dreg, s390_r0);
}
} else {
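			/*
			 * No load-and-add facility: use a compare-and-swap (CSG)
			 * loop that retries until no other CPU has updated the
			 * location in between.
			 */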
s390_lgr (code, s390_r1, ins->sreg2);
s390_lg (code, s390_r0, 0, ins->inst_basereg, ins->inst_offset);
s390_agr (code, s390_r1, s390_r0);
s390_csg (code, s390_r0, s390_r1, ins->inst_basereg, ins->inst_offset);
s390_jnz (code, -10);
s390_lgr (code, ins->dreg, s390_r1);
}
}
break;
case OP_ATOMIC_EXCHANGE_I8: {
s390_lg (code, s390_r0, 0, ins->inst_basereg, ins->inst_offset);
s390_csg (code, s390_r0, ins->sreg2, ins->inst_basereg, ins->inst_offset);
s390_jnz (code, -6);
s390_lgr (code, ins->dreg, s390_r0);
}
break;
case OP_ATOMIC_ADD_I4: {
if (mono_hwcap_s390x_has_ia) {
s390_laa (code, s390_r0, ins->sreg2, ins->inst_basereg, ins->inst_offset);
s390_ar (code, s390_r0, ins->sreg2);
s390_lgfr(code, ins->dreg, s390_r0);
} else {
s390_lgfr(code, s390_r1, ins->sreg2);
s390_lgf (code, s390_r0, 0, ins->inst_basereg, ins->inst_offset);
s390_agr (code, s390_r1, s390_r0);
s390_cs (code, s390_r0, s390_r1, ins->inst_basereg, ins->inst_offset);
s390_jnz (code, -9);
s390_lgfr(code, ins->dreg, s390_r1);
}
}
break;
case OP_ATOMIC_EXCHANGE_I4: {
s390_l (code, s390_r0, 0, ins->inst_basereg, ins->inst_offset);
s390_cs (code, s390_r0, ins->sreg2, ins->inst_basereg, ins->inst_offset);
s390_jnz (code, -4);
s390_lgfr(code, ins->dreg, s390_r0);
}
break;
case OP_S390_BKCHAIN: {
s390_lgr (code, ins->dreg, ins->sreg1);
if (s390_is_imm16 (cfg->stack_offset)) {
s390_aghi (code, ins->dreg, cfg->stack_offset);
} else if (s390_is_imm32 (cfg->stack_offset)) {
s390_agfi (code, ins->dreg, cfg->stack_offset);
} else {
S390_SET (code, s390_r13, cfg->stack_offset);
s390_agr (code, ins->dreg, s390_r13);
}
}
break;
case OP_MEMORY_BARRIER:
s390_mem (code);
break;
case OP_POPCNT32:
s390_llgfr (code, s390_r1, ins->sreg1);
if (mono_hwcap_s390x_has_mie3) {
s390_popcnt (code, ins->dreg, 0x80, s390_r1);
} else {
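			/*
			 * Pre-MIE3 POPCNT leaves a per-byte bit count; the
			 * shift-and-add sequence below folds the byte counts
			 * together, with the total ending up in the leftmost
			 * byte, extracted by the final shift right of 56.
			 */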
s390_popcnt (code, s390_r0, 0, s390_r1);
s390_ahhlr (code, s390_r0, s390_r0, s390_r0);
s390_sllg (code, s390_r1, s390_r0, 0, 16);
s390_algr (code, s390_r0, s390_r1);
s390_sllg (code, s390_r1, s390_r0, 0, 8);
s390_algr (code, s390_r0, s390_r1);
s390_srlg (code, ins->dreg, s390_r0, 0, 56);
}
break;
case OP_POPCNT64:
if (mono_hwcap_s390x_has_mie3) {
s390_popcnt (code, ins->dreg, 0x80, ins->sreg1);
		} else {
			s390_popcnt (code, s390_r0, 0, ins->sreg1);
			s390_ahhlr (code, s390_r0, s390_r0, s390_r0);
s390_sllg (code, s390_r1, s390_r0, 0, 16);
s390_algr (code, s390_r0, s390_r1);
s390_sllg (code, s390_r1, s390_r0, 0, 8);
s390_algr (code, s390_r0, s390_r1);
s390_srlg (code, ins->dreg, s390_r0, 0, 56);
}
break;
case OP_LIVERANGE_START: {
if (cfg->verbose_level > 1)
printf ("R%d START=0x%x\n", MONO_VARINFO (cfg, ins->inst_c0)->vreg, (int)(code - cfg->native_code));
MONO_VARINFO (cfg, ins->inst_c0)->live_range_start = code - cfg->native_code;
break;
}
case OP_LIVERANGE_END: {
if (cfg->verbose_level > 1)
printf ("R%d END=0x%x\n", MONO_VARINFO (cfg, ins->inst_c0)->vreg, (int)(code - cfg->native_code));
MONO_VARINFO (cfg, ins->inst_c0)->live_range_end = code - cfg->native_code;
break;
}
case OP_GC_SAFE_POINT: {
short *br;
s390_ltg (code, s390_r0, 0, ins->sreg1, 0);
s390_jz (code, 0); CODEPTR(code, br);
code = emit_call (cfg, code, MONO_PATCH_INFO_JIT_ICALL_ID,
GUINT_TO_POINTER (MONO_JIT_ICALL_mono_threads_state_poll));
PTRSLOT (code, br);
break;
}
case OP_GC_LIVENESS_DEF:
case OP_GC_LIVENESS_USE:
case OP_GC_PARAM_SLOT_LIVENESS_DEF:
ins->backend.pc_offset = code - cfg->native_code;
break;
case OP_GC_SPILL_SLOT_LIVENESS_DEF:
ins->backend.pc_offset = code - cfg->native_code;
bb->spill_slot_defs = g_slist_prepend_mempool (cfg->mempool, bb->spill_slot_defs, ins);
break;
#ifdef MONO_ARCH_SIMD_INTRINSICS
case OP_ADDPS:
s390x_addps (code, ins->sreg1, ins->sreg2);
break;
case OP_DIVPS:
s390x_divps (code, ins->sreg1, ins->sreg2);
break;
case OP_MULPS:
s390x_mulps (code, ins->sreg1, ins->sreg2);
break;
case OP_SUBPS:
s390x_subps (code, ins->sreg1, ins->sreg2);
break;
case OP_MAXPS:
s390x_maxps (code, ins->sreg1, ins->sreg2);
break;
case OP_MINPS:
s390x_minps (code, ins->sreg1, ins->sreg2);
break;
case OP_COMPPS:
g_assert (ins->inst_c0 >= 0 && ins->inst_c0 <= 7);
s390x_cmpps_imm (code, ins->sreg1, ins->sreg2, ins->inst_c0);
break;
case OP_ANDPS:
s390x_andps (code, ins->sreg1, ins->sreg2);
break;
case OP_ANDNPS:
s390x_andnps (code, ins->sreg1, ins->sreg2);
break;
case OP_ORPS:
s390x_orps (code, ins->sreg1, ins->sreg2);
break;
case OP_XORPS:
s390x_xorps (code, ins->sreg1, ins->sreg2);
break;
case OP_SQRTPS:
s390x_sqrtps (code, ins->dreg, ins->sreg1);
break;
case OP_RSQRTPS:
s390x_rsqrtps (code, ins->dreg, ins->sreg1);
break;
case OP_RCPPS:
s390x_rcpps (code, ins->dreg, ins->sreg1);
break;
case OP_ADDSUBPS:
s390x_addsubps (code, ins->sreg1, ins->sreg2);
break;
case OP_HADDPS:
s390x_haddps (code, ins->sreg1, ins->sreg2);
break;
case OP_HSUBPS:
s390x_hsubps (code, ins->sreg1, ins->sreg2);
break;
case OP_DUPPS_HIGH:
s390x_movshdup (code, ins->dreg, ins->sreg1);
break;
case OP_DUPPS_LOW:
s390x_movsldup (code, ins->dreg, ins->sreg1);
break;
case OP_PSHUFLEW_HIGH:
g_assert (ins->inst_c0 >= 0 && ins->inst_c0 <= 0xFF);
s390x_pshufhw_imm (code, ins->dreg, ins->sreg1, ins->inst_c0);
break;
case OP_PSHUFLEW_LOW:
g_assert (ins->inst_c0 >= 0 && ins->inst_c0 <= 0xFF);
s390x_pshuflw_imm (code, ins->dreg, ins->sreg1, ins->inst_c0);
break;
case OP_PSHUFLED:
g_assert (ins->inst_c0 >= 0 && ins->inst_c0 <= 0xFF);
s390x_pshufd_imm (code, ins->dreg, ins->sreg1, ins->inst_c0);
break;
case OP_SHUFPS:
g_assert (ins->inst_c0 >= 0 && ins->inst_c0 <= 0xFF);
s390x_shufps_imm (code, ins->sreg1, ins->sreg2, ins->inst_c0);
break;
case OP_SHUFPD:
g_assert (ins->inst_c0 >= 0 && ins->inst_c0 <= 0x3);
s390x_shufpd_imm (code, ins->sreg1, ins->sreg2, ins->inst_c0);
break;
case OP_ADDPD:
s390x_addpd (code, ins->sreg1, ins->sreg2);
break;
case OP_DIVPD:
s390x_divpd (code, ins->sreg1, ins->sreg2);
break;
case OP_MULPD:
s390x_mulpd (code, ins->sreg1, ins->sreg2);
break;
case OP_SUBPD:
s390x_subpd (code, ins->sreg1, ins->sreg2);
break;
case OP_MAXPD:
s390x_maxpd (code, ins->sreg1, ins->sreg2);
break;
case OP_MINPD:
s390x_minpd (code, ins->sreg1, ins->sreg2);
break;
case OP_COMPPD:
g_assert (ins->inst_c0 >= 0 && ins->inst_c0 <= 7);
s390x_cmppd_imm (code, ins->sreg1, ins->sreg2, ins->inst_c0);
break;
case OP_ANDPD:
s390x_andpd (code, ins->sreg1, ins->sreg2);
break;
case OP_ANDNPD:
s390x_andnpd (code, ins->sreg1, ins->sreg2);
break;
case OP_ORPD:
s390x_orpd (code, ins->sreg1, ins->sreg2);
break;
case OP_XORPD:
s390x_xorpd (code, ins->sreg1, ins->sreg2);
break;
case OP_SQRTPD:
s390x_sqrtpd (code, ins->dreg, ins->sreg1);
break;
case OP_ADDSUBPD:
s390x_addsubpd (code, ins->sreg1, ins->sreg2);
break;
case OP_HADDPD:
s390x_haddpd (code, ins->sreg1, ins->sreg2);
break;
case OP_HSUBPD:
s390x_hsubpd (code, ins->sreg1, ins->sreg2);
break;
case OP_DUPPD:
s390x_movddup (code, ins->dreg, ins->sreg1);
break;
case OP_EXTRACT_MASK:
s390x_pmovmskb (code, ins->dreg, ins->sreg1);
break;
case OP_PAND:
s390x_pand (code, ins->sreg1, ins->sreg2);
break;
case OP_POR:
s390x_por (code, ins->sreg1, ins->sreg2);
break;
case OP_PXOR:
s390x_pxor (code, ins->sreg1, ins->sreg2);
break;
case OP_PADDB:
s390x_paddb (code, ins->sreg1, ins->sreg2);
break;
case OP_PADDW:
s390x_paddw (code, ins->sreg1, ins->sreg2);
break;
case OP_PADDD:
s390x_paddd (code, ins->sreg1, ins->sreg2);
break;
case OP_PADDQ:
s390x_paddq (code, ins->sreg1, ins->sreg2);
break;
case OP_PSUBB:
s390x_psubb (code, ins->sreg1, ins->sreg2);
break;
case OP_PSUBW:
s390x_psubw (code, ins->sreg1, ins->sreg2);
break;
case OP_PSUBD:
s390x_psubd (code, ins->sreg1, ins->sreg2);
break;
case OP_PSUBQ:
s390x_psubq (code, ins->sreg1, ins->sreg2);
break;
case OP_PMAXB_UN:
s390x_pmaxub (code, ins->sreg1, ins->sreg2);
break;
case OP_PMAXW_UN:
s390x_pmaxuw (code, ins->sreg1, ins->sreg2);
break;
case OP_PMAXD_UN:
s390x_pmaxud (code, ins->sreg1, ins->sreg2);
break;
case OP_PMAXB:
s390x_pmaxsb (code, ins->sreg1, ins->sreg2);
break;
case OP_PMAXW:
s390x_pmaxsw (code, ins->sreg1, ins->sreg2);
break;
case OP_PMAXD:
s390x_pmaxsd (code, ins->sreg1, ins->sreg2);
break;
case OP_PAVGB_UN:
s390x_pavgb (code, ins->sreg1, ins->sreg2);
break;
case OP_PAVGW_UN:
s390x_pavgw (code, ins->sreg1, ins->sreg2);
break;
case OP_PMINB_UN:
s390x_pminub (code, ins->sreg1, ins->sreg2);
break;
case OP_PMINW_UN:
s390x_pminuw (code, ins->sreg1, ins->sreg2);
break;
case OP_PMIND_UN:
s390x_pminud (code, ins->sreg1, ins->sreg2);
break;
case OP_PMINB:
s390x_pminsb (code, ins->sreg1, ins->sreg2);
break;
case OP_PMINW:
s390x_pminsw (code, ins->sreg1, ins->sreg2);
break;
case OP_PMIND:
s390x_pminsd (code, ins->sreg1, ins->sreg2);
break;
case OP_PCMPEQB:
s390x_pcmpeqb (code, ins->sreg1, ins->sreg2);
break;
case OP_PCMPEQW:
s390x_pcmpeqw (code, ins->sreg1, ins->sreg2);
break;
case OP_PCMPEQD:
s390x_pcmpeqd (code, ins->sreg1, ins->sreg2);
break;
case OP_PCMPEQQ:
s390x_pcmpeqq (code, ins->sreg1, ins->sreg2);
break;
case OP_PCMPGTB:
s390x_pcmpgtb (code, ins->sreg1, ins->sreg2);
break;
case OP_PCMPGTW:
s390x_pcmpgtw (code, ins->sreg1, ins->sreg2);
break;
case OP_PCMPGTD:
s390x_pcmpgtd (code, ins->sreg1, ins->sreg2);
break;
case OP_PCMPGTQ:
s390x_pcmpgtq (code, ins->sreg1, ins->sreg2);
break;
case OP_PSUM_ABS_DIFF:
s390x_psadbw (code, ins->sreg1, ins->sreg2);
break;
case OP_UNPACK_LOWB:
s390x_punpcklbw (code, ins->sreg1, ins->sreg2);
break;
case OP_UNPACK_LOWW:
s390x_punpcklwd (code, ins->sreg1, ins->sreg2);
break;
case OP_UNPACK_LOWD:
s390x_punpckldq (code, ins->sreg1, ins->sreg2);
break;
case OP_UNPACK_LOWQ:
s390x_punpcklqdq (code, ins->sreg1, ins->sreg2);
break;
case OP_UNPACK_LOWPS:
s390x_unpcklps (code, ins->sreg1, ins->sreg2);
break;
case OP_UNPACK_LOWPD:
s390x_unpcklpd (code, ins->sreg1, ins->sreg2);
break;
case OP_UNPACK_HIGHB:
s390x_punpckhbw (code, ins->sreg1, ins->sreg2);
break;
case OP_UNPACK_HIGHW:
s390x_punpckhwd (code, ins->sreg1, ins->sreg2);
break;
case OP_UNPACK_HIGHD:
s390x_punpckhdq (code, ins->sreg1, ins->sreg2);
break;
case OP_UNPACK_HIGHQ:
s390x_punpckhqdq (code, ins->sreg1, ins->sreg2);
break;
case OP_UNPACK_HIGHPS:
s390x_unpckhps (code, ins->sreg1, ins->sreg2);
break;
case OP_UNPACK_HIGHPD:
s390x_unpckhpd (code, ins->sreg1, ins->sreg2);
break;
case OP_PACKW:
s390x_packsswb (code, ins->sreg1, ins->sreg2);
break;
case OP_PACKD:
s390x_packssdw (code, ins->sreg1, ins->sreg2);
break;
case OP_PACKW_UN:
s390x_packuswb (code, ins->sreg1, ins->sreg2);
break;
case OP_PACKD_UN:
s390x_packusdw (code, ins->sreg1, ins->sreg2);
break;
case OP_PADDB_SAT_UN:
s390x_paddusb (code, ins->sreg1, ins->sreg2);
break;
case OP_PSUBB_SAT_UN:
s390x_psubusb (code, ins->sreg1, ins->sreg2);
break;
case OP_PADDW_SAT_UN:
s390x_paddusw (code, ins->sreg1, ins->sreg2);
break;
case OP_PSUBW_SAT_UN:
s390x_psubusw (code, ins->sreg1, ins->sreg2);
break;
case OP_PADDB_SAT:
s390x_paddsb (code, ins->sreg1, ins->sreg2);
break;
case OP_PSUBB_SAT:
s390x_psubsb (code, ins->sreg1, ins->sreg2);
break;
case OP_PADDW_SAT:
s390x_paddsw (code, ins->sreg1, ins->sreg2);
break;
case OP_PSUBW_SAT:
s390x_psubsw (code, ins->sreg1, ins->sreg2);
break;
case OP_PMULW:
s390x_pmullw (code, ins->sreg1, ins->sreg2);
break;
case OP_PMULD:
s390x_pmulld (code, ins->sreg1, ins->sreg2);
break;
case OP_PMULQ:
s390x_pmuludq (code, ins->sreg1, ins->sreg2);
break;
case OP_PMULW_HIGH_UN:
s390x_pmulhuw (code, ins->sreg1, ins->sreg2);
break;
case OP_PMULW_HIGH:
s390x_pmulhw (code, ins->sreg1, ins->sreg2);
break;
case OP_PSHRW:
s390x_psrlw_reg_imm (code, ins->dreg, ins->inst_imm);
break;
case OP_PSHRW_REG:
s390x_psrlw (code, ins->dreg, ins->sreg2);
break;
case OP_PSARW:
s390x_psraw_reg_imm (code, ins->dreg, ins->inst_imm);
break;
case OP_PSARW_REG:
s390x_psraw (code, ins->dreg, ins->sreg2);
break;
case OP_PSHLW:
s390x_psllw_reg_imm (code, ins->dreg, ins->inst_imm);
break;
case OP_PSHLW_REG:
s390x_psllw (code, ins->dreg, ins->sreg2);
break;
case OP_PSHRD:
s390x_psrld_reg_imm (code, ins->dreg, ins->inst_imm);
break;
case OP_PSHRD_REG:
s390x_psrld (code, ins->dreg, ins->sreg2);
break;
case OP_PSARD:
s390x_psrad_reg_imm (code, ins->dreg, ins->inst_imm);
break;
case OP_PSARD_REG:
s390x_psrad (code, ins->dreg, ins->sreg2);
break;
case OP_PSHLD:
s390x_pslld_reg_imm (code, ins->dreg, ins->inst_imm);
break;
case OP_PSHLD_REG:
s390x_pslld (code, ins->dreg, ins->sreg2);
break;
case OP_PSHRQ:
s390x_psrlq_reg_imm (code, ins->dreg, ins->inst_imm);
break;
case OP_PSHRQ_REG:
s390x_psrlq (code, ins->dreg, ins->sreg2);
break;
	/* TODO: This is a part of the SSE spec but not added
case OP_PSARQ:
s390x_psraq_reg_imm (code, ins->dreg, ins->inst_imm);
break;
case OP_PSARQ_REG:
s390x_psraq (code, ins->dreg, ins->sreg2);
break;
*/
case OP_PSHLQ:
s390x_psllq_reg_imm (code, ins->dreg, ins->inst_imm);
break;
case OP_PSHLQ_REG:
s390x_psllq (code, ins->dreg, ins->sreg2);
break;
case OP_CVTDQ2PD:
s390x_cvtdq2pd (code, ins->dreg, ins->sreg1);
break;
case OP_CVTDQ2PS:
s390x_cvtdq2ps (code, ins->dreg, ins->sreg1);
break;
case OP_CVTPD2DQ:
s390x_cvtpd2dq (code, ins->dreg, ins->sreg1);
break;
case OP_CVTPD2PS:
s390x_cvtpd2ps (code, ins->dreg, ins->sreg1);
break;
case OP_CVTPS2DQ:
s390x_cvtps2dq (code, ins->dreg, ins->sreg1);
break;
case OP_CVTPS2PD:
s390x_cvtps2pd (code, ins->dreg, ins->sreg1);
break;
case OP_CVTTPD2DQ:
s390x_cvttpd2dq (code, ins->dreg, ins->sreg1);
break;
case OP_CVTTPS2DQ:
s390x_cvttps2dq (code, ins->dreg, ins->sreg1);
break;
case OP_ICONV_TO_X:
amd64_movd_xreg_reg_size (code, ins->dreg, ins->sreg1, 4);
break;
case OP_EXTRACT_I4:
amd64_movd_reg_xreg_size (code, ins->dreg, ins->sreg1, 4);
break;
case OP_EXTRACT_I8:
if (ins->inst_c0) {
amd64_movhlps (code, MONO_ARCH_FP_SCRATCH_REG, ins->sreg1);
amd64_movd_reg_xreg_size (code, ins->dreg, MONO_ARCH_FP_SCRATCH_REG, 8);
} else {
amd64_movd_reg_xreg_size (code, ins->dreg, ins->sreg1, 8);
}
break;
case OP_EXTRACT_I1:
case OP_EXTRACT_U1:
amd64_movd_reg_xreg_size (code, ins->dreg, ins->sreg1, 4);
if (ins->inst_c0)
amd64_shift_reg_imm (code, X86_SHR, ins->dreg, ins->inst_c0 * 8);
amd64_widen_reg (code, ins->dreg, ins->dreg, ins->inst_c1 == OP_EXTRACT_I1, FALSE);
break;
case OP_EXTRACT_I2:
case OP_EXTRACT_U2:
/*amd64_movd_reg_xreg_size (code, ins->dreg, ins->sreg1, 4);
if (ins->inst_c0)
amd64_shift_reg_imm_size (code, X86_SHR, ins->dreg, 16, 4);*/
s390x_pextrw_imm (code, ins->dreg, ins->sreg1, ins->inst_c0);
amd64_widen_reg_size (code, ins->dreg, ins->dreg, ins->inst_c1 == OP_EXTRACT_I2, TRUE, 4);
break;
case OP_EXTRACT_R8:
if (ins->inst_c0)
amd64_movhlps (code, ins->dreg, ins->sreg1);
else
s390x_movsd (code, ins->dreg, ins->sreg1);
break;
case OP_INSERT_I2:
s390x_pinsrw_imm (code, ins->sreg1, ins->sreg2, ins->inst_c0);
break;
case OP_EXTRACTX_U2:
s390x_pextrw_imm (code, ins->dreg, ins->sreg1, ins->inst_c0);
break;
case OP_INSERTX_U1_SLOW:
		/* sreg1 is the extracted ireg (scratch),
		 * sreg2 is the ireg to be inserted (scratch),
		 * dreg is the xreg that receives the value */
/*clear the bits from the extracted word*/
amd64_alu_reg_imm (code, X86_AND, ins->sreg1, ins->inst_c0 & 1 ? 0x00FF : 0xFF00);
/*shift the value to insert if needed*/
if (ins->inst_c0 & 1)
amd64_shift_reg_imm_size (code, X86_SHL, ins->sreg2, 8, 4);
/*join them together*/
amd64_alu (code, X86_OR, ins->sreg1, ins->sreg2);
s390x_pinsrw_imm (code, ins->dreg, ins->sreg1, ins->inst_c0 / 2);
break;
case OP_INSERTX_I4_SLOW:
s390x_pinsrw_imm (code, ins->dreg, ins->sreg2, ins->inst_c0 * 2);
amd64_shift_reg_imm (code, X86_SHR, ins->sreg2, 16);
s390x_pinsrw_imm (code, ins->dreg, ins->sreg2, ins->inst_c0 * 2 + 1);
break;
case OP_INSERTX_I8_SLOW:
amd64_movd_xreg_reg_size(code, MONO_ARCH_FP_SCRATCH_REG, ins->sreg2, 8);
if (ins->inst_c0)
amd64_movlhps (code, ins->dreg, MONO_ARCH_FP_SCRATCH_REG);
else
s390x_movsd (code, ins->dreg, MONO_ARCH_FP_SCRATCH_REG);
break;
case OP_INSERTX_R4_SLOW:
switch (ins->inst_c0) {
case 0:
if (cfg->r4fp)
s390x_movss (code, ins->dreg, ins->sreg2);
else
s390x_cvtsd2ss (code, ins->dreg, ins->sreg2);
break;
case 1:
s390x_pshufd_imm (code, ins->dreg, ins->dreg, mono_simd_shuffle_mask(1, 0, 2, 3));
if (cfg->r4fp)
s390x_movss (code, ins->dreg, ins->sreg2);
else
s390x_cvtsd2ss (code, ins->dreg, ins->sreg2);
s390x_pshufd_imm (code, ins->dreg, ins->dreg, mono_simd_shuffle_mask(1, 0, 2, 3));
break;
case 2:
s390x_pshufd_imm (code, ins->dreg, ins->dreg, mono_simd_shuffle_mask(2, 1, 0, 3));
if (cfg->r4fp)
s390x_movss (code, ins->dreg, ins->sreg2);
else
s390x_cvtsd2ss (code, ins->dreg, ins->sreg2);
s390x_pshufd_imm (code, ins->dreg, ins->dreg, mono_simd_shuffle_mask(2, 1, 0, 3));
break;
case 3:
s390x_pshufd_imm (code, ins->dreg, ins->dreg, mono_simd_shuffle_mask(3, 1, 2, 0));
if (cfg->r4fp)
s390x_movss (code, ins->dreg, ins->sreg2);
else
s390x_cvtsd2ss (code, ins->dreg, ins->sreg2);
s390x_pshufd_imm (code, ins->dreg, ins->dreg, mono_simd_shuffle_mask(3, 1, 2, 0));
break;
}
break;
case OP_INSERTX_R8_SLOW:
if (ins->inst_c0)
amd64_movlhps (code, ins->dreg, ins->sreg2);
else
s390x_movsd (code, ins->dreg, ins->sreg2);
break;
case OP_STOREX_MEMBASE_REG:
case OP_STOREX_MEMBASE:
s390x_movups_membase_reg (code, ins->dreg, ins->inst_offset, ins->sreg1);
break;
case OP_LOADX_MEMBASE:
s390x_movups_reg_membase (code, ins->dreg, ins->sreg1, ins->inst_offset);
break;
case OP_LOADX_ALIGNED_MEMBASE:
s390x_movaps_reg_membase (code, ins->dreg, ins->sreg1, ins->inst_offset);
break;
case OP_STOREX_ALIGNED_MEMBASE_REG:
s390x_movaps_membase_reg (code, ins->dreg, ins->inst_offset, ins->sreg1);
break;
case OP_STOREX_NTA_MEMBASE_REG:
s390x_movntps_reg_membase (code, ins->dreg, ins->sreg1, ins->inst_offset);
break;
case OP_PREFETCH_MEMBASE:
s390x_prefetch_reg_membase (code, ins->backend.arg_info, ins->sreg1, ins->inst_offset);
break;
case OP_XMOVE:
/*FIXME the peephole pass should have killed this*/
if (ins->dreg != ins->sreg1)
s390x_movaps (code, ins->dreg, ins->sreg1);
break;
case OP_XZERO:
s390x_pxor (code, ins->dreg, ins->dreg);
break;
case OP_ICONV_TO_R4_RAW:
amd64_movd_xreg_reg_size (code, ins->dreg, ins->sreg1, 4);
break;
case OP_FCONV_TO_R8_X:
s390x_movsd (code, ins->dreg, ins->sreg1);
break;
case OP_XCONV_R8_TO_I4:
s390x_cvttsd2si_reg_xreg_size (code, ins->dreg, ins->sreg1, 4);
switch (ins->backend.source_opcode) {
case OP_FCONV_TO_I1:
amd64_widen_reg (code, ins->dreg, ins->dreg, TRUE, FALSE);
break;
case OP_FCONV_TO_U1:
amd64_widen_reg (code, ins->dreg, ins->dreg, FALSE, FALSE);
break;
case OP_FCONV_TO_I2:
amd64_widen_reg (code, ins->dreg, ins->dreg, TRUE, TRUE);
break;
case OP_FCONV_TO_U2:
amd64_widen_reg (code, ins->dreg, ins->dreg, FALSE, TRUE);
break;
}
break;
case OP_EXPAND_I2:
s390x_pinsrw_imm (code, ins->dreg, ins->sreg1, 0);
s390x_pinsrw_imm (code, ins->dreg, ins->sreg1, 1);
s390x_pshufd_imm (code, ins->dreg, ins->dreg, 0);
break;
case OP_EXPAND_I4:
amd64_movd_xreg_reg_size (code, ins->dreg, ins->sreg1, 4);
s390x_pshufd_imm (code, ins->dreg, ins->dreg, 0);
break;
case OP_EXPAND_I8:
amd64_movd_xreg_reg_size (code, ins->dreg, ins->sreg1, 8);
s390x_pshufd_imm (code, ins->dreg, ins->dreg, 0x44);
break;
case OP_EXPAND_R4:
if (cfg->r4fp) {
s390x_movsd (code, ins->dreg, ins->sreg1);
} else {
s390x_movsd (code, ins->dreg, ins->sreg1);
s390x_cvtsd2ss (code, ins->dreg, ins->dreg);
}
s390x_pshufd_imm (code, ins->dreg, ins->dreg, 0);
break;
case OP_EXPAND_R8:
s390x_movsd (code, ins->dreg, ins->sreg1);
s390x_pshufd_imm (code, ins->dreg, ins->dreg, 0x44);
break;
#endif
default:
g_warning ("unknown opcode %s in %s()\n", mono_inst_name (ins->opcode), __FUNCTION__);
g_assert_not_reached ();
}
if ((cfg->opt & MONO_OPT_BRANCH) && ((code - cfg->native_code - offset) > max_len)) {
g_warning ("wrong maximal instruction length of instruction %s (expected %d, got %ld)",
mono_inst_name (ins->opcode), max_len, code - cfg->native_code - offset);
g_assert_not_reached ();
}
}
set_code_cursor (cfg, code);
}
/*========================= End of Function ========================*/
/**
*
* @brief Architecture-specific registration of lowlevel calls
*
 * Registration hook for optimized lowlevel operations; s390x has none to register.
*/
void
mono_arch_register_lowlevel_calls (void)
{
}
/*========================= End of Function ========================*/
/**
*
* @brief Architecture-specific patching
* @param[in] @cfg - Compilation control block
* @param[in] @code - Start of code
* @param[in] @target - Target of patch
* @param[in] @relo - Relocation type
*
* Perform patching action
*/
static void
emit_patch_full (MonoCompile *cfg, MonoJumpInfo *ji, guint8 *code,
gpointer target, int relo)
{
guint8 *ip = ji->ip.i + code;
switch (relo) {
case MONO_R_S390_RELINS :
target = S390_RELATIVE(target, ip);
ip += 2;
s390_patch_rel (ip, (guint64) target);
break;
case MONO_R_S390_THUNKED :
if (cfg)
create_thunk(cfg, ip, code, target);
else
update_thunk(cfg, code, target);
break;
case MONO_R_S390_DIRECT :
S390_EMIT_CALL (ip, target);
break;
case MONO_R_S390_ADDR :
s390_patch_addr (ip, (guint64) target);
break;
case MONO_R_S390_SWITCH :
S390_EMIT_LOAD (ip, target);
break;
case MONO_R_S390_REL :
target = S390_RELATIVE(target, ip);
s390_patch_rel (ip, (guint64) target);
break;
default :
g_assert_not_reached();
}
}
/*========================= End of Function ========================*/
/**
*
* @brief Architecture-specific patching of instructions and data
*
* @param[in] @cfg - Compile control block
* @param[in] @method - Current method
* @param[in] @code - Current code block
* @param[in] @ji - Jump information
* @param[in] @target - Target of patch
*
* Process the patch data created during the instruction build process.
* This resolves jumps, calls, variables etc.
*/
void
mono_arch_patch_code_new (MonoCompile *cfg, guint8 *code, MonoJumpInfo *ji, gpointer target)
{
switch (ji->type) {
case MONO_PATCH_INFO_IP:
case MONO_PATCH_INFO_LDSTR:
case MONO_PATCH_INFO_TYPE_FROM_HANDLE:
case MONO_PATCH_INFO_LDTOKEN:
case MONO_PATCH_INFO_EXC:
emit_patch_full (cfg, ji, code, target, MONO_R_S390_ADDR);
break;
case MONO_PATCH_INFO_BB:
case MONO_PATCH_INFO_JIT_ICALL_ADDR:
case MONO_PATCH_INFO_JIT_ICALL_ID:
case MONO_PATCH_INFO_METHOD:
emit_patch_full (cfg, ji, code, target, ji->relocation);
break;
case MONO_PATCH_INFO_METHOD_JUMP:
case MONO_PATCH_INFO_RGCTX_FETCH:
case MONO_PATCH_INFO_SPECIFIC_TRAMPOLINE_LAZY_FETCH_ADDR:
case MONO_PATCH_INFO_ABS:
emit_patch_full (cfg, ji, code, target, MONO_R_S390_THUNKED);
break;
case MONO_PATCH_INFO_SWITCH:
emit_patch_full(cfg, ji, code, target, MONO_R_S390_SWITCH);
break;
case MONO_PATCH_INFO_METHODCONST:
case MONO_PATCH_INFO_CLASS:
case MONO_PATCH_INFO_IMAGE:
case MONO_PATCH_INFO_FIELD:
case MONO_PATCH_INFO_IID:
case MONO_PATCH_INFO_EXC_NAME:
emit_patch_full(cfg, ji, code, target, MONO_R_S390_REL);
break;
case MONO_PATCH_INFO_NONE:
break;
default:
emit_patch_full (cfg, ji, code, target, MONO_R_S390_RELINS);
}
}
/*========================= End of Function ========================*/
/**
*
* @brief Architecture-specific prolog generation
*
* @param[in] @cfg - Compile control block
 * @returns Location of code generated
*
* Create the instruction sequence for entry into a method:
* - Determine stack size
* - Save preserved registers
* - Unload parameters
* - Determine if LMF needs saving and generate that sequence
*/
guint8 *
mono_arch_emit_prolog (MonoCompile *cfg)
{
MonoMethod *method = cfg->method;
MonoBasicBlock *bb;
MonoMethodSignature *sig;
MonoInst *inst;
long alloc_size, pos, max_offset, i, cfa_offset = 0;
guint8 *code;
guint32 size;
CallInfo *cinfo;
int argsClobbered = 0,
lmfOffset,
fpOffset = 0;
cfg->code_size = 512;
if (method->save_lmf)
cfg->code_size += 200;
cfg->native_code = code = (guint8 *) g_malloc (cfg->code_size);
/**
* Create unwind information
*/
mono_emit_unwind_op_def_cfa (cfg, code, STK_BASE, S390_CFA_OFFSET);
s390_stmg (code, s390_r6, s390_r15, STK_BASE, S390_REG_SAVE_OFFSET);
emit_unwind_regs(cfg, code, s390_r6, s390_r15, S390_REG_SAVE_OFFSET - S390_CFA_OFFSET);
if (cfg->arch.bkchain_reg != -1)
s390_lgr (code, cfg->arch.bkchain_reg, STK_BASE);
/*
* If there are local allocations the R11 becomes the frame register
*/
if (cfg->flags & MONO_CFG_HAS_ALLOCA) {
cfg->used_int_regs |= 1 << s390_r11;
}
/*
* Check if FP registers need preserving
*/
if ((cfg->arch.used_fp_regs & S390_FP_SAVE_MASK) != 0) {
for (int i = s390_f8; i <= s390_f15; i++) {
if (cfg->arch.used_fp_regs & (1 << i))
fpOffset += sizeof(double);
}
fpOffset = S390_ALIGN(fpOffset, sizeof(double));
}
cfg->arch.fpSize = fpOffset;
/*
* Calculate stack requirements
*/
alloc_size = cfg->stack_offset + fpOffset;
cfg->stack_usage = cfa_offset = alloc_size;
s390_lgr (code, s390_r11, STK_BASE);
if (s390_is_imm16 (alloc_size)) {
s390_aghi (code, STK_BASE, -alloc_size);
} else if (s390_is_imm32 (alloc_size)) {
s390_agfi (code, STK_BASE, -alloc_size);
} else {
int stackSize = alloc_size;
while (stackSize > INT_MAX) {
s390_agfi (code, STK_BASE, -INT_MAX);
stackSize -= INT_MAX;
}
s390_agfi (code, STK_BASE, -stackSize);
}
mono_emit_unwind_op_def_cfa_offset (cfg, code, alloc_size + S390_CFA_OFFSET);
s390_stg (code, s390_r11, 0, STK_BASE, 0);
if (fpOffset > 0) {
int stkOffset = 0;
s390_lgr (code, s390_r1, s390_r11);
s390_aghi (code, s390_r1, -fpOffset);
for (int i = s390_f8; i <= s390_f15; i++) {
if (cfg->arch.used_fp_regs & (1 << i)) {
s390_std (code, i, 0, s390_r1, stkOffset);
emit_unwind_regs(cfg, code, 16+i, 16+i, stkOffset+fpOffset - S390_CFA_OFFSET);
stkOffset += sizeof(double);
}
}
}
if (cfg->frame_reg != STK_BASE) {
s390_lgr (code, s390_r11, STK_BASE);
mono_emit_unwind_op_def_cfa_reg (cfg, code, cfg->frame_reg);
}
/* store runtime generic context */
if (cfg->rgctx_var) {
g_assert (cfg->rgctx_var->opcode == OP_REGOFFSET);
s390_stg (code, MONO_ARCH_RGCTX_REG, 0,
cfg->rgctx_var->inst_basereg,
cfg->rgctx_var->inst_offset);
}
#if 0
char *methodName = getenv("MONO_TRACE_METHOD");
if (methodName != NULL) {
printf("ns: %s k: %s m: %s\n",method->klass->name_space,method->klass->name,method->name);fflush(stdout);
// Tests:set_ip
//if ((strcmp(method->klass->name_space,"") == 0) &&
// (strcmp(method->klass->name,"Tests") == 0) &&
// (strcmp(method->name, "set_ip") == 0)) {
// (strcmp("CancellationToken,TaskCreationOptions,TaskContinuationOptions,TaskScheduler",mono_signature_get_desc(method->signature, FALSE)) != 0)) {
if ((strcmp(method->name, methodName) == 0)) {
printf("SIGNATURE: %s\n",mono_signature_get_desc(method->signature, FALSE)); fflush(stdout);
s390_j (code, 0);
}
}
#endif
/* compute max_offset in order to use short forward jumps
* we always do it on s390 because the immediate displacement
* for jumps is too small
*/
max_offset = 0;
for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
MonoInst *ins;
bb->max_offset = max_offset;
MONO_BB_FOR_EACH_INS (bb, ins)
max_offset += ins_get_size (ins->opcode);
}
/* load arguments allocated to register from the stack */
sig = mono_method_signature_internal (method);
pos = 0;
cinfo = cfg->arch.cinfo;
if (cinfo->struct_ret) {
ArgInfo *ainfo = &cinfo->ret;
inst = cfg->vret_addr;
inst->backend.size = ainfo->vtsize;
if (inst->opcode == OP_REGVAR)
s390_lgr (code, inst->dreg, ainfo->reg);
else
s390_stg (code, ainfo->reg, 0, inst->inst_basereg, inst->inst_offset);
}
/**
* Process the arguments passed to the method
*/
for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
ArgInfo *ainfo = cinfo->args + i;
inst = cfg->args [pos];
if (inst->opcode == OP_VTARG_ADDR)
inst = inst->inst_left;
if (inst->opcode == OP_REGVAR) {
if (ainfo->regtype == RegTypeGeneral)
s390_lgr (code, inst->dreg, ainfo->reg);
else if (ainfo->regtype == RegTypeFP) {
if (inst->dreg != ainfo->reg) {
s390_ldr (code, inst->dreg, ainfo->reg);
}
} else if (ainfo->regtype == RegTypeFPR4) {
if (!cfg->r4fp)
s390_ledbr (code, inst->dreg, ainfo->reg);
} else if (ainfo->regtype == RegTypeBase) {
s390_lgr (code, s390_r13, STK_BASE);
s390_aghi (code, s390_r13, alloc_size);
s390_lg (code, inst->dreg, 0, s390_r13, ainfo->offset);
} else
g_assert_not_reached ();
if (cfg->verbose_level > 2)
g_print ("Argument %d assigned to register %s\n",
pos, mono_arch_regname (inst->dreg));
} else {
if (ainfo->regtype == RegTypeGeneral) {
if (!((ainfo->reg >= 2) && (ainfo->reg <= 6)))
g_assert_not_reached();
switch (ainfo->size) {
case 1:
s390_stc (code, ainfo->reg, 0, inst->inst_basereg, inst->inst_offset);
break;
case 2:
s390_sth (code, ainfo->reg, 0, inst->inst_basereg, inst->inst_offset);
break;
case 4:
s390_st (code, ainfo->reg, 0, inst->inst_basereg, inst->inst_offset);
break;
case 8:
s390_stg (code, ainfo->reg, 0, inst->inst_basereg, inst->inst_offset);
break;
}
} else if (ainfo->regtype == RegTypeBase) {
} else if (ainfo->regtype == RegTypeFP) {
s390_std (code, ainfo->reg, 0, inst->inst_basereg, inst->inst_offset);
} else if (ainfo->regtype == RegTypeFPR4) {
s390_ste (code, ainfo->reg, 0, inst->inst_basereg, inst->inst_offset);
} else if (ainfo->regtype == RegTypeStructByVal) {
int doffset = inst->inst_offset;
size = (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE && sig->pinvoke && !sig->marshalling_disabled
? mono_class_native_size(mono_class_from_mono_type_internal (inst->inst_vtype), NULL)
: ainfo->size);
switch (size) {
case 1:
if (ainfo->reg != STK_BASE)
s390_stc (code, ainfo->reg, 0, inst->inst_basereg, doffset);
break;
case 2:
if (ainfo->reg != STK_BASE)
s390_sth (code, ainfo->reg, 0, inst->inst_basereg, doffset);
break;
case 4:
if (ainfo->reg != STK_BASE)
s390_st (code, ainfo->reg, 0, inst->inst_basereg, doffset);
break;
case 8:
if (ainfo->reg != STK_BASE)
s390_stg (code, ainfo->reg, 0, inst->inst_basereg, doffset);
break;
default:
if (ainfo->reg != STK_BASE)
s390_stg (code, ainfo->reg, 0, STK_BASE, doffset);
}
} else if (ainfo->regtype == RegTypeStructByAddr) {
s390_stg (code, ainfo->reg, 0, inst->inst_basereg, inst->inst_offset);
} else
g_assert_not_reached ();
}
pos++;
}
if (method->save_lmf) {
/**
* Build the MonoLMF structure on the stack - see mini-s390x.h
*/
lmfOffset = alloc_size - sizeof(MonoLMF);
s390_lgr (code, s390_r13, cfg->frame_reg);
s390_aghi (code, s390_r13, lmfOffset);
/*
* Preserve the parameter registers while we fix up the lmf
*/
s390_stmg (code, s390_r2, s390_r6, s390_r13,
MONO_STRUCT_OFFSET(MonoLMF, pregs));
for (i = 0; i < 5; i++)
mini_gc_set_slot_type_from_fp (cfg, lmfOffset + MONO_STRUCT_OFFSET (MonoLMF, pregs) + i * sizeof(gulong), SLOT_NOREF);
/*
 * On return from this call r2 holds the address of jit_tls->lmf
*/
code = emit_call (cfg, code, MONO_PATCH_INFO_JIT_ICALL_ID,
GUINT_TO_POINTER (MONO_JIT_ICALL_mono_tls_get_lmf_addr_extern));
/*
* Set lmf.lmf_addr = jit_tls->lmf
*/
s390_stg (code, s390_r2, 0, s390_r13,
MONO_STRUCT_OFFSET(MonoLMF, lmf_addr));
mini_gc_set_slot_type_from_fp (cfg, lmfOffset + MONO_STRUCT_OFFSET (MonoLMF, lmf_addr), SLOT_NOREF);
/*
* Get current lmf
*/
s390_lg (code, s390_r0, 0, s390_r2, 0);
/*
* Set our lmf as the current lmf
*/
s390_stg (code, s390_r13, 0, s390_r2, 0);
/*
* Have our lmf.previous_lmf point to the last lmf
*/
s390_stg (code, s390_r0, 0, s390_r13,
MONO_STRUCT_OFFSET(MonoLMF, previous_lmf));
mini_gc_set_slot_type_from_fp (cfg, lmfOffset + MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), SLOT_NOREF);
/*
* Save method info
*/
S390_SET (code, s390_r1, method);
s390_stg (code, s390_r1, 0, s390_r13,
MONO_STRUCT_OFFSET(MonoLMF, method));
mini_gc_set_slot_type_from_fp (cfg, lmfOffset + MONO_STRUCT_OFFSET (MonoLMF, method), SLOT_NOREF);
/*
* Save the current IP
*/
s390_stg (code, STK_BASE, 0, s390_r13, MONO_STRUCT_OFFSET(MonoLMF, ebp));
s390_basr (code, s390_r1, 0);
s390_stg (code, s390_r1, 0, s390_r13, MONO_STRUCT_OFFSET(MonoLMF, eip));
mini_gc_set_slot_type_from_fp (cfg, lmfOffset + MONO_STRUCT_OFFSET (MonoLMF, ebp), SLOT_NOREF);
mini_gc_set_slot_type_from_fp (cfg, lmfOffset + MONO_STRUCT_OFFSET (MonoLMF, eip), SLOT_NOREF);
/*
* Save general and floating point registers
*/
s390_stmg (code, s390_r2, s390_r12, s390_r13,
MONO_STRUCT_OFFSET(MonoLMF, gregs) + 2 * sizeof(gulong));
for (i = 0; i < 11; i++)
mini_gc_set_slot_type_from_fp (cfg, lmfOffset + MONO_STRUCT_OFFSET (MonoLMF, gregs) + i * sizeof(gulong), SLOT_NOREF);
fpOffset = lmfOffset + MONO_STRUCT_OFFSET (MonoLMF, fregs);
for (i = 0; i < 16; i++) {
s390_std (code, i, 0, s390_r13,
MONO_STRUCT_OFFSET(MonoLMF, fregs) + i * sizeof(gulong));
mini_gc_set_slot_type_from_fp (cfg, fpOffset, SLOT_NOREF);
fpOffset += sizeof(double);
}
/*
* Restore the parameter registers now that we've set up the lmf
*/
s390_lmg (code, s390_r2, s390_r6, s390_r13,
MONO_STRUCT_OFFSET(MonoLMF, pregs));
}
if (cfg->method->save_lmf)
argsClobbered = TRUE;
/*
* Optimize the common case of the first bblock making a call with the same
* arguments as the method. This works because the arguments are still in their
* original argument registers.
*/
if (!argsClobbered) {
MonoBasicBlock *first_bb = cfg->bb_entry;
MonoInst *next;
int filter = FILTER_IL_SEQ_POINT;
next = mono_bb_first_inst (first_bb, filter);
if (!next && first_bb->next_bb) {
first_bb = first_bb->next_bb;
next = mono_bb_first_inst (first_bb, filter);
}
if (first_bb->in_count > 1)
next = NULL;
for (i = 0; next && i < sig->param_count + sig->hasthis; ++i) {
ArgInfo *ainfo = cinfo->args + i;
gboolean match = FALSE;
inst = cfg->args [i];
if (inst->opcode != OP_REGVAR) {
switch (ainfo->regtype) {
case RegTypeGeneral: {
if (((next->opcode == OP_LOAD_MEMBASE) ||
(next->opcode == OP_LOADI4_MEMBASE)) &&
next->inst_basereg == inst->inst_basereg &&
next->inst_offset == inst->inst_offset) {
if (next->dreg == ainfo->reg) {
NULLIFY_INS (next);
match = TRUE;
} else {
next->opcode = OP_MOVE;
next->sreg1 = ainfo->reg;
/* Only continue if the instruction doesn't change argument regs */
if (next->dreg == ainfo->reg)
match = TRUE;
}
}
break;
}
default:
break;
}
} else {
/* Argument allocated to (non-volatile) register */
switch (ainfo->regtype) {
case RegTypeGeneral:
if (next->opcode == OP_MOVE &&
next->sreg1 == inst->dreg &&
next->dreg == ainfo->reg) {
NULLIFY_INS (next);
match = TRUE;
}
break;
default:
break;
}
}
if (match) {
next = mono_inst_next (next, filter);
if (!next)
break;
}
}
}
if (cfg->gen_sdb_seq_points) {
MonoInst *seq;
/* Initialize ss_tramp_var */
seq = cfg->arch.ss_tramp_var;
g_assert (seq->opcode == OP_REGOFFSET);
S390_SET (code, s390_r1, (guint64) &ss_trampoline);
s390_stg (code, s390_r1, 0, seq->inst_basereg, seq->inst_offset);
/* Initialize bp_tramp_var */
seq = cfg->arch.bp_tramp_var;
g_assert (seq->opcode == OP_REGOFFSET);
S390_SET (code, s390_r1, (guint64) &bp_trampoline);
s390_stg (code, s390_r1, 0, seq->inst_basereg, seq->inst_offset);
}
set_code_cursor (cfg, code);
return code;
}
/*========================= End of Function ========================*/
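/*
 * Resulting frame layout (illustrative, derived from the prolog above;
 * the stack grows downwards and offsets are from the post-prolog r15):
 *
 *   r15 + alloc_size + S390_REG_SAVE_OFFSET   caller's r6-r15 (STMG above)
 *   r15 + alloc_size - fpSize .. alloc_size   preserved f8-f15 (if used)
 *   r15 + alloc_size - sizeof(MonoLMF)        MonoLMF (if method->save_lmf)
 *   ...                                       locals and spill slots
 *   r15 + 0                                   backchain (saved caller r15)
 */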
/**
*
 * @brief Architecture-specific epilog generation
*
* @param[in] @cfg - Compile control block
*
* Create the instruction sequence for exit from a method
*/
void
mono_arch_emit_epilog (MonoCompile *cfg)
{
MonoMethod *method = cfg->method;
guint8 *code;
int max_epilog_size = 96, i;
int fpOffset = 0;
if (cfg->method->save_lmf)
max_epilog_size += 128;
code = realloc_code (cfg, max_epilog_size);
cfg->has_unwind_info_for_epilog = TRUE;
/* Mark the start of the epilog */
mono_emit_unwind_op_mark_loc (cfg, code, 0);
/* Save the uwind state which is needed by the out-of-line code */
mono_emit_unwind_op_remember_state (cfg, code);
if (method->save_lmf)
restoreLMF(code, cfg->frame_reg, cfg->stack_usage);
code = backUpStackPtr(cfg, code);
mono_emit_unwind_op_def_cfa (cfg, code, STK_BASE, S390_CFA_OFFSET);
mono_emit_unwind_op_same_value (cfg, code, STK_BASE);
if (cfg->arch.fpSize != 0) {
fpOffset = -cfg->arch.fpSize;
for (int i=8; i<16; i++) {
if (cfg->arch.used_fp_regs & (1 << i)) {
s390_ldy (code, i, 0, STK_BASE, fpOffset);
mono_emit_unwind_op_same_value (cfg, code, 16+i);
fpOffset += sizeof(double);
}
}
}
s390_lmg (code, s390_r6, s390_r14, STK_BASE, S390_REG_SAVE_OFFSET);
for (i = s390_r6; i < s390_r15; i++)
mono_emit_unwind_op_same_value (cfg, code, i);
s390_br (code, s390_r14);
/* Restore the unwind state to be the same as before the epilog */
mono_emit_unwind_op_restore_state (cfg, code);
/* Round up for start of any thunk entries */
code = (guint8 *) ((((uintptr_t) code + 7) >> 3) << 3);
set_code_cursor (cfg, code);
}
/*========================= End of Function ========================*/
/**
*
* @brief Architecture-specific exception emission
*
* @param[in] @cfg - Compile control block
*
* Create the instruction sequence for exception handling
*/
void
mono_arch_emit_exceptions (MonoCompile *cfg)
{
MonoJumpInfo *patch_info;
guint8 *code;
int nThrows = 0,
exc_count = 0,
iExc;
guint32 code_size;
MonoClass *exc_classes [MAX_EXC];
guint8 *exc_throw_start [MAX_EXC];
for (patch_info = cfg->patch_info;
patch_info;
patch_info = patch_info->next) {
if (patch_info->type == MONO_PATCH_INFO_EXC)
exc_count++;
}
code_size = exc_count * 48;
code = realloc_code (cfg, code_size);
/*
* Add code to raise exceptions
*/
for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
switch (patch_info->type) {
case MONO_PATCH_INFO_EXC: {
guint8 *ip = patch_info->ip.i + cfg->native_code;
MonoClass *exc_class;
/*
* Patch the branch in epilog to come here
*/
s390_patch_rel (ip + 2, (guint64) S390_RELATIVE(code,ip));
exc_class = mono_class_load_from_name (mono_defaults.corlib,
"System",
patch_info->data.name);
for (iExc = 0; iExc < nThrows; ++iExc)
if (exc_classes [iExc] == exc_class)
break;
if (iExc < nThrows) {
s390_jcl (code, S390_CC_UN,
(guint64) exc_throw_start [iExc]);
patch_info->type = MONO_PATCH_INFO_NONE;
} else {
if (nThrows < MAX_EXC) {
exc_classes [nThrows] = exc_class;
exc_throw_start [nThrows] = code;
}
/*
* Patch the parameter passed to the handler
*/
S390_SET (code, s390_r2, m_class_get_type_token (exc_class));
/*
* Load return address & parameter register
*/
s390_larl (code, s390_r14, (guint64)S390_RELATIVE((patch_info->ip.i +
cfg->native_code + 8), code));
/*
* Reuse the current patch to set the jump
*/
patch_info->type = MONO_PATCH_INFO_JIT_ICALL_ID;
patch_info->data.jit_icall_id = MONO_JIT_ICALL_mono_arch_throw_corlib_exception;
patch_info->ip.i = code - cfg->native_code;
patch_info->relocation = MONO_R_S390_THUNKED;
S390_BR_TEMPLATE (code, s390_r1);
cfg->thunk_area += THUNK_SIZE;
}
break;
}
default:
/* do nothing */
break;
}
}
/* Round up for start of any thunk entries */
code = (guint8 *) ((((uintptr_t) code + 7) >> 3) << 3);
set_code_cursor (cfg, code);
}
/*========================= End of Function ========================*/
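/*
 * Register contract assumed by the generated throw stubs above (inferred
 * from the emission sequence, not a public ABI):
 *   r2  - type token of the exception class being raised
 *   r14 - re-entry address back into the method (raise site + 8), loaded
 *         PC-relative with LARL
 * followed by a thunked branch to mono_arch_throw_corlib_exception.
 */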
/**
*
* @brief Architecture-specific finishing of initialization
*
* Perform any architectural-specific operations at the conclusion of
* the initialization phase
*/
void
mono_arch_finish_init (void)
{
}
/*========================= End of Function ========================*/
/**
*
* @brief Architecture-specific instruction emission for method
*
* @param[in] @cfg - Compile Control block
* @param[in] @cmethod - Current method
* @param[in] @fsig - Method signature
* @param[in] @args - Arguments to method
* @returns Instruction(s) required for architecture
*
* Provide any architectural shortcuts for specific methods.
*/
MonoInst *
mono_arch_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
{
MonoInst *ins = NULL;
int opcode = 0;
MonoStackType stack_type = STACK_R8;
if (cmethod->klass == mono_class_try_get_math_class ()) {
// unary double
if (fsig->param_count == 1 && fsig->params [0]->type == MONO_TYPE_R8) {
if (strcmp (cmethod->name, "Abs") == 0) {
opcode = OP_ABS;
} else if (strcmp (cmethod->name, "Ceiling") == 0) {
opcode = OP_CEIL;
} else if (strcmp (cmethod->name, "Floor") == 0) {
opcode = OP_FLOOR;
} else if (strcmp (cmethod->name, "Round") == 0) {
opcode = OP_ROUND;
} else if (strcmp (cmethod->name, "Sqrt") == 0) {
opcode = OP_SQRT;
} else if (strcmp (cmethod->name, "Truncate") == 0) {
opcode = OP_TRUNC;
}
}
// unary float (overloaded)
else if (fsig->param_count == 1 && fsig->params [0]->type == MONO_TYPE_R4) {
if (strcmp (cmethod->name, "Abs") == 0) {
if (cfg->r4fp) {
opcode = OP_ABSF;
stack_type = STACK_R4;
} else {
opcode = OP_ABS;
}
}
}
// binary double
else if (fsig->param_count == 2 && fsig->params [0]->type == MONO_TYPE_R8 && fsig->params [1]->type == MONO_TYPE_R8) {
if (strcmp (cmethod->name, "CopySign") == 0) {
opcode = OP_FCOPYSIGN;
}
}
} else if (cmethod->klass == mono_class_try_get_mathf_class ()) {
if (fsig->param_count == 1) {
stack_type = STACK_R4;
if (strcmp (cmethod->name, "Abs") == 0) {
if (cfg->r4fp) {
opcode = OP_ABSF;
stack_type = STACK_R4;
} else {
opcode = OP_ABS;
}
} else if (strcmp (cmethod->name, "Ceiling") == 0) {
if (cfg->r4fp) {
opcode = OP_CEILF;
stack_type = STACK_R4;
} else {
opcode = OP_CEIL;
}
} else if (strcmp (cmethod->name, "Floor") == 0) {
if (cfg->r4fp) {
opcode = OP_FLOORF;
stack_type = STACK_R4;
} else {
opcode = OP_FLOOR;
}
} else if (strcmp (cmethod->name, "Sqrt") == 0) {
if (cfg->r4fp) {
opcode = OP_SQRTF;
stack_type = STACK_R4;
} else {
opcode = OP_SQRT;
}
} else if (strcmp (cmethod->name, "Truncate") == 0) {
if (cfg->r4fp) {
opcode = OP_TRUNCF;
stack_type = STACK_R4;
} else {
opcode = OP_TRUNC;
}
}
}
}
if (opcode) {
MONO_INST_NEW (cfg, ins, opcode);
ins->type = stack_type;
ins->dreg = mono_alloc_freg (cfg);
ins->sreg1 = args [0]->dreg;
if (fsig->param_count > 1) {
ins->sreg2 = args [1]->dreg;
}
g_assert (fsig->param_count <= 2);
MONO_ADD_INS (cfg->cbb, ins);
}
return ins;
}
/*========================= End of Function ========================*/
/**
*
* @brief Decompose opcode into a System z operation
*
* @param[in] @cfg - Compile Control block
* @param[in] @ins - Mono Instruction
*
* Substitute a System z instruction for a Mono operation.
*/
void
mono_arch_decompose_opts (MonoCompile *cfg, MonoInst *ins)
{
/*
* Have to rename these to avoid being decomposed normally, since the normal
* decomposition does not work on S390.
*/
switch (ins->opcode) {
case OP_ISUB_OVF:
ins->opcode = OP_S390_ISUB_OVF;
break;
case OP_ISUB_OVF_UN:
ins->opcode = OP_S390_ISUB_OVF_UN;
break;
case OP_IADD_OVF:
ins->opcode = OP_S390_IADD_OVF;
break;
case OP_IADD_OVF_UN:
ins->opcode = OP_S390_IADD_OVF_UN;
break;
case OP_LADD_OVF:
ins->opcode = OP_S390_LADD_OVF;
break;
case OP_LADD_OVF_UN:
ins->opcode = OP_S390_LADD_OVF_UN;
break;
case OP_LSUB_OVF:
ins->opcode = OP_S390_LSUB_OVF;
break;
case OP_LSUB_OVF_UN:
ins->opcode = OP_S390_LSUB_OVF_UN;
break;
default:
break;
}
}
/*========================= End of Function ========================*/
/**
*
* @brief Determine the cost of allocation a variable
*
* @param[in] @cfg - Compile Control block
* @param[in] @vmv - Mono Method Variable
* @returns Cost (hardcoded on s390x to 2)
*
* Determine the cost, in the number of memory references, of the action
* of allocating the variable VMV into a register during global register
* allocation.
*
*/
guint32
mono_arch_regalloc_cost (MonoCompile *cfg, MonoMethodVar *vmv)
{
/* FIXME: */
return 2;
}
/*========================= End of Function ========================*/
/**
*
* @brief Architectural specific register window flushing
*
* Not applicable for s390x so we just do nothing
*
*/
void
mono_arch_flush_register_windows (void)
{
}
/*========================= End of Function ========================*/
/**
*
* @brief Architectural specific check if value may be immediate
*
* @param[in] @opcode - Operation code
* @param[in] @imm_opcode - Immediate operation code
* @param[in] @imm - Value to be examined
* @returns True if it is a valid immediate value
*
* Determine if operand qualifies as an immediate value. For s390x
 * this is a signed value in the range -2**31 .. 2**31-1
*
*/
gboolean
mono_arch_is_inst_imm (int opcode, int imm_opcode, gint64 imm)
{
return s390_is_imm32 (imm);
}
/*========================= End of Function ========================*/
/**
*
* @brief Architectural specific patch offset value for AOT
*
* @param[in] @code - Location of code to check
* @returns Offset
*
* Dummy entry point if/when s390x supports AOT.
*/
guint32
mono_arch_get_patch_offset (guint8 *code)
{
return 0;
}
/*========================= End of Function ========================*/
/**
*
* @brief Architectural specific returning of register from context
*
* @param[in] @ctx - Mono context
* @param[in] @reg - Register number to be returned
* @returns Contents of the register from the context
*
* Return a register from the context.
*/
host_mgreg_t
mono_arch_context_get_int_reg (MonoContext *ctx, int reg)
{
return ctx->uc_mcontext.gregs[reg];
}
host_mgreg_t*
mono_arch_context_get_int_reg_address (MonoContext *ctx, int reg)
{
return &ctx->uc_mcontext.gregs[reg];
}
/*========================= End of Function ========================*/
/**
*
* @brief Architectural specific setting of a register in the context
*
* @param[in] @ctx - Mono context
* @param[in] @reg - Register number to be returned
* @param[in] @val - Value to be set
*
* Set the specified register in the context with the value passed
*/
void
mono_arch_context_set_int_reg (MonoContext *ctx, int reg, host_mgreg_t val)
{
ctx->uc_mcontext.gregs[reg] = val;
}
/*========================= End of Function ========================*/
/**
*
* @brief Architectural specific returning of the "this" value from context
*
 * @param[in] @regs - Context registers
 * @param[in] @code - Current location
 * @returns Pointer to the "this" object
 *
 * Extract register 2 from the register set, as on s390x this is where
 * the "this" parameter is passed
*/
gpointer
mono_arch_get_this_arg_from_call (host_mgreg_t *regs, guint8 *code)
{
return (gpointer) regs [s390_r2];
}
/*========================= End of Function ========================*/
/**
*
* @brief Delegation trampoline processing
*
* @param[in] @info - Trampoline information
* @param[in] @has_target - Use target from delegation
 * @param[in] @sig - Method signature
* @param[in] @aot - AOT indicator
* @returns Next instruction location
*
* Process the delegation trampolines
*/
static guint8 *
get_delegate_invoke_impl (MonoTrampInfo **info, gboolean has_target, MonoMethodSignature *sig, gboolean aot)
{
guint8 *code, *start;
if (has_target) {
int size = 32;
start = code = (guint8 *) mono_global_codeman_reserve (size);
/* Replace the this argument with the target */
s390_lg (code, s390_r1, 0, s390_r2, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr));
s390_lg (code, s390_r2, 0, s390_r2, MONO_STRUCT_OFFSET (MonoDelegate, target));
s390_br (code, s390_r1);
g_assert ((code - start) <= size);
mono_arch_flush_icache (start, size);
MONO_PROFILER_RAISE (jit_code_buffer, (start, code - start, MONO_PROFILER_CODE_BUFFER_DELEGATE_INVOKE, NULL));
} else {
int size, i, offset = S390_MINIMAL_STACK_SIZE, iReg = s390_r2;
CallInfo *cinfo = get_call_info (NULL, sig);
size = 32 + sig->param_count * 8;
start = code = (guint8 *) mono_global_codeman_reserve (size);
s390_lg (code, s390_r1, 0, s390_r2, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr));
/* slide down the arguments */
for (i = 0; i < sig->param_count; ++i) {
switch(cinfo->args[i].regtype) {
case RegTypeGeneral :
if (iReg < S390_LAST_ARG_REG) {
s390_lgr (code, iReg, (iReg + 1));
} else {
s390_lg (code, iReg, 0, STK_BASE, offset);
}
iReg++;
break;
default :
s390_mvc (code, sizeof(uintptr_t), STK_BASE, offset, STK_BASE, offset+sizeof(uintptr_t));
offset += sizeof(uintptr_t);
}
}
s390_br (code, s390_r1);
g_free (cinfo);
g_assert ((code - start) <= size);
mono_arch_flush_icache (start, size);
MONO_PROFILER_RAISE (jit_code_buffer, (start, code - start, MONO_PROFILER_CODE_BUFFER_DELEGATE_INVOKE, NULL));
}
if (has_target) {
*info = mono_tramp_info_create ("delegate_invoke_impl_has_target", start, code - start, NULL, NULL);
} else {
char *name = g_strdup_printf ("delegate_invoke_impl_target_%d", sig->param_count);
*info = mono_tramp_info_create (name, start, code - start, NULL, NULL);
g_free (name);
}
return start;
}
/*========================= End of Function ========================*/
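/*
 * Pseudo-C equivalent of the has_target thunk emitted above (illustrative
 * only): the delegate object arrives as the `this' argument in r2; the
 * thunk tail-jumps to method_ptr with the delegate's target substituted.
 */
#if 0
typedef void (*example_invoke_fn) (gpointer this_obj);

static void
example_delegate_invoke_has_target (MonoDelegate *del)
{
	/* r1 <- method_ptr, r2 <- target, then BR r1 */
	((example_invoke_fn) del->method_ptr) (del->target);
}
#endif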
/**
*
* @brief Architecture-specific delegation trampolines processing
*
* @returns List of trampolines
*
* Return a list of MonoTrampInfo structures for the delegate invoke impl trampolines.
*/
GSList*
mono_arch_get_delegate_invoke_impls (void)
{
GSList *res = NULL;
MonoTrampInfo *info;
	get_delegate_invoke_impl (&info, TRUE, NULL, TRUE);
res = g_slist_prepend (res, info);
#if 0
for (int i = 0; i <= MAX_ARCH_DELEGATE_PARAMS; ++i) {
get_delegate_invoke_impl (&info, FALSE, NULL, TRUE);
res = g_slist_prepend (res, info);
}
#endif
return res;
}
/*========================= End of Function ========================*/
/**
*
* @brief Architecture-specific delegation trampoline processing
*
* @param[in] @sig - Method signature
* @param[in] @has_target - Whether delegation contains a target
* @returns Trampoline
*
* Return a pointer to a delegation trampoline
*/
gpointer
mono_arch_get_delegate_invoke_impl (MonoMethodSignature *sig, gboolean has_target)
{
guint8 *code, *start;
if (sig->param_count > MAX_ARCH_DELEGATE_PARAMS)
return NULL;
/* FIXME: Support more cases */
if (MONO_TYPE_ISSTRUCT (mini_get_underlying_type (sig->ret)))
return NULL;
if (has_target) {
static guint8* cached = NULL;
if (cached)
return cached;
if (mono_ee_features.use_aot_trampolines) {
start = (guint8 *) mono_aot_get_trampoline ("delegate_invoke_impl_has_target");
} else {
MonoTrampInfo *info;
start = get_delegate_invoke_impl (&info, TRUE, sig, FALSE);
mono_tramp_info_register (info, NULL);
}
mono_memory_barrier ();
cached = start;
} else {
static guint8* cache [MAX_ARCH_DELEGATE_PARAMS + 1] = {NULL};
int i;
if (sig->param_count > MAX_ARCH_DELEGATE_PARAMS)
return NULL;
for (i = 0; i < sig->param_count; ++i)
if (!mono_is_regsize_var (sig->params [i]))
return NULL;
code = cache [sig->param_count];
if (code)
return code;
if (mono_ee_features.use_aot_trampolines) {
char *name = g_strdup_printf ("delegate_invoke_impl_target_%d", sig->param_count);
start = (guint8 *) mono_aot_get_trampoline (name);
g_free (name);
} else {
MonoTrampInfo *info;
start = get_delegate_invoke_impl (&info, FALSE, sig, FALSE);
mono_tramp_info_register (info, NULL);
}
mono_memory_barrier ();
cache [sig->param_count] = start;
}
return start;
}
/*========================= End of Function ========================*/
/**
*
* @brief Architecture-specific delegation virtual trampoline processing
*
* @param[in] @sig - Method signature
* @param[in] @method - Method
* @param[in] @offset - Offset into vtable
 * @param[in] @load_imt_reg - Whether to load the IMT register
* @returns Trampoline
*
* Return a pointer to a delegation virtual trampoline
*/
gpointer
mono_arch_get_delegate_virtual_invoke_impl (MonoMethodSignature *sig, MonoMethod *method,
int offset, gboolean load_imt_reg)
{
guint8 *code, *start;
int size = 40;
start = code = (guint8 *) mono_global_codeman_reserve (size);
/*
* Replace the "this" argument with the target
*/
s390_lgr (code, s390_r1, s390_r2);
s390_lg (code, s390_r2, 0, s390_r1, MONO_STRUCT_OFFSET(MonoDelegate, target));
/*
* Load the IMT register, if needed
*/
if (load_imt_reg) {
s390_lg (code, MONO_ARCH_IMT_REG, 0, s390_r1, MONO_STRUCT_OFFSET(MonoDelegate, method));
}
/*
* Load the vTable
*/
s390_lg (code, s390_r1, 0, s390_r2, MONO_STRUCT_OFFSET(MonoObject, vtable));
if (offset != 0) {
s390_agfi(code, s390_r1, offset);
}
s390_lg (code, s390_r1, 0, s390_r1, 0);
s390_br (code, s390_r1);
mono_arch_flush_icache (start, code - start);
MONO_PROFILER_RAISE (jit_code_buffer, (start, code - start, MONO_PROFILER_CODE_BUFFER_DELEGATE_INVOKE, NULL));
return(start);
}
/*========================= End of Function ========================*/
/**
*
* @brief Architecture-specific build of IMT trampoline
*
* @param[in] @vtable - Mono VTable
* @param[in] @imt_entries - List of IMT check items
* @param[in] @count - Count of items
* @param[in] @fail_tramp - Pointer to a failure trampoline
* @returns Trampoline
*
* Return a pointer to an IMT trampoline
*/
gpointer
mono_arch_build_imt_trampoline (MonoVTable *vtable,
MonoIMTCheckItem **imt_entries, int count,
gpointer fail_tramp)
{
int i;
int size = 0;
guchar *code, *start;
MonoMemoryManager *mem_manager = m_class_get_mem_manager (vtable->klass);
for (i = 0; i < count; ++i) {
MonoIMTCheckItem *item = imt_entries [i];
if (item->is_equals) {
if (item->check_target_idx) {
if (!item->compare_done)
item->chunk_size += CMP_SIZE + JUMP_SIZE;
if (item->has_target_code)
item->chunk_size += BR_SIZE + JUMP_SIZE + LOADCON_SIZE;
else
item->chunk_size += BR_SIZE + JUMP_SIZE + LOADCON_SIZE +
LOAD_SIZE;
} else {
if (fail_tramp) {
item->chunk_size += CMP_SIZE + 2 * BR_SIZE + JUMP_SIZE +
2 * LOADCON_SIZE;
if (!item->has_target_code)
item->chunk_size += LOAD_SIZE;
} else {
item->chunk_size += LOADCON_SIZE + LOAD_SIZE + BR_SIZE;
#if ENABLE_WRONG_METHOD_CHECK
item->chunk_size += CMP_SIZE + JUMP_SIZE;
#endif
}
}
} else {
item->chunk_size += CMP_SIZE + JUMP_SIZE;
imt_entries [item->check_target_idx]->compare_done = TRUE;
}
size += item->chunk_size;
}
if (fail_tramp) {
code = (guint8 *)mini_alloc_generic_virtual_trampoline (vtable, size);
} else {
code = mono_mem_manager_code_reserve (mem_manager, size);
}
start = code;
for (i = 0; i < count; ++i) {
MonoIMTCheckItem *item = imt_entries [i];
item->code_target = (guint8 *) code;
if (item->is_equals) {
if (item->check_target_idx) {
if (!item->compare_done) {
S390_SET (code, s390_r0, item->key);
s390_cgr (code, s390_r0, MONO_ARCH_IMT_REG);
}
item->jmp_code = (guint8*) code;
s390_jcl (code, S390_CC_NE, 0);
if (item->has_target_code) {
S390_SET (code, s390_r1, item->value.target_code);
} else {
S390_SET (code, s390_r1, (&(vtable->vtable [item->value.vtable_slot])));
s390_lg (code, s390_r1, 0, s390_r1, 0);
}
s390_br (code, s390_r1);
} else {
if (fail_tramp) {
gint64 target;
S390_SET (code, s390_r0, item->key);
s390_cgr (code, s390_r0, MONO_ARCH_IMT_REG);
item->jmp_code = (guint8*) code;
s390_jcl (code, S390_CC_NE, 0);
if (item->has_target_code) {
S390_SET (code, s390_r1, item->value.target_code);
} else {
g_assert (vtable);
S390_SET (code, s390_r1,
(&(vtable->vtable [item->value.vtable_slot])));
s390_lg (code, s390_r1, 0, s390_r1, 0);
}
s390_br (code, s390_r1);
target = (gint64) S390_RELATIVE(code, item->jmp_code);
s390_patch_rel(item->jmp_code+2, target);
S390_SET (code, s390_r1, fail_tramp);
s390_br (code, s390_r1);
item->jmp_code = NULL;
} else {
/* enable the commented code to assert on wrong method */
#if ENABLE_WRONG_METHOD_CHECK
g_assert_not_reached ();
#endif
S390_SET (code, s390_r1, (&(vtable->vtable [item->value.vtable_slot])));
s390_lg (code, s390_r1, 0, s390_r1, 0);
s390_br (code, s390_r1);
}
}
} else {
S390_SET (code, s390_r0, item->key);
s390_cgr (code, MONO_ARCH_IMT_REG, s390_r0);
item->jmp_code = (guint8 *) code;
s390_jcl (code, S390_CC_GE, 0);
}
}
/*
* patch the branches to get to the target items
*/
for (i = 0; i < count; ++i) {
MonoIMTCheckItem *item = imt_entries [i];
if (item->jmp_code) {
if (item->check_target_idx) {
gint64 offset;
offset = (gint64) S390_RELATIVE(imt_entries [item->check_target_idx]->code_target,
item->jmp_code);
s390_patch_rel ((guchar *) item->jmp_code + 2, (guint64) offset);
}
}
}
mono_arch_flush_icache ((guint8*)start, (code - start));
MONO_PROFILER_RAISE (jit_code_buffer, (start, code - start, MONO_PROFILER_CODE_BUFFER_IMT_TRAMPOLINE, NULL));
if (!fail_tramp)
UnlockedAdd (&mono_stats.imt_trampolines_size, code - start);
g_assert (code - start <= size);
mono_tramp_info_register (mono_tramp_info_create (NULL, start, code - start, NULL, NULL), mem_manager);
return (start);
}
/*========================= End of Function ========================*/
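/*
 * Pseudo-C equivalent of a single generated IMT chunk (illustrative only):
 * compare the key in MONO_ARCH_IMT_REG against the item's key and either
 * dispatch through the vtable slot (or target code) or fall through to the
 * next chunk's code.
 */
#if 0
static gpointer
example_imt_chunk (gpointer imt_arg, MonoIMTCheckItem *item, MonoVTable *vtable)
{
	if (imt_arg == item->key)
		return item->has_target_code
			? item->value.target_code
			: vtable->vtable [item->value.vtable_slot];
	return NULL;	/* i.e. branch to the next chunk */
}
#endif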
/**
*
* @brief Architecture-specific return of pointer to IMT method
*
* @param[in] @regs - Context registers
* @param[in] @code - Current location
* @returns Pointer to IMT method
*
* Extract the value of the IMT register from the context
*/
MonoMethod*
mono_arch_find_imt_method (host_mgreg_t *regs, guint8 *code)
{
return ((MonoMethod *) regs [MONO_ARCH_IMT_REG]);
}
/*========================= End of Function ========================*/
/**
*
* @brief Architecture-specific return of pointer static call vtable.
*
* @param[in] @regs - Context registers
* @param[in] @code - Current location
* @returns Pointer to static call vtable
*
* Extract the value of the RGCTX register from the context which
* points to the static call vtable.
*/
MonoVTable*
mono_arch_find_static_call_vtable (host_mgreg_t *regs, guint8 *code)
{
return (MonoVTable*)(gsize) regs [MONO_ARCH_RGCTX_REG];
}
/*========================= End of Function ========================*/
/**
*
* @brief Architecture-specific return of unwind bytecode for DWARF CIE
*
* @returns Unwind byte code
*
* Returns the unwind bytecode for DWARF CIE
*/
GSList*
mono_arch_get_cie_program (void)
{
GSList *l = NULL;
mono_add_unwind_op_def_cfa (l, 0, 0, STK_BASE, S390_CFA_OFFSET);
return(l);
}
/*========================= End of Function ========================*/
#ifdef MONO_ARCH_SOFT_DEBUG_SUPPORTED
/**
*
* @brief Architecture-specific setting of a breakpoint
*
* @param[in] @ji - Mono JIT Information
 * @param[in] @ip - Instruction pointer
*
* Set a breakpoint at the native code corresponding to JI at NATIVE_OFFSET.
* The location should contain code emitted by OP_SEQ_POINT.
*/
void
mono_arch_set_breakpoint (MonoJitInfo *ji, guint8 *ip)
{
guint8 *bp = ip;
/* IP should point to a LGHI R1,0 */
g_assert (bp[0] == 0xa7);
/* Replace it with a LGHI R1,1 */
s390_lghi (bp, s390_r1, 1);
}
/*========================= End of Function ========================*/
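/*
 * Assumed encoding detail for the toggle above: LGHI is RI-format
 * (0xa7x9 plus a 16-bit immediate), so arming/disarming rewrites the
 * seq-point site between
 *   a7 19 00 00   lghi %r1,0   (disarmed)
 *   a7 19 00 01   lghi %r1,1   (armed)
 * and the OP_SEQ_POINT sequence tests r1 to decide whether to enter the
 * breakpoint trampoline.
 */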
/**
*
* @brief Architecture-specific clearing of a breakpoint
*
* @param[in] @ji - Mono JIT Information
 * @param[in] @ip - Instruction pointer
*
* Replace the breakpoint with a no-operation.
*/
void
mono_arch_clear_breakpoint (MonoJitInfo *ji, guint8 *ip)
{
guint8 *bp = ip;
/* IP should point to a LGHI R1,1 */
g_assert (bp[0] == 0xa7);
/* Replace it with a LGHI R1,0 */
s390_lghi (bp, s390_r1, 0);
}
/*========================= End of Function ========================*/
/**
*
* @brief Architecture-specific check if this is a breakpoint event
*
* @param[in] @info - Signal information
* @param[in] @sigctx - Signal context
* @returns True if this is a breakpoint event
*
* We use soft breakpoints so always return FALSE
*/
gboolean
mono_arch_is_breakpoint_event (void *info, void *sigctx)
{
/* We use soft breakpoints on s390x */
return FALSE;
}
/*========================= End of Function ========================*/
/**
*
* @brief Architecture-specific skip of a breakpoint
*
* @param[in] @ctx - Mono Context
* @param[in] @ji - Mono JIT information
*
 * We use soft breakpoints, so we should never get here
*/
void
mono_arch_skip_breakpoint (MonoContext *ctx, MonoJitInfo *ji)
{
g_assert_not_reached ();
}
/*========================= End of Function ========================*/
/**
*
* @brief Architecture-specific start of single stepping
*
 * Install the single-step trampoline to enable single stepping
*/
void
mono_arch_start_single_stepping (void)
{
ss_trampoline = mini_get_single_step_trampoline();
}
/*========================= End of Function ========================*/
/**
*
* @brief Architecture-specific stop of single stepping
*
 * Clear the single-step trampoline to disable single stepping
*/
void
mono_arch_stop_single_stepping (void)
{
ss_trampoline = NULL;
}
/*========================= End of Function ========================*/
/**
*
* @brief Architecture-specific check if single stepping event
*
* @param[in] @info - Signal information
* @param[in] @sigctx - Signal context
* @returns True if this is a single stepping event
*
* Return whether the machine state in sigctx corresponds to a single step event.
* On s390x we use soft breakpoints so return FALSE
*/
gboolean
mono_arch_is_single_step_event (void *info, void *sigctx)
{
/* We use soft breakpoints on s390x */
return FALSE;
}
/*========================= End of Function ========================*/
/**
*
* @brief Architecture-specific skip of a single stepping event
*
* @param[in] @ctx - Mono Context
*
* Modify the ctx so the IP is placed after the single step trigger
* instruction, so that the instruction is not executed again.
* On s390x we use soft breakpoints so we shouldn't get here
*/
void
mono_arch_skip_single_step (MonoContext *ctx)
{
g_assert_not_reached();
}
/*========================= End of Function ========================*/
/**
*
* @brief Architecture-specific creation of sequence point information
*
* @param[in] @code - Current location pointer
* @returns Sequence Point Information
*
 * Return a pointer to a data structure which is used by the sequence
 * point implementation in AOTed code.
*/
SeqPointInfo *
mono_arch_get_seq_point_info (guint8 *code)
{
SeqPointInfo *info;
MonoJitInfo *ji;
MonoJitMemoryManager *jit_mm;
jit_mm = get_default_jit_mm ();
jit_mm_lock (jit_mm);
info = (SeqPointInfo *)g_hash_table_lookup (jit_mm->arch_seq_points, code);
jit_mm_unlock (jit_mm);
if (!info) {
ji = mini_jit_info_table_find (code);
g_assert (ji);
// FIXME: Optimize the size
info = (SeqPointInfo *)g_malloc0 (sizeof (SeqPointInfo) + (ji->code_size * sizeof (gpointer)));
info->ss_tramp_addr = &ss_trampoline;
jit_mm_lock (jit_mm);
g_hash_table_insert (jit_mm->arch_seq_points, code, info);
jit_mm_unlock (jit_mm);
}
return info;
}
/*========================= End of Function ========================*/
#endif
/**
*
* @brief Architecture-specific check of supported operation codes
*
* @param[in] @opcode - Operation code to be checked
* @returns True if operation code is supported
*
* Check if a mono operation is supported in hardware.
*/
gboolean
mono_arch_opcode_supported (int opcode)
{
switch (opcode) {
case OP_ATOMIC_ADD_I4:
case OP_ATOMIC_ADD_I8:
case OP_ATOMIC_EXCHANGE_I4:
case OP_ATOMIC_EXCHANGE_I8:
return TRUE;
default:
return FALSE;
}
}
/*========================= End of Function ========================*/
#ifndef DISABLE_JIT
/**
*
* @brief Architecture-specific check of tailcall support
*
* @param[in] @cfg - Mono Compile control block
* @param[in] @caller_sig - Signature of caller
* @param[in] @callee_sig - Signature of callee
* @param[in] @virtual_ - Whether this a virtual call
* @returns True if the tailcall operation is supported
*
* Check if a tailcall may be made from caller to callee based on a
* number of conditions including parameter types and stack sizes
*/
gboolean
mono_arch_tailcall_supported (MonoCompile *cfg, MonoMethodSignature *caller_sig, MonoMethodSignature *callee_sig, gboolean virtual_)
{
g_assert (caller_sig);
g_assert (callee_sig);
CallInfo *caller_info = get_call_info (NULL, caller_sig);
CallInfo *callee_info = get_call_info (NULL, callee_sig);
gboolean res = IS_SUPPORTED_TAILCALL (callee_info->stack_usage <= caller_info->stack_usage);
	// Any call that would result in parameters being placed on the stack cannot be "tailed" as it may
	// result in the caller's parameter variables being overwritten.
ArgInfo const * const ainfo = callee_info->args + callee_sig->hasthis;
for (int i = 0; res && i < callee_sig->param_count; ++i) {
switch(ainfo[i].regtype) {
case RegTypeGeneral :
// R6 is both used as argument register and call-saved
// This means we cannot use a tail call if R6 is needed
if (ainfo[i].reg == S390_LAST_ARG_REG)
res = FALSE;
else
res = TRUE;
break;
case RegTypeFP :
case RegTypeFPR4 :
case RegTypeStructByValInFP :
res = TRUE;
break;
case RegTypeBase :
res = FALSE;
break;
case RegTypeStructByAddr :
if (ainfo[i].reg == STK_BASE || ainfo[i].reg == S390_LAST_ARG_REG)
res = FALSE;
else
res = TRUE;
break;
case RegTypeStructByVal :
if (ainfo[i].reg == STK_BASE || ainfo[i].reg == S390_LAST_ARG_REG)
res = FALSE;
else {
switch(ainfo[i].size) {
case 0: case 1: case 2: case 4: case 8:
res = TRUE;
break;
default:
res = FALSE;
}
}
break;
}
}
g_free (caller_info);
g_free (callee_info);
return(res);
}
/*========================= End of Function ========================*/
#endif
/**
*
* @brief Architecture-specific load function
*
 * @param[in] @jit_icall_id - JIT icall identifier
 * @returns Pointer to load function trampoline
 *
 * A no-operation on s390x until/unless it supports AOT.
*/
gpointer
mono_arch_load_function (MonoJitICallId jit_icall_id)
{
return NULL;
}
/*========================= End of Function ========================*/
/**
*
* @brief Emit call to thunked code
*
* @param[in] @cfg - configuration data
* @param[inout] @code - where to emit call
 * @param[in] @type - patch information type
 * @param[in] @target - call target
* @returns Pointer to next code area
*
*/
static __inline__ guint8*
emit_call (MonoCompile *cfg, guint8 *code, MonoJumpInfoType type, gconstpointer target)
{
mono_add_patch_info_rel (cfg, code-cfg->native_code, type,
target, MONO_R_S390_THUNKED);
S390_CALL_TEMPLATE (code, s390_r14);
cfg->thunk_area += THUNK_SIZE;
return code;
}
/*========================= End of Function ========================*/
/**
*
* @brief Emit thunk for an indirect call
*
* @param[inout] @code - where to emit thunk
* @param[in] @target - thunk target
* @returns Pointer to next code area
*
*/
static guint8*
emit_thunk (guint8 *code, gconstpointer target)
{
*(guint64*)code = (guint64)target;
code += sizeof (guint64);
return code;
}
/*========================= End of Function ========================*/
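/*
 * Illustrative view of a thunk slot (not compiled in): each THUNK_SIZE
 * (8 byte) entry holds only the absolute target address; the call site
 * loads it PC-relative with LGRL and branches via BASR, see
 * S390_CALL_TEMPLATE in mini-s390x.h.
 */
#if 0
static gpointer
example_read_thunk (guint8 *thunk)
{
	return (gpointer) *(guint64 *) thunk;
}
#endif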
/**
*
* @brief Create thunk
*
* @param[in] @cfg - Compiler configuration
 * @param[in] @ip - call site to patch
 * @param[inout] @code - start of the method's code
 * @param[in] @target - thunk target
*
* Create a new thunk
*
*/
static void
create_thunk (MonoCompile *cfg, guint8 *ip, guint8 *code, gpointer target)
{
guint8 *thunks;
int thunks_size;
/*
* This can be called multiple times during JITting,
* save the current position in cfg->arch to avoid
 * doing an O(n^2) search.
*/
if (!cfg->arch.thunks) {
cfg->arch.thunks = cfg->thunks;
cfg->arch.thunks_size = cfg->thunk_area;
}
thunks = (guint8 *) cfg->arch.thunks;
thunks_size = cfg->arch.thunks_size;
if (!thunks_size) {
g_print ("thunk failed %p->%p, thunk space=%d method %s", code, target, thunks_size, mono_method_full_name (cfg->method, TRUE));
g_assert_not_reached ();
}
g_assert (*(guint64 *)thunks == 0);
emit_thunk (thunks, target);
cfg->arch.thunks += THUNK_SIZE;
cfg->arch.thunks_size -= THUNK_SIZE;
S390_EMIT_CALL(ip, thunks);
}
/*========================= End of Function ========================*/
/**
*
* @brief Update thunk
*
* @param[in] @cfg - Compiler configuration
 * @param[in] @code - call site referencing the thunk
* @param[in] @target - thunk target
*
* Update an existing thunk
*
*/
static void
update_thunk (MonoCompile *cfg, guint8 *code, gpointer target)
{
MonoJitInfo *ji;
MonoThunkJitInfo *info;
guint8 *thunks;
guint8 *orig_target;
guint8 *target_thunk;
int thunks_size;
ji = mini_jit_info_table_find ((char*)code);
g_assert (ji);
info = mono_jit_info_get_thunk_info (ji);
g_assert (info);
thunks = (guint8*)ji->code_start + info->thunks_offset;
thunks_size = info->thunks_size;
/*
 * We're pointing at the start of the jump to the thunk,
* but mono_arch_get_call_target expects we're pointing
* after the branch so we adjust
*/
orig_target = mono_arch_get_call_target (code + 6);
target_thunk = NULL;
if (orig_target >= thunks && orig_target < thunks + thunks_size) {
/* The call already points to a thunk, because of trampolines etc. */
target_thunk = orig_target;
} else {
g_print ("thunk failed %p->%p, thunk space=%d method %s",
code, target, thunks_size,
cfg ? mono_method_full_name (cfg->method, TRUE)
: mono_method_full_name (jinfo_get_method (ji), TRUE));
g_assert_not_reached ();
}
emit_thunk (target_thunk, target);
}
/*========================= End of Function ========================*/
| 1 |
dotnet/runtime | 66,006 | Revert "[mono][jit] Remove support for -O=-float32, i.e. treating r4 values a…" | Reverts this as x86 still uses it.
| vargaz | 2022-03-01T15:15:53Z | 2022-03-01T20:09:47Z | af71404d9a3b38e63408af47cc847ac1a1fcf4e1 | 2dd232a53c38ac874b15fe504df275b660988294 | Revert "[mono][jit] Remove support for -O=-float32, i.e. treating r4 values a…". Reverts this as x86 still uses it.
| ./src/mono/mono/mini/mini-s390x.h | /**
* \file
*/
#ifndef __MONO_MINI_S390X_H__
#define __MONO_MINI_S390X_H__
#include <mono/arch/s390x/s390x-codegen.h>
#include <mono/utils/mono-context.h>
#include <signal.h>
#define MONO_ARCH_CPU_SPEC mono_s390x_cpu_desc
#define MONO_MAX_IREGS 16
#define MONO_MAX_FREGS 16
/*-------------------------------------------*/
/* Parameters used by the register allocator */
/*-------------------------------------------*/
struct MonoLMF {
gpointer previous_lmf;
gpointer lmf_addr;
MonoMethod *method;
gulong ebp;
gulong eip;
gulong pregs[6];
gulong gregs[16];
gdouble fregs[16];
};
/**
* Platform-specific compile control information
*/
typedef struct MonoCompileArch {
int bkchain_reg; /** Register being used as stack backchain */
uint32_t used_fp_regs; /** Floating point register use mask */
int fpSize; /** Size of floating point save area */
MonoInst *ss_tramp_var; /** Single-step variable */
MonoInst *bp_tramp_var; /** Breakpoint variable */
CallInfo *cinfo; /** Caller information */
guint8 *thunks; /** Thunking area */
int thunks_size; /** Size of thunking area */
} MonoCompileArch;
typedef struct
{
void *prev;
void *unused[5];
void *regs[8];
void *return_address;
} MonoS390StackFrame;
/* Structure used by the sequence points */
struct SeqPointInfo {
gpointer ss_tramp_addr;
gpointer bp_addrs [MONO_ZERO_LEN_ARRAY];
};
#define MONO_ARCH_SIGSEGV_ON_ALTSTACK 1
#define MONO_ARCH_NO_EMULATE_LONG_MUL_OPTS 1
#define MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS 1
#define MONO_ARCH_HAVE_IS_INT_OVERFLOW 1
#define MONO_ARCH_NEED_DIV_CHECK 1
#define MONO_ARCH_SIGNAL_STACK_SIZE 256*1024
#define MONO_ARCH_HAVE_DECOMPOSE_OPTS 1
#define MONO_ARCH_IMT_REG s390_r9
#define MONO_ARCH_VTABLE_REG S390_FIRST_ARG_REG
#define MONO_ARCH_RGCTX_REG MONO_ARCH_IMT_REG
#define MONO_ARCH_SOFT_DEBUG_SUPPORTED 1
#define MONO_ARCH_HAVE_CONTEXT_SET_INT_REG 1
#define MONO_ARCH_USE_SIGACTION 1
#define MONO_ARCH_GC_MAPS_SUPPORTED 1
#define MONO_ARCH_GSHARED_SUPPORTED 1
#define MONO_ARCH_MONITOR_ENTER_ADJUSTMENT 1
#define MONO_ARCH_HAVE_INVALIDATE_METHOD 1
#define MONO_ARCH_HAVE_OP_GENERIC_CLASS_INIT 1
#define MONO_ARCH_HAVE_SETUP_ASYNC_CALLBACK 1
#define MONO_ARCH_HAVE_TRACK_FPREGS 1
#define MONO_ARCH_HAVE_OPTIMIZED_DIV 1
#define MONO_ARCH_HAVE_OP_TAILCALL_MEMBASE 1
#define MONO_ARCH_HAVE_OP_TAILCALL_REG 1
#define MONO_ARCH_HAVE_SDB_TRAMPOLINES 1
#define MONO_ARCH_HAVE_SETUP_RESUME_FROM_SIGNAL_HANDLER_CTX 1
#define MONO_ARCH_HAVE_UNWIND_BACKTRACE 1
#define S390_STACK_ALIGNMENT 8
#define S390_FIRST_ARG_REG s390_r2
#define S390_LAST_ARG_REG s390_r6
#define S390_FIRST_FPARG_REG s390_f0
#define S390_LAST_FPARG_REG s390_f6
#define S390_FP_SAVE_MASK 0xf0
/* Thunk: 8 byte pointer */
#define THUNK_SIZE 8
/* Relocation types */
#define MONO_R_S390_RELINS 1 /* JGxx - relative jump */
#define MONO_R_S390_THUNKED 2 /* Thunked call */
#define MONO_R_S390_DIRECT 3 /* Direct call */
#define MONO_R_S390_ADDR 4 /* Address */
#define MONO_R_S390_SWITCH 5 /* Switch */
#define MONO_R_S390_REL 6 /* Relative displacement */
/*===============================================*/
/* Definitions used by mini-codegen.c */
/*===============================================*/
/*------------------------------------------------------*/
/* use s390_r2-s390_r6 as parm registers */
/* s390_r0, s390_r1, s390_r12, s390_r13 used internally */
/* s390_r8..s390_r10 are used for global regalloc */
/* -- except for s390_r9 which is used as IMT pointer */
/* s390_r11 is sometimes used as the frame pointer */
/* s390_r15 is the stack pointer */
/*------------------------------------------------------*/
#define MONO_ARCH_CALLEE_REGS (0x00fc)
#define MONO_ARCH_CALLEE_SAVED_REGS 0xfd00
/*----------------------------------------*/
/* use s390_f1/s390_f3-s390_f15 as temps */
/*----------------------------------------*/
#define MONO_ARCH_CALLEE_FREGS (0xfffe)
#define MONO_ARCH_CALLEE_SAVED_FREGS 0
#define MONO_ARCH_USE_FPSTACK FALSE
#define MONO_ARCH_INST_FIXED_REG(desc) ((desc == 'o') ? s390_r2 : \
((desc == 'g') ? s390_f0 : \
((desc == 'A') ? S390_FIRST_ARG_REG : -1)))
#define MONO_ARCH_INST_IS_FLOAT(desc) ((desc == 'f') || (desc == 'g'))
#define MONO_ARCH_INST_SREG2_MASK(ins) (0)
#define MONO_ARCH_INST_IS_REGPAIR(desc) (0)
#define MONO_ARCH_INST_REGPAIR_REG2(desc,hr) -1
#define MONO_ARCH_IS_GLOBAL_IREG(reg) 0
#define MONO_ARCH_FRAME_ALIGNMENT 8
#define MONO_ARCH_CODE_ALIGNMENT 32
/*-----------------------------------------------*/
/* SIMD Related Definitions */
/*-----------------------------------------------*/
#define MONO_MAX_XREGS 31
#define MONO_ARCH_CALLEE_XREGS 0x0
#define MONO_ARCH_CALLEE_SAVED_XREGS 0x0
// Does the ABI have a volatile non-parameter register, so tailcall
// can pass context to generics or interfaces?
#define MONO_ARCH_HAVE_VOLATILE_NON_PARAM_REGISTER 0 // FIXME?
/*-----------------------------------------------*/
/* Macros used to generate instructions */
/*-----------------------------------------------*/
#define S390_OFFSET(b, t) (guchar *) ((guint64) (b) - (guint64) (t))
#define S390_RELATIVE(b, t) (guchar *) ((((guint64) (b) - (guint64) (t))) / 2)
#define CODEPTR(c, o) (o) = (short *) ((guint64) c - 2)
#define PTRSLOT(c, o) *(o) = (short) ((guint64) c - (guint64) (o) + 2)/2
#define S390_CC_EQ 8
#define S390_ALIGN(v, a) (((a) > 0 ? (((v) + ((a) - 1)) & ~((a) - 1)) : (v)))
#define MONO_INIT_CONTEXT_FROM_FUNC(ctx,func) do { \
void *sp = __builtin_frame_address (0); \
MONO_CONTEXT_SET_BP ((ctx), sp); \
MONO_CONTEXT_SET_SP ((ctx), sp); \
MONO_CONTEXT_SET_IP ((ctx), func); \
} while (0)
#define MONO_ARCH_INIT_TOP_LMF_ENTRY(lmf) do { (lmf)->ebp = -1; } while (0)
/**
*
* @brief Patch the code with a given offset
* @param[in] @code - Area to patch
* @param[in] @target - Value to patch with
*
*/
static void inline
s390_patch_rel (guchar *code, guint64 target)
{
guint32 *offset = (guint32 *) code;
if (target != 0) {
*offset = (guint32) target;
}
}
/*========================= End of Function ========================*/
/**
*
* @brief Patch the code with a given address
* @param[in] @code - Area to patch
* @param[in] @target - Address to patch with
*
*/
static void inline
s390_patch_addr (guchar *code, guint64 target)
{
guint64 *offset = (guint64 *) code;
if (target != 0) {
*offset = target;
}
}
/*========================= End of Function ========================*/
/*------------------------------------------------------------------*/
/* */
/* Name - restoreLMF */
/* */
/* Function - Restore the LMF state prior to exiting a method. */
/* */
/*------------------------------------------------------------------*/
#define restoreLMF(code, frame_reg, stack_usage) do \
{ \
int lmfOffset = 0; \
\
s390_lgr (code, s390_r13, frame_reg); \
\
lmfOffset = stack_usage - sizeof(MonoLMF); \
\
/*-------------------------------------------------*/ \
/* r13 = my lmf */ \
/*-------------------------------------------------*/ \
s390_aghi (code, s390_r13, lmfOffset); \
\
/*-------------------------------------------------*/ \
/* r6 = &jit_tls->lmf */ \
/*-------------------------------------------------*/ \
s390_lg (code, s390_r6, 0, s390_r13, \
G_STRUCT_OFFSET(MonoLMF, lmf_addr)); \
\
/*-------------------------------------------------*/ \
/* r0 = lmf.previous_lmf */ \
/*-------------------------------------------------*/ \
s390_lg (code, s390_r0, 0, s390_r13, \
G_STRUCT_OFFSET(MonoLMF, previous_lmf)); \
\
/*-------------------------------------------------*/ \
/* jit_tls->lmf = previous_lmf */ \
/*-------------------------------------------------*/ \
s390_lg (code, s390_r13, 0, s390_r6, 0); \
s390_stg (code, s390_r0, 0, s390_r6, 0); \
} while (0)
/*========================= End of Function ========================*/
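/*
 * Pseudo-C equivalent of restoreLMF (illustrative only): unlink this
 * frame's MonoLMF from the per-thread LMF chain on method exit.
 */
#if 0
static void
example_restore_lmf (MonoLMF *lmf)
{
	MonoLMF **lmf_addr = (MonoLMF **) lmf->lmf_addr;

	*lmf_addr = (MonoLMF *) lmf->previous_lmf;
}
#endif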
#define S390_SET(loc, dr, v) \
do { \
guint64 val = (guint64) v; \
if (s390_is_imm16(val)) { \
s390_lghi(loc, dr, val); \
} else if (s390_is_uimm16(val)) { \
s390_llill(loc, dr, val); \
} else if (s390_is_imm32(val)) { \
s390_lgfi(loc, dr, val); \
} else if (s390_is_uimm32(val)) { \
s390_llilf(loc, dr, val); \
} else { \
guint32 hi = (val) >> 32; \
guint32 lo = (val) & 0xffffffff; \
s390_iihf(loc, dr, hi); \
s390_iilf(loc, dr, lo); \
} \
} while (0)
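/*
 * Worked example (illustrative): a constant such as 0x123456789abcdef0
 * fits none of the 16/32-bit forms, so S390_SET expands to the pair
 *   iihf dr, 0x12345678   (insert high 32 bits)
 *   iilf dr, 0x9abcdef0   (insert low 32 bits)
 * while e.g. 0x7fff uses a single lghi and 0xffff a single llill.
 */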
#define S390_LONG(loc, opy, op, r, ix, br, off) \
if (s390_is_imm20(off)) { \
s390_##opy (loc, r, ix, br, off); \
} else { \
if (ix == 0) { \
S390_SET(loc, s390_r13, off); \
s390_la (loc, s390_r13, s390_r13, br, 0); \
} else { \
s390_la (loc, s390_r13, ix, br, 0); \
S390_SET (loc, s390_r0, off); \
s390_agr (loc, s390_r13, s390_r0); \
} \
s390_##op (loc, r, 0, s390_r13, 0); \
}
#define S390_SET_MASK(loc, dr, v) \
do { \
if (s390_is_imm16 (v)) { \
s390_lghi (loc, dr, v); \
} else if (s390_is_imm32 (v)) { \
s390_lgfi (loc, dr, v); \
} else { \
gint64 val = (gint64) v; \
guint32 hi = (val) >> 32; \
guint32 lo = (val) & 0xffffffff; \
s390_iilf(loc, dr, lo); \
s390_iihf(loc, dr, hi); \
} \
} while (0)
#define S390_CALL_TEMPLATE(loc, r) \
do { \
s390_lgrl (loc, r, 0); \
s390_basr (loc, s390_r14, r); \
} while (0)
#define S390_BR_TEMPLATE(loc, r) \
do { \
s390_lgrl (loc, r, 0); \
s390_br (loc, r); \
} while (0)
#define S390_LOAD_TEMPLATE(loc, r) \
do { \
s390_iihf (loc, r, 0); \
s390_iilf (loc, r, 0); \
} while (0)
#define S390_EMIT_CALL(loc, t) \
do { \
uintptr_t rel; \
uintptr_t p = (uintptr_t) loc; \
rel = ((uintptr_t) t - (uintptr_t) loc) >> 1; \
p += 2; \
*(guint32 *) p = rel; \
} while (0)
#define S390_EMIT_LOAD(loc, v) \
do { \
gint64 val = (gint64) v; \
guint32 hi = (val) >> 32; \
guint32 lo = (val) & 0xffffffff; \
uintptr_t p = (uintptr_t) loc; \
p += 2; \
*(guint32 *) p = hi; \
p += 6; \
*(guint32 *) p = lo; \
} while (0)
#endif /* __MONO_MINI_S390X_H__ */
| /**
* \file
*/
#ifndef __MONO_MINI_S390X_H__
#define __MONO_MINI_S390X_H__
#include <mono/arch/s390x/s390x-codegen.h>
#include <mono/utils/mono-context.h>
#include <signal.h>
#define MONO_ARCH_CPU_SPEC mono_s390x_cpu_desc
#define MONO_MAX_IREGS 16
#define MONO_MAX_FREGS 16
/*-------------------------------------------*/
/* Parameters used by the register allocator */
/*-------------------------------------------*/
struct MonoLMF {
gpointer previous_lmf;
gpointer lmf_addr;
MonoMethod *method;
gulong ebp;
gulong eip;
gulong pregs[6];
gulong gregs[16];
gdouble fregs[16];
};
/**
* Platform-specific compile control information
*/
typedef struct MonoCompileArch {
int bkchain_reg; /** Register being used as stack backchain */
uint32_t used_fp_regs; /** Floating point register use mask */
int fpSize; /** Size of floating point save area */
MonoInst *ss_tramp_var; /** Single-step variable */
MonoInst *bp_tramp_var; /** Breakpoint variable */
CallInfo *cinfo; /** Caller information */
guint8 *thunks; /** Thunking area */
int thunks_size; /** Size of thunking area */
} MonoCompileArch;
typedef struct
{
void *prev;
void *unused[5];
void *regs[8];
void *return_address;
} MonoS390StackFrame;
/* Structure used by the sequence points */
struct SeqPointInfo {
gpointer ss_tramp_addr;
gpointer bp_addrs [MONO_ZERO_LEN_ARRAY];
};
#define MONO_ARCH_SIGSEGV_ON_ALTSTACK 1
#define MONO_ARCH_NO_EMULATE_LONG_MUL_OPTS 1
#define MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS 1
#define MONO_ARCH_HAVE_IS_INT_OVERFLOW 1
#define MONO_ARCH_NEED_DIV_CHECK 1
#define MONO_ARCH_SIGNAL_STACK_SIZE 256*1024
#define MONO_ARCH_HAVE_DECOMPOSE_OPTS 1
#define MONO_ARCH_IMT_REG s390_r9
#define MONO_ARCH_VTABLE_REG S390_FIRST_ARG_REG
#define MONO_ARCH_RGCTX_REG MONO_ARCH_IMT_REG
#define MONO_ARCH_SOFT_DEBUG_SUPPORTED 1
#define MONO_ARCH_HAVE_CONTEXT_SET_INT_REG 1
#define MONO_ARCH_USE_SIGACTION 1
#define MONO_ARCH_GC_MAPS_SUPPORTED 1
#define MONO_ARCH_GSHARED_SUPPORTED 1
#define MONO_ARCH_MONITOR_ENTER_ADJUSTMENT 1
#define MONO_ARCH_HAVE_INVALIDATE_METHOD 1
#define MONO_ARCH_HAVE_OP_GENERIC_CLASS_INIT 1
#define MONO_ARCH_HAVE_SETUP_ASYNC_CALLBACK 1
#define MONO_ARCH_HAVE_TRACK_FPREGS 1
#define MONO_ARCH_HAVE_OPTIMIZED_DIV 1
#define MONO_ARCH_HAVE_OP_TAILCALL_MEMBASE 1
#define MONO_ARCH_HAVE_OP_TAILCALL_REG 1
#define MONO_ARCH_HAVE_SDB_TRAMPOLINES 1
#define MONO_ARCH_HAVE_SETUP_RESUME_FROM_SIGNAL_HANDLER_CTX 1
#define MONO_ARCH_HAVE_UNWIND_BACKTRACE 1
#define MONO_ARCH_FLOAT32_SUPPORTED 1
#define S390_STACK_ALIGNMENT 8
#define S390_FIRST_ARG_REG s390_r2
#define S390_LAST_ARG_REG s390_r6
#define S390_FIRST_FPARG_REG s390_f0
#define S390_LAST_FPARG_REG s390_f6
#define S390_FP_SAVE_MASK 0xf0
/* Thunk: 8 byte pointer */
#define THUNK_SIZE 8
/* Relocation types */
#define MONO_R_S390_RELINS 1 /* JGxx - relative jump */
#define MONO_R_S390_THUNKED 2 /* Thunked call */
#define MONO_R_S390_DIRECT 3 /* Direct call */
#define MONO_R_S390_ADDR 4 /* Address */
#define MONO_R_S390_SWITCH 5 /* Switch */
#define MONO_R_S390_REL 6 /* Relative displacement */
/*===============================================*/
/* Definitions used by mini-codegen.c */
/*===============================================*/
/*------------------------------------------------------*/
/* use s390_r2-s390_r6 as parm registers */
/* s390_r0, s390_r1, s390_r12, s390_r13 used internally */
/* s390_r8..s390_r10 are used for global regalloc */
/* -- except for s390_r9 which is used as IMT pointer */
/* s390_r11 is sometimes used as the frame pointer */
/* s390_r15 is the stack pointer */
/*------------------------------------------------------*/
#define MONO_ARCH_CALLEE_REGS (0x00fc)
#define MONO_ARCH_CALLEE_SAVED_REGS 0xfd00
/*----------------------------------------*/
/* use s390_f1/s390_f3-s390_f15 as temps */
/*----------------------------------------*/
#define MONO_ARCH_CALLEE_FREGS (0xfffe)
#define MONO_ARCH_CALLEE_SAVED_FREGS 0
#define MONO_ARCH_USE_FPSTACK FALSE
#define MONO_ARCH_INST_FIXED_REG(desc) ((desc == 'o') ? s390_r2 : \
((desc == 'g') ? s390_f0 : \
((desc == 'A') ? S390_FIRST_ARG_REG : -1)))
#define MONO_ARCH_INST_IS_FLOAT(desc) ((desc == 'f') || (desc == 'g'))
#define MONO_ARCH_INST_SREG2_MASK(ins) (0)
#define MONO_ARCH_INST_IS_REGPAIR(desc) (0)
#define MONO_ARCH_INST_REGPAIR_REG2(desc,hr) -1
#define MONO_ARCH_IS_GLOBAL_IREG(reg) 0
#define MONO_ARCH_FRAME_ALIGNMENT 8
#define MONO_ARCH_CODE_ALIGNMENT 32
/*-----------------------------------------------*/
/* SIMD Related Definitions */
/*-----------------------------------------------*/
#define MONO_MAX_XREGS 31
#define MONO_ARCH_CALLEE_XREGS 0x0
#define MONO_ARCH_CALLEE_SAVED_XREGS 0x0
// Does the ABI have a volatile non-parameter register, so tailcall
// can pass context to generics or interfaces?
#define MONO_ARCH_HAVE_VOLATILE_NON_PARAM_REGISTER 0 // FIXME?
/*-----------------------------------------------*/
/* Macros used to generate instructions */
/*-----------------------------------------------*/
#define S390_OFFSET(b, t) (guchar *) ((guint64) (b) - (guint64) (t))
#define S390_RELATIVE(b, t) (guchar *) ((((guint64) (b) - (guint64) (t))) / 2)
#define CODEPTR(c, o) (o) = (short *) ((guint64) c - 2)
#define PTRSLOT(c, o) *(o) = (short) ((guint64) c - (guint64) (o) + 2)/2
#define S390_CC_EQ 8
#define S390_ALIGN(v, a) (((a) > 0 ? (((v) + ((a) - 1)) & ~((a) - 1)) : (v)))
#define MONO_INIT_CONTEXT_FROM_FUNC(ctx,func) do { \
void *sp = __builtin_frame_address (0); \
MONO_CONTEXT_SET_BP ((ctx), sp); \
MONO_CONTEXT_SET_SP ((ctx), sp); \
MONO_CONTEXT_SET_IP ((ctx), func); \
} while (0)
#define MONO_ARCH_INIT_TOP_LMF_ENTRY(lmf) do { (lmf)->ebp = -1; } while (0)
/**
*
* @brief Patch the code with a given offset
* @param[in] @code - Area to patch
* @param[in] @target - Value to patch with
*
*/
static void inline
s390_patch_rel (guchar *code, guint64 target)
{
guint32 *offset = (guint32 *) code;
if (target != 0) {
*offset = (guint32) target;
}
}
/*========================= End of Function ========================*/
/**
*
* @brief Patch the code with a given address
* @param[in] @code - Area to patch
* @param[in] @target - Address to patch with
*
*/
static void inline
s390_patch_addr (guchar *code, guint64 target)
{
guint64 *offset = (guint64 *) code;
if (target != 0) {
*offset = target;
}
}
/*========================= End of Function ========================*/
/*------------------------------------------------------------------*/
/* */
/* Name - restoreLMF */
/* */
/* Function - Restore the LMF state prior to exiting a method. */
/* */
/*------------------------------------------------------------------*/
#define restoreLMF(code, frame_reg, stack_usage) do \
{ \
int lmfOffset = 0; \
\
s390_lgr (code, s390_r13, frame_reg); \
\
lmfOffset = stack_usage - sizeof(MonoLMF); \
\
/*-------------------------------------------------*/ \
/* r13 = my lmf */ \
/*-------------------------------------------------*/ \
s390_aghi (code, s390_r13, lmfOffset); \
\
/*-------------------------------------------------*/ \
/* r6 = &jit_tls->lmf */ \
/*-------------------------------------------------*/ \
s390_lg (code, s390_r6, 0, s390_r13, \
G_STRUCT_OFFSET(MonoLMF, lmf_addr)); \
\
/*-------------------------------------------------*/ \
/* r0 = lmf.previous_lmf */ \
/*-------------------------------------------------*/ \
s390_lg (code, s390_r0, 0, s390_r13, \
G_STRUCT_OFFSET(MonoLMF, previous_lmf)); \
\
/*-------------------------------------------------*/ \
/* jit_tls->lmf = previous_lmf */ \
/*-------------------------------------------------*/ \
s390_lg (code, s390_r13, 0, s390_r6, 0); \
s390_stg (code, s390_r0, 0, s390_r6, 0); \
} while (0)
/*========================= End of Function ========================*/
#define S390_SET(loc, dr, v) \
do { \
guint64 val = (guint64) v; \
if (s390_is_imm16(val)) { \
s390_lghi(loc, dr, val); \
} else if (s390_is_uimm16(val)) { \
s390_llill(loc, dr, val); \
} else if (s390_is_imm32(val)) { \
s390_lgfi(loc, dr, val); \
} else if (s390_is_uimm32(val)) { \
s390_llilf(loc, dr, val); \
} else { \
guint32 hi = (val) >> 32; \
guint32 lo = (val) & 0xffffffff; \
s390_iihf(loc, dr, hi); \
s390_iilf(loc, dr, lo); \
} \
} while (0)
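/*
 * Illustrative note (assumes the usual s390x immediate-range checks): the
 * ladder above picks the shortest load for the constant, for example
 *   v = 0x7fff              -> s390_lghi   (signed 16-bit immediate)
 *   v = 0xffff              -> s390_llill  (unsigned 16-bit immediate)
 *   v = 0x7fffffff          -> s390_lgfi   (signed 32-bit immediate)
 *   v = 0xffffffff          -> s390_llilf  (unsigned 32-bit immediate)
 *   v = 0x123456789abcdef0  -> s390_iihf + s390_iilf (full 64-bit pair)
 */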
#define S390_LONG(loc, opy, op, r, ix, br, off) \
if (s390_is_imm20(off)) { \
s390_##opy (loc, r, ix, br, off); \
} else { \
if (ix == 0) { \
S390_SET(loc, s390_r13, off); \
s390_la (loc, s390_r13, s390_r13, br, 0); \
} else { \
s390_la (loc, s390_r13, ix, br, 0); \
S390_SET (loc, s390_r0, off); \
s390_agr (loc, s390_r13, s390_r0); \
} \
s390_##op (loc, r, 0, s390_r13, 0); \
}
#define S390_SET_MASK(loc, dr, v) \
do { \
if (s390_is_imm16 (v)) { \
s390_lghi (loc, dr, v); \
} else if (s390_is_imm32 (v)) { \
s390_lgfi (loc, dr, v); \
} else { \
gint64 val = (gint64) v; \
guint32 hi = (val) >> 32; \
guint32 lo = (val) & 0xffffffff; \
s390_iilf(loc, dr, lo); \
s390_iihf(loc, dr, hi); \
} \
} while (0)
#define S390_CALL_TEMPLATE(loc, r) \
do { \
s390_lgrl (loc, r, 0); \
s390_basr (loc, s390_r14, r); \
} while (0)
#define S390_BR_TEMPLATE(loc, r) \
do { \
s390_lgrl (loc, r, 0); \
s390_br (loc, r); \
} while (0)
#define S390_LOAD_TEMPLATE(loc, r) \
do { \
s390_iihf (loc, r, 0); \
s390_iilf (loc, r, 0); \
} while (0)
#define S390_EMIT_CALL(loc, t) \
do { \
uintptr_t rel; \
uintptr_t p = (uintptr_t) loc; \
rel = ((uintptr_t) t - (uintptr_t) loc) >> 1; \
p += 2; \
*(guint32 *) p = rel; \
} while (0)
#define S390_EMIT_LOAD(loc, v) \
do { \
gint64 val = (gint64) v; \
guint32 hi = (val) >> 32; \
guint32 lo = (val) & 0xffffffff; \
uintptr_t p = (uintptr_t) loc; \
p += 2; \
*(guint32 *) p = hi; \
p += 6; \
*(guint32 *) p = lo; \
} while (0)
#endif /* __MONO_MINI_S390X_H__ */
| 1 |
dotnet/runtime | 66,006 | Revert "[mono][jit] Remove support for -O=-float32, i.e. treating r4 values a…" | Reverts this as x86 still uses it.
| vargaz | 2022-03-01T15:15:53Z | 2022-03-01T20:09:47Z | af71404d9a3b38e63408af47cc847ac1a1fcf4e1 | 2dd232a53c38ac874b15fe504df275b660988294 | Revert "[mono][jit] Remove support for -O=-float32, i.e. treating r4 values a…". Reverts this as x86 still uses it.
| ./src/mono/mono/mini/mini-wasm.c | #include "mini.h"
#include "mini-runtime.h"
#include <mono/metadata/mono-debug.h>
#include <mono/metadata/assembly.h>
#include <mono/metadata/metadata.h>
#include <mono/metadata/loader-internals.h>
#include <mono/metadata/icall-internals.h>
#include <mono/metadata/seq-points-data.h>
#include <mono/mini/aot-runtime.h>
#include <mono/mini/seq-points.h>
#include <mono/utils/mono-threads.h>
#include <mono/metadata/components.h>
static int mono_wasm_debug_level = 0;
#ifndef DISABLE_JIT
#include "ir-emit.h"
#include "cpu-wasm.h"
//FIXME figure out if we need to distinguish between i,l,f,d types
typedef enum {
ArgOnStack,
ArgValuetypeAddrOnStack,
ArgGsharedVTOnStack,
ArgValuetypeAddrInIReg,
ArgVtypeAsScalar,
ArgInvalid,
} ArgStorage;
typedef struct {
ArgStorage storage : 8;
MonoType *type;
} ArgInfo;
struct CallInfo {
int nargs;
gboolean gsharedvt;
ArgInfo ret;
ArgInfo args [1];
};
// WASM ABI: https://github.com/WebAssembly/tool-conventions/blob/main/BasicCABI.md
static ArgStorage
get_storage (MonoType *type, gboolean is_return)
{
switch (type->type) {
case MONO_TYPE_I1:
case MONO_TYPE_U1:
case MONO_TYPE_I2:
case MONO_TYPE_U2:
case MONO_TYPE_I4:
case MONO_TYPE_U4:
case MONO_TYPE_I:
case MONO_TYPE_U:
case MONO_TYPE_PTR:
case MONO_TYPE_FNPTR:
case MONO_TYPE_OBJECT:
return ArgOnStack;
case MONO_TYPE_U8:
case MONO_TYPE_I8:
return ArgOnStack;
case MONO_TYPE_R4:
return ArgOnStack;
case MONO_TYPE_R8:
return ArgOnStack;
case MONO_TYPE_GENERICINST:
if (!mono_type_generic_inst_is_valuetype (type))
return ArgOnStack;
if (mini_is_gsharedvt_type (type)) {
return ArgGsharedVTOnStack;
}
/* fall through */
case MONO_TYPE_VALUETYPE:
case MONO_TYPE_TYPEDBYREF: {
if (mini_wasm_is_scalar_vtype (type))
return ArgVtypeAsScalar;
return is_return ? ArgValuetypeAddrInIReg : ArgValuetypeAddrOnStack;
}
case MONO_TYPE_VAR:
case MONO_TYPE_MVAR:
g_assert (mini_is_gsharedvt_type (type));
return ArgGsharedVTOnStack;
case MONO_TYPE_VOID:
g_assert (is_return);
break;
default:
g_error ("Can't handle as return value 0x%x", type->type);
}
return ArgInvalid;
}
static CallInfo*
get_call_info (MonoMemPool *mp, MonoMethodSignature *sig)
{
int n = sig->hasthis + sig->param_count;
CallInfo *cinfo;
if (mp)
cinfo = (CallInfo *)mono_mempool_alloc0 (mp, sizeof (CallInfo) + (sizeof (ArgInfo) * n));
else
cinfo = (CallInfo *)g_malloc0 (sizeof (CallInfo) + (sizeof (ArgInfo) * n));
cinfo->nargs = n;
cinfo->gsharedvt = mini_is_gsharedvt_variable_signature (sig);
/* return value */
cinfo->ret.type = mini_get_underlying_type (sig->ret);
cinfo->ret.storage = get_storage (cinfo->ret.type, TRUE);
if (sig->hasthis)
cinfo->args [0].storage = ArgOnStack;
// not supported
g_assert (sig->call_convention != MONO_CALL_VARARG);
int i;
for (i = 0; i < sig->param_count; ++i) {
cinfo->args [i + sig->hasthis].type = mini_get_underlying_type (sig->params [i]);
cinfo->args [i + sig->hasthis].storage = get_storage (cinfo->args [i + sig->hasthis].type, FALSE);
}
return cinfo;
}
gboolean
mono_arch_have_fast_tls (void)
{
return FALSE;
}
guint32
mono_arch_get_patch_offset (guint8 *code)
{
g_error ("mono_arch_get_patch_offset");
return 0;
}
gpointer
mono_arch_ip_from_context (void *sigctx)
{
g_error ("mono_arch_ip_from_context");
}
gboolean
mono_arch_is_inst_imm (int opcode, int imm_opcode, gint64 imm)
{
return TRUE;
}
void
mono_arch_lowering_pass (MonoCompile *cfg, MonoBasicBlock *bb)
{
}
gboolean
mono_arch_opcode_supported (int opcode)
{
return FALSE;
}
void
mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
{
g_error ("mono_arch_output_basic_block");
}
void
mono_arch_peephole_pass_1 (MonoCompile *cfg, MonoBasicBlock *bb)
{
}
void
mono_arch_peephole_pass_2 (MonoCompile *cfg, MonoBasicBlock *bb)
{
}
guint32
mono_arch_regalloc_cost (MonoCompile *cfg, MonoMethodVar *vmv)
{
return 0;
}
GList *
mono_arch_get_allocatable_int_vars (MonoCompile *cfg)
{
g_error ("mono_arch_get_allocatable_int_vars");
}
GList *
mono_arch_get_global_int_regs (MonoCompile *cfg)
{
g_error ("mono_arch_get_global_int_regs");
}
void
mono_arch_allocate_vars (MonoCompile *cfg)
{
g_error ("mono_arch_allocate_vars");
}
void
mono_arch_create_vars (MonoCompile *cfg)
{
MonoMethodSignature *sig;
CallInfo *cinfo;
sig = mono_method_signature_internal (cfg->method);
if (!cfg->arch.cinfo)
cfg->arch.cinfo = get_call_info (cfg->mempool, sig);
cinfo = (CallInfo *)cfg->arch.cinfo;
// if (cinfo->ret.storage == ArgValuetypeInReg)
// cfg->ret_var_is_local = TRUE;
mini_get_underlying_type (sig->ret);
if (cinfo->ret.storage == ArgValuetypeAddrInIReg || cinfo->ret.storage == ArgGsharedVTOnStack) {
cfg->vret_addr = mono_compile_create_var (cfg, mono_get_int_type (), OP_ARG);
if (G_UNLIKELY (cfg->verbose_level > 1)) {
printf ("vret_addr = ");
mono_print_ins (cfg->vret_addr);
}
}
if (cfg->gen_sdb_seq_points)
g_error ("gen_sdb_seq_points not supported");
if (cfg->method->save_lmf) {
cfg->create_lmf_var = TRUE;
cfg->lmf_ir = TRUE;
}
}
void
mono_arch_emit_call (MonoCompile *cfg, MonoCallInst *call)
{
g_error ("mono_arch_emit_call");
}
void
mono_arch_emit_epilog (MonoCompile *cfg)
{
g_error ("mono_arch_emit_epilog");
}
void
mono_arch_emit_exceptions (MonoCompile *cfg)
{
g_error ("mono_arch_emit_exceptions");
}
MonoInst*
mono_arch_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
{
return NULL;
}
void
mono_arch_emit_outarg_vt (MonoCompile *cfg, MonoInst *ins, MonoInst *src)
{
g_error ("mono_arch_emit_outarg_vt");
}
guint8 *
mono_arch_emit_prolog (MonoCompile *cfg)
{
g_error ("mono_arch_emit_prolog");
}
void
mono_arch_emit_setret (MonoCompile *cfg, MonoMethod *method, MonoInst *val)
{
MonoType *ret = mini_get_underlying_type (mono_method_signature_internal (method)->ret);
if (!m_type_is_byref (ret)) {
if (ret->type == MONO_TYPE_R4) {
MONO_EMIT_NEW_UNALU (cfg, OP_RMOVE, cfg->ret->dreg, val->dreg);
return;
} else if (ret->type == MONO_TYPE_R8) {
MONO_EMIT_NEW_UNALU (cfg, OP_FMOVE, cfg->ret->dreg, val->dreg);
return;
} else if (ret->type == MONO_TYPE_I8 || ret->type == MONO_TYPE_U8) {
MONO_EMIT_NEW_UNALU (cfg, OP_LMOVE, cfg->ret->dreg, val->dreg);
return;
}
}
MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
}
void
mono_arch_flush_icache (guint8 *code, gint size)
{
}
LLVMCallInfo*
mono_arch_get_llvm_call_info (MonoCompile *cfg, MonoMethodSignature *sig)
{
int i, n;
CallInfo *cinfo;
LLVMCallInfo *linfo;
cinfo = get_call_info (cfg->mempool, sig);
n = cinfo->nargs;
linfo = mono_mempool_alloc0 (cfg->mempool, sizeof (LLVMCallInfo) + (sizeof (LLVMArgInfo) * n));
if (cinfo->ret.storage == ArgVtypeAsScalar) {
linfo->ret.storage = LLVMArgWasmVtypeAsScalar;
linfo->ret.esize = mono_class_value_size (mono_class_from_mono_type_internal (cinfo->ret.type), NULL);
} else if (mini_type_is_vtype (sig->ret)) {
/* Vtype returned using a hidden argument */
linfo->ret.storage = LLVMArgVtypeRetAddr;
// linfo->vret_arg_index = cinfo->vret_arg_index;
} else {
if (sig->ret->type != MONO_TYPE_VOID)
linfo->ret.storage = LLVMArgNormal;
}
for (i = 0; i < n; ++i) {
ArgInfo *ainfo = &cinfo->args[i];
switch (ainfo->storage) {
case ArgOnStack:
linfo->args [i].storage = LLVMArgNormal;
break;
case ArgValuetypeAddrOnStack:
linfo->args [i].storage = LLVMArgVtypeByRef;
break;
case ArgGsharedVTOnStack:
linfo->args [i].storage = LLVMArgGsharedvtVariable;
break;
case ArgVtypeAsScalar:
linfo->args [i].storage = LLVMArgWasmVtypeAsScalar;
linfo->args [i].type = ainfo->type;
linfo->args [i].esize = mono_class_value_size (mono_class_from_mono_type_internal (ainfo->type), NULL);
break;
case ArgValuetypeAddrInIReg:
g_error ("this is only valid for sig->ret");
break;
}
}
return linfo;
}
gboolean
mono_arch_tailcall_supported (MonoCompile *cfg, MonoMethodSignature *caller_sig, MonoMethodSignature *callee_sig, gboolean virtual_)
{
return FALSE;
}
#endif // DISABLE_JIT
const char*
mono_arch_fregname (int reg)
{
return "freg0";
}
const char*
mono_arch_regname (int reg)
{
return "r0";
}
int
mono_arch_get_argument_info (MonoMethodSignature *csig, int param_count, MonoJitArgumentInfo *arg_info)
{
g_error ("mono_arch_get_argument_info");
}
GSList*
mono_arch_get_delegate_invoke_impls (void)
{
g_error ("mono_arch_get_delegate_invoke_impls");
}
gpointer
mono_arch_get_gsharedvt_call_info (MonoMemoryManager *mem_manager, gpointer addr, MonoMethodSignature *normal_sig, MonoMethodSignature *gsharedvt_sig, gboolean gsharedvt_in, gint32 vcall_offset, gboolean calli)
{
g_error ("mono_arch_get_gsharedvt_call_info");
return NULL;
}
gpointer
mono_arch_get_delegate_invoke_impl (MonoMethodSignature *sig, gboolean has_target)
{
g_error ("mono_arch_get_delegate_invoke_impl");
}
#ifdef HOST_BROWSER
#include <emscripten.h>
//functions exported to be used by JS
G_BEGIN_DECLS
EMSCRIPTEN_KEEPALIVE void mono_set_timeout_exec (void);
//JS functions imported that we use
extern void mono_set_timeout (int t);
extern void mono_wasm_queue_tp_cb (void);
G_END_DECLS
void mono_background_exec (void);
#endif // HOST_BROWSER
gpointer
mono_arch_get_this_arg_from_call (host_mgreg_t *regs, guint8 *code)
{
g_error ("mono_arch_get_this_arg_from_call");
}
gpointer
mono_arch_get_delegate_virtual_invoke_impl (MonoMethodSignature *sig, MonoMethod *method, int offset, gboolean load_imt_reg)
{
g_error ("mono_arch_get_delegate_virtual_invoke_impl");
}
void
mono_arch_cpu_init (void)
{
// printf ("mono_arch_cpu_init\n");
}
void
mono_arch_finish_init (void)
{
// printf ("mono_arch_finish_init\n");
}
void
mono_arch_init (void)
{
// printf ("mono_arch_init\n");
}
void
mono_arch_cleanup (void)
{
}
void
mono_arch_register_lowlevel_calls (void)
{
}
void
mono_arch_flush_register_windows (void)
{
}
MonoMethod*
mono_arch_find_imt_method (host_mgreg_t *regs, guint8 *code)
{
g_error ("mono_arch_find_static_call_vtable");
return (MonoMethod*) regs [MONO_ARCH_IMT_REG];
}
MonoVTable*
mono_arch_find_static_call_vtable (host_mgreg_t *regs, guint8 *code)
{
g_error ("mono_arch_find_static_call_vtable");
return (MonoVTable*) regs [MONO_ARCH_RGCTX_REG];
}
GSList*
mono_arch_get_cie_program (void)
{
GSList *l = NULL;
return l;
}
gpointer
mono_arch_build_imt_trampoline (MonoVTable *vtable, MonoIMTCheckItem **imt_entries, int count, gpointer fail_tramp)
{
g_error ("mono_arch_build_imt_trampoline");
}
guint32
mono_arch_cpu_optimizations (guint32 *exclude_mask)
{
/* No arch specific passes yet */
*exclude_mask = 0;
return 0;
}
host_mgreg_t
mono_arch_context_get_int_reg (MonoContext *ctx, int reg)
{
g_error ("mono_arch_context_get_int_reg");
return 0;
}
host_mgreg_t*
mono_arch_context_get_int_reg_address (MonoContext *ctx, int reg)
{
g_error ("mono_arch_context_get_int_reg_address");
return 0;
}
#if defined(HOST_BROWSER) || defined(HOST_WASI)
void
mono_runtime_install_handlers (void)
{
}
void
mono_init_native_crash_info (void)
{
return;
}
#endif
#ifdef HOST_BROWSER
void
mono_runtime_setup_stat_profiler (void)
{
g_error ("mono_runtime_setup_stat_profiler");
}
gboolean
MONO_SIG_HANDLER_SIGNATURE (mono_chain_signal)
{
g_error ("mono_chain_signal");
return FALSE;
}
gboolean
mono_thread_state_init_from_handle (MonoThreadUnwindState *tctx, MonoThreadInfo *info, void *sigctx)
{
g_error ("WASM systems don't support mono_thread_state_init_from_handle");
return FALSE;
}
EMSCRIPTEN_KEEPALIVE void
mono_set_timeout_exec (void)
{
ERROR_DECL (error);
static MonoMethod *method = NULL;
if (method == NULL) {
MonoClass *klass = mono_class_load_from_name (mono_defaults.corlib, "System.Threading", "TimerQueue");
g_assert (klass);
method = mono_class_get_method_from_name_checked (klass, "TimeoutCallback", -1, 0, error);
mono_error_assert_ok (error);
g_assert (method);
}
MonoObject *exc = NULL;
mono_runtime_try_invoke (method, NULL, NULL, &exc, error);
//YES we swallow exceptions because there's not much we can do from here.
//FIXME Maybe call the unhandled exception function?
if (!is_ok (error)) {
g_printerr ("timeout callback failed due to %s\n", mono_error_get_message (error));
mono_error_cleanup (error);
}
if (exc) {
char *type_name = mono_type_get_full_name (mono_object_class (exc));
g_printerr ("timeout callback threw a %s\n", type_name);
g_free (type_name);
}
}
#endif
void
mono_wasm_set_timeout (int timeout)
{
#ifdef HOST_BROWSER
mono_set_timeout (timeout);
#endif
}
static void
tp_cb (void)
{
ERROR_DECL (error);
static MonoMethod *method = NULL;
if (method == NULL) {
MonoClass *klass = mono_class_load_from_name (mono_defaults.corlib, "System.Threading", "ThreadPool");
g_assert (klass);
method = mono_class_get_method_from_name_checked (klass, "Callback", -1, 0, error);
mono_error_assert_ok (error);
g_assert (method);
}
MonoObject *exc = NULL;
mono_runtime_try_invoke (method, NULL, NULL, &exc, error);
if (!is_ok (error)) {
g_printerr ("ThreadPool Callback failed due to error: %s\n", mono_error_get_message (error));
mono_error_cleanup (error);
}
if (exc) {
char *type_name = mono_type_get_full_name (mono_object_class (exc));
g_printerr ("ThreadPool Callback threw an unhandled exception of type %s\n", type_name);
g_free (type_name);
}
}
#ifdef HOST_BROWSER
void
mono_wasm_queue_tp_cb (void)
{
mono_threads_schedule_background_job (tp_cb);
}
#endif
void
mono_arch_register_icall (void)
{
#ifdef HOST_BROWSER
mono_add_internal_call_internal ("System.Threading.TimerQueue::SetTimeout", mono_wasm_set_timeout);
mono_add_internal_call_internal ("System.Threading.ThreadPool::QueueCallback", mono_wasm_queue_tp_cb);
#endif
}
void
mono_arch_patch_code_new (MonoCompile *cfg, guint8 *code, MonoJumpInfo *ji, gpointer target)
{
g_error ("mono_arch_patch_code_new");
}
#ifdef HOST_BROWSER
G_BEGIN_DECLS
int inotify_init (void);
int inotify_rm_watch (int fd, int wd);
int inotify_add_watch (int fd, const char *pathname, uint32_t mask);
int sem_timedwait (sem_t *sem, const struct timespec *abs_timeout);
G_END_DECLS
G_BEGIN_DECLS
//LLVM builtins that we should not have used in the first place
#include <sys/types.h>
#include <pwd.h>
#include <uuid/uuid.h>
#ifndef __EMSCRIPTEN_PTHREADS__
int pthread_getschedparam (pthread_t thread, int *policy, struct sched_param *param)
{
g_error ("pthread_getschedparam");
return 0;
}
#endif
int
pthread_setschedparam(pthread_t thread, int policy, const struct sched_param *param)
{
return 0;
}
int
sigsuspend(const sigset_t *sigmask)
{
g_error ("sigsuspend");
return 0;
}
int
getdtablesize (void)
{
return 256; //random constant that is the fd limit
}
int
inotify_init (void)
{
g_error ("inotify_init");
}
int
inotify_rm_watch (int fd, int wd)
{
g_error ("inotify_rm_watch");
return 0;
}
int
inotify_add_watch (int fd, const char *pathname, uint32_t mask)
{
g_error ("inotify_add_watch");
return 0;
}
#ifndef __EMSCRIPTEN_PTHREADS__
int
sem_timedwait (sem_t *sem, const struct timespec *abs_timeout)
{
g_error ("sem_timedwait");
return 0;
}
#endif
ssize_t sendfile(int out_fd, int in_fd, off_t *offset, size_t count);
ssize_t sendfile(int out_fd, int in_fd, off_t *offset, size_t count)
{
errno = ENOTSUP;
return -1;
}
G_END_DECLS
/* Helper for runtime debugging */
void
mono_wasm_print_stack_trace (void)
{
EM_ASM(
var err = new Error();
console.log ("Stacktrace: \n");
console.log (err.stack);
);
}
#endif // HOST_BROWSER
gpointer
mono_arch_load_function (MonoJitICallId jit_icall_id)
{
return NULL;
}
MONO_API void
mono_wasm_enable_debugging (int log_level)
{
mono_wasm_debug_level = log_level;
}
int
mono_wasm_get_debug_level (void)
{
return mono_wasm_debug_level;
}
/* Return whether TYPE represents a vtype with only one scalar member */
gboolean
mini_wasm_is_scalar_vtype (MonoType *type)
{
MonoClass *klass;
MonoClassField *field;
gpointer iter;
if (!MONO_TYPE_ISSTRUCT (type))
return FALSE;
klass = mono_class_from_mono_type_internal (type);
mono_class_init_internal (klass);
int size = mono_class_value_size (klass, NULL);
if (size == 0 || size >= 8)
return FALSE;
iter = NULL;
int nfields = 0;
field = NULL;
while ((field = mono_class_get_fields_internal (klass, &iter))) {
if (field->type->attrs & FIELD_ATTRIBUTE_STATIC)
continue;
nfields ++;
if (nfields > 1)
return FALSE;
MonoType *t = mini_get_underlying_type (field->type);
if (MONO_TYPE_ISSTRUCT (t)) {
if (!mini_wasm_is_scalar_vtype (t))
return FALSE;
} else if (!((MONO_TYPE_IS_PRIMITIVE (t) || MONO_TYPE_IS_REFERENCE (t) || MONO_TYPE_IS_POINTER (t)))) {
return FALSE;
}
}
return TRUE;
}
| #include "mini.h"
#include "mini-runtime.h"
#include <mono/metadata/mono-debug.h>
#include <mono/metadata/assembly.h>
#include <mono/metadata/metadata.h>
#include <mono/metadata/loader-internals.h>
#include <mono/metadata/icall-internals.h>
#include <mono/metadata/seq-points-data.h>
#include <mono/mini/aot-runtime.h>
#include <mono/mini/seq-points.h>
#include <mono/utils/mono-threads.h>
#include <mono/metadata/components.h>
static int mono_wasm_debug_level = 0;
#ifndef DISABLE_JIT
#include "ir-emit.h"
#include "cpu-wasm.h"
//FIXME figure out if we need to distinguish between i,l,f,d types
typedef enum {
ArgOnStack,
ArgValuetypeAddrOnStack,
ArgGsharedVTOnStack,
ArgValuetypeAddrInIReg,
ArgVtypeAsScalar,
ArgInvalid,
} ArgStorage;
typedef struct {
ArgStorage storage : 8;
MonoType *type;
} ArgInfo;
struct CallInfo {
int nargs;
gboolean gsharedvt;
ArgInfo ret;
ArgInfo args [1];
};
// WASM ABI: https://github.com/WebAssembly/tool-conventions/blob/main/BasicCABI.md
static ArgStorage
get_storage (MonoType *type, gboolean is_return)
{
switch (type->type) {
case MONO_TYPE_I1:
case MONO_TYPE_U1:
case MONO_TYPE_I2:
case MONO_TYPE_U2:
case MONO_TYPE_I4:
case MONO_TYPE_U4:
case MONO_TYPE_I:
case MONO_TYPE_U:
case MONO_TYPE_PTR:
case MONO_TYPE_FNPTR:
case MONO_TYPE_OBJECT:
return ArgOnStack;
case MONO_TYPE_U8:
case MONO_TYPE_I8:
return ArgOnStack;
case MONO_TYPE_R4:
return ArgOnStack;
case MONO_TYPE_R8:
return ArgOnStack;
case MONO_TYPE_GENERICINST:
if (!mono_type_generic_inst_is_valuetype (type))
return ArgOnStack;
if (mini_is_gsharedvt_type (type)) {
return ArgGsharedVTOnStack;
}
/* fall through */
case MONO_TYPE_VALUETYPE:
case MONO_TYPE_TYPEDBYREF: {
if (mini_wasm_is_scalar_vtype (type))
return ArgVtypeAsScalar;
return is_return ? ArgValuetypeAddrInIReg : ArgValuetypeAddrOnStack;
}
case MONO_TYPE_VAR:
case MONO_TYPE_MVAR:
g_assert (mini_is_gsharedvt_type (type));
return ArgGsharedVTOnStack;
case MONO_TYPE_VOID:
g_assert (is_return);
break;
default:
g_error ("Can't handle as return value 0x%x", type->type);
}
return ArgInvalid;
}
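/*
 * Illustrative examples of the classification above: I1..U8, R4, R8,
 * pointers and object references are all ArgOnStack; a struct that is not a
 * single-scalar vtype is passed by address (ArgValuetypeAddrOnStack as an
 * argument, ArgValuetypeAddrInIReg as the return value); and an open
 * gsharedvt type variable lands in ArgGsharedVTOnStack.
 */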
static CallInfo*
get_call_info (MonoMemPool *mp, MonoMethodSignature *sig)
{
int n = sig->hasthis + sig->param_count;
CallInfo *cinfo;
if (mp)
cinfo = (CallInfo *)mono_mempool_alloc0 (mp, sizeof (CallInfo) + (sizeof (ArgInfo) * n));
else
cinfo = (CallInfo *)g_malloc0 (sizeof (CallInfo) + (sizeof (ArgInfo) * n));
cinfo->nargs = n;
cinfo->gsharedvt = mini_is_gsharedvt_variable_signature (sig);
/* return value */
cinfo->ret.type = mini_get_underlying_type (sig->ret);
cinfo->ret.storage = get_storage (cinfo->ret.type, TRUE);
if (sig->hasthis)
cinfo->args [0].storage = ArgOnStack;
// not supported
g_assert (sig->call_convention != MONO_CALL_VARARG);
int i;
for (i = 0; i < sig->param_count; ++i) {
cinfo->args [i + sig->hasthis].type = mini_get_underlying_type (sig->params [i]);
cinfo->args [i + sig->hasthis].storage = get_storage (cinfo->args [i + sig->hasthis].type, FALSE);
}
return cinfo;
}
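/*
 * Illustrative sketch: a hypothetical helper (not part of the original file)
 * showing how the CallInfo built above can be consumed; it merely counts the
 * arguments passed by address.
 */
static int
count_byaddr_args_sketch (CallInfo *cinfo)
{
	int i, n = 0;
	for (i = 0; i < cinfo->nargs; ++i)
		if (cinfo->args [i].storage == ArgValuetypeAddrOnStack)
			n++;
	return n;
}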
gboolean
mono_arch_have_fast_tls (void)
{
return FALSE;
}
guint32
mono_arch_get_patch_offset (guint8 *code)
{
g_error ("mono_arch_get_patch_offset");
return 0;
}
gpointer
mono_arch_ip_from_context (void *sigctx)
{
g_error ("mono_arch_ip_from_context");
}
gboolean
mono_arch_is_inst_imm (int opcode, int imm_opcode, gint64 imm)
{
return TRUE;
}
void
mono_arch_lowering_pass (MonoCompile *cfg, MonoBasicBlock *bb)
{
}
gboolean
mono_arch_opcode_supported (int opcode)
{
return FALSE;
}
void
mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
{
g_error ("mono_arch_output_basic_block");
}
void
mono_arch_peephole_pass_1 (MonoCompile *cfg, MonoBasicBlock *bb)
{
}
void
mono_arch_peephole_pass_2 (MonoCompile *cfg, MonoBasicBlock *bb)
{
}
guint32
mono_arch_regalloc_cost (MonoCompile *cfg, MonoMethodVar *vmv)
{
return 0;
}
GList *
mono_arch_get_allocatable_int_vars (MonoCompile *cfg)
{
g_error ("mono_arch_get_allocatable_int_vars");
}
GList *
mono_arch_get_global_int_regs (MonoCompile *cfg)
{
g_error ("mono_arch_get_global_int_regs");
}
void
mono_arch_allocate_vars (MonoCompile *cfg)
{
g_error ("mono_arch_allocate_vars");
}
void
mono_arch_create_vars (MonoCompile *cfg)
{
MonoMethodSignature *sig;
CallInfo *cinfo;
sig = mono_method_signature_internal (cfg->method);
if (!cfg->arch.cinfo)
cfg->arch.cinfo = get_call_info (cfg->mempool, sig);
cinfo = (CallInfo *)cfg->arch.cinfo;
// if (cinfo->ret.storage == ArgValuetypeInReg)
// cfg->ret_var_is_local = TRUE;
mini_get_underlying_type (sig->ret);
if (cinfo->ret.storage == ArgValuetypeAddrInIReg || cinfo->ret.storage == ArgGsharedVTOnStack) {
cfg->vret_addr = mono_compile_create_var (cfg, mono_get_int_type (), OP_ARG);
if (G_UNLIKELY (cfg->verbose_level > 1)) {
printf ("vret_addr = ");
mono_print_ins (cfg->vret_addr);
}
}
if (cfg->gen_sdb_seq_points)
g_error ("gen_sdb_seq_points not supported");
if (cfg->method->save_lmf) {
cfg->create_lmf_var = TRUE;
cfg->lmf_ir = TRUE;
}
}
void
mono_arch_emit_call (MonoCompile *cfg, MonoCallInst *call)
{
g_error ("mono_arch_emit_call");
}
void
mono_arch_emit_epilog (MonoCompile *cfg)
{
g_error ("mono_arch_emit_epilog");
}
void
mono_arch_emit_exceptions (MonoCompile *cfg)
{
g_error ("mono_arch_emit_exceptions");
}
MonoInst*
mono_arch_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
{
return NULL;
}
void
mono_arch_emit_outarg_vt (MonoCompile *cfg, MonoInst *ins, MonoInst *src)
{
g_error ("mono_arch_emit_outarg_vt");
}
guint8 *
mono_arch_emit_prolog (MonoCompile *cfg)
{
g_error ("mono_arch_emit_prolog");
}
void
mono_arch_emit_setret (MonoCompile *cfg, MonoMethod *method, MonoInst *val)
{
MonoType *ret = mini_get_underlying_type (mono_method_signature_internal (method)->ret);
if (!m_type_is_byref (ret)) {
if (ret->type == MONO_TYPE_R4) {
MONO_EMIT_NEW_UNALU (cfg, cfg->r4fp ? OP_RMOVE : OP_FMOVE, cfg->ret->dreg, val->dreg);
return;
} else if (ret->type == MONO_TYPE_R8) {
MONO_EMIT_NEW_UNALU (cfg, OP_FMOVE, cfg->ret->dreg, val->dreg);
return;
} else if (ret->type == MONO_TYPE_I8 || ret->type == MONO_TYPE_U8) {
MONO_EMIT_NEW_UNALU (cfg, OP_LMOVE, cfg->ret->dreg, val->dreg);
return;
}
}
MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
}
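/*
 * Illustrative sketch: with -O=-float32, cfg->r4fp is unset and r4 values
 * are treated as r8, which is why the R4 branch above falls back to
 * OP_FMOVE. The opcode selection in isolation (hypothetical helper):
 */
static int
setret_r4_opcode_sketch (gboolean r4fp)
{
	return r4fp ? OP_RMOVE : OP_FMOVE;
}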
void
mono_arch_flush_icache (guint8 *code, gint size)
{
}
LLVMCallInfo*
mono_arch_get_llvm_call_info (MonoCompile *cfg, MonoMethodSignature *sig)
{
int i, n;
CallInfo *cinfo;
LLVMCallInfo *linfo;
cinfo = get_call_info (cfg->mempool, sig);
n = cinfo->nargs;
linfo = mono_mempool_alloc0 (cfg->mempool, sizeof (LLVMCallInfo) + (sizeof (LLVMArgInfo) * n));
if (cinfo->ret.storage == ArgVtypeAsScalar) {
linfo->ret.storage = LLVMArgWasmVtypeAsScalar;
linfo->ret.esize = mono_class_value_size (mono_class_from_mono_type_internal (cinfo->ret.type), NULL);
} else if (mini_type_is_vtype (sig->ret)) {
/* Vtype returned using a hidden argument */
linfo->ret.storage = LLVMArgVtypeRetAddr;
// linfo->vret_arg_index = cinfo->vret_arg_index;
} else {
if (sig->ret->type != MONO_TYPE_VOID)
linfo->ret.storage = LLVMArgNormal;
}
for (i = 0; i < n; ++i) {
ArgInfo *ainfo = &cinfo->args[i];
switch (ainfo->storage) {
case ArgOnStack:
linfo->args [i].storage = LLVMArgNormal;
break;
case ArgValuetypeAddrOnStack:
linfo->args [i].storage = LLVMArgVtypeByRef;
break;
case ArgGsharedVTOnStack:
linfo->args [i].storage = LLVMArgGsharedvtVariable;
break;
case ArgVtypeAsScalar:
linfo->args [i].storage = LLVMArgWasmVtypeAsScalar;
linfo->args [i].type = ainfo->type;
linfo->args [i].esize = mono_class_value_size (mono_class_from_mono_type_internal (ainfo->type), NULL);
break;
case ArgValuetypeAddrInIReg:
g_error ("this is only valid for sig->ret");
break;
}
}
return linfo;
}
gboolean
mono_arch_tailcall_supported (MonoCompile *cfg, MonoMethodSignature *caller_sig, MonoMethodSignature *callee_sig, gboolean virtual_)
{
return FALSE;
}
#endif // DISABLE_JIT
const char*
mono_arch_fregname (int reg)
{
return "freg0";
}
const char*
mono_arch_regname (int reg)
{
return "r0";
}
int
mono_arch_get_argument_info (MonoMethodSignature *csig, int param_count, MonoJitArgumentInfo *arg_info)
{
g_error ("mono_arch_get_argument_info");
}
GSList*
mono_arch_get_delegate_invoke_impls (void)
{
g_error ("mono_arch_get_delegate_invoke_impls");
}
gpointer
mono_arch_get_gsharedvt_call_info (MonoMemoryManager *mem_manager, gpointer addr, MonoMethodSignature *normal_sig, MonoMethodSignature *gsharedvt_sig, gboolean gsharedvt_in, gint32 vcall_offset, gboolean calli)
{
g_error ("mono_arch_get_gsharedvt_call_info");
return NULL;
}
gpointer
mono_arch_get_delegate_invoke_impl (MonoMethodSignature *sig, gboolean has_target)
{
g_error ("mono_arch_get_delegate_invoke_impl");
}
#ifdef HOST_BROWSER
#include <emscripten.h>
//functions exported to be used by JS
G_BEGIN_DECLS
EMSCRIPTEN_KEEPALIVE void mono_set_timeout_exec (void);
//JS functions imported that we use
extern void mono_set_timeout (int t);
extern void mono_wasm_queue_tp_cb (void);
G_END_DECLS
void mono_background_exec (void);
#endif // HOST_BROWSER
gpointer
mono_arch_get_this_arg_from_call (host_mgreg_t *regs, guint8 *code)
{
g_error ("mono_arch_get_this_arg_from_call");
}
gpointer
mono_arch_get_delegate_virtual_invoke_impl (MonoMethodSignature *sig, MonoMethod *method, int offset, gboolean load_imt_reg)
{
g_error ("mono_arch_get_delegate_virtual_invoke_impl");
}
void
mono_arch_cpu_init (void)
{
// printf ("mono_arch_cpu_init\n");
}
void
mono_arch_finish_init (void)
{
// printf ("mono_arch_finish_init\n");
}
void
mono_arch_init (void)
{
// printf ("mono_arch_init\n");
}
void
mono_arch_cleanup (void)
{
}
void
mono_arch_register_lowlevel_calls (void)
{
}
void
mono_arch_flush_register_windows (void)
{
}
MonoMethod*
mono_arch_find_imt_method (host_mgreg_t *regs, guint8 *code)
{
g_error ("mono_arch_find_static_call_vtable");
return (MonoMethod*) regs [MONO_ARCH_IMT_REG];
}
MonoVTable*
mono_arch_find_static_call_vtable (host_mgreg_t *regs, guint8 *code)
{
g_error ("mono_arch_find_static_call_vtable");
return (MonoVTable*) regs [MONO_ARCH_RGCTX_REG];
}
GSList*
mono_arch_get_cie_program (void)
{
GSList *l = NULL;
return l;
}
gpointer
mono_arch_build_imt_trampoline (MonoVTable *vtable, MonoIMTCheckItem **imt_entries, int count, gpointer fail_tramp)
{
g_error ("mono_arch_build_imt_trampoline");
}
guint32
mono_arch_cpu_optimizations (guint32 *exclude_mask)
{
/* No arch specific passes yet */
*exclude_mask = 0;
return 0;
}
host_mgreg_t
mono_arch_context_get_int_reg (MonoContext *ctx, int reg)
{
g_error ("mono_arch_context_get_int_reg");
return 0;
}
host_mgreg_t*
mono_arch_context_get_int_reg_address (MonoContext *ctx, int reg)
{
g_error ("mono_arch_context_get_int_reg_address");
return 0;
}
#if defined(HOST_BROWSER) || defined(HOST_WASI)
void
mono_runtime_install_handlers (void)
{
}
void
mono_init_native_crash_info (void)
{
return;
}
#endif
#ifdef HOST_BROWSER
void
mono_runtime_setup_stat_profiler (void)
{
g_error ("mono_runtime_setup_stat_profiler");
}
gboolean
MONO_SIG_HANDLER_SIGNATURE (mono_chain_signal)
{
g_error ("mono_chain_signal");
return FALSE;
}
gboolean
mono_thread_state_init_from_handle (MonoThreadUnwindState *tctx, MonoThreadInfo *info, void *sigctx)
{
g_error ("WASM systems don't support mono_thread_state_init_from_handle");
return FALSE;
}
EMSCRIPTEN_KEEPALIVE void
mono_set_timeout_exec (void)
{
ERROR_DECL (error);
static MonoMethod *method = NULL;
if (method == NULL) {
MonoClass *klass = mono_class_load_from_name (mono_defaults.corlib, "System.Threading", "TimerQueue");
g_assert (klass);
method = mono_class_get_method_from_name_checked (klass, "TimeoutCallback", -1, 0, error);
mono_error_assert_ok (error);
g_assert (method);
}
MonoObject *exc = NULL;
mono_runtime_try_invoke (method, NULL, NULL, &exc, error);
//YES we swallow exceptions because there's not much we can do from here.
//FIXME Maybe call the unhandled exception function?
if (!is_ok (error)) {
g_printerr ("timeout callback failed due to %s\n", mono_error_get_message (error));
mono_error_cleanup (error);
}
if (exc) {
char *type_name = mono_type_get_full_name (mono_object_class (exc));
g_printerr ("timeout callback threw a %s\n", type_name);
g_free (type_name);
}
}
#endif
void
mono_wasm_set_timeout (int timeout)
{
#ifdef HOST_BROWSER
mono_set_timeout (timeout);
#endif
}
static void
tp_cb (void)
{
ERROR_DECL (error);
static MonoMethod *method = NULL;
if (method == NULL) {
MonoClass *klass = mono_class_load_from_name (mono_defaults.corlib, "System.Threading", "ThreadPool");
g_assert (klass);
method = mono_class_get_method_from_name_checked (klass, "Callback", -1, 0, error);
mono_error_assert_ok (error);
g_assert (method);
}
MonoObject *exc = NULL;
mono_runtime_try_invoke (method, NULL, NULL, &exc, error);
if (!is_ok (error)) {
g_printerr ("ThreadPool Callback failed due to error: %s\n", mono_error_get_message (error));
mono_error_cleanup (error);
}
if (exc) {
char *type_name = mono_type_get_full_name (mono_object_class (exc));
g_printerr ("ThreadPool Callback threw an unhandled exception of type %s\n", type_name);
g_free (type_name);
}
}
#ifdef HOST_BROWSER
void
mono_wasm_queue_tp_cb (void)
{
mono_threads_schedule_background_job (tp_cb);
}
#endif
void
mono_arch_register_icall (void)
{
#ifdef HOST_BROWSER
mono_add_internal_call_internal ("System.Threading.TimerQueue::SetTimeout", mono_wasm_set_timeout);
mono_add_internal_call_internal ("System.Threading.ThreadPool::QueueCallback", mono_wasm_queue_tp_cb);
#endif
}
void
mono_arch_patch_code_new (MonoCompile *cfg, guint8 *code, MonoJumpInfo *ji, gpointer target)
{
g_error ("mono_arch_patch_code_new");
}
#ifdef HOST_BROWSER
G_BEGIN_DECLS
int inotify_init (void);
int inotify_rm_watch (int fd, int wd);
int inotify_add_watch (int fd, const char *pathname, uint32_t mask);
int sem_timedwait (sem_t *sem, const struct timespec *abs_timeout);
G_END_DECLS
G_BEGIN_DECLS
//LLVM builtins that we should not have used in the first place
#include <sys/types.h>
#include <pwd.h>
#include <uuid/uuid.h>
#ifndef __EMSCRIPTEN_PTHREADS__
int pthread_getschedparam (pthread_t thread, int *policy, struct sched_param *param)
{
g_error ("pthread_getschedparam");
return 0;
}
#endif
int
pthread_setschedparam(pthread_t thread, int policy, const struct sched_param *param)
{
return 0;
}
int
sigsuspend(const sigset_t *sigmask)
{
g_error ("sigsuspend");
return 0;
}
int
getdtablesize (void)
{
return 256; //random constant that is the fd limit
}
int
inotify_init (void)
{
g_error ("inotify_init");
}
int
inotify_rm_watch (int fd, int wd)
{
g_error ("inotify_rm_watch");
return 0;
}
int
inotify_add_watch (int fd, const char *pathname, uint32_t mask)
{
g_error ("inotify_add_watch");
return 0;
}
#ifndef __EMSCRIPTEN_PTHREADS__
int
sem_timedwait (sem_t *sem, const struct timespec *abs_timeout)
{
g_error ("sem_timedwait");
return 0;
}
#endif
ssize_t sendfile(int out_fd, int in_fd, off_t *offset, size_t count);
ssize_t sendfile(int out_fd, int in_fd, off_t *offset, size_t count)
{
errno = ENOTSUP;
return -1;
}
G_END_DECLS
/* Helper for runtime debugging */
void
mono_wasm_print_stack_trace (void)
{
EM_ASM(
var err = new Error();
console.log ("Stacktrace: \n");
console.log (err.stack);
);
}
#endif // HOST_BROWSER
gpointer
mono_arch_load_function (MonoJitICallId jit_icall_id)
{
return NULL;
}
MONO_API void
mono_wasm_enable_debugging (int log_level)
{
mono_wasm_debug_level = log_level;
}
int
mono_wasm_get_debug_level (void)
{
return mono_wasm_debug_level;
}
/* Return whether TYPE represents a vtype with only one scalar member */
gboolean
mini_wasm_is_scalar_vtype (MonoType *type)
{
MonoClass *klass;
MonoClassField *field;
gpointer iter;
if (!MONO_TYPE_ISSTRUCT (type))
return FALSE;
klass = mono_class_from_mono_type_internal (type);
mono_class_init_internal (klass);
int size = mono_class_value_size (klass, NULL);
if (size == 0 || size >= 8)
return FALSE;
iter = NULL;
int nfields = 0;
field = NULL;
while ((field = mono_class_get_fields_internal (klass, &iter))) {
if (field->type->attrs & FIELD_ATTRIBUTE_STATIC)
continue;
nfields ++;
if (nfields > 1)
return FALSE;
MonoType *t = mini_get_underlying_type (field->type);
if (MONO_TYPE_ISSTRUCT (t)) {
if (!mini_wasm_is_scalar_vtype (t))
return FALSE;
} else if (!((MONO_TYPE_IS_PRIMITIVE (t) || MONO_TYPE_IS_REFERENCE (t) || MONO_TYPE_IS_POINTER (t)))) {
return FALSE;
}
}
return TRUE;
}
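/*
 * Illustrative examples (hypothetical C# shapes) of the rule above: exactly
 * one instance field, recursively primitive/reference/pointer, total size
 * 1..7 bytes:
 *   struct IntBox  { int Value; }     -> scalar vtype (one field, size 4)
 *   struct Pair    { int A; int B; }  -> not scalar (two instance fields)
 *   struct LongBox { long Value; }    -> not scalar (size >= 8)
 */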
| 1 |
dotnet/runtime | 66,006 | Revert "[mono][jit] Remove support for -O=-float32, i.e. treating r4 values a…" | Reverts this as x86 still uses it.
| vargaz | 2022-03-01T15:15:53Z | 2022-03-01T20:09:47Z | af71404d9a3b38e63408af47cc847ac1a1fcf4e1 | 2dd232a53c38ac874b15fe504df275b660988294 | Revert "[mono][jit] Remove support for -O=-float32, i.e. treating r4 values a…". Reverts this as x86 still uses it.
| ./src/mono/mono/mini/mini-wasm.h | #ifndef __MONO_MINI_WASM_H__
#define __MONO_MINI_WASM_H__
#include <mono/utils/mono-sigcontext.h>
#include <mono/utils/mono-context.h>
#define MONO_ARCH_CPU_SPEC mono_wasm_desc
#define MONO_MAX_IREGS 1
#define MONO_MAX_FREGS 1
#define MONO_MAX_XREGS 1
#define WASM_REG_0 0
#define MONO_ARCH_USE_FPSTACK FALSE
// Does the ABI have a volatile non-parameter register, so tailcall
// can pass context to generics or interfaces?
#define MONO_ARCH_HAVE_VOLATILE_NON_PARAM_REGISTER 0
#define MONO_ARCH_AOT_SUPPORTED 1
#define MONO_ARCH_LLVM_SUPPORTED 1
#define MONO_ARCH_GSHARED_SUPPORTED 1
#define MONO_ARCH_GSHAREDVT_SUPPORTED 1
#define MONO_ARCH_HAVE_FULL_AOT_TRAMPOLINES 1
#define MONO_ARCH_NEED_DIV_CHECK 1
#define MONO_ARCH_NO_CODEMAN 1
#define MONO_ARCH_EMULATE_FREM 1
#define MONO_ARCH_EMULATE_FCONV_TO_U8 1
#define MONO_ARCH_EMULATE_FCONV_TO_U4 1
#define MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS 1
#define MONO_ARCH_NO_EMULATE_LONG_MUL_OPTS 1
//mini-codegen stubs - this doesn't do anything
#define MONO_ARCH_CALLEE_REGS (1 << 0)
#define MONO_ARCH_CALLEE_FREGS (1 << 1)
#define MONO_ARCH_CALLEE_XREGS (1 << 2)
#define MONO_ARCH_CALLEE_SAVED_FREGS (1 << 3)
#define MONO_ARCH_CALLEE_SAVED_REGS (1 << 4)
#define MONO_ARCH_INST_FIXED_REG(desc) FALSE
#define MONO_ARCH_INST_IS_REGPAIR(desc) FALSE
#define MONO_ARCH_INST_REGPAIR_REG2(desc,hreg1) (-1)
#define MONO_ARCH_INST_SREG2_MASK(ins) 0
struct MonoLMF {
/*
* If the second lowest bit is set to 1, then this is a MonoLMFExt structure, and
* the other fields are not valid.
*/
gpointer previous_lmf;
gpointer lmf_addr;
MonoMethod *method;
};
typedef struct {
gpointer cinfo;
} MonoCompileArch;
#define MONO_ARCH_INIT_TOP_LMF_ENTRY(lmf) do { } while (0)
#define MONO_CONTEXT_SET_LLVM_EXC_REG(ctx, exc) do { (ctx)->llvm_exc_reg = (gsize)exc; } while (0)
#define MONO_INIT_CONTEXT_FROM_FUNC(ctx,start_func) do { \
int ___tmp = 99; \
MONO_CONTEXT_SET_IP ((ctx), (start_func)); \
MONO_CONTEXT_SET_BP ((ctx), (0)); \
MONO_CONTEXT_SET_SP ((ctx), (&___tmp)); \
} while (0)
#define MONO_ARCH_VTABLE_REG WASM_REG_0
#define MONO_ARCH_IMT_REG WASM_REG_0
#define MONO_ARCH_RGCTX_REG WASM_REG_0
/* must be at a power of 2 and >= 8 */
#define MONO_ARCH_FRAME_ALIGNMENT 16
#define MONO_ARCH_USE_FPSTACK FALSE
// Does the ABI have a volatile non-parameter register, so tailcall
// can pass context to generics or interfaces?
#define MONO_ARCH_HAVE_VOLATILE_NON_PARAM_REGISTER 0
#define MONO_ARCH_AOT_SUPPORTED 1
#define MONO_ARCH_LLVM_SUPPORTED 1
#define MONO_ARCH_GSHAREDVT_SUPPORTED 1
#define MONO_ARCH_HAVE_FULL_AOT_TRAMPOLINES 1
#define MONO_ARCH_SIMD_INTRINSICS 1
#define MONO_ARCH_INTERPRETER_SUPPORTED 1
#define MONO_ARCH_HAS_REGISTER_ICALL 1
#define MONO_ARCH_HAVE_SDB_TRAMPOLINES 1
#define MONO_ARCH_LLVM_TARGET_LAYOUT "e-m:e-p:32:32-i64:64-n32:64-S128"
#define MONO_ARCH_LLVM_TARGET_TRIPLE "wasm32-unknown-emscripten"
// sdks/wasm/driver.c is C and uses this
G_EXTERN_C void mono_wasm_enable_debugging (int log_level);
void mono_wasm_set_timeout (int timeout);
int mono_wasm_assembly_already_added (const char *assembly_name);
void mono_wasm_print_stack_trace (void);
gboolean
mini_wasm_is_scalar_vtype (MonoType *type);
#endif /* __MONO_MINI_WASM_H__ */
| #ifndef __MONO_MINI_WASM_H__
#define __MONO_MINI_WASM_H__
#include <mono/utils/mono-sigcontext.h>
#include <mono/utils/mono-context.h>
#define MONO_ARCH_CPU_SPEC mono_wasm_desc
#define MONO_MAX_IREGS 1
#define MONO_MAX_FREGS 1
#define MONO_MAX_XREGS 1
#define WASM_REG_0 0
#define MONO_ARCH_USE_FPSTACK FALSE
// Does the ABI have a volatile non-parameter register, so tailcall
// can pass context to generics or interfaces?
#define MONO_ARCH_HAVE_VOLATILE_NON_PARAM_REGISTER 0
#define MONO_ARCH_AOT_SUPPORTED 1
#define MONO_ARCH_LLVM_SUPPORTED 1
#define MONO_ARCH_GSHARED_SUPPORTED 1
#define MONO_ARCH_GSHAREDVT_SUPPORTED 1
#define MONO_ARCH_HAVE_FULL_AOT_TRAMPOLINES 1
#define MONO_ARCH_NEED_DIV_CHECK 1
#define MONO_ARCH_NO_CODEMAN 1
#define MONO_ARCH_EMULATE_FREM 1
#define MONO_ARCH_EMULATE_FCONV_TO_U8 1
#define MONO_ARCH_EMULATE_FCONV_TO_U4 1
#define MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS 1
#define MONO_ARCH_NO_EMULATE_LONG_MUL_OPTS 1
#define MONO_ARCH_FLOAT32_SUPPORTED 1
//mini-codegen stubs - this doesn't do anything
#define MONO_ARCH_CALLEE_REGS (1 << 0)
#define MONO_ARCH_CALLEE_FREGS (1 << 1)
#define MONO_ARCH_CALLEE_XREGS (1 << 2)
#define MONO_ARCH_CALLEE_SAVED_FREGS (1 << 3)
#define MONO_ARCH_CALLEE_SAVED_REGS (1 << 4)
#define MONO_ARCH_INST_FIXED_REG(desc) FALSE
#define MONO_ARCH_INST_IS_REGPAIR(desc) FALSE
#define MONO_ARCH_INST_REGPAIR_REG2(desc,hreg1) (-1)
#define MONO_ARCH_INST_SREG2_MASK(ins) 0
struct MonoLMF {
/*
* If the second lowest bit is set to 1, then this is a MonoLMFExt structure, and
* the other fields are not valid.
*/
gpointer previous_lmf;
gpointer lmf_addr;
MonoMethod *method;
};
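/*
 * Illustrative sketch: the tag bit described above is typically tested by
 * masking previous_lmf, e.g. (hypothetical helper expression)
 *   gboolean is_ext = ((gsize) lmf->previous_lmf & 2) != 0;
 */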
typedef struct {
gpointer cinfo;
} MonoCompileArch;
#define MONO_ARCH_INIT_TOP_LMF_ENTRY(lmf) do { } while (0)
#define MONO_CONTEXT_SET_LLVM_EXC_REG(ctx, exc) do { (ctx)->llvm_exc_reg = (gsize)exc; } while (0)
#define MONO_INIT_CONTEXT_FROM_FUNC(ctx,start_func) do { \
int ___tmp = 99; \
MONO_CONTEXT_SET_IP ((ctx), (start_func)); \
MONO_CONTEXT_SET_BP ((ctx), (0)); \
MONO_CONTEXT_SET_SP ((ctx), (&___tmp)); \
} while (0)
#define MONO_ARCH_VTABLE_REG WASM_REG_0
#define MONO_ARCH_IMT_REG WASM_REG_0
#define MONO_ARCH_RGCTX_REG WASM_REG_0
/* must be at a power of 2 and >= 8 */
#define MONO_ARCH_FRAME_ALIGNMENT 16
#define MONO_ARCH_USE_FPSTACK FALSE
// Does the ABI have a volatile non-parameter register, so tailcall
// can pass context to generics or interfaces?
#define MONO_ARCH_HAVE_VOLATILE_NON_PARAM_REGISTER 0
#define MONO_ARCH_AOT_SUPPORTED 1
#define MONO_ARCH_LLVM_SUPPORTED 1
#define MONO_ARCH_GSHAREDVT_SUPPORTED 1
#define MONO_ARCH_HAVE_FULL_AOT_TRAMPOLINES 1
#define MONO_ARCH_SIMD_INTRINSICS 1
#define MONO_ARCH_INTERPRETER_SUPPORTED 1
#define MONO_ARCH_HAS_REGISTER_ICALL 1
#define MONO_ARCH_HAVE_SDB_TRAMPOLINES 1
#define MONO_ARCH_LLVM_TARGET_LAYOUT "e-m:e-p:32:32-i64:64-n32:64-S128"
#define MONO_ARCH_LLVM_TARGET_TRIPLE "wasm32-unknown-emscripten"
// sdks/wasm/driver.c is C and uses this
G_EXTERN_C void mono_wasm_enable_debugging (int log_level);
void mono_wasm_set_timeout (int timeout);
int mono_wasm_assembly_already_added (const char *assembly_name);
void mono_wasm_print_stack_trace (void);
gboolean
mini_wasm_is_scalar_vtype (MonoType *type);
#endif /* __MONO_MINI_WASM_H__ */
| 1 |
dotnet/runtime | 66,006 | Revert "[mono][jit] Remove support for -O=-float32, i.e. treating r4 values a…" | Reverts this as x86 still uses it.
| vargaz | 2022-03-01T15:15:53Z | 2022-03-01T20:09:47Z | af71404d9a3b38e63408af47cc847ac1a1fcf4e1 | 2dd232a53c38ac874b15fe504df275b660988294 | Revert "[mono][jit] Remove support for -O=-float32, i.e. treating r4 values a…". Reverts this as x86 still uses it.
| ./src/mono/mono/mini/mini.c | /**
* \file
* The new Mono code generator.
*
* Authors:
* Paolo Molaro ([email protected])
* Dietmar Maurer ([email protected])
*
* Copyright 2002-2003 Ximian, Inc.
* Copyright 2003-2010 Novell, Inc.
* Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
* Licensed under the MIT license. See LICENSE file in the project root for full license information.
*/
#include <config.h>
#ifdef HAVE_ALLOCA_H
#include <alloca.h>
#endif
#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif
#include <math.h>
#ifdef HAVE_SYS_TIME_H
#include <sys/time.h>
#endif
#include <mono/utils/memcheck.h>
#include <mono/metadata/assembly.h>
#include <mono/metadata/loader.h>
#include <mono/metadata/tabledefs.h>
#include <mono/metadata/class.h>
#include <mono/metadata/object.h>
#include <mono/metadata/tokentype.h>
#include <mono/metadata/tabledefs.h>
#include <mono/metadata/threads.h>
#include <mono/metadata/appdomain.h>
#include <mono/metadata/debug-helpers.h>
#include <mono/metadata/profiler-private.h>
#include <mono/metadata/mono-config.h>
#include <mono/metadata/environment.h>
#include <mono/metadata/mono-debug.h>
#include <mono/metadata/gc-internals.h>
#include <mono/metadata/threads-types.h>
#include <mono/metadata/verify.h>
#include <mono/metadata/mempool-internals.h>
#include <mono/metadata/runtime.h>
#include <mono/metadata/attrdefs.h>
#include <mono/utils/mono-math.h>
#include <mono/utils/mono-compiler.h>
#include <mono/utils/mono-counters.h>
#include <mono/utils/mono-error-internals.h>
#include <mono/utils/mono-logger-internals.h>
#include <mono/utils/mono-mmap.h>
#include <mono/utils/mono-path.h>
#include <mono/utils/mono-tls.h>
#include <mono/utils/mono-hwcap.h>
#include <mono/utils/dtrace.h>
#include <mono/utils/mono-threads.h>
#include <mono/utils/mono-threads-coop.h>
#include <mono/utils/unlocked.h>
#include <mono/utils/mono-time.h>
#include "mini.h"
#include "seq-points.h"
#include <string.h>
#include <ctype.h>
#include "trace.h"
#include "ir-emit.h"
#include "jit-icalls.h"
#include "mini-gc.h"
#include "llvm-runtime.h"
#include "mini-llvm.h"
#include "lldb.h"
#include "aot-runtime.h"
#include "mini-runtime.h"
MonoCallSpec *mono_jit_trace_calls;
MonoMethodDesc *mono_inject_async_exc_method;
int mono_inject_async_exc_pos;
MonoMethodDesc *mono_break_at_bb_method;
int mono_break_at_bb_bb_num;
gboolean mono_do_x86_stack_align = TRUE;
/* Counters */
static guint32 discarded_code;
static gint64 discarded_jit_time;
#define mono_jit_lock() mono_os_mutex_lock (&jit_mutex)
#define mono_jit_unlock() mono_os_mutex_unlock (&jit_mutex)
static mono_mutex_t jit_mutex;
#ifndef DISABLE_JIT
static guint32 jinfo_try_holes_size;
static MonoBackend *current_backend;
gpointer
mono_realloc_native_code (MonoCompile *cfg)
{
return g_realloc (cfg->native_code, cfg->code_size);
}
typedef struct {
MonoExceptionClause *clause;
MonoBasicBlock *basic_block;
int start_offset;
} TryBlockHole;
/**
* mono_emit_unwind_op:
*
 * Add an unwind op with the given parameters to the list of unwind ops stored in
* cfg->unwind_ops.
*/
void
mono_emit_unwind_op (MonoCompile *cfg, int when, int tag, int reg, int val)
{
MonoUnwindOp *op = (MonoUnwindOp *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoUnwindOp));
op->op = tag;
op->reg = reg;
op->val = val;
op->when = when;
cfg->unwind_ops = g_slist_append_mempool (cfg->mempool, cfg->unwind_ops, op);
if (cfg->verbose_level > 1) {
switch (tag) {
case DW_CFA_def_cfa:
printf ("CFA: [%x] def_cfa: %s+0x%x\n", when, mono_arch_regname (reg), val);
break;
case DW_CFA_def_cfa_register:
printf ("CFA: [%x] def_cfa_reg: %s\n", when, mono_arch_regname (reg));
break;
case DW_CFA_def_cfa_offset:
printf ("CFA: [%x] def_cfa_offset: 0x%x\n", when, val);
break;
case DW_CFA_offset:
printf ("CFA: [%x] offset: %s at cfa-0x%x\n", when, mono_arch_regname (reg), -val);
break;
}
}
}
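/*
 * Illustrative example (FP_REG is a hypothetical frame-pointer register
 * number): a prologue that stores the frame pointer at cfa-8 and then makes
 * it the CFA register would record
 *   mono_emit_unwind_op (cfg, code_offset, DW_CFA_offset, FP_REG, -8);
 *   mono_emit_unwind_op (cfg, code_offset, DW_CFA_def_cfa_register, FP_REG, 0);
 * matching the DW_CFA_* cases printed by the verbose dump above.
 */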
/**
* mono_unlink_bblock:
*
* Unlink two basic blocks.
*/
void
mono_unlink_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
{
int i, pos;
gboolean found;
found = FALSE;
for (i = 0; i < from->out_count; ++i) {
if (to == from->out_bb [i]) {
found = TRUE;
break;
}
}
if (found) {
pos = 0;
for (i = 0; i < from->out_count; ++i) {
if (from->out_bb [i] != to)
from->out_bb [pos ++] = from->out_bb [i];
}
g_assert (pos == from->out_count - 1);
from->out_count--;
}
found = FALSE;
for (i = 0; i < to->in_count; ++i) {
if (from == to->in_bb [i]) {
found = TRUE;
break;
}
}
if (found) {
pos = 0;
for (i = 0; i < to->in_count; ++i) {
if (to->in_bb [i] != from)
to->in_bb [pos ++] = to->in_bb [i];
}
g_assert (pos == to->in_count - 1);
to->in_count--;
}
}
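/*
 * Illustrative sketch: both halves of the unlink above use the same in-place
 * compaction idiom. Its generic form, with hypothetical names:
 */
static int
compact_remove_sketch (gpointer *arr, int count, gpointer value)
{
	int i, pos = 0;
	for (i = 0; i < count; ++i) {
		if (arr [i] != value)
			arr [pos ++] = arr [i];
	}
	return pos; /* the new element count */
}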
/*
* mono_bblocks_linked:
*
 * Return whether BB1 and BB2 are linked in the CFG.
*/
gboolean
mono_bblocks_linked (MonoBasicBlock *bb1, MonoBasicBlock *bb2)
{
int i;
for (i = 0; i < bb1->out_count; ++i) {
if (bb1->out_bb [i] == bb2)
return TRUE;
}
return FALSE;
}
static int
mono_find_block_region_notry (MonoCompile *cfg, int offset)
{
MonoMethodHeader *header = cfg->header;
MonoExceptionClause *clause;
int i;
for (i = 0; i < header->num_clauses; ++i) {
clause = &header->clauses [i];
if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
(offset < (clause->handler_offset)))
return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
else
return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
}
}
return -1;
}
/*
* mono_get_block_region_notry:
*
* Return the region corresponding to REGION, ignoring try clauses nested inside
* finally clauses.
*/
int
mono_get_block_region_notry (MonoCompile *cfg, int region)
{
if ((region & (0xf << 4)) == MONO_REGION_TRY) {
MonoMethodHeader *header = cfg->header;
/*
* This can happen if a try clause is nested inside a finally clause.
*/
int clause_index = (region >> 8) - 1;
g_assert (clause_index >= 0 && clause_index < header->num_clauses);
region = mono_find_block_region_notry (cfg, header->clauses [clause_index].try_offset);
}
return region;
}
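/*
 * Illustrative example: regions are encoded as ((clause_index + 1) << 8) |
 * region_kind | clause_flags, so the finally handler of clause 0 yields
 * (1 << 8) | MONO_REGION_FINALLY | MONO_EXCEPTION_CLAUSE_FINALLY.
 */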
MonoInst *
mono_find_spvar_for_region (MonoCompile *cfg, int region)
{
region = mono_get_block_region_notry (cfg, region);
return (MonoInst *)g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
}
static void
df_visit (MonoBasicBlock *start, int *dfn, MonoBasicBlock **array)
{
int i;
array [*dfn] = start;
/* g_print ("visit %d at %p (BB%ld)\n", *dfn, start->cil_code, start->block_num); */
for (i = 0; i < start->out_count; ++i) {
if (start->out_bb [i]->dfn)
continue;
(*dfn)++;
start->out_bb [i]->dfn = *dfn;
start->out_bb [i]->df_parent = start;
array [*dfn] = start->out_bb [i];
df_visit (start->out_bb [i], dfn, array);
}
}
guint32
mono_reverse_branch_op (guint32 opcode)
{
static const int reverse_map [] = {
CEE_BNE_UN, CEE_BLT, CEE_BLE, CEE_BGT, CEE_BGE,
CEE_BEQ, CEE_BLT_UN, CEE_BLE_UN, CEE_BGT_UN, CEE_BGE_UN
};
static const int reverse_fmap [] = {
OP_FBNE_UN, OP_FBLT, OP_FBLE, OP_FBGT, OP_FBGE,
OP_FBEQ, OP_FBLT_UN, OP_FBLE_UN, OP_FBGT_UN, OP_FBGE_UN
};
static const int reverse_lmap [] = {
OP_LBNE_UN, OP_LBLT, OP_LBLE, OP_LBGT, OP_LBGE,
OP_LBEQ, OP_LBLT_UN, OP_LBLE_UN, OP_LBGT_UN, OP_LBGE_UN
};
static const int reverse_imap [] = {
OP_IBNE_UN, OP_IBLT, OP_IBLE, OP_IBGT, OP_IBGE,
OP_IBEQ, OP_IBLT_UN, OP_IBLE_UN, OP_IBGT_UN, OP_IBGE_UN
};
if (opcode >= CEE_BEQ && opcode <= CEE_BLT_UN) {
opcode = reverse_map [opcode - CEE_BEQ];
} else if (opcode >= OP_FBEQ && opcode <= OP_FBLT_UN) {
opcode = reverse_fmap [opcode - OP_FBEQ];
} else if (opcode >= OP_LBEQ && opcode <= OP_LBLT_UN) {
opcode = reverse_lmap [opcode - OP_LBEQ];
} else if (opcode >= OP_IBEQ && opcode <= OP_IBLT_UN) {
opcode = reverse_imap [opcode - OP_IBEQ];
} else
g_assert_not_reached ();
return opcode;
}
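/*
 * Illustrative examples: the integer map is an involution that pairs each
 * branch with its negation, e.g. CEE_BEQ <-> CEE_BNE_UN, CEE_BGE <-> CEE_BLT
 * and CEE_BGE_UN <-> CEE_BLT_UN; the long, int and float tables follow the
 * same pattern in their own opcode ranges.
 */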
guint
mono_type_to_store_membase (MonoCompile *cfg, MonoType *type)
{
type = mini_get_underlying_type (type);
handle_enum:
switch (type->type) {
case MONO_TYPE_I1:
case MONO_TYPE_U1:
return OP_STOREI1_MEMBASE_REG;
case MONO_TYPE_I2:
case MONO_TYPE_U2:
return OP_STOREI2_MEMBASE_REG;
case MONO_TYPE_I4:
case MONO_TYPE_U4:
return OP_STOREI4_MEMBASE_REG;
case MONO_TYPE_I:
case MONO_TYPE_U:
case MONO_TYPE_PTR:
case MONO_TYPE_FNPTR:
return OP_STORE_MEMBASE_REG;
case MONO_TYPE_CLASS:
case MONO_TYPE_STRING:
case MONO_TYPE_OBJECT:
case MONO_TYPE_SZARRAY:
case MONO_TYPE_ARRAY:
return OP_STORE_MEMBASE_REG;
case MONO_TYPE_I8:
case MONO_TYPE_U8:
return OP_STOREI8_MEMBASE_REG;
case MONO_TYPE_R4:
return OP_STORER4_MEMBASE_REG;
case MONO_TYPE_R8:
return OP_STORER8_MEMBASE_REG;
case MONO_TYPE_VALUETYPE:
if (m_class_is_enumtype (type->data.klass)) {
type = mono_class_enum_basetype_internal (type->data.klass);
goto handle_enum;
}
if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type_internal (type)))
return OP_STOREX_MEMBASE;
return OP_STOREV_MEMBASE;
case MONO_TYPE_TYPEDBYREF:
return OP_STOREV_MEMBASE;
case MONO_TYPE_GENERICINST:
if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type_internal (type)))
return OP_STOREX_MEMBASE;
type = m_class_get_byval_arg (type->data.generic_class->container_class);
goto handle_enum;
case MONO_TYPE_VAR:
case MONO_TYPE_MVAR:
g_assert (mini_type_var_is_vt (type));
return OP_STOREV_MEMBASE;
default:
g_error ("unknown type 0x%02x in type_to_store_membase", type->type);
}
return -1;
}
guint
mono_type_to_load_membase (MonoCompile *cfg, MonoType *type)
{
type = mini_get_underlying_type (type);
switch (type->type) {
case MONO_TYPE_I1:
return OP_LOADI1_MEMBASE;
case MONO_TYPE_U1:
return OP_LOADU1_MEMBASE;
case MONO_TYPE_I2:
return OP_LOADI2_MEMBASE;
case MONO_TYPE_U2:
return OP_LOADU2_MEMBASE;
case MONO_TYPE_I4:
return OP_LOADI4_MEMBASE;
case MONO_TYPE_U4:
return OP_LOADU4_MEMBASE;
case MONO_TYPE_I:
case MONO_TYPE_U:
case MONO_TYPE_PTR:
case MONO_TYPE_FNPTR:
return OP_LOAD_MEMBASE;
case MONO_TYPE_CLASS:
case MONO_TYPE_STRING:
case MONO_TYPE_OBJECT:
case MONO_TYPE_SZARRAY:
case MONO_TYPE_ARRAY:
return OP_LOAD_MEMBASE;
case MONO_TYPE_I8:
case MONO_TYPE_U8:
return OP_LOADI8_MEMBASE;
case MONO_TYPE_R4:
return OP_LOADR4_MEMBASE;
case MONO_TYPE_R8:
return OP_LOADR8_MEMBASE;
case MONO_TYPE_VALUETYPE:
if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type_internal (type)))
return OP_LOADX_MEMBASE;
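		/* Fall through */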
case MONO_TYPE_TYPEDBYREF:
return OP_LOADV_MEMBASE;
case MONO_TYPE_GENERICINST:
if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type_internal (type)))
return OP_LOADX_MEMBASE;
if (mono_type_generic_inst_is_valuetype (type))
return OP_LOADV_MEMBASE;
else
return OP_LOAD_MEMBASE;
break;
case MONO_TYPE_VAR:
case MONO_TYPE_MVAR:
g_assert (cfg->gshared);
g_assert (mini_type_var_is_vt (type));
return OP_LOADV_MEMBASE;
default:
g_error ("unknown type 0x%02x in type_to_load_membase", type->type);
}
return -1;
}
guint
mini_type_to_stind (MonoCompile* cfg, MonoType *type)
{
type = mini_get_underlying_type (type);
if (cfg->gshared && !m_type_is_byref (type) && (type->type == MONO_TYPE_VAR || type->type == MONO_TYPE_MVAR)) {
g_assert (mini_type_var_is_vt (type));
return CEE_STOBJ;
}
return mono_type_to_stind (type);
}
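/*
 * mono_op_imm_to_op:
 *
 * Return the non-immediate variant of the OP_..._IMM opcode OPCODE, or -1 if
 * there is none.
 */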
int
mono_op_imm_to_op (int opcode)
{
switch (opcode) {
case OP_ADD_IMM:
#if SIZEOF_REGISTER == 4
return OP_IADD;
#else
return OP_LADD;
#endif
case OP_IADD_IMM:
return OP_IADD;
case OP_LADD_IMM:
return OP_LADD;
case OP_ISUB_IMM:
return OP_ISUB;
case OP_LSUB_IMM:
return OP_LSUB;
case OP_IMUL_IMM:
return OP_IMUL;
case OP_LMUL_IMM:
return OP_LMUL;
case OP_AND_IMM:
#if SIZEOF_REGISTER == 4
return OP_IAND;
#else
return OP_LAND;
#endif
case OP_OR_IMM:
#if SIZEOF_REGISTER == 4
return OP_IOR;
#else
return OP_LOR;
#endif
case OP_XOR_IMM:
#if SIZEOF_REGISTER == 4
return OP_IXOR;
#else
return OP_LXOR;
#endif
case OP_IAND_IMM:
return OP_IAND;
case OP_LAND_IMM:
return OP_LAND;
case OP_IOR_IMM:
return OP_IOR;
case OP_LOR_IMM:
return OP_LOR;
case OP_IXOR_IMM:
return OP_IXOR;
case OP_LXOR_IMM:
return OP_LXOR;
case OP_ISHL_IMM:
return OP_ISHL;
case OP_LSHL_IMM:
return OP_LSHL;
case OP_ISHR_IMM:
return OP_ISHR;
case OP_LSHR_IMM:
return OP_LSHR;
case OP_ISHR_UN_IMM:
return OP_ISHR_UN;
case OP_LSHR_UN_IMM:
return OP_LSHR_UN;
case OP_IDIV_IMM:
return OP_IDIV;
case OP_LDIV_IMM:
return OP_LDIV;
case OP_IDIV_UN_IMM:
return OP_IDIV_UN;
case OP_LDIV_UN_IMM:
return OP_LDIV_UN;
case OP_IREM_UN_IMM:
return OP_IREM_UN;
case OP_LREM_UN_IMM:
return OP_LREM_UN;
case OP_IREM_IMM:
return OP_IREM;
case OP_LREM_IMM:
return OP_LREM;
case OP_DIV_IMM:
#if SIZEOF_REGISTER == 4
return OP_IDIV;
#else
return OP_LDIV;
#endif
case OP_REM_IMM:
#if SIZEOF_REGISTER == 4
return OP_IREM;
#else
return OP_LREM;
#endif
case OP_ADDCC_IMM:
return OP_ADDCC;
case OP_ADC_IMM:
return OP_ADC;
case OP_SUBCC_IMM:
return OP_SUBCC;
case OP_SBB_IMM:
return OP_SBB;
case OP_IADC_IMM:
return OP_IADC;
case OP_ISBB_IMM:
return OP_ISBB;
case OP_COMPARE_IMM:
return OP_COMPARE;
case OP_ICOMPARE_IMM:
return OP_ICOMPARE;
case OP_LOCALLOC_IMM:
return OP_LOCALLOC;
}
return -1;
}
/*
* mono_decompose_op_imm:
*
* Replace the OP_.._IMM INS with its non IMM variant.
*/
void
mono_decompose_op_imm (MonoCompile *cfg, MonoBasicBlock *bb, MonoInst *ins)
{
int opcode2 = mono_op_imm_to_op (ins->opcode);
MonoInst *temp;
guint32 dreg;
const char *spec = INS_INFO (ins->opcode);
if (spec [MONO_INST_SRC2] == 'l') {
dreg = mono_alloc_lreg (cfg);
/* Load the 64bit constant using decomposed ops */
MONO_INST_NEW (cfg, temp, OP_ICONST);
temp->inst_c0 = ins_get_l_low (ins);
temp->dreg = MONO_LVREG_LS (dreg);
mono_bblock_insert_before_ins (bb, ins, temp);
MONO_INST_NEW (cfg, temp, OP_ICONST);
temp->inst_c0 = ins_get_l_high (ins);
temp->dreg = MONO_LVREG_MS (dreg);
} else {
dreg = mono_alloc_ireg (cfg);
MONO_INST_NEW (cfg, temp, OP_ICONST);
temp->inst_c0 = ins->inst_imm;
temp->dreg = dreg;
}
mono_bblock_insert_before_ins (bb, ins, temp);
if (opcode2 == -1)
g_error ("mono_op_imm_to_op failed for %s\n", mono_inst_name (ins->opcode));
ins->opcode = opcode2;
if (ins->opcode == OP_LOCALLOC)
ins->sreg1 = dreg;
else
ins->sreg2 = dreg;
bb->max_vreg = MAX (bb->max_vreg, cfg->next_vreg);
}
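/*
 * set_vreg_to_inst:
 *
 * Record INST as the variable backing the vreg VREG, growing the
 * cfg->vreg_to_inst table if needed.
 */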
static void
set_vreg_to_inst (MonoCompile *cfg, int vreg, MonoInst *inst)
{
if (vreg >= cfg->vreg_to_inst_len) {
MonoInst **tmp = cfg->vreg_to_inst;
int size = cfg->vreg_to_inst_len;
while (vreg >= cfg->vreg_to_inst_len)
cfg->vreg_to_inst_len = cfg->vreg_to_inst_len ? cfg->vreg_to_inst_len * 2 : 32;
cfg->vreg_to_inst = (MonoInst **)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * cfg->vreg_to_inst_len);
if (size)
memcpy (cfg->vreg_to_inst, tmp, size * sizeof (MonoInst*));
}
cfg->vreg_to_inst [vreg] = inst;
}
#define mono_type_is_long(type) (!m_type_is_byref (type) && ((mono_type_get_underlying_type (type)->type == MONO_TYPE_I8) || (mono_type_get_underlying_type (type)->type == MONO_TYPE_U8)))
#define mono_type_is_float(type) (!m_type_is_byref (type) && (((type)->type == MONO_TYPE_R8) || ((type)->type == MONO_TYPE_R4)))
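/*
 * mono_compile_create_var_for_vreg:
 *
 * Create a variable of type TYPE with the given OPCODE (OP_LOCAL/OP_ARG)
 * whose dreg is the preallocated vreg VREG. On 32 bit targets, long (and,
 * with soft float, float) variables also get dummy OP_LOCAL entries for
 * their two component vregs.
 */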
MonoInst*
mono_compile_create_var_for_vreg (MonoCompile *cfg, MonoType *type, int opcode, int vreg)
{
MonoInst *inst;
int num = cfg->num_varinfo;
gboolean regpair;
type = mini_get_underlying_type (type);
if ((num + 1) >= cfg->varinfo_count) {
int orig_count = cfg->varinfo_count;
cfg->varinfo_count = cfg->varinfo_count ? (cfg->varinfo_count * 2) : 32;
cfg->varinfo = (MonoInst **)g_realloc (cfg->varinfo, sizeof (MonoInst*) * cfg->varinfo_count);
cfg->vars = (MonoMethodVar *)g_realloc (cfg->vars, sizeof (MonoMethodVar) * cfg->varinfo_count);
memset (&cfg->vars [orig_count], 0, (cfg->varinfo_count - orig_count) * sizeof (MonoMethodVar));
}
cfg->stat_allocate_var++;
MONO_INST_NEW (cfg, inst, opcode);
inst->inst_c0 = num;
inst->inst_vtype = type;
inst->klass = mono_class_from_mono_type_internal (type);
mini_type_to_eval_stack_type (cfg, type, inst);
/* if set to 1 the variable is native */
inst->backend.is_pinvoke = 0;
inst->dreg = vreg;
if (mono_class_has_failure (inst->klass))
mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
if (cfg->compute_gc_maps) {
if (m_type_is_byref (type)) {
mono_mark_vreg_as_mp (cfg, vreg);
} else {
if ((MONO_TYPE_ISSTRUCT (type) && m_class_has_references (inst->klass)) || mini_type_is_reference (type)) {
inst->flags |= MONO_INST_GC_TRACK;
mono_mark_vreg_as_ref (cfg, vreg);
}
}
}
#ifdef TARGET_WASM
if (mini_type_is_reference (type))
mono_mark_vreg_as_ref (cfg, vreg);
#endif
cfg->varinfo [num] = inst;
cfg->vars [num].idx = num;
cfg->vars [num].vreg = vreg;
cfg->vars [num].range.first_use.pos.bid = 0xffff;
cfg->vars [num].reg = -1;
if (vreg != -1)
set_vreg_to_inst (cfg, vreg, inst);
#if SIZEOF_REGISTER == 4
if (mono_arch_is_soft_float ()) {
regpair = mono_type_is_long (type) || mono_type_is_float (type);
} else {
regpair = mono_type_is_long (type);
}
#else
regpair = FALSE;
#endif
if (regpair) {
MonoInst *tree;
/*
* These two cannot be allocated using create_var_for_vreg since that would
		 * put them into the cfg->varinfo array, confusing many parts of the JIT.
*/
/*
* Set flags to VOLATILE so SSA skips it.
*/
if (cfg->verbose_level >= 4) {
printf (" Create LVAR R%d (R%d, R%d)\n", inst->dreg, MONO_LVREG_LS (inst->dreg), MONO_LVREG_MS (inst->dreg));
}
if (mono_arch_is_soft_float () && cfg->opt & MONO_OPT_SSA) {
if (mono_type_is_float (type))
inst->flags = MONO_INST_VOLATILE;
}
/* Allocate a dummy MonoInst for the first vreg */
MONO_INST_NEW (cfg, tree, OP_LOCAL);
tree->dreg = MONO_LVREG_LS (inst->dreg);
if (cfg->opt & MONO_OPT_SSA)
tree->flags = MONO_INST_VOLATILE;
tree->inst_c0 = num;
tree->type = STACK_I4;
tree->inst_vtype = mono_get_int32_type ();
tree->klass = mono_class_from_mono_type_internal (tree->inst_vtype);
set_vreg_to_inst (cfg, MONO_LVREG_LS (inst->dreg), tree);
/* Allocate a dummy MonoInst for the second vreg */
MONO_INST_NEW (cfg, tree, OP_LOCAL);
tree->dreg = MONO_LVREG_MS (inst->dreg);
if (cfg->opt & MONO_OPT_SSA)
tree->flags = MONO_INST_VOLATILE;
tree->inst_c0 = num;
tree->type = STACK_I4;
tree->inst_vtype = mono_get_int32_type ();
tree->klass = mono_class_from_mono_type_internal (tree->inst_vtype);
set_vreg_to_inst (cfg, MONO_LVREG_MS (inst->dreg), tree);
}
cfg->num_varinfo++;
if (cfg->verbose_level > 2)
g_print ("created temp %d (R%d) of type %s\n", num, vreg, mono_type_get_name (type));
return inst;
}
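/*
 * mono_compile_create_var:
 *
 * Same as mono_compile_create_var_for_vreg (), but allocate a fresh vreg of
 * the right kind for TYPE first.
 */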
MonoInst*
mono_compile_create_var (MonoCompile *cfg, MonoType *type, int opcode)
{
int dreg;
if (type->type == MONO_TYPE_VALUETYPE && !m_type_is_byref (type)) {
MonoClass *klass = mono_class_from_mono_type_internal (type);
if (m_class_is_enumtype (klass) && m_class_get_image (klass) == mono_get_corlib () && !strcmp (m_class_get_name (klass), "StackCrawlMark")) {
if (!(cfg->method->flags & METHOD_ATTRIBUTE_REQSECOBJ))
g_error ("Method '%s' which contains a StackCrawlMark local variable must be decorated with [System.Security.DynamicSecurityMethod].", mono_method_get_full_name (cfg->method));
}
}
type = mini_get_underlying_type (type);
if (mono_type_is_long (type))
dreg = mono_alloc_dreg (cfg, STACK_I8);
else if (mono_arch_is_soft_float () && mono_type_is_float (type))
dreg = mono_alloc_dreg (cfg, STACK_R8);
else
/* All the others are unified */
dreg = mono_alloc_preg (cfg);
return mono_compile_create_var_for_vreg (cfg, type, opcode, dreg);
}
MonoInst*
mini_get_int_to_float_spill_area (MonoCompile *cfg)
{
#ifdef TARGET_X86
if (!cfg->iconv_raw_var) {
cfg->iconv_raw_var = mono_compile_create_var (cfg, mono_get_int32_type (), OP_LOCAL);
cfg->iconv_raw_var->flags |= MONO_INST_VOLATILE; /*FIXME, use the don't regalloc flag*/
}
return cfg->iconv_raw_var;
#else
return NULL;
#endif
}
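/*
 * mono_mark_vreg_as_ref:
 *
 * Mark the vreg VREG as holding a GC reference, growing cfg->vreg_is_ref if
 * needed.
 */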
void
mono_mark_vreg_as_ref (MonoCompile *cfg, int vreg)
{
if (vreg >= cfg->vreg_is_ref_len) {
gboolean *tmp = cfg->vreg_is_ref;
int size = cfg->vreg_is_ref_len;
while (vreg >= cfg->vreg_is_ref_len)
cfg->vreg_is_ref_len = cfg->vreg_is_ref_len ? cfg->vreg_is_ref_len * 2 : 32;
cfg->vreg_is_ref = (gboolean *)mono_mempool_alloc0 (cfg->mempool, sizeof (gboolean) * cfg->vreg_is_ref_len);
if (size)
memcpy (cfg->vreg_is_ref, tmp, size * sizeof (gboolean));
}
cfg->vreg_is_ref [vreg] = TRUE;
}
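/*
 * mono_mark_vreg_as_mp:
 *
 * Mark the vreg VREG as holding a managed pointer, growing cfg->vreg_is_mp
 * if needed.
 */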
void
mono_mark_vreg_as_mp (MonoCompile *cfg, int vreg)
{
if (vreg >= cfg->vreg_is_mp_len) {
gboolean *tmp = cfg->vreg_is_mp;
int size = cfg->vreg_is_mp_len;
while (vreg >= cfg->vreg_is_mp_len)
cfg->vreg_is_mp_len = cfg->vreg_is_mp_len ? cfg->vreg_is_mp_len * 2 : 32;
cfg->vreg_is_mp = (gboolean *)mono_mempool_alloc0 (cfg->mempool, sizeof (gboolean) * cfg->vreg_is_mp_len);
if (size)
memcpy (cfg->vreg_is_mp, tmp, size * sizeof (gboolean));
}
cfg->vreg_is_mp [vreg] = TRUE;
}
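/*
 * type_from_stack_type:
 *
 * Return the MonoType corresponding to the evaluation stack type of INS.
 */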
static MonoType*
type_from_stack_type (MonoInst *ins)
{
switch (ins->type) {
case STACK_I4: return mono_get_int32_type ();
case STACK_I8: return m_class_get_byval_arg (mono_defaults.int64_class);
case STACK_PTR: return mono_get_int_type ();
case STACK_R8: return m_class_get_byval_arg (mono_defaults.double_class);
case STACK_MP:
/*
		 * This "if" used to be commented out for no documented reason, but
		 * commenting it out breaks bug #80235.
*/
if (ins->klass)
return m_class_get_this_arg (ins->klass);
else
return mono_class_get_byref_type (mono_defaults.object_class);
case STACK_OBJ:
/* ins->klass may not be set for ldnull.
		 * Also, if we have a boxed valuetype, we want an object class,
		 * not the valuetype class.
*/
if (ins->klass && !m_class_is_valuetype (ins->klass))
return m_class_get_byval_arg (ins->klass);
return mono_get_object_type ();
case STACK_VTYPE: return m_class_get_byval_arg (ins->klass);
default:
g_error ("stack type %d to montype not handled\n", ins->type);
}
return NULL;
}
MonoType*
mono_type_from_stack_type (MonoInst *ins)
{
return type_from_stack_type (ins);
}
/*
* mono_add_ins_to_end:
*
* Same as MONO_ADD_INS, but add INST before any branches at the end of BB.
*/
void
mono_add_ins_to_end (MonoBasicBlock *bb, MonoInst *inst)
{
int opcode;
if (!bb->code) {
MONO_ADD_INS (bb, inst);
return;
}
switch (bb->last_ins->opcode) {
case OP_BR:
case OP_BR_REG:
case CEE_BEQ:
case CEE_BGE:
case CEE_BGT:
case CEE_BLE:
case CEE_BLT:
case CEE_BNE_UN:
case CEE_BGE_UN:
case CEE_BGT_UN:
case CEE_BLE_UN:
case CEE_BLT_UN:
case OP_SWITCH:
mono_bblock_insert_before_ins (bb, bb->last_ins, inst);
break;
default:
if (MONO_IS_COND_BRANCH_OP (bb->last_ins)) {
/* Need to insert the ins before the compare */
if (bb->code == bb->last_ins) {
mono_bblock_insert_before_ins (bb, bb->last_ins, inst);
return;
}
if (bb->code->next == bb->last_ins) {
/* Only two instructions */
opcode = bb->code->opcode;
if ((opcode == OP_COMPARE) || (opcode == OP_COMPARE_IMM) || (opcode == OP_ICOMPARE) || (opcode == OP_ICOMPARE_IMM) || (opcode == OP_FCOMPARE) || (opcode == OP_LCOMPARE) || (opcode == OP_LCOMPARE_IMM) || (opcode == OP_RCOMPARE)) {
/* NEW IR */
mono_bblock_insert_before_ins (bb, bb->code, inst);
} else {
mono_bblock_insert_before_ins (bb, bb->last_ins, inst);
}
} else {
opcode = bb->last_ins->prev->opcode;
if ((opcode == OP_COMPARE) || (opcode == OP_COMPARE_IMM) || (opcode == OP_ICOMPARE) || (opcode == OP_ICOMPARE_IMM) || (opcode == OP_FCOMPARE) || (opcode == OP_LCOMPARE) || (opcode == OP_LCOMPARE_IMM) || (opcode == OP_RCOMPARE)) {
/* NEW IR */
mono_bblock_insert_before_ins (bb, bb->last_ins->prev, inst);
} else {
mono_bblock_insert_before_ins (bb, bb->last_ins, inst);
}
}
}
else
MONO_ADD_INS (bb, inst);
break;
}
}
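/*
 * mono_create_jump_table:
 *
 * Add a MONO_PATCH_INFO_SWITCH patch describing the jump table of the
 * OP_SWITCH instruction LABEL, covering the NUM_BLOCKS bblocks in BBS.
 */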
void
mono_create_jump_table (MonoCompile *cfg, MonoInst *label, MonoBasicBlock **bbs, int num_blocks)
{
MonoJumpInfo *ji = (MonoJumpInfo *)mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfo));
MonoJumpInfoBBTable *table;
table = (MonoJumpInfoBBTable *)mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
table->table = bbs;
table->table_size = num_blocks;
ji->ip.label = label;
ji->type = MONO_PATCH_INFO_SWITCH;
ji->data.table = table;
ji->next = cfg->patch_info;
cfg->patch_info = ji;
}
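/*
 * mini_assembly_can_skip_verification:
 *
 * Return whether the assembly of METHOD has the SkipVerification permission.
 * Wrappers other than dynamic methods never qualify, and neither does corlib.
 */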
gboolean
mini_assembly_can_skip_verification (MonoMethod *method)
{
MonoAssembly *assembly = m_class_get_image (method->klass)->assembly;
if (method->wrapper_type != MONO_WRAPPER_NONE && method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD)
return FALSE;
if (assembly->image == mono_defaults.corlib)
return FALSE;
return mono_assembly_has_skip_verification (assembly);
}
typedef struct {
MonoClass *vtype;
GList *active, *inactive;
GSList *slots;
} StackSlotInfo;
static gint
compare_by_interval_start_pos_func (gconstpointer a, gconstpointer b)
{
MonoMethodVar *v1 = (MonoMethodVar*)a;
MonoMethodVar *v2 = (MonoMethodVar*)b;
if (v1 == v2)
return 0;
else if (v1->interval->range && v2->interval->range)
return v1->interval->range->from - v2->interval->range->from;
else if (v1->interval->range)
return -1;
else
return 1;
}
#if 0
#define LSCAN_DEBUG(a) do { a; } while (0)
#else
#define LSCAN_DEBUG(a) do { } while (0) /* non-empty to avoid warning */
#endif
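/*
 * mono_allocate_stack_slots2:
 *
 * Variant of mono_allocate_stack_slots () used when precise liveness
 * intervals are available: it keeps active/inactive interval lists per slot
 * type so the slots of expired intervals can be reused.
 */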
static gint32*
mono_allocate_stack_slots2 (MonoCompile *cfg, gboolean backward, guint32 *stack_size, guint32 *stack_align)
{
int i, slot, offset, size;
guint32 align;
MonoMethodVar *vmv;
MonoInst *inst;
gint32 *offsets;
GList *vars = NULL, *l, *unhandled;
StackSlotInfo *scalar_stack_slots, *vtype_stack_slots, *slot_info;
MonoType *t;
int nvtypes;
int vtype_stack_slots_size = 256;
gboolean reuse_slot;
LSCAN_DEBUG (printf ("Allocate Stack Slots 2 for %s:\n", mono_method_full_name (cfg->method, TRUE)));
scalar_stack_slots = (StackSlotInfo *)mono_mempool_alloc0 (cfg->mempool, sizeof (StackSlotInfo) * MONO_TYPE_PINNED);
vtype_stack_slots = NULL;
nvtypes = 0;
offsets = (gint32 *)mono_mempool_alloc (cfg->mempool, sizeof (gint32) * cfg->num_varinfo);
for (i = 0; i < cfg->num_varinfo; ++i)
offsets [i] = -1;
for (i = cfg->locals_start; i < cfg->num_varinfo; i++) {
inst = cfg->varinfo [i];
vmv = MONO_VARINFO (cfg, i);
if ((inst->flags & MONO_INST_IS_DEAD) || inst->opcode == OP_REGVAR || inst->opcode == OP_REGOFFSET)
continue;
vars = g_list_prepend (vars, vmv);
}
vars = g_list_sort (vars, compare_by_interval_start_pos_func);
/* Sanity check */
/*
i = 0;
for (unhandled = vars; unhandled; unhandled = unhandled->next) {
MonoMethodVar *current = unhandled->data;
if (current->interval->range) {
g_assert (current->interval->range->from >= i);
i = current->interval->range->from;
}
}
*/
offset = 0;
*stack_align = 0;
for (unhandled = vars; unhandled; unhandled = unhandled->next) {
MonoMethodVar *current = (MonoMethodVar *)unhandled->data;
vmv = current;
inst = cfg->varinfo [vmv->idx];
t = mono_type_get_underlying_type (inst->inst_vtype);
if (cfg->gsharedvt && mini_is_gsharedvt_variable_type (t))
continue;
		/* inst->backend.is_pinvoke indicates native-sized value types; this is used by the
		 * pinvoke wrappers when they call functions returning structures */
if (inst->backend.is_pinvoke && MONO_TYPE_ISSTRUCT (t) && t->type != MONO_TYPE_TYPEDBYREF) {
size = mono_class_native_size (mono_class_from_mono_type_internal (t), &align);
}
else {
int ialign;
size = mini_type_stack_size (t, &ialign);
align = ialign;
if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type_internal (t)))
align = 16;
}
reuse_slot = TRUE;
if (cfg->disable_reuse_stack_slots)
reuse_slot = FALSE;
t = mini_get_underlying_type (t);
switch (t->type) {
case MONO_TYPE_GENERICINST:
if (!mono_type_generic_inst_is_valuetype (t)) {
slot_info = &scalar_stack_slots [t->type];
break;
}
/* Fall through */
case MONO_TYPE_VALUETYPE:
if (!vtype_stack_slots)
vtype_stack_slots = (StackSlotInfo *)mono_mempool_alloc0 (cfg->mempool, sizeof (StackSlotInfo) * vtype_stack_slots_size);
for (i = 0; i < nvtypes; ++i)
if (t->data.klass == vtype_stack_slots [i].vtype)
break;
if (i < nvtypes)
slot_info = &vtype_stack_slots [i];
else {
if (nvtypes == vtype_stack_slots_size) {
int new_slots_size = vtype_stack_slots_size * 2;
StackSlotInfo* new_slots = (StackSlotInfo *)mono_mempool_alloc0 (cfg->mempool, sizeof (StackSlotInfo) * new_slots_size);
memcpy (new_slots, vtype_stack_slots, sizeof (StackSlotInfo) * vtype_stack_slots_size);
vtype_stack_slots = new_slots;
vtype_stack_slots_size = new_slots_size;
}
vtype_stack_slots [nvtypes].vtype = t->data.klass;
slot_info = &vtype_stack_slots [nvtypes];
nvtypes ++;
}
if (cfg->disable_reuse_ref_stack_slots)
reuse_slot = FALSE;
break;
case MONO_TYPE_PTR:
case MONO_TYPE_I:
case MONO_TYPE_U:
#if TARGET_SIZEOF_VOID_P == 4
case MONO_TYPE_I4:
#else
case MONO_TYPE_I8:
#endif
if (cfg->disable_ref_noref_stack_slot_share) {
slot_info = &scalar_stack_slots [MONO_TYPE_I];
break;
}
/* Fall through */
case MONO_TYPE_CLASS:
case MONO_TYPE_OBJECT:
case MONO_TYPE_ARRAY:
case MONO_TYPE_SZARRAY:
case MONO_TYPE_STRING:
/* Share non-float stack slots of the same size */
slot_info = &scalar_stack_slots [MONO_TYPE_CLASS];
if (cfg->disable_reuse_ref_stack_slots)
reuse_slot = FALSE;
break;
default:
slot_info = &scalar_stack_slots [t->type];
}
slot = 0xffffff;
if (cfg->comp_done & MONO_COMP_LIVENESS) {
int pos;
gboolean changed;
//printf ("START %2d %08x %08x\n", vmv->idx, vmv->range.first_use.abs_pos, vmv->range.last_use.abs_pos);
if (!current->interval->range) {
if (inst->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT))
pos = ~0;
else {
/* Dead */
inst->flags |= MONO_INST_IS_DEAD;
continue;
}
}
else
pos = current->interval->range->from;
LSCAN_DEBUG (printf ("process R%d ", inst->dreg));
if (current->interval->range)
LSCAN_DEBUG (mono_linterval_print (current->interval));
LSCAN_DEBUG (printf ("\n"));
/* Check for intervals in active which expired or inactive */
changed = TRUE;
/* FIXME: Optimize this */
while (changed) {
changed = FALSE;
for (l = slot_info->active; l != NULL; l = l->next) {
MonoMethodVar *v = (MonoMethodVar*)l->data;
if (v->interval->last_range->to < pos) {
slot_info->active = g_list_delete_link (slot_info->active, l);
slot_info->slots = g_slist_prepend_mempool (cfg->mempool, slot_info->slots, GINT_TO_POINTER (offsets [v->idx]));
LSCAN_DEBUG (printf ("Interval R%d has expired, adding 0x%x to slots\n", cfg->varinfo [v->idx]->dreg, offsets [v->idx]));
changed = TRUE;
break;
}
else if (!mono_linterval_covers (v->interval, pos)) {
slot_info->inactive = g_list_append (slot_info->inactive, v);
slot_info->active = g_list_delete_link (slot_info->active, l);
LSCAN_DEBUG (printf ("Interval R%d became inactive\n", cfg->varinfo [v->idx]->dreg));
changed = TRUE;
break;
}
}
}
/* Check for intervals in inactive which expired or active */
changed = TRUE;
/* FIXME: Optimize this */
while (changed) {
changed = FALSE;
for (l = slot_info->inactive; l != NULL; l = l->next) {
MonoMethodVar *v = (MonoMethodVar*)l->data;
if (v->interval->last_range->to < pos) {
slot_info->inactive = g_list_delete_link (slot_info->inactive, l);
// FIXME: Enabling this seems to cause impossible to debug crashes
//slot_info->slots = g_slist_prepend_mempool (cfg->mempool, slot_info->slots, GINT_TO_POINTER (offsets [v->idx]));
LSCAN_DEBUG (printf ("Interval R%d has expired, adding 0x%x to slots\n", cfg->varinfo [v->idx]->dreg, offsets [v->idx]));
changed = TRUE;
break;
}
else if (mono_linterval_covers (v->interval, pos)) {
slot_info->active = g_list_append (slot_info->active, v);
slot_info->inactive = g_list_delete_link (slot_info->inactive, l);
LSCAN_DEBUG (printf ("\tInterval R%d became active\n", cfg->varinfo [v->idx]->dreg));
changed = TRUE;
break;
}
}
}
/*
* This also handles the case when the variable is used in an
* exception region, as liveness info is not computed there.
*/
/*
* FIXME: All valuetypes are marked as INDIRECT because of LDADDR
* opcodes.
*/
if (! (inst->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT))) {
if (slot_info->slots) {
slot = GPOINTER_TO_INT (slot_info->slots->data);
slot_info->slots = slot_info->slots->next;
}
/* FIXME: We might want to consider the inactive intervals as well if slot_info->slots is empty */
slot_info->active = mono_varlist_insert_sorted (cfg, slot_info->active, vmv, TRUE);
}
}
#if 0
{
static int count = 0;
count ++;
if (count == atoi (g_getenv ("COUNT3")))
printf ("LAST: %s\n", mono_method_full_name (cfg->method, TRUE));
if (count > atoi (g_getenv ("COUNT3")))
slot = 0xffffff;
else
mono_print_ins (inst);
}
#endif
LSCAN_DEBUG (printf ("R%d %s -> 0x%x\n", inst->dreg, mono_type_full_name (t), slot));
if (inst->flags & MONO_INST_LMF) {
size = MONO_ABI_SIZEOF (MonoLMF);
align = sizeof (target_mgreg_t);
reuse_slot = FALSE;
}
if (!reuse_slot)
slot = 0xffffff;
if (slot == 0xffffff) {
/*
			 * Always align valuetype slots to sizeof (target_mgreg_t) to allow more
			 * efficient copying (and to work around the fact that OP_MEMCPY
			 * and OP_MEMSET ignore alignment).
*/
if (MONO_TYPE_ISSTRUCT (t)) {
align = MAX (align, sizeof (target_mgreg_t));
align = MAX (align, mono_class_min_align (mono_class_from_mono_type_internal (t)));
}
if (backward) {
offset += size;
offset += align - 1;
offset &= ~(align - 1);
slot = offset;
}
else {
offset += align - 1;
offset &= ~(align - 1);
slot = offset;
offset += size;
}
if (*stack_align == 0)
*stack_align = align;
}
offsets [vmv->idx] = slot;
}
g_list_free (vars);
for (i = 0; i < MONO_TYPE_PINNED; ++i) {
if (scalar_stack_slots [i].active)
g_list_free (scalar_stack_slots [i].active);
}
for (i = 0; i < nvtypes; ++i) {
if (vtype_stack_slots [i].active)
g_list_free (vtype_stack_slots [i].active);
}
cfg->stat_locals_stack_size += offset;
*stack_size = offset;
return offsets;
}
/*
* mono_allocate_stack_slots:
*
* Allocate stack slots for all non register allocated variables using a
* linear scan algorithm.
* Returns: an array of stack offsets.
* STACK_SIZE is set to the amount of stack space needed.
* STACK_ALIGN is set to the alignment needed by the locals area.
*/
gint32*
mono_allocate_stack_slots (MonoCompile *cfg, gboolean backward, guint32 *stack_size, guint32 *stack_align)
{
int i, slot, offset, size;
guint32 align;
MonoMethodVar *vmv;
MonoInst *inst;
gint32 *offsets;
GList *vars = NULL, *l;
StackSlotInfo *scalar_stack_slots, *vtype_stack_slots, *slot_info;
MonoType *t;
int nvtypes;
int vtype_stack_slots_size = 256;
gboolean reuse_slot;
if ((cfg->num_varinfo > 0) && MONO_VARINFO (cfg, 0)->interval)
return mono_allocate_stack_slots2 (cfg, backward, stack_size, stack_align);
scalar_stack_slots = (StackSlotInfo *)mono_mempool_alloc0 (cfg->mempool, sizeof (StackSlotInfo) * MONO_TYPE_PINNED);
vtype_stack_slots = NULL;
nvtypes = 0;
offsets = (gint32 *)mono_mempool_alloc (cfg->mempool, sizeof (gint32) * cfg->num_varinfo);
for (i = 0; i < cfg->num_varinfo; ++i)
offsets [i] = -1;
for (i = cfg->locals_start; i < cfg->num_varinfo; i++) {
inst = cfg->varinfo [i];
vmv = MONO_VARINFO (cfg, i);
if ((inst->flags & MONO_INST_IS_DEAD) || inst->opcode == OP_REGVAR || inst->opcode == OP_REGOFFSET)
continue;
vars = g_list_prepend (vars, vmv);
}
vars = mono_varlist_sort (cfg, vars, 0);
offset = 0;
*stack_align = sizeof (target_mgreg_t);
for (l = vars; l; l = l->next) {
vmv = (MonoMethodVar *)l->data;
inst = cfg->varinfo [vmv->idx];
t = mono_type_get_underlying_type (inst->inst_vtype);
if (cfg->gsharedvt && mini_is_gsharedvt_variable_type (t))
continue;
		/* inst->backend.is_pinvoke indicates native-sized value types; this is used by the
		 * pinvoke wrappers when they call functions returning structures */
if (inst->backend.is_pinvoke && MONO_TYPE_ISSTRUCT (t) && t->type != MONO_TYPE_TYPEDBYREF) {
size = mono_class_native_size (mono_class_from_mono_type_internal (t), &align);
} else {
int ialign;
size = mini_type_stack_size (t, &ialign);
align = ialign;
if (mono_class_has_failure (mono_class_from_mono_type_internal (t)))
mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type_internal (t)))
align = 16;
}
reuse_slot = TRUE;
if (cfg->disable_reuse_stack_slots)
reuse_slot = FALSE;
t = mini_get_underlying_type (t);
switch (t->type) {
case MONO_TYPE_GENERICINST:
if (!mono_type_generic_inst_is_valuetype (t)) {
slot_info = &scalar_stack_slots [t->type];
break;
}
/* Fall through */
case MONO_TYPE_VALUETYPE:
if (!vtype_stack_slots)
vtype_stack_slots = (StackSlotInfo *)mono_mempool_alloc0 (cfg->mempool, sizeof (StackSlotInfo) * vtype_stack_slots_size);
for (i = 0; i < nvtypes; ++i)
if (t->data.klass == vtype_stack_slots [i].vtype)
break;
if (i < nvtypes)
slot_info = &vtype_stack_slots [i];
else {
if (nvtypes == vtype_stack_slots_size) {
int new_slots_size = vtype_stack_slots_size * 2;
StackSlotInfo* new_slots = (StackSlotInfo *)mono_mempool_alloc0 (cfg->mempool, sizeof (StackSlotInfo) * new_slots_size);
memcpy (new_slots, vtype_stack_slots, sizeof (StackSlotInfo) * vtype_stack_slots_size);
vtype_stack_slots = new_slots;
vtype_stack_slots_size = new_slots_size;
}
vtype_stack_slots [nvtypes].vtype = t->data.klass;
slot_info = &vtype_stack_slots [nvtypes];
nvtypes ++;
}
if (cfg->disable_reuse_ref_stack_slots)
reuse_slot = FALSE;
break;
case MONO_TYPE_PTR:
case MONO_TYPE_I:
case MONO_TYPE_U:
#if TARGET_SIZEOF_VOID_P == 4
case MONO_TYPE_I4:
#else
case MONO_TYPE_I8:
#endif
if (cfg->disable_ref_noref_stack_slot_share) {
slot_info = &scalar_stack_slots [MONO_TYPE_I];
break;
}
/* Fall through */
case MONO_TYPE_CLASS:
case MONO_TYPE_OBJECT:
case MONO_TYPE_ARRAY:
case MONO_TYPE_SZARRAY:
case MONO_TYPE_STRING:
/* Share non-float stack slots of the same size */
slot_info = &scalar_stack_slots [MONO_TYPE_CLASS];
if (cfg->disable_reuse_ref_stack_slots)
reuse_slot = FALSE;
break;
case MONO_TYPE_VAR:
case MONO_TYPE_MVAR:
slot_info = &scalar_stack_slots [t->type];
break;
default:
slot_info = &scalar_stack_slots [t->type];
break;
}
slot = 0xffffff;
if (cfg->comp_done & MONO_COMP_LIVENESS) {
//printf ("START %2d %08x %08x\n", vmv->idx, vmv->range.first_use.abs_pos, vmv->range.last_use.abs_pos);
/* expire old intervals in active */
while (slot_info->active) {
MonoMethodVar *amv = (MonoMethodVar *)slot_info->active->data;
if (amv->range.last_use.abs_pos > vmv->range.first_use.abs_pos)
break;
//printf ("EXPIR %2d %08x %08x C%d R%d\n", amv->idx, amv->range.first_use.abs_pos, amv->range.last_use.abs_pos, amv->spill_costs, amv->reg);
slot_info->active = g_list_delete_link (slot_info->active, slot_info->active);
slot_info->slots = g_slist_prepend_mempool (cfg->mempool, slot_info->slots, GINT_TO_POINTER (offsets [amv->idx]));
}
/*
* This also handles the case when the variable is used in an
* exception region, as liveness info is not computed there.
*/
/*
* FIXME: All valuetypes are marked as INDIRECT because of LDADDR
* opcodes.
*/
if (! (inst->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT))) {
if (slot_info->slots) {
slot = GPOINTER_TO_INT (slot_info->slots->data);
slot_info->slots = slot_info->slots->next;
}
slot_info->active = mono_varlist_insert_sorted (cfg, slot_info->active, vmv, TRUE);
}
}
#if 0
{
static int count = 0;
count ++;
if (count == atoi (g_getenv ("COUNT")))
printf ("LAST: %s\n", mono_method_full_name (cfg->method, TRUE));
if (count > atoi (g_getenv ("COUNT")))
slot = 0xffffff;
else
mono_print_ins (inst);
}
#endif
if (inst->flags & MONO_INST_LMF) {
/*
* This variable represents a MonoLMF structure, which has no corresponding
* CLR type, so hard-code its size/alignment.
*/
size = MONO_ABI_SIZEOF (MonoLMF);
align = sizeof (target_mgreg_t);
reuse_slot = FALSE;
}
if (!reuse_slot)
slot = 0xffffff;
if (slot == 0xffffff) {
/*
			 * Always align valuetype slots to sizeof (target_mgreg_t) to allow more
			 * efficient copying (and to work around the fact that OP_MEMCPY
			 * and OP_MEMSET ignore alignment).
*/
if (MONO_TYPE_ISSTRUCT (t)) {
align = MAX (align, sizeof (target_mgreg_t));
align = MAX (align, mono_class_min_align (mono_class_from_mono_type_internal (t)));
/*
* Align the size too so the code generated for passing vtypes in
* registers doesn't overwrite random locals.
*/
size = (size + (align - 1)) & ~(align -1);
}
if (backward) {
offset += size;
offset += align - 1;
offset &= ~(align - 1);
slot = offset;
}
else {
offset += align - 1;
offset &= ~(align - 1);
slot = offset;
offset += size;
}
*stack_align = MAX (*stack_align, align);
}
offsets [vmv->idx] = slot;
}
g_list_free (vars);
for (i = 0; i < MONO_TYPE_PINNED; ++i) {
if (scalar_stack_slots [i].active)
g_list_free (scalar_stack_slots [i].active);
}
for (i = 0; i < nvtypes; ++i) {
if (vtype_stack_slots [i].active)
g_list_free (vtype_stack_slots [i].active);
}
cfg->stat_locals_stack_size += offset;
*stack_size = offset;
return offsets;
}
#define EMUL_HIT_SHIFT 3
#define EMUL_HIT_MASK ((1 << EMUL_HIT_SHIFT) - 1)
/* small hit bitmap cache */
static mono_byte emul_opcode_hit_cache [(OP_LAST>>EMUL_HIT_SHIFT) + 1] = {0};
static short emul_opcode_num = 0;
static short emul_opcode_alloced = 0;
static short *emul_opcode_opcodes;
static MonoJitICallInfo **emul_opcode_map;
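/*
 * mono_find_jit_opcode_emulation:
 *
 * Return the icall info registered as the emulation for OPCODE, or NULL.
 * The hit cache is conservative: several opcodes can map to the same bit, so
 * a set bit only means the linear search below may succeed; collisions cost
 * an extra scan but never produce wrong results.
 */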
MonoJitICallInfo *
mono_find_jit_opcode_emulation (int opcode)
{
g_assert (opcode >= 0 && opcode <= OP_LAST);
if (emul_opcode_hit_cache [opcode >> (EMUL_HIT_SHIFT + 3)] & (1 << (opcode & EMUL_HIT_MASK))) {
int i;
for (i = 0; i < emul_opcode_num; ++i) {
if (emul_opcode_opcodes [i] == opcode)
return emul_opcode_map [i];
}
}
return NULL;
}
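/*
 * mini_register_opcode_emulation:
 *
 * Register the JIT icall described by INFO/FUNC as the emulation for OPCODE,
 * growing the emulation tables and updating the hit cache.
 */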
void
mini_register_opcode_emulation (int opcode, MonoJitICallInfo *info, const char *name, MonoMethodSignature *sig, gpointer func, const char *symbol, gboolean no_wrapper)
{
g_assert (info);
g_assert (!sig->hasthis);
g_assert (sig->param_count < 3);
mono_register_jit_icall_info (info, func, name, sig, no_wrapper, symbol);
if (emul_opcode_num >= emul_opcode_alloced) {
int incr = emul_opcode_alloced? emul_opcode_alloced/2: 16;
emul_opcode_alloced += incr;
emul_opcode_map = (MonoJitICallInfo **)g_realloc (emul_opcode_map, sizeof (emul_opcode_map [0]) * emul_opcode_alloced);
emul_opcode_opcodes = (short *)g_realloc (emul_opcode_opcodes, sizeof (emul_opcode_opcodes [0]) * emul_opcode_alloced);
}
emul_opcode_map [emul_opcode_num] = info;
emul_opcode_opcodes [emul_opcode_num] = opcode;
emul_opcode_num++;
emul_opcode_hit_cache [opcode >> (EMUL_HIT_SHIFT + 3)] |= (1 << (opcode & EMUL_HIT_MASK));
}
static void
print_dfn (MonoCompile *cfg)
{
int i, j;
char *code;
MonoBasicBlock *bb;
MonoInst *c;
{
char *method_name = mono_method_full_name (cfg->method, TRUE);
g_print ("IR code for method %s\n", method_name);
g_free (method_name);
}
for (i = 0; i < cfg->num_bblocks; ++i) {
bb = cfg->bblocks [i];
/*if (bb->cil_code) {
char* code1, *code2;
code1 = mono_disasm_code_one (NULL, cfg->method, bb->cil_code, NULL);
if (bb->last_ins->cil_code)
code2 = mono_disasm_code_one (NULL, cfg->method, bb->last_ins->cil_code, NULL);
else
code2 = g_strdup ("");
code1 [strlen (code1) - 1] = 0;
code = g_strdup_printf ("%s -> %s", code1, code2);
g_free (code1);
g_free (code2);
} else*/
code = g_strdup ("\n");
g_print ("\nBB%d (%d) (len: %d): %s", bb->block_num, i, bb->cil_length, code);
MONO_BB_FOR_EACH_INS (bb, c) {
mono_print_ins_index (-1, c);
}
g_print ("\tprev:");
for (j = 0; j < bb->in_count; ++j) {
g_print (" BB%d", bb->in_bb [j]->block_num);
}
g_print ("\t\tsucc:");
for (j = 0; j < bb->out_count; ++j) {
g_print (" BB%d", bb->out_bb [j]->block_num);
}
g_print ("\n\tidom: BB%d\n", bb->idom? bb->idom->block_num: -1);
if (bb->idom)
g_assert (mono_bitset_test_fast (bb->dominators, bb->idom->dfn));
if (bb->dominators)
mono_blockset_print (cfg, bb->dominators, "\tdominators", bb->idom? bb->idom->dfn: -1);
if (bb->dfrontier)
mono_blockset_print (cfg, bb->dfrontier, "\tdfrontier", -1);
g_free (code);
}
g_print ("\n");
}
void
mono_bblock_add_inst (MonoBasicBlock *bb, MonoInst *inst)
{
MONO_ADD_INS (bb, inst);
}
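/*
 * mono_bblock_insert_after_ins:
 *
 * Insert INS_TO_INSERT into BB after INS, or at the start of BB if INS is
 * NULL.
 */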
void
mono_bblock_insert_after_ins (MonoBasicBlock *bb, MonoInst *ins, MonoInst *ins_to_insert)
{
if (ins == NULL) {
ins = bb->code;
bb->code = ins_to_insert;
/* Link with next */
ins_to_insert->next = ins;
if (ins)
ins->prev = ins_to_insert;
if (bb->last_ins == NULL)
bb->last_ins = ins_to_insert;
} else {
/* Link with next */
ins_to_insert->next = ins->next;
if (ins->next)
ins->next->prev = ins_to_insert;
/* Link with previous */
ins->next = ins_to_insert;
ins_to_insert->prev = ins;
if (bb->last_ins == ins)
bb->last_ins = ins_to_insert;
}
}
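/*
 * mono_bblock_insert_before_ins:
 *
 * Insert INS_TO_INSERT into BB before INS, or at the start of BB if INS is
 * NULL.
 */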
void
mono_bblock_insert_before_ins (MonoBasicBlock *bb, MonoInst *ins, MonoInst *ins_to_insert)
{
if (ins == NULL) {
ins = bb->code;
if (ins)
ins->prev = ins_to_insert;
bb->code = ins_to_insert;
ins_to_insert->next = ins;
if (bb->last_ins == NULL)
bb->last_ins = ins_to_insert;
} else {
/* Link with previous */
if (ins->prev)
ins->prev->next = ins_to_insert;
ins_to_insert->prev = ins->prev;
/* Link with next */
ins->prev = ins_to_insert;
ins_to_insert->next = ins;
if (bb->code == ins)
bb->code = ins_to_insert;
}
}
/*
* mono_verify_bblock:
*
* Verify that the next and prev pointers are consistent inside the instructions in BB.
*/
void
mono_verify_bblock (MonoBasicBlock *bb)
{
MonoInst *ins, *prev;
prev = NULL;
for (ins = bb->code; ins; ins = ins->next) {
g_assert (ins->prev == prev);
prev = ins;
}
if (bb->last_ins)
g_assert (!bb->last_ins->next);
}
/*
* mono_verify_cfg:
*
* Perform consistency checks on the JIT data structures and the IR
*/
void
mono_verify_cfg (MonoCompile *cfg)
{
MonoBasicBlock *bb;
for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
mono_verify_bblock (bb);
}
// This will free many fields in cfg to save
// memory. Note that this must be safe to call
// multiple times. It must be idempotent.
void
mono_empty_compile (MonoCompile *cfg)
{
mono_free_loop_info (cfg);
// These live in the mempool, and so must be freed
// first
for (GSList *l = cfg->headers_to_free; l; l = l->next) {
mono_metadata_free_mh ((MonoMethodHeader *)l->data);
}
cfg->headers_to_free = NULL;
if (cfg->mempool) {
//mono_mempool_stats (cfg->mempool);
mono_mempool_destroy (cfg->mempool);
cfg->mempool = NULL;
}
g_free (cfg->varinfo);
cfg->varinfo = NULL;
g_free (cfg->vars);
cfg->vars = NULL;
if (cfg->rs) {
mono_regstate_free (cfg->rs);
cfg->rs = NULL;
}
}
void
mono_destroy_compile (MonoCompile *cfg)
{
mono_empty_compile (cfg);
mono_metadata_free_mh (cfg->header);
g_hash_table_destroy (cfg->spvars);
g_hash_table_destroy (cfg->exvars);
g_list_free (cfg->ldstr_list);
g_hash_table_destroy (cfg->token_info_hash);
g_hash_table_destroy (cfg->abs_patches);
mono_debug_free_method (cfg);
g_free (cfg->varinfo);
g_free (cfg->vars);
g_free (cfg->exception_message);
g_free (cfg);
}
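/*
 * mono_add_patch_info:
 *
 * Prepend a patch of type TYPE at code offset IP with target TARGET to
 * cfg->patch_info.
 */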
void
mono_add_patch_info (MonoCompile *cfg, int ip, MonoJumpInfoType type, gconstpointer target)
{
if (type == MONO_PATCH_INFO_NONE)
return;
MonoJumpInfo *ji = (MonoJumpInfo *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfo));
ji->ip.i = ip;
ji->type = type;
ji->data.target = target;
ji->next = cfg->patch_info;
cfg->patch_info = ji;
}
void
mono_add_patch_info_rel (MonoCompile *cfg, int ip, MonoJumpInfoType type, gconstpointer target, int relocation)
{
if (type == MONO_PATCH_INFO_NONE)
return;
MonoJumpInfo *ji = (MonoJumpInfo *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfo));
ji->ip.i = ip;
ji->type = type;
ji->relocation = relocation;
ji->data.target = target;
ji->next = cfg->patch_info;
cfg->patch_info = ji;
}
void
mono_remove_patch_info (MonoCompile *cfg, int ip)
{
MonoJumpInfo **ji = &cfg->patch_info;
while (*ji) {
if ((*ji)->ip.i == ip)
*ji = (*ji)->next;
else
ji = &((*ji)->next);
}
}
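/*
 * mono_add_seq_point:
 *
 * Record the sequence point INS at NATIVE_OFFSET, adding it to
 * cfg->seq_points and, if BB is non-NULL, to the per-bblock list.
 */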
void
mono_add_seq_point (MonoCompile *cfg, MonoBasicBlock *bb, MonoInst *ins, int native_offset)
{
ins->inst_offset = native_offset;
g_ptr_array_add (cfg->seq_points, ins);
if (bb) {
bb->seq_points = g_slist_prepend_mempool (cfg->mempool, bb->seq_points, ins);
bb->last_seq_point = ins;
}
}
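/*
 * mono_add_var_location:
 *
 * Record that VAR lives in register REG (or at OFFSET from REG) between
 * native offsets FROM and TO. Only the 'this' argument and the rgctx
 * variable are tracked.
 */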
void
mono_add_var_location (MonoCompile *cfg, MonoInst *var, gboolean is_reg, int reg, int offset, int from, int to)
{
MonoDwarfLocListEntry *entry = (MonoDwarfLocListEntry *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoDwarfLocListEntry));
if (is_reg)
g_assert (offset == 0);
entry->is_reg = is_reg;
entry->reg = reg;
entry->offset = offset;
entry->from = from;
entry->to = to;
if (var == cfg->args [0])
cfg->this_loclist = g_slist_append_mempool (cfg->mempool, cfg->this_loclist, entry);
else if (var == cfg->rgctx_var)
cfg->rgctx_loclist = g_slist_append_mempool (cfg->mempool, cfg->rgctx_loclist, entry);
}
static void
mono_apply_volatile (MonoInst *inst, MonoBitSet *set, gsize index)
{
inst->flags |= mono_bitset_test_safe (set, index) ? MONO_INST_VOLATILE : 0;
}
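/*
 * mono_compile_create_vars:
 *
 * Create the MonoInsts for the return value, the arguments and the locals
 * of the method, plus the arch specific and LMF variables.
 */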
static void
mono_compile_create_vars (MonoCompile *cfg)
{
MonoMethodSignature *sig;
MonoMethodHeader *header;
int i;
header = cfg->header;
sig = mono_method_signature_internal (cfg->method);
if (!MONO_TYPE_IS_VOID (sig->ret)) {
cfg->ret = mono_compile_create_var (cfg, sig->ret, OP_ARG);
/* Inhibit optimizations */
cfg->ret->flags |= MONO_INST_VOLATILE;
}
if (cfg->verbose_level > 2)
g_print ("creating vars\n");
cfg->args = (MonoInst **)mono_mempool_alloc0 (cfg->mempool, (sig->param_count + sig->hasthis) * sizeof (MonoInst*));
if (sig->hasthis) {
MonoInst* arg = mono_compile_create_var (cfg, m_class_get_this_arg (cfg->method->klass), OP_ARG);
mono_apply_volatile (arg, header->volatile_args, 0);
cfg->args [0] = arg;
cfg->this_arg = arg;
}
for (i = 0; i < sig->param_count; ++i) {
MonoInst* arg = mono_compile_create_var (cfg, sig->params [i], OP_ARG);
mono_apply_volatile (arg, header->volatile_args, i + sig->hasthis);
cfg->args [i + sig->hasthis] = arg;
}
if (cfg->verbose_level > 2) {
if (cfg->ret) {
printf ("\treturn : ");
mono_print_ins (cfg->ret);
}
if (sig->hasthis) {
printf ("\tthis: ");
mono_print_ins (cfg->args [0]);
}
for (i = 0; i < sig->param_count; ++i) {
printf ("\targ [%d]: ", i);
mono_print_ins (cfg->args [i + sig->hasthis]);
}
}
cfg->locals_start = cfg->num_varinfo;
cfg->locals = (MonoInst **)mono_mempool_alloc0 (cfg->mempool, header->num_locals * sizeof (MonoInst*));
if (cfg->verbose_level > 2)
g_print ("creating locals\n");
for (i = 0; i < header->num_locals; ++i) {
if (cfg->verbose_level > 2)
g_print ("\tlocal [%d]: ", i);
cfg->locals [i] = mono_compile_create_var (cfg, header->locals [i], OP_LOCAL);
mono_apply_volatile (cfg->locals [i], header->volatile_locals, i);
}
if (cfg->verbose_level > 2)
g_print ("locals done\n");
#ifdef ENABLE_LLVM
if (COMPILE_LLVM (cfg))
mono_llvm_create_vars (cfg);
else
mono_arch_create_vars (cfg);
#else
mono_arch_create_vars (cfg);
#endif
if (cfg->method->save_lmf && cfg->create_lmf_var) {
MonoInst *lmf_var = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL);
lmf_var->flags |= MONO_INST_VOLATILE;
lmf_var->flags |= MONO_INST_LMF;
cfg->lmf_var = lmf_var;
}
}
void
mono_print_code (MonoCompile *cfg, const char* msg)
{
MonoBasicBlock *bb;
for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
mono_print_bb (bb, msg);
}
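/*
 * mono_postprocess_patches:
 *
 * Resolve MONO_PATCH_INFO_ABS patches into their real patch type and turn
 * the bblock tables of MONO_PATCH_INFO_SWITCH patches into tables of native
 * offsets.
 */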
static void
mono_postprocess_patches (MonoCompile *cfg)
{
MonoJumpInfo *patch_info;
int i;
for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
switch (patch_info->type) {
case MONO_PATCH_INFO_ABS: {
/*
* Change patches of type MONO_PATCH_INFO_ABS into patches describing the
* absolute address.
*/
if (cfg->abs_patches) {
MonoJumpInfo *abs_ji = (MonoJumpInfo *)g_hash_table_lookup (cfg->abs_patches, patch_info->data.target);
if (abs_ji) {
patch_info->type = abs_ji->type;
patch_info->data.target = abs_ji->data.target;
}
}
break;
}
case MONO_PATCH_INFO_SWITCH: {
gpointer *table;
if (cfg->method->dynamic) {
table = (void **)mono_code_manager_reserve (cfg->dynamic_info->code_mp, sizeof (gpointer) * patch_info->data.table->table_size);
} else {
table = (void **)mono_mem_manager_code_reserve (cfg->mem_manager, sizeof (gpointer) * patch_info->data.table->table_size);
}
for (i = 0; i < patch_info->data.table->table_size; i++) {
/* Might be NULL if the switch is eliminated */
if (patch_info->data.table->table [i]) {
g_assert (patch_info->data.table->table [i]->native_offset);
table [i] = GINT_TO_POINTER (patch_info->data.table->table [i]->native_offset);
} else {
table [i] = NULL;
}
}
patch_info->data.table->table = (MonoBasicBlock**)table;
break;
}
default:
/* do nothing */
break;
}
}
}
/* These patches require the JitInfo of the compiled method to already be in place when they are used */
static void
mono_postprocess_patches_after_ji_publish (MonoCompile *cfg)
{
MonoJumpInfo *patch_info;
for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
switch (patch_info->type) {
case MONO_PATCH_INFO_METHOD_JUMP: {
unsigned char *ip = cfg->native_code + patch_info->ip.i;
mini_register_jump_site (patch_info->data.method, ip);
break;
}
default:
/* do nothing */
break;
}
}
}
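/*
 * mono_codegen:
 *
 * Emit native code for CFG: run the arch lowering, peephole and local
 * register allocation passes over each bblock, emit the prolog, the bblocks,
 * the epilog and the exception code, copy the result into its final
 * allocation and apply the collected patches.
 */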
void
mono_codegen (MonoCompile *cfg)
{
MonoBasicBlock *bb;
int max_epilog_size;
guint8 *code;
MonoMemoryManager *code_mem_manager = cfg->mem_manager;
guint unwindlen = 0;
for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
cfg->spill_count = 0;
/* we reuse dfn here */
/* bb->dfn = bb_count++; */
mono_arch_lowering_pass (cfg, bb);
if (cfg->opt & MONO_OPT_PEEPHOLE)
mono_arch_peephole_pass_1 (cfg, bb);
mono_local_regalloc (cfg, bb);
if (cfg->opt & MONO_OPT_PEEPHOLE)
mono_arch_peephole_pass_2 (cfg, bb);
if (cfg->gen_seq_points && !cfg->gen_sdb_seq_points)
mono_bb_deduplicate_op_il_seq_points (cfg, bb);
}
code = mono_arch_emit_prolog (cfg);
set_code_cursor (cfg, code);
cfg->prolog_end = cfg->code_len;
cfg->cfa_reg = cfg->cur_cfa_reg;
cfg->cfa_offset = cfg->cur_cfa_offset;
mono_debug_open_method (cfg);
/* emit code all basic blocks */
for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
bb->native_offset = cfg->code_len;
bb->real_native_offset = cfg->code_len;
//if ((bb == cfg->bb_entry) || !(bb->region == -1 && !bb->dfn))
mono_arch_output_basic_block (cfg, bb);
bb->native_length = cfg->code_len - bb->native_offset;
if (bb == cfg->bb_exit) {
cfg->epilog_begin = cfg->code_len;
mono_arch_emit_epilog (cfg);
cfg->epilog_end = cfg->code_len;
}
if (bb->clause_holes) {
GList *tmp;
for (tmp = bb->clause_holes; tmp; tmp = tmp->prev)
mono_cfg_add_try_hole (cfg, ((MonoLeaveClause *) tmp->data)->clause, cfg->native_code + bb->native_offset, bb);
}
}
mono_arch_emit_exceptions (cfg);
max_epilog_size = 0;
cfg->code_size = cfg->code_len + max_epilog_size;
/* fixme: align to MONO_ARCH_CODE_ALIGNMENT */
#ifdef MONO_ARCH_HAVE_UNWIND_TABLE
if (!cfg->compile_aot)
unwindlen = mono_arch_unwindinfo_init_method_unwind_info (cfg);
#endif
if (cfg->method->dynamic) {
/* Allocate the code into a separate memory pool so it can be freed */
cfg->dynamic_info = g_new0 (MonoJitDynamicMethodInfo, 1);
cfg->dynamic_info->code_mp = mono_code_manager_new_dynamic ();
MonoJitMemoryManager *jit_mm = (MonoJitMemoryManager*)cfg->jit_mm;
jit_mm_lock (jit_mm);
if (!jit_mm->dynamic_code_hash)
jit_mm->dynamic_code_hash = g_hash_table_new (NULL, NULL);
g_hash_table_insert (jit_mm->dynamic_code_hash, cfg->method, cfg->dynamic_info);
jit_mm_unlock (jit_mm);
code = (guint8 *)mono_code_manager_reserve (cfg->dynamic_info->code_mp, cfg->code_size + cfg->thunk_area + unwindlen);
} else {
code = (guint8 *)mono_mem_manager_code_reserve (code_mem_manager, cfg->code_size + cfg->thunk_area + unwindlen);
}
mono_codeman_enable_write ();
if (cfg->thunk_area) {
cfg->thunks_offset = cfg->code_size + unwindlen;
cfg->thunks = code + cfg->thunks_offset;
memset (cfg->thunks, 0, cfg->thunk_area);
}
g_assert (code);
memcpy (code, cfg->native_code, cfg->code_len);
g_free (cfg->native_code);
cfg->native_code = code;
code = cfg->native_code + cfg->code_len;
/* g_assert (((int)cfg->native_code & (MONO_ARCH_CODE_ALIGNMENT - 1)) == 0); */
mono_postprocess_patches (cfg);
#ifdef VALGRIND_JIT_REGISTER_MAP
if (valgrind_register){
char* nm = mono_method_full_name (cfg->method, TRUE);
VALGRIND_JIT_REGISTER_MAP (nm, cfg->native_code, cfg->native_code + cfg->code_len);
g_free (nm);
}
#endif
if (cfg->verbose_level > 0) {
char* nm = mono_method_get_full_name (cfg->method);
g_print ("Method %s emitted at %p to %p (code length %d)\n",
nm,
cfg->native_code, cfg->native_code + cfg->code_len, cfg->code_len);
g_free (nm);
}
{
gboolean is_generic = FALSE;
if (cfg->method->is_inflated || mono_method_get_generic_container (cfg->method) ||
mono_class_is_gtd (cfg->method->klass) || mono_class_is_ginst (cfg->method->klass)) {
is_generic = TRUE;
}
if (cfg->gshared)
g_assert (is_generic);
}
#ifdef MONO_ARCH_HAVE_SAVE_UNWIND_INFO
mono_arch_save_unwind_info (cfg);
#endif
{
MonoJumpInfo *ji;
gpointer target;
for (ji = cfg->patch_info; ji; ji = ji->next) {
if (cfg->compile_aot) {
switch (ji->type) {
case MONO_PATCH_INFO_BB:
case MONO_PATCH_INFO_LABEL:
break;
default:
/* No need to patch these */
continue;
}
}
if (ji->type == MONO_PATCH_INFO_NONE)
continue;
target = mono_resolve_patch_target (cfg->method, cfg->native_code, ji, cfg->run_cctors, cfg->error);
if (!is_ok (cfg->error)) {
mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
return;
}
mono_arch_patch_code_new (cfg, cfg->native_code, ji, target);
}
}
if (cfg->method->dynamic) {
mono_code_manager_commit (cfg->dynamic_info->code_mp, cfg->native_code, cfg->code_size, cfg->code_len);
} else {
mono_mem_manager_code_commit (code_mem_manager, cfg->native_code, cfg->code_size, cfg->code_len);
}
mono_codeman_disable_write ();
MONO_PROFILER_RAISE (jit_code_buffer, (cfg->native_code, cfg->code_len, MONO_PROFILER_CODE_BUFFER_METHOD, cfg->method));
mono_arch_flush_icache (cfg->native_code, cfg->code_len);
mono_debug_close_method (cfg);
#ifdef MONO_ARCH_HAVE_UNWIND_TABLE
if (!cfg->compile_aot)
mono_arch_unwindinfo_install_method_unwind_info (&cfg->arch.unwindinfo, cfg->native_code, cfg->code_len);
#endif
}
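/*
 * compute_reachable:
 *
 * Mark BB and all bblocks reachable from it with BB_VISITED.
 */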
static void
compute_reachable (MonoBasicBlock *bb)
{
int i;
if (!(bb->flags & BB_VISITED)) {
bb->flags |= BB_VISITED;
for (i = 0; i < bb->out_count; ++i)
compute_reachable (bb->out_bb [i]);
}
}
static void
mono_bb_ordering (MonoCompile *cfg)
{
int dfn = 0;
/* Depth-first ordering on basic blocks */
cfg->bblocks = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * (cfg->num_bblocks + 1));
cfg->max_block_num = cfg->num_bblocks;
df_visit (cfg->bb_entry, &dfn, cfg->bblocks);
#if defined(__GNUC__) && __GNUC__ == 7 && defined(__x86_64__)
/* workaround for an AMD specific issue that only happens on GCC 7 so far,
* for more information see https://github.com/mono/mono/issues/9298 */
mono_memory_barrier ();
#endif
g_assertf (cfg->num_bblocks >= dfn, "cfg->num_bblocks=%d, dfn=%d\n", cfg->num_bblocks, dfn);
if (cfg->num_bblocks != dfn + 1) {
MonoBasicBlock *bb;
cfg->num_bblocks = dfn + 1;
		/* Remove unreachable bblocks, because the code in them may be
		 * inconsistent (accesses to dead variables, for example) */
for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
bb->flags &= ~BB_VISITED;
compute_reachable (cfg->bb_entry);
for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
if (bb->flags & BB_EXCEPTION_HANDLER)
compute_reachable (bb);
for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
if (!(bb->flags & BB_VISITED)) {
if (cfg->verbose_level > 1)
g_print ("found unreachable code in BB%d\n", bb->block_num);
bb->code = bb->last_ins = NULL;
while (bb->out_count)
mono_unlink_bblock (cfg, bb, bb->out_bb [0]);
}
}
for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
bb->flags &= ~BB_VISITED;
}
}
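/*
 * mono_handle_out_of_line_bblock:
 *
 * Add an explicit OP_BR to bblocks which would otherwise fall through into
 * an out-of-line bblock, since such bblocks may not be emitted at the
 * fall-through position.
 */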
static void
mono_handle_out_of_line_bblock (MonoCompile *cfg)
{
MonoBasicBlock *bb;
for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
if (bb->next_bb && bb->next_bb->out_of_line && bb->last_ins && !MONO_IS_BRANCH_OP (bb->last_ins)) {
MonoInst *ins;
MONO_INST_NEW (cfg, ins, OP_BR);
MONO_ADD_INS (bb, ins);
ins->inst_target_bb = bb->next_bb;
}
}
}
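/*
 * create_jit_info:
 *
 * Build the MonoJitInfo for the newly compiled method: generic sharing
 * info, try block holes, arch specific EH info, thunk info, the exception
 * clauses and the unwind info.
 */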
static MonoJitInfo*
create_jit_info (MonoCompile *cfg, MonoMethod *method_to_compile)
{
GSList *tmp;
MonoMethodHeader *header;
MonoJitInfo *jinfo;
MonoJitInfoFlags flags = JIT_INFO_NONE;
int num_clauses, num_holes = 0;
guint32 stack_size = 0;
g_assert (method_to_compile == cfg->method);
header = cfg->header;
if (cfg->gshared)
flags |= JIT_INFO_HAS_GENERIC_JIT_INFO;
if (cfg->arch_eh_jit_info) {
MonoJitArgumentInfo *arg_info;
MonoMethodSignature *sig = mono_method_signature_internal (cfg->method_to_register);
/*
* This cannot be computed during stack walking, as
* mono_arch_get_argument_info () is not signal safe.
*/
arg_info = g_newa (MonoJitArgumentInfo, sig->param_count + 1);
stack_size = mono_arch_get_argument_info (sig, sig->param_count, arg_info);
if (stack_size)
flags |= JIT_INFO_HAS_ARCH_EH_INFO;
}
if (cfg->has_unwind_info_for_epilog && !(flags & JIT_INFO_HAS_ARCH_EH_INFO))
flags |= JIT_INFO_HAS_ARCH_EH_INFO;
if (cfg->thunk_area)
flags |= JIT_INFO_HAS_THUNK_INFO;
if (cfg->try_block_holes) {
for (tmp = cfg->try_block_holes; tmp; tmp = tmp->next) {
TryBlockHole *hole = (TryBlockHole *)tmp->data;
MonoExceptionClause *ec = hole->clause;
int hole_end = hole->basic_block->native_offset + hole->basic_block->native_length;
MonoBasicBlock *clause_last_bb = cfg->cil_offset_to_bb [ec->try_offset + ec->try_len];
g_assert (clause_last_bb);
			/* Holes at the end of a try region can be represented by simply reducing the size of the block itself. */
if (clause_last_bb->native_offset != hole_end)
++num_holes;
}
if (num_holes)
flags |= JIT_INFO_HAS_TRY_BLOCK_HOLES;
if (G_UNLIKELY (cfg->verbose_level >= 4))
printf ("Number of try block holes %d\n", num_holes);
}
if (COMPILE_LLVM (cfg)) {
num_clauses = cfg->llvm_ex_info_len;
} else {
num_clauses = header->num_clauses;
int dead_clauses = 0;
for (int i = 0; i < header->num_clauses; ++i)
if (cfg->clause_is_dead [i])
dead_clauses ++;
num_clauses -= dead_clauses;
}
if (cfg->method->dynamic)
jinfo = (MonoJitInfo *)g_malloc0 (mono_jit_info_size (flags, num_clauses, num_holes));
else
jinfo = (MonoJitInfo *)mono_mem_manager_alloc0 (cfg->mem_manager, mono_jit_info_size (flags, num_clauses, num_holes));
jinfo_try_holes_size += num_holes * sizeof (MonoTryBlockHoleJitInfo);
mono_jit_info_init (jinfo, cfg->method_to_register, cfg->native_code, cfg->code_len, flags, num_clauses, num_holes);
if (COMPILE_LLVM (cfg))
jinfo->from_llvm = TRUE;
if (cfg->gshared) {
MonoInst *inst;
MonoGenericJitInfo *gi;
GSList *loclist = NULL;
gi = mono_jit_info_get_generic_jit_info (jinfo);
g_assert (gi);
if (cfg->method->dynamic)
gi->generic_sharing_context = g_new0 (MonoGenericSharingContext, 1);
else
gi->generic_sharing_context = (MonoGenericSharingContext *)mono_mem_manager_alloc0 (cfg->mem_manager, sizeof (MonoGenericSharingContext));
mini_init_gsctx (NULL, cfg->gsctx_context, gi->generic_sharing_context);
if ((method_to_compile->flags & METHOD_ATTRIBUTE_STATIC) ||
mini_method_get_context (method_to_compile)->method_inst ||
m_class_is_valuetype (method_to_compile->klass)) {
g_assert (cfg->rgctx_var);
}
gi->has_this = 1;
if ((method_to_compile->flags & METHOD_ATTRIBUTE_STATIC) ||
mini_method_get_context (method_to_compile)->method_inst ||
m_class_is_valuetype (method_to_compile->klass)) {
inst = cfg->rgctx_var;
if (!COMPILE_LLVM (cfg))
g_assert (inst->opcode == OP_REGOFFSET);
loclist = cfg->rgctx_loclist;
} else {
inst = cfg->args [0];
loclist = cfg->this_loclist;
}
if (loclist) {
/* Needed to handle async exceptions */
GSList *l;
int i;
gi->nlocs = g_slist_length (loclist);
if (cfg->method->dynamic)
gi->locations = (MonoDwarfLocListEntry *)g_malloc0 (gi->nlocs * sizeof (MonoDwarfLocListEntry));
else
gi->locations = (MonoDwarfLocListEntry *)mono_mem_manager_alloc0 (cfg->mem_manager, gi->nlocs * sizeof (MonoDwarfLocListEntry));
i = 0;
for (l = loclist; l; l = l->next) {
memcpy (&(gi->locations [i]), l->data, sizeof (MonoDwarfLocListEntry));
i ++;
}
}
if (COMPILE_LLVM (cfg)) {
g_assert (cfg->llvm_this_reg != -1);
gi->this_in_reg = 0;
gi->this_reg = cfg->llvm_this_reg;
gi->this_offset = cfg->llvm_this_offset;
} else if (inst->opcode == OP_REGVAR) {
gi->this_in_reg = 1;
gi->this_reg = inst->dreg;
} else {
g_assert (inst->opcode == OP_REGOFFSET);
#ifdef TARGET_X86
g_assert (inst->inst_basereg == X86_EBP);
#elif defined(TARGET_AMD64)
g_assert (inst->inst_basereg == X86_EBP || inst->inst_basereg == X86_ESP);
#endif
g_assert (inst->inst_offset >= G_MININT32 && inst->inst_offset <= G_MAXINT32);
gi->this_in_reg = 0;
gi->this_reg = inst->inst_basereg;
gi->this_offset = inst->inst_offset;
}
}
if (num_holes) {
MonoTryBlockHoleTableJitInfo *table;
int i;
table = mono_jit_info_get_try_block_hole_table_info (jinfo);
table->num_holes = (guint16)num_holes;
i = 0;
for (tmp = cfg->try_block_holes; tmp; tmp = tmp->next) {
guint32 start_bb_offset;
MonoTryBlockHoleJitInfo *hole;
TryBlockHole *hole_data = (TryBlockHole *)tmp->data;
MonoExceptionClause *ec = hole_data->clause;
int hole_end = hole_data->basic_block->native_offset + hole_data->basic_block->native_length;
MonoBasicBlock *clause_last_bb = cfg->cil_offset_to_bb [ec->try_offset + ec->try_len];
g_assert (clause_last_bb);
			/* Holes at the end of a try region can be represented by simply reducing the size of the block itself. */
if (clause_last_bb->native_offset == hole_end)
continue;
start_bb_offset = hole_data->start_offset - hole_data->basic_block->native_offset;
hole = &table->holes [i++];
hole->clause = hole_data->clause - &header->clauses [0];
hole->offset = (guint32)hole_data->start_offset;
hole->length = (guint16)(hole_data->basic_block->native_length - start_bb_offset);
if (G_UNLIKELY (cfg->verbose_level >= 4))
printf ("\tTry block hole at eh clause %d offset %x length %x\n", hole->clause, hole->offset, hole->length);
}
g_assert (i == num_holes);
}
if (jinfo->has_arch_eh_info) {
MonoArchEHJitInfo *info;
info = mono_jit_info_get_arch_eh_info (jinfo);
info->stack_size = stack_size;
}
if (cfg->thunk_area) {
MonoThunkJitInfo *info;
info = mono_jit_info_get_thunk_info (jinfo);
info->thunks_offset = cfg->thunks_offset;
info->thunks_size = cfg->thunk_area;
}
if (COMPILE_LLVM (cfg)) {
if (num_clauses)
memcpy (&jinfo->clauses [0], &cfg->llvm_ex_info [0], num_clauses * sizeof (MonoJitExceptionInfo));
} else {
int eindex = 0;
for (int i = 0; i < header->num_clauses; i++) {
MonoExceptionClause *ec = &header->clauses [i];
MonoJitExceptionInfo *ei = &jinfo->clauses [eindex];
MonoBasicBlock *tblock;
MonoInst *exvar;
if (cfg->clause_is_dead [i])
continue;
eindex ++;
ei->flags = ec->flags;
if (G_UNLIKELY (cfg->verbose_level >= 4))
printf ("IL clause: try 0x%x-0x%x handler 0x%x-0x%x filter 0x%x\n", ec->try_offset, ec->try_offset + ec->try_len, ec->handler_offset, ec->handler_offset + ec->handler_len, ec->flags == MONO_EXCEPTION_CLAUSE_FILTER ? ec->data.filter_offset : 0);
exvar = mono_find_exvar_for_offset (cfg, ec->handler_offset);
ei->exvar_offset = exvar ? exvar->inst_offset : 0;
if (ei->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
tblock = cfg->cil_offset_to_bb [ec->data.filter_offset];
g_assert (tblock);
ei->data.filter = cfg->native_code + tblock->native_offset;
} else {
ei->data.catch_class = ec->data.catch_class;
}
tblock = cfg->cil_offset_to_bb [ec->try_offset];
g_assert (tblock);
g_assert (tblock->native_offset);
ei->try_start = cfg->native_code + tblock->native_offset;
if (tblock->extend_try_block) {
/*
* Extend the try block backwards to include parts of the previous call
* instruction.
*/
ei->try_start = (guint8*)ei->try_start - cfg->backend->monitor_enter_adjustment;
}
if (ec->try_offset + ec->try_len < header->code_size)
tblock = cfg->cil_offset_to_bb [ec->try_offset + ec->try_len];
else
tblock = cfg->bb_exit;
if (G_UNLIKELY (cfg->verbose_level >= 4))
printf ("looking for end of try [%d, %d] -> %p (code size %d)\n", ec->try_offset, ec->try_len, tblock, header->code_size);
g_assert (tblock);
if (!tblock->native_offset) {
int j, end;
for (j = ec->try_offset + ec->try_len, end = ec->try_offset; j >= end; --j) {
MonoBasicBlock *bb = cfg->cil_offset_to_bb [j];
if (bb && bb->native_offset) {
tblock = bb;
break;
}
}
}
ei->try_end = cfg->native_code + tblock->native_offset;
g_assert (tblock->native_offset);
tblock = cfg->cil_offset_to_bb [ec->handler_offset];
g_assert (tblock);
ei->handler_start = cfg->native_code + tblock->native_offset;
for (tmp = cfg->try_block_holes; tmp; tmp = tmp->next) {
TryBlockHole *hole = (TryBlockHole *)tmp->data;
gpointer hole_end = cfg->native_code + (hole->basic_block->native_offset + hole->basic_block->native_length);
if (hole->clause == ec && hole_end == ei->try_end) {
if (G_UNLIKELY (cfg->verbose_level >= 4))
printf ("\tShortening try block %d from %x to %x\n", i, (int)((guint8*)ei->try_end - cfg->native_code), hole->start_offset);
ei->try_end = cfg->native_code + hole->start_offset;
break;
}
}
if (ec->flags == MONO_EXCEPTION_CLAUSE_FINALLY) {
int end_offset;
if (ec->handler_offset + ec->handler_len < header->code_size) {
tblock = cfg->cil_offset_to_bb [ec->handler_offset + ec->handler_len];
if (tblock->native_offset) {
end_offset = tblock->native_offset;
} else {
int j, end;
for (j = ec->handler_offset + ec->handler_len, end = ec->handler_offset; j >= end; --j) {
MonoBasicBlock *bb = cfg->cil_offset_to_bb [j];
if (bb && bb->native_offset) {
tblock = bb;
break;
}
}
end_offset = tblock->native_offset + tblock->native_length;
}
} else {
end_offset = cfg->epilog_begin;
}
ei->data.handler_end = cfg->native_code + end_offset;
}
/* Keep try_start/end non-authenticated, they are never branched to */
//ei->try_start = MINI_ADDR_TO_FTNPTR (ei->try_start);
//ei->try_end = MINI_ADDR_TO_FTNPTR (ei->try_end);
ei->handler_start = MINI_ADDR_TO_FTNPTR (ei->handler_start);
if (ei->flags == MONO_EXCEPTION_CLAUSE_FILTER)
ei->data.filter = MINI_ADDR_TO_FTNPTR (ei->data.filter);
else if (ei->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
ei->data.handler_end = MINI_ADDR_TO_FTNPTR (ei->data.handler_end);
}
}
if (G_UNLIKELY (cfg->verbose_level >= 4)) {
int i;
for (i = 0; i < jinfo->num_clauses; i++) {
MonoJitExceptionInfo *ei = &jinfo->clauses [i];
int start = (guint8*)ei->try_start - cfg->native_code;
int end = (guint8*)ei->try_end - cfg->native_code;
int handler = (guint8*)ei->handler_start - cfg->native_code;
int handler_end = (guint8*)ei->data.handler_end - cfg->native_code;
printf ("JitInfo EH clause %d flags %x try %x-%x handler %x-%x\n", i, ei->flags, start, end, handler, handler_end);
}
}
if (cfg->encoded_unwind_ops) {
/* Generated by LLVM */
jinfo->unwind_info = mono_cache_unwind_info (cfg->encoded_unwind_ops, cfg->encoded_unwind_ops_len);
g_free (cfg->encoded_unwind_ops);
} else if (cfg->unwind_ops) {
guint32 info_len;
guint8 *unwind_info = mono_unwind_ops_encode (cfg->unwind_ops, &info_len);
guint32 unwind_desc;
unwind_desc = mono_cache_unwind_info (unwind_info, info_len);
if (cfg->has_unwind_info_for_epilog) {
MonoArchEHJitInfo *info;
info = mono_jit_info_get_arch_eh_info (jinfo);
g_assert (info);
info->epilog_size = cfg->code_len - cfg->epilog_begin;
}
jinfo->unwind_info = unwind_desc;
g_free (unwind_info);
} else {
jinfo->unwind_info = cfg->used_int_regs;
}
return jinfo;
}
/* Return whether METHOD is a gsharedvt method */
static gboolean
is_gsharedvt_method (MonoMethod *method)
{
MonoGenericContext *context;
MonoGenericInst *inst;
int i;
if (!method->is_inflated)
return FALSE;
context = mono_method_get_context (method);
inst = context->class_inst;
if (inst) {
for (i = 0; i < inst->type_argc; ++i)
if (mini_is_gsharedvt_gparam (inst->type_argv [i]))
return TRUE;
}
inst = context->method_inst;
if (inst) {
for (i = 0; i < inst->type_argc; ++i)
if (mini_is_gsharedvt_gparam (inst->type_argv [i]))
return TRUE;
}
return FALSE;
}
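/* Return whether METHOD is inflated with an open (not fully instantiated) generic context */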
static gboolean
is_open_method (MonoMethod *method)
{
MonoGenericContext *context;
if (!method->is_inflated)
return FALSE;
context = mono_method_get_context (method);
if (context->class_inst && context->class_inst->is_open)
return TRUE;
if (context->method_inst && context->method_inst->is_open)
return TRUE;
return FALSE;
}
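/* Add an OP_NOP to bblocks with no instructions, so every bblock is non-empty */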
static void
mono_insert_nop_in_empty_bb (MonoCompile *cfg)
{
MonoBasicBlock *bb;
for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
if (bb->code)
continue;
MonoInst *nop;
MONO_INST_NEW (cfg, nop, OP_NOP);
MONO_ADD_INS (bb, nop);
}
}
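/*
 * insert_safepoint:
 *
 *   Insert an OP_GC_SAFE_POINT polling mono_polling_required into BBLOCK, placing it
 * after any OP_START_HANDLER/OP_GET_EX_OBJ instructions in exception handler blocks.
 */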
static void
insert_safepoint (MonoCompile *cfg, MonoBasicBlock *bblock)
{
MonoInst *poll_addr, *ins;
if (cfg->disable_gc_safe_points)
return;
if (cfg->verbose_level > 1)
printf ("ADDING SAFE POINT TO BB %d\n", bblock->block_num);
g_assert (mini_safepoints_enabled ());
NEW_AOTCONST (cfg, poll_addr, MONO_PATCH_INFO_GC_SAFE_POINT_FLAG, (gpointer)&mono_polling_required);
MONO_INST_NEW (cfg, ins, OP_GC_SAFE_POINT);
ins->sreg1 = poll_addr->dreg;
if (bblock->flags & BB_EXCEPTION_HANDLER) {
MonoInst *eh_op = bblock->code;
if (eh_op && eh_op->opcode != OP_START_HANDLER && eh_op->opcode != OP_GET_EX_OBJ) {
eh_op = NULL;
} else {
MonoInst *next_eh_op = eh_op ? eh_op->next : NULL;
				// skip all EH related ops
while (next_eh_op && (next_eh_op->opcode == OP_START_HANDLER || next_eh_op->opcode == OP_GET_EX_OBJ)) {
eh_op = next_eh_op;
next_eh_op = eh_op->next;
}
}
mono_bblock_insert_after_ins (bblock, eh_op, poll_addr);
mono_bblock_insert_after_ins (bblock, poll_addr, ins);
} else if (bblock == cfg->bb_entry) {
mono_bblock_insert_after_ins (bblock, bblock->last_ins, poll_addr);
mono_bblock_insert_after_ins (bblock, poll_addr, ins);
} else {
mono_bblock_insert_before_ins (bblock, NULL, poll_addr);
mono_bblock_insert_after_ins (bblock, poll_addr, ins);
}
}
/*
 * insert_safepoints:
 *
 *   Insert safepoints into managed code at important code paths:
 * - the first basic block
 * - landing bblocks of exception handlers
 * - loop body starts.
 */
static void
insert_safepoints (MonoCompile *cfg)
{
MonoBasicBlock *bb;
g_assert (mini_safepoints_enabled ());
if (COMPILE_LLVM (cfg)) {
if (!cfg->llvm_only) {
/* We rely on LLVM's safepoints insertion capabilities. */
if (cfg->verbose_level > 1)
printf ("SKIPPING SAFEPOINTS for code compiled with LLVM\n");
return;
}
}
if (cfg->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
WrapperInfo *info = mono_marshal_get_wrapper_info (cfg->method);
/* These wrappers are called from the wrapper for the polling function, leading to potential stack overflow */
if (info && info->subtype == WRAPPER_SUBTYPE_ICALL_WRAPPER &&
(info->d.icall.jit_icall_id == MONO_JIT_ICALL_mono_threads_state_poll ||
info->d.icall.jit_icall_id == MONO_JIT_ICALL_mono_thread_interruption_checkpoint ||
info->d.icall.jit_icall_id == MONO_JIT_ICALL_mono_threads_exit_gc_safe_region_unbalanced)) {
if (cfg->verbose_level > 1)
printf ("SKIPPING SAFEPOINTS for the polling function icall\n");
return;
}
}
if (cfg->method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED) {
if (cfg->verbose_level > 1)
printf ("SKIPPING SAFEPOINTS for native-to-managed wrappers.\n");
return;
}
if (cfg->method->wrapper_type == MONO_WRAPPER_OTHER) {
WrapperInfo *info = mono_marshal_get_wrapper_info (cfg->method);
if (info && (info->subtype == WRAPPER_SUBTYPE_INTERP_IN || info->subtype == WRAPPER_SUBTYPE_INTERP_LMF)) {
/* These wrappers shouldn't do any icalls */
if (cfg->verbose_level > 1)
printf ("SKIPPING SAFEPOINTS for interp-in wrappers.\n");
return;
}
}
if (cfg->method->wrapper_type == MONO_WRAPPER_WRITE_BARRIER) {
if (cfg->verbose_level > 1)
printf ("SKIPPING SAFEPOINTS for write barrier wrappers.\n");
return;
}
if (cfg->verbose_level > 1)
printf ("INSERTING SAFEPOINTS\n");
if (cfg->verbose_level > 2)
mono_print_code (cfg, "BEFORE SAFEPOINTS");
	/* If the method contains
	 * (1) no calls (so it's a leaf method)
	 * (2) and no loops
	 * we can skip the GC safepoint on method entry. */
gboolean requires_safepoint = cfg->has_calls;
for (bb = cfg->bb_entry->next_bb; bb; bb = bb->next_bb) {
if (bb->loop_body_start || (bb->flags & BB_EXCEPTION_HANDLER)) {
requires_safepoint = TRUE;
insert_safepoint (cfg, bb);
}
}
if (requires_safepoint)
insert_safepoint (cfg, cfg->bb_entry);
if (cfg->verbose_level > 2)
mono_print_code (cfg, "AFTER SAFEPOINTS");
}
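/*
 * mono_insert_branches_between_bblocks:
 *
 *   Make control flow between non-consecutive bblocks explicit: if a bblock ends in a
 * conditional branch whose 'false' target is not the next bblock, either invert the
 * branch (when the next bblock is the 'true' target and the branch is not FP) or
 * append an OP_BR to the 'false' target.
 */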
static void
mono_insert_branches_between_bblocks (MonoCompile *cfg)
{
MonoBasicBlock *bb;
/* Add branches between non-consecutive bblocks */
for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
if (bb->last_ins && MONO_IS_COND_BRANCH_OP (bb->last_ins) &&
bb->last_ins->inst_false_bb && bb->next_bb != bb->last_ins->inst_false_bb) {
/* we are careful when inverting, since bugs like #59580
* could show up when dealing with NaNs.
*/
if (MONO_IS_COND_BRANCH_NOFP(bb->last_ins) && bb->next_bb == bb->last_ins->inst_true_bb) {
MonoBasicBlock *tmp = bb->last_ins->inst_true_bb;
bb->last_ins->inst_true_bb = bb->last_ins->inst_false_bb;
bb->last_ins->inst_false_bb = tmp;
bb->last_ins->opcode = mono_reverse_branch_op (bb->last_ins->opcode);
} else {
MonoInst *inst = (MonoInst *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst));
inst->opcode = OP_BR;
inst->inst_target_bb = bb->last_ins->inst_false_bb;
mono_bblock_add_inst (bb, inst);
}
}
}
if (cfg->verbose_level >= 4) {
for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
MonoInst *tree = bb->code;
g_print ("DUMP BLOCK %d:\n", bb->block_num);
if (!tree)
continue;
for (; tree; tree = tree->next) {
mono_print_ins_index (-1, tree);
}
}
}
/* FIXME: */
for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
bb->max_vreg = cfg->next_vreg;
}
}
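/*
 * remove_empty_finally_pass:
 *
 *   Detect finally clauses whose handlers have no side effects, mark them as dead and
 * nullify the OP_CALL_HANDLER instructions which invoke them. llvm-only for now.
 */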
static G_GNUC_UNUSED void
remove_empty_finally_pass (MonoCompile *cfg)
{
MonoBasicBlock *bb;
MonoInst *ins;
gboolean remove_call_handler = FALSE;
// FIXME: other configurations
if (!cfg->llvm_only)
return;
for (int i = 0; i < cfg->header->num_clauses; ++i) {
MonoExceptionClause *clause = &cfg->header->clauses [i];
if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY) {
MonoInst *first, *last;
bb = cfg->cil_offset_to_bb [clause->handler_offset];
g_assert (bb);
/* Support only 1 bb for now */
first = mono_bb_first_inst (bb, 0);
if (first->opcode != OP_START_HANDLER)
break;
gboolean empty = TRUE;
while (TRUE) {
if (bb->out_count > 1) {
empty = FALSE;
break;
}
if (bb->flags & BB_HAS_SIDE_EFFECTS) {
empty = FALSE;
break;
}
if (bb->out_count == 0)
break;
if (mono_bb_last_inst (bb, 0)->opcode == OP_ENDFINALLY)
break;
bb = bb->out_bb [0];
}
if (empty) {
/*
* Avoid doing this in nested clauses, because it might mess up the EH code generated by
* the llvm backend.
*/
for (int j = 0; j < cfg->header->num_clauses; ++j) {
MonoExceptionClause *clause2 = &cfg->header->clauses [j];
if (i != j && MONO_OFFSET_IN_CLAUSE (clause2, clause->handler_offset))
empty = FALSE;
}
}
if (empty) {
/* Nullify OP_START_HANDLER */
NULLIFY_INS (first);
last = mono_bb_last_inst (bb, 0);
if (last->opcode == OP_ENDFINALLY)
NULLIFY_INS (last);
if (cfg->verbose_level > 1)
g_print ("removed empty finally clause %d.\n", i);
/* Mark the handler bb as not used anymore */
bb = cfg->cil_offset_to_bb [clause->handler_offset];
bb->flags &= ~BB_EXCEPTION_HANDLER;
cfg->clause_is_dead [i] = TRUE;
remove_call_handler = TRUE;
}
}
}
if (remove_call_handler) {
/* Remove OP_CALL_HANDLER opcodes pointing to the removed finally blocks */
for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
MONO_BB_FOR_EACH_INS (bb, ins) {
if (ins->opcode == OP_CALL_HANDLER && ins->inst_target_bb && !(ins->inst_target_bb->flags & BB_EXCEPTION_HANDLER)) {
NULLIFY_INS (ins);
for (MonoInst *ins2 = ins->next; ins2; ins2 = ins2->next)
NULLIFY_INS (ins2);
break;
}
}
}
}
}
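/* Fill in BACKEND with the capabilities of the current architecture, selected by the MONO_ARCH_ defines */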
static void
init_backend (MonoBackend *backend)
{
#ifdef MONO_ARCH_NEED_GOT_VAR
backend->need_got_var = 1;
#endif
#ifdef MONO_ARCH_HAVE_CARD_TABLE_WBARRIER
backend->have_card_table_wb = 1;
#endif
#ifdef MONO_ARCH_HAVE_OP_GENERIC_CLASS_INIT
backend->have_op_generic_class_init = 1;
#endif
#ifdef MONO_ARCH_EMULATE_MUL_DIV
backend->emulate_mul_div = 1;
#endif
#ifdef MONO_ARCH_EMULATE_DIV
backend->emulate_div = 1;
#endif
#if !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
backend->emulate_long_shift_opts = 1;
#endif
#ifdef MONO_ARCH_HAVE_OBJC_GET_SELECTOR
backend->have_objc_get_selector = 1;
#endif
#ifdef MONO_ARCH_HAVE_GENERALIZED_IMT_TRAMPOLINE
backend->have_generalized_imt_trampoline = 1;
#endif
#ifdef MONO_ARCH_GSHARED_SUPPORTED
backend->gshared_supported = 1;
#endif
if (MONO_ARCH_USE_FPSTACK)
backend->use_fpstack = 1;
// Does the ABI have a volatile non-parameter register, so tailcall
// can pass context to generics or interfaces?
backend->have_volatile_non_param_register = MONO_ARCH_HAVE_VOLATILE_NON_PARAM_REGISTER;
#ifdef MONO_ARCH_HAVE_OP_TAILCALL_MEMBASE
backend->have_op_tailcall_membase = 1;
#endif
#ifdef MONO_ARCH_HAVE_OP_TAILCALL_REG
backend->have_op_tailcall_reg = 1;
#endif
#ifndef MONO_ARCH_MONITOR_ENTER_ADJUSTMENT
backend->monitor_enter_adjustment = 1;
#else
backend->monitor_enter_adjustment = MONO_ARCH_MONITOR_ENTER_ADJUSTMENT;
#endif
#if defined(MONO_ARCH_ILP32)
backend->ilp32 = 1;
#endif
#ifdef MONO_ARCH_NEED_DIV_CHECK
backend->need_div_check = 1;
#endif
#ifdef NO_UNALIGNED_ACCESS
backend->no_unaligned_access = 1;
#endif
#ifdef MONO_ARCH_DYN_CALL_PARAM_AREA
backend->dyn_call_param_area = MONO_ARCH_DYN_CALL_PARAM_AREA;
#endif
#ifdef MONO_ARCH_NO_DIV_WITH_MUL
backend->disable_div_with_mul = 1;
#endif
#ifdef MONO_ARCH_EXPLICIT_NULL_CHECKS
backend->explicit_null_checks = 1;
#endif
#ifdef MONO_ARCH_HAVE_OPTIMIZED_DIV
backend->optimized_div = 1;
#endif
}
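/* Return whether SIMD intrinsics can be used when compiling CFG */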
static gboolean
is_simd_supported (MonoCompile *cfg)
{
#ifdef DISABLE_SIMD
return FALSE;
#endif
// FIXME: Clean this up
#ifdef TARGET_WASM
if ((mini_get_cpu_features (cfg) & MONO_CPU_WASM_SIMD) == 0)
return FALSE;
#else
if (cfg->llvm_only)
return FALSE;
#endif
return TRUE;
}
/* Determine how an rgctx is passed to a method */
MonoRgctxAccess
mini_get_rgctx_access_for_method (MonoMethod *method)
{
/* gshared dim methods use an mrgctx */
if (mini_method_is_default_method (method))
return MONO_RGCTX_ACCESS_MRGCTX;
if (mono_method_get_context (method)->method_inst)
return MONO_RGCTX_ACCESS_MRGCTX;
if (method->flags & METHOD_ATTRIBUTE_STATIC || m_class_is_valuetype (method->klass))
return MONO_RGCTX_ACCESS_VTABLE;
return MONO_RGCTX_ACCESS_THIS;
}
/*
* mini_method_compile:
* @method: the method to compile
* @opts: the optimization flags to use
* @flags: compilation flags
* @parts: debug flag
*
* Returns: a MonoCompile* pointer. Caller must check the exception_type
 * field in the returned struct to see if compilation succeeded.
*/
MonoCompile*
mini_method_compile (MonoMethod *method, guint32 opts, JitFlags flags, int parts, int aot_method_index)
{
MonoMethodHeader *header;
MonoMethodSignature *sig;
MonoCompile *cfg;
int i;
gboolean try_generic_shared, try_llvm = FALSE;
MonoMethod *method_to_compile, *method_to_register;
gboolean method_is_gshared = FALSE;
gboolean run_cctors = (flags & JIT_FLAG_RUN_CCTORS) ? 1 : 0;
gboolean compile_aot = (flags & JIT_FLAG_AOT) ? 1 : 0;
gboolean full_aot = (flags & JIT_FLAG_FULL_AOT) ? 1 : 0;
gboolean disable_direct_icalls = (flags & JIT_FLAG_NO_DIRECT_ICALLS) ? 1 : 0;
gboolean gsharedvt_method = FALSE;
gboolean interp_entry_only = FALSE;
#ifdef ENABLE_LLVM
gboolean llvm = (flags & JIT_FLAG_LLVM) ? 1 : 0;
#endif
static gboolean verbose_method_inited;
static char **verbose_method_names;
mono_atomic_inc_i32 (&mono_jit_stats.methods_compiled);
MONO_PROFILER_RAISE (jit_begin, (method));
if (MONO_METHOD_COMPILE_BEGIN_ENABLED ())
MONO_PROBE_METHOD_COMPILE_BEGIN (method);
gsharedvt_method = is_gsharedvt_method (method);
/*
* In AOT mode, method can be the following:
* - a gsharedvt method.
* - a method inflated with type parameters. This is for ref/partial sharing.
* - a method inflated with concrete types.
*/
if (compile_aot) {
if (is_open_method (method)) {
try_generic_shared = TRUE;
method_is_gshared = TRUE;
} else {
try_generic_shared = FALSE;
}
g_assert (opts & MONO_OPT_GSHARED);
} else {
try_generic_shared = mono_class_generic_sharing_enabled (method->klass) &&
(opts & MONO_OPT_GSHARED) && mono_method_is_generic_sharable_full (method, FALSE, FALSE, FALSE);
if (mini_is_gsharedvt_sharable_method (method)) {
/*
if (!mono_debug_count ())
try_generic_shared = FALSE;
*/
}
}
/*
if (try_generic_shared && !mono_debug_count ())
try_generic_shared = FALSE;
*/
if (opts & MONO_OPT_GSHARED) {
if (try_generic_shared)
mono_atomic_inc_i32 (&mono_stats.generics_sharable_methods);
else if (mono_method_is_generic_impl (method))
mono_atomic_inc_i32 (&mono_stats.generics_unsharable_methods);
}
#ifdef ENABLE_LLVM
try_llvm = mono_use_llvm || llvm;
#endif
restart_compile:
if (method_is_gshared) {
method_to_compile = method;
} else {
if (try_generic_shared) {
ERROR_DECL (error);
method_to_compile = mini_get_shared_method_full (method, SHARE_MODE_NONE, error);
mono_error_assert_ok (error);
} else {
method_to_compile = method;
}
}
cfg = g_new0 (MonoCompile, 1);
cfg->method = method_to_compile;
cfg->mempool = mono_mempool_new ();
cfg->opt = opts;
cfg->run_cctors = run_cctors;
cfg->verbose_level = mini_verbose;
cfg->compile_aot = compile_aot;
cfg->full_aot = full_aot;
cfg->disable_omit_fp = mini_debug_options.disable_omit_fp;
cfg->skip_visibility = method->skip_visibility;
cfg->orig_method = method;
cfg->gen_seq_points = !mini_debug_options.no_seq_points_compact_data || mini_debug_options.gen_sdb_seq_points;
cfg->gen_sdb_seq_points = mini_debug_options.gen_sdb_seq_points;
cfg->llvm_only = (flags & JIT_FLAG_LLVM_ONLY) != 0;
cfg->interp = (flags & JIT_FLAG_INTERP) != 0;
cfg->use_current_cpu = (flags & JIT_FLAG_USE_CURRENT_CPU) != 0;
cfg->self_init = (flags & JIT_FLAG_SELF_INIT) != 0;
cfg->code_exec_only = (flags & JIT_FLAG_CODE_EXEC_ONLY) != 0;
cfg->backend = current_backend;
cfg->jit_mm = jit_mm_for_method (cfg->method);
cfg->mem_manager = m_method_get_mem_manager (cfg->method);
if (cfg->method->wrapper_type == MONO_WRAPPER_ALLOC) {
/* We can't have seq points inside gc critical regions */
cfg->gen_seq_points = FALSE;
cfg->gen_sdb_seq_points = FALSE;
}
/* coop requires loop detection to happen */
if (mini_safepoints_enabled ())
cfg->opt |= MONO_OPT_LOOP;
cfg->disable_llvm_implicit_null_checks = mini_debug_options.llvm_disable_implicit_null_checks;
if (cfg->backend->explicit_null_checks || mini_debug_options.explicit_null_checks) {
/* some platforms have null pages, so we can't SIGSEGV */
cfg->explicit_null_checks = TRUE;
cfg->disable_llvm_implicit_null_checks = TRUE;
} else {
cfg->explicit_null_checks = flags & JIT_FLAG_EXPLICIT_NULL_CHECKS;
}
cfg->soft_breakpoints = mini_debug_options.soft_breakpoints;
cfg->check_pinvoke_callconv = mini_debug_options.check_pinvoke_callconv;
cfg->disable_direct_icalls = disable_direct_icalls;
cfg->direct_pinvoke = (flags & JIT_FLAG_DIRECT_PINVOKE) != 0;
cfg->interp_entry_only = interp_entry_only;
if (try_generic_shared)
cfg->gshared = TRUE;
if (cfg->gshared)
cfg->rgctx_access = mini_get_rgctx_access_for_method (cfg->method);
cfg->compile_llvm = try_llvm;
cfg->token_info_hash = g_hash_table_new (NULL, NULL);
if (cfg->compile_aot)
cfg->method_index = aot_method_index;
if (cfg->compile_llvm)
cfg->explicit_null_checks = TRUE;
if (cfg->explicit_null_checks && method->wrapper_type == MONO_WRAPPER_OTHER &&
(mono_marshal_get_wrapper_info (method)->subtype == WRAPPER_SUBTYPE_GSHAREDVT_IN_SIG ||
mono_marshal_get_wrapper_info (method)->subtype == WRAPPER_SUBTYPE_GSHAREDVT_OUT_SIG)) {
/* These wrappers contain loads/stores which can't fail */
cfg->explicit_null_checks = FALSE;
}
if (!is_simd_supported (cfg))
cfg->opt &= ~MONO_OPT_SIMD;
cfg->r4_stack_type = STACK_R4;
if (cfg->gen_seq_points)
cfg->seq_points = g_ptr_array_new ();
cfg->error = (MonoError*)&cfg->error_value;
error_init (cfg->error);
if (cfg->compile_aot && !try_generic_shared && (method->is_generic || mono_class_is_gtd (method->klass) || method_is_gshared)) {
cfg->exception_type = MONO_EXCEPTION_GENERIC_SHARING_FAILED;
return cfg;
}
if (cfg->gshared && (gsharedvt_method || mini_is_gsharedvt_sharable_method (method))) {
MonoMethodInflated *inflated;
MonoGenericContext *context;
if (gsharedvt_method) {
g_assert (method->is_inflated);
inflated = (MonoMethodInflated*)method;
context = &inflated->context;
/* We are compiling a gsharedvt method directly */
g_assert (compile_aot);
} else {
g_assert (method_to_compile->is_inflated);
inflated = (MonoMethodInflated*)method_to_compile;
context = &inflated->context;
}
mini_init_gsctx (cfg->mempool, context, &cfg->gsctx);
cfg->gsctx_context = context;
cfg->gsharedvt = TRUE;
if (!cfg->llvm_only) {
cfg->disable_llvm = TRUE;
cfg->exception_message = g_strdup ("gsharedvt");
}
}
if (cfg->gshared) {
method_to_register = method_to_compile;
} else {
g_assert (method == method_to_compile);
method_to_register = method;
}
cfg->method_to_register = method_to_register;
ERROR_DECL (err);
sig = mono_method_signature_checked (cfg->method, err);
if (!sig) {
cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
cfg->exception_message = g_strdup (mono_error_get_message (err));
mono_error_cleanup (err);
if (MONO_METHOD_COMPILE_END_ENABLED ())
MONO_PROBE_METHOD_COMPILE_END (method, FALSE);
return cfg;
}
header = cfg->header = mono_method_get_header_checked (cfg->method, cfg->error);
if (!header) {
mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
if (MONO_METHOD_COMPILE_END_ENABLED ())
MONO_PROBE_METHOD_COMPILE_END (method, FALSE);
return cfg;
}
if (cfg->llvm_only && cfg->interp && !cfg->interp_entry_only && header->num_clauses) {
cfg->deopt = TRUE;
/* Can't reconstruct inlined state */
cfg->disable_inline = TRUE;
}
#ifdef ENABLE_LLVM
{
static gboolean inited;
if (!inited)
inited = TRUE;
/*
* Check for methods which cannot be compiled by LLVM early, to avoid
* the extra compilation pass.
*/
if (COMPILE_LLVM (cfg)) {
mono_llvm_check_method_supported (cfg);
if (cfg->disable_llvm) {
if (cfg->verbose_level > 0) {
//nm = mono_method_full_name (cfg->method, TRUE);
printf ("LLVM failed for '%s.%s': %s\n", m_class_get_name (method->klass), method->name, cfg->exception_message);
//g_free (nm);
}
if (cfg->llvm_only) {
g_free (cfg->exception_message);
cfg->disable_aot = TRUE;
return cfg;
}
mono_destroy_compile (cfg);
try_llvm = FALSE;
goto restart_compile;
}
}
}
#endif
cfg->prof_flags = mono_profiler_get_call_instrumentation_flags (cfg->method);
cfg->prof_coverage = mono_profiler_coverage_instrumentation_enabled (cfg->method);
gboolean trace = mono_jit_trace_calls != NULL && mono_trace_eval (cfg->method);
if (trace)
cfg->prof_flags = (MonoProfilerCallInstrumentationFlags)(
MONO_PROFILER_CALL_INSTRUMENTATION_ENTER | MONO_PROFILER_CALL_INSTRUMENTATION_ENTER_CONTEXT |
MONO_PROFILER_CALL_INSTRUMENTATION_LEAVE | MONO_PROFILER_CALL_INSTRUMENTATION_LEAVE_CONTEXT);
/* The debugger has no liveness information, so avoid sharing registers/stack slots */
if (mini_debug_options.mdb_optimizations || MONO_CFG_PROFILE_CALL_CONTEXT (cfg)) {
cfg->disable_reuse_registers = TRUE;
cfg->disable_reuse_stack_slots = TRUE;
/*
	 * This decreases the chance the debugger will read registers/stack slots which are
* not yet initialized.
*/
cfg->disable_initlocals_opt = TRUE;
cfg->extend_live_ranges = TRUE;
/* The debugger needs all locals to be on the stack or in a global register */
cfg->disable_vreg_to_lvreg = TRUE;
/* Don't remove unused variables when running inside the debugger since the user
* may still want to view them. */
cfg->disable_deadce_vars = TRUE;
cfg->opt &= ~MONO_OPT_DEADCE;
cfg->opt &= ~MONO_OPT_INLINE;
cfg->opt &= ~MONO_OPT_COPYPROP;
cfg->opt &= ~MONO_OPT_CONSPROP;
/* This is needed for the soft debugger, which doesn't like code after the epilog */
cfg->disable_out_of_line_bblocks = TRUE;
}
mini_gc_init_cfg (cfg);
if (method->wrapper_type == MONO_WRAPPER_OTHER) {
WrapperInfo *info = mono_marshal_get_wrapper_info (method);
if ((info && (info->subtype == WRAPPER_SUBTYPE_GSHAREDVT_IN_SIG || info->subtype == WRAPPER_SUBTYPE_GSHAREDVT_OUT_SIG))) {
cfg->disable_gc_safe_points = TRUE;
/* This is safe, these wrappers only store to the stack */
cfg->gen_write_barriers = FALSE;
}
}
if (COMPILE_LLVM (cfg)) {
cfg->opt |= MONO_OPT_ABCREM;
}
if (!verbose_method_inited) {
char *env = g_getenv ("MONO_VERBOSE_METHOD");
if (env != NULL)
verbose_method_names = g_strsplit (env, ";", -1);
verbose_method_inited = TRUE;
}
if (verbose_method_names) {
int i;
for (i = 0; verbose_method_names [i] != NULL; i++){
const char *name = verbose_method_names [i];
if ((strchr (name, '.') > name) || strchr (name, ':') || strchr (name, '*')) {
MonoMethodDesc *desc;
desc = mono_method_desc_new (name, TRUE);
if (desc) {
if (mono_method_desc_full_match (desc, cfg->method)) {
cfg->verbose_level = 4;
}
mono_method_desc_free (desc);
}
} else {
if (strcmp (cfg->method->name, name) == 0)
cfg->verbose_level = 4;
}
}
}
cfg->intvars = (guint16 *)mono_mempool_alloc0 (cfg->mempool, sizeof (guint16) * STACK_MAX * header->max_stack);
if (cfg->verbose_level > 0) {
char *method_name;
method_name = mono_method_get_full_name (method);
g_print ("converting %s%s%s%smethod %s\n", COMPILE_LLVM (cfg) ? "llvm " : "", cfg->gsharedvt ? "gsharedvt " : "", (cfg->gshared && !cfg->gsharedvt) ? "gshared " : "", cfg->interp_entry_only ? "interp only " : "", method_name);
/*
if (COMPILE_LLVM (cfg))
g_print ("converting llvm method %s\n", method_name = mono_method_full_name (method, TRUE));
else if (cfg->gsharedvt)
g_print ("converting gsharedvt method %s\n", method_name = mono_method_full_name (method_to_compile, TRUE));
else if (cfg->gshared)
g_print ("converting shared method %s\n", method_name = mono_method_full_name (method_to_compile, TRUE));
else
g_print ("converting method %s\n", method_name = mono_method_full_name (method, TRUE));
*/
g_free (method_name);
}
if (cfg->opt & MONO_OPT_ABCREM)
cfg->opt |= MONO_OPT_SSA;
cfg->rs = mono_regstate_new ();
cfg->next_vreg = cfg->rs->next_vreg;
/* FIXME: Fix SSA to handle branches inside bblocks */
if (cfg->opt & MONO_OPT_SSA)
cfg->enable_extended_bblocks = FALSE;
/*
* FIXME: This confuses liveness analysis because variables which are assigned after
* a branch inside a bblock become part of the kill set, even though the assignment
* might not get executed. This causes the optimize_initlocals pass to delete some
* assignments which are needed.
* Also, the mono_if_conversion pass needs to be modified to recognize the code
* created by this.
*/
//cfg->enable_extended_bblocks = TRUE;
/*
* create MonoInst* which represents arguments and local variables
*/
mono_compile_create_vars (cfg);
mono_cfg_dump_create_context (cfg);
mono_cfg_dump_begin_group (cfg);
MONO_TIME_TRACK (mono_jit_stats.jit_method_to_ir, i = mono_method_to_ir (cfg, method_to_compile, NULL, NULL, NULL, NULL, 0, FALSE));
mono_cfg_dump_ir (cfg, "method-to-ir");
if (cfg->gdump_ctx != NULL) {
/* workaround for graph visualization, as it doesn't handle empty basic blocks properly */
mono_insert_nop_in_empty_bb (cfg);
mono_cfg_dump_ir (cfg, "mono_insert_nop_in_empty_bb");
}
if (i < 0) {
if (try_generic_shared && cfg->exception_type == MONO_EXCEPTION_GENERIC_SHARING_FAILED) {
if (compile_aot) {
if (MONO_METHOD_COMPILE_END_ENABLED ())
MONO_PROBE_METHOD_COMPILE_END (method, FALSE);
return cfg;
}
mono_destroy_compile (cfg);
try_generic_shared = FALSE;
goto restart_compile;
}
g_assert (cfg->exception_type != MONO_EXCEPTION_GENERIC_SHARING_FAILED);
if (MONO_METHOD_COMPILE_END_ENABLED ())
MONO_PROBE_METHOD_COMPILE_END (method, FALSE);
/* cfg contains the details of the failure, so let the caller cleanup */
return cfg;
}
cfg->stat_basic_blocks += cfg->num_bblocks;
if (COMPILE_LLVM (cfg)) {
MonoInst *ins;
/* The IR has to be in SSA form for LLVM */
cfg->opt |= MONO_OPT_SSA;
// FIXME:
if (cfg->ret) {
// Allow SSA on the result value
if (!cfg->interp_entry_only)
cfg->ret->flags &= ~MONO_INST_VOLATILE;
// Add an explicit return instruction referencing the return value
MONO_INST_NEW (cfg, ins, OP_SETRET);
ins->sreg1 = cfg->ret->dreg;
MONO_ADD_INS (cfg->bb_exit, ins);
}
cfg->opt &= ~MONO_OPT_LINEARS;
/* FIXME: */
cfg->opt &= ~MONO_OPT_BRANCH;
}
cfg->after_method_to_ir = TRUE;
/* todo: remove code when we have verified that the liveness for try/catch blocks
* works perfectly
*/
/*
* Currently, this can't be commented out since exception blocks are not
* processed during liveness analysis.
* It is also needed, because otherwise the local optimization passes would
* delete assignments in cases like this:
* r1 <- 1
* <something which throws>
* r1 <- 2
* This also allows SSA to be run on methods containing exception clauses, since
* SSA will ignore variables marked VOLATILE.
*/
MONO_TIME_TRACK (mono_jit_stats.jit_liveness_handle_exception_clauses, mono_liveness_handle_exception_clauses (cfg));
mono_cfg_dump_ir (cfg, "liveness_handle_exception_clauses");
MONO_TIME_TRACK (mono_jit_stats.jit_handle_out_of_line_bblock, mono_handle_out_of_line_bblock (cfg));
mono_cfg_dump_ir (cfg, "handle_out_of_line_bblock");
/*g_print ("numblocks = %d\n", cfg->num_bblocks);*/
if (!COMPILE_LLVM (cfg)) {
MONO_TIME_TRACK (mono_jit_stats.jit_decompose_long_opts, mono_decompose_long_opts (cfg));
mono_cfg_dump_ir (cfg, "decompose_long_opts");
}
/* Should be done before branch opts */
if (cfg->opt & (MONO_OPT_CONSPROP | MONO_OPT_COPYPROP)) {
MONO_TIME_TRACK (mono_jit_stats.jit_local_cprop, mono_local_cprop (cfg));
mono_cfg_dump_ir (cfg, "local_cprop");
}
if (cfg->flags & MONO_CFG_HAS_TYPE_CHECK) {
MONO_TIME_TRACK (mono_jit_stats.jit_decompose_typechecks, mono_decompose_typechecks (cfg));
if (cfg->gdump_ctx != NULL) {
/* workaround for graph visualization, as it doesn't handle empty basic blocks properly */
mono_insert_nop_in_empty_bb (cfg);
}
mono_cfg_dump_ir (cfg, "decompose_typechecks");
}
/*
* Should be done after cprop which can do strength reduction on
* some of these ops, after propagating immediates.
*/
if (cfg->has_emulated_ops) {
MONO_TIME_TRACK (mono_jit_stats.jit_local_emulate_ops, mono_local_emulate_ops (cfg));
mono_cfg_dump_ir (cfg, "local_emulate_ops");
}
if (cfg->opt & MONO_OPT_BRANCH) {
MONO_TIME_TRACK (mono_jit_stats.jit_optimize_branches, mono_optimize_branches (cfg));
mono_cfg_dump_ir (cfg, "optimize_branches");
}
/* This must be done _before_ global reg alloc and _after_ decompose */
MONO_TIME_TRACK (mono_jit_stats.jit_handle_global_vregs, mono_handle_global_vregs (cfg));
mono_cfg_dump_ir (cfg, "handle_global_vregs");
if (cfg->opt & MONO_OPT_DEADCE) {
MONO_TIME_TRACK (mono_jit_stats.jit_local_deadce, mono_local_deadce (cfg));
mono_cfg_dump_ir (cfg, "local_deadce");
}
if (cfg->opt & MONO_OPT_ALIAS_ANALYSIS) {
MONO_TIME_TRACK (mono_jit_stats.jit_local_alias_analysis, mono_local_alias_analysis (cfg));
mono_cfg_dump_ir (cfg, "local_alias_analysis");
}
/* Disable this for LLVM to make the IR easier to handle */
if (!COMPILE_LLVM (cfg)) {
MONO_TIME_TRACK (mono_jit_stats.jit_if_conversion, mono_if_conversion (cfg));
mono_cfg_dump_ir (cfg, "if_conversion");
}
remove_empty_finally_pass (cfg);
if (cfg->llvm_only && cfg->interp && !cfg->method->wrapper_type && !interp_entry_only && !cfg->deopt) {
/* Disable llvm if there are still finally clauses left */
for (int i = 0; i < cfg->header->num_clauses; ++i) {
MonoExceptionClause *clause = &header->clauses [i];
if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY && !cfg->clause_is_dead [i]) {
cfg->exception_message = g_strdup ("finally clause.");
cfg->disable_llvm = TRUE;
break;
}
}
}
mono_threads_safepoint ();
MONO_TIME_TRACK (mono_jit_stats.jit_bb_ordering, mono_bb_ordering (cfg));
mono_cfg_dump_ir (cfg, "bb_ordering");
if (((cfg->num_varinfo > 2000) || (cfg->num_bblocks > 1000)) && !cfg->compile_aot) {
/*
* we disable some optimizations if there are too many variables
* because JIT time may become too expensive. The actual number needs
* to be tweaked and eventually the non-linear algorithms should be fixed.
*/
cfg->opt &= ~ (MONO_OPT_LINEARS | MONO_OPT_COPYPROP | MONO_OPT_CONSPROP);
cfg->disable_ssa = TRUE;
}
if (cfg->num_varinfo > 10000 && !cfg->llvm_only)
		/* Disable llvm for overly complex methods, by disabling SSA which LLVM requires */
cfg->disable_ssa = TRUE;
if (cfg->opt & MONO_OPT_LOOP) {
MONO_TIME_TRACK (mono_jit_stats.jit_compile_dominator_info, mono_compile_dominator_info (cfg, MONO_COMP_DOM | MONO_COMP_IDOM));
MONO_TIME_TRACK (mono_jit_stats.jit_compute_natural_loops, mono_compute_natural_loops (cfg));
}
if (mono_threads_are_safepoints_enabled ()) {
MONO_TIME_TRACK (mono_jit_stats.jit_insert_safepoints, insert_safepoints (cfg));
mono_cfg_dump_ir (cfg, "insert_safepoints");
}
/* after method_to_ir */
if (parts == 1) {
if (MONO_METHOD_COMPILE_END_ENABLED ())
MONO_PROBE_METHOD_COMPILE_END (method, TRUE);
return cfg;
}
/*
if (header->num_clauses)
cfg->disable_ssa = TRUE;
*/
//#define DEBUGSSA "logic_run"
//#define DEBUGSSA_CLASS "Tests"
#ifdef DEBUGSSA
if (!cfg->disable_ssa) {
mono_local_cprop (cfg);
#ifndef DISABLE_SSA
mono_ssa_compute (cfg);
#endif
}
#else
if (cfg->opt & MONO_OPT_SSA) {
if (!(cfg->comp_done & MONO_COMP_SSA) && !cfg->disable_ssa) {
#ifndef DISABLE_SSA
MONO_TIME_TRACK (mono_jit_stats.jit_ssa_compute, mono_ssa_compute (cfg));
mono_cfg_dump_ir (cfg, "ssa_compute");
#endif
if (cfg->verbose_level >= 2) {
print_dfn (cfg);
}
}
}
#endif
/* after SSA translation */
if (parts == 2) {
if (MONO_METHOD_COMPILE_END_ENABLED ())
MONO_PROBE_METHOD_COMPILE_END (method, TRUE);
return cfg;
}
if ((cfg->opt & MONO_OPT_CONSPROP) || (cfg->opt & MONO_OPT_COPYPROP)) {
if (cfg->comp_done & MONO_COMP_SSA && !COMPILE_LLVM (cfg)) {
#ifndef DISABLE_SSA
MONO_TIME_TRACK (mono_jit_stats.jit_ssa_cprop, mono_ssa_cprop (cfg));
mono_cfg_dump_ir (cfg, "ssa_cprop");
#endif
}
}
#ifndef DISABLE_SSA
if (cfg->comp_done & MONO_COMP_SSA && !COMPILE_LLVM (cfg)) {
//mono_ssa_strength_reduction (cfg);
if (cfg->opt & MONO_OPT_DEADCE) {
MONO_TIME_TRACK (mono_jit_stats.jit_ssa_deadce, mono_ssa_deadce (cfg));
mono_cfg_dump_ir (cfg, "ssa_deadce");
}
if ((cfg->flags & (MONO_CFG_HAS_LDELEMA|MONO_CFG_HAS_CHECK_THIS)) && (cfg->opt & MONO_OPT_ABCREM)) {
MONO_TIME_TRACK (mono_jit_stats.jit_perform_abc_removal, mono_perform_abc_removal (cfg));
mono_cfg_dump_ir (cfg, "perform_abc_removal");
}
MONO_TIME_TRACK (mono_jit_stats.jit_ssa_remove, mono_ssa_remove (cfg));
mono_cfg_dump_ir (cfg, "ssa_remove");
MONO_TIME_TRACK (mono_jit_stats.jit_local_cprop2, mono_local_cprop (cfg));
mono_cfg_dump_ir (cfg, "local_cprop2");
MONO_TIME_TRACK (mono_jit_stats.jit_handle_global_vregs2, mono_handle_global_vregs (cfg));
mono_cfg_dump_ir (cfg, "handle_global_vregs2");
if (cfg->opt & MONO_OPT_DEADCE) {
MONO_TIME_TRACK (mono_jit_stats.jit_local_deadce2, mono_local_deadce (cfg));
mono_cfg_dump_ir (cfg, "local_deadce2");
}
if (cfg->opt & MONO_OPT_BRANCH) {
MONO_TIME_TRACK (mono_jit_stats.jit_optimize_branches2, mono_optimize_branches (cfg));
mono_cfg_dump_ir (cfg, "optimize_branches2");
}
}
#endif
if (cfg->comp_done & MONO_COMP_SSA && COMPILE_LLVM (cfg)) {
mono_ssa_loop_invariant_code_motion (cfg);
mono_cfg_dump_ir (cfg, "loop_invariant_code_motion");
		/* This removes MONO_INST_FAULT flags too, so perform it even without MONO_CFG_HAS_LDELEMA/MONO_CFG_HAS_CHECK_THIS */
if (cfg->opt & MONO_OPT_ABCREM) {
mono_perform_abc_removal (cfg);
mono_cfg_dump_ir (cfg, "abc_removal");
}
}
/* after SSA removal */
if (parts == 3) {
if (MONO_METHOD_COMPILE_END_ENABLED ())
MONO_PROBE_METHOD_COMPILE_END (method, TRUE);
return cfg;
}
if (cfg->llvm_only && cfg->gsharedvt)
mono_ssa_remove_gsharedvt (cfg);
#ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
if (COMPILE_SOFT_FLOAT (cfg))
mono_decompose_soft_float (cfg);
#endif
MONO_TIME_TRACK (mono_jit_stats.jit_decompose_vtype_opts, mono_decompose_vtype_opts (cfg));
if (cfg->flags & MONO_CFG_NEEDS_DECOMPOSE) {
MONO_TIME_TRACK (mono_jit_stats.jit_decompose_array_access_opts, mono_decompose_array_access_opts (cfg));
mono_cfg_dump_ir (cfg, "decompose_array_access_opts");
}
if (cfg->got_var) {
#ifndef MONO_ARCH_GOT_REG
GList *regs;
#endif
int got_reg;
g_assert (cfg->got_var_allocated);
/*
	 * Always allocate the GOT var to a register, because keeping it
	 * in memory will increase the number of live temporaries in some
	 * code created by inssel.brg, leading to the well-known spills +
	 * branches problem. Testcase: mcs crash in
* System.MonoCustomAttrs:GetCustomAttributes.
*/
#ifdef MONO_ARCH_GOT_REG
got_reg = MONO_ARCH_GOT_REG;
#else
regs = mono_arch_get_global_int_regs (cfg);
g_assert (regs);
got_reg = GPOINTER_TO_INT (regs->data);
g_list_free (regs);
#endif
cfg->got_var->opcode = OP_REGVAR;
cfg->got_var->dreg = got_reg;
cfg->used_int_regs |= 1LL << cfg->got_var->dreg;
}
/*
* Have to call this again to process variables added since the first call.
*/
MONO_TIME_TRACK(mono_jit_stats.jit_liveness_handle_exception_clauses2, mono_liveness_handle_exception_clauses (cfg));
if (cfg->opt & MONO_OPT_LINEARS) {
GList *vars, *regs, *l;
		/* FIXME: maybe we can avoid computing liveness here if it was already computed */
cfg->comp_done &= ~MONO_COMP_LIVENESS;
if (!(cfg->comp_done & MONO_COMP_LIVENESS))
MONO_TIME_TRACK (mono_jit_stats.jit_analyze_liveness, mono_analyze_liveness (cfg));
if ((vars = mono_arch_get_allocatable_int_vars (cfg))) {
regs = mono_arch_get_global_int_regs (cfg);
/* Remove the reg reserved for holding the GOT address */
if (cfg->got_var) {
for (l = regs; l; l = l->next) {
if (GPOINTER_TO_UINT (l->data) == cfg->got_var->dreg) {
regs = g_list_delete_link (regs, l);
break;
}
}
}
MONO_TIME_TRACK (mono_jit_stats.jit_linear_scan, mono_linear_scan (cfg, vars, regs, &cfg->used_int_regs));
mono_cfg_dump_ir (cfg, "linear_scan");
}
}
//mono_print_code (cfg, "");
//print_dfn (cfg);
/* variables are allocated after decompose, since decompose could create temps */
if (!COMPILE_LLVM (cfg)) {
MONO_TIME_TRACK (mono_jit_stats.jit_arch_allocate_vars, mono_arch_allocate_vars (cfg));
mono_cfg_dump_ir (cfg, "arch_allocate_vars");
if (cfg->exception_type)
return cfg;
}
if (cfg->gsharedvt)
mono_allocate_gsharedvt_vars (cfg);
if (!COMPILE_LLVM (cfg)) {
gboolean need_local_opts;
MONO_TIME_TRACK (mono_jit_stats.jit_spill_global_vars, mono_spill_global_vars (cfg, &need_local_opts));
mono_cfg_dump_ir (cfg, "spill_global_vars");
if (need_local_opts || cfg->compile_aot) {
/* To optimize code created by spill_global_vars */
MONO_TIME_TRACK (mono_jit_stats.jit_local_cprop3, mono_local_cprop (cfg));
if (cfg->opt & MONO_OPT_DEADCE)
MONO_TIME_TRACK (mono_jit_stats.jit_local_deadce3, mono_local_deadce (cfg));
mono_cfg_dump_ir (cfg, "needs_local_opts");
}
}
mono_insert_branches_between_bblocks (cfg);
if (COMPILE_LLVM (cfg)) {
#ifdef ENABLE_LLVM
char *nm;
/* The IR has to be in SSA form for LLVM */
if (!(cfg->comp_done & MONO_COMP_SSA)) {
cfg->exception_message = g_strdup ("SSA disabled.");
cfg->disable_llvm = TRUE;
}
if (cfg->flags & MONO_CFG_NEEDS_DECOMPOSE)
mono_decompose_array_access_opts (cfg);
if (!cfg->disable_llvm)
mono_llvm_emit_method (cfg);
if (cfg->disable_llvm) {
if (cfg->verbose_level > 0) {
//nm = mono_method_full_name (cfg->method, TRUE);
printf ("LLVM failed for '%s.%s': %s\n", m_class_get_name (method->klass), method->name, cfg->exception_message);
//g_free (nm);
}
if (cfg->llvm_only && cfg->interp && !interp_entry_only) {
// If interp support is enabled, restart compilation, generating interp entry code only
interp_entry_only = TRUE;
mono_destroy_compile (cfg);
goto restart_compile;
}
if (cfg->llvm_only) {
cfg->disable_aot = TRUE;
return cfg;
}
mono_destroy_compile (cfg);
try_llvm = FALSE;
goto restart_compile;
}
if (cfg->verbose_level > 0 && !cfg->compile_aot) {
nm = mono_method_get_full_name (cfg->method);
g_print ("LLVM Method %s emitted at %p to %p (code length %d)\n",
nm,
cfg->native_code, cfg->native_code + cfg->code_len, cfg->code_len);
g_free (nm);
}
#endif
} else {
MONO_TIME_TRACK (mono_jit_stats.jit_codegen, mono_codegen (cfg));
mono_cfg_dump_ir (cfg, "codegen");
if (cfg->exception_type)
return cfg;
}
if (COMPILE_LLVM (cfg))
mono_atomic_inc_i32 (&mono_jit_stats.methods_with_llvm);
else
mono_atomic_inc_i32 (&mono_jit_stats.methods_without_llvm);
MONO_TIME_TRACK (mono_jit_stats.jit_create_jit_info, cfg->jit_info = create_jit_info (cfg, method_to_compile));
if (cfg->extend_live_ranges) {
/* Extend live ranges to cover the whole method */
for (i = 0; i < cfg->num_varinfo; ++i)
MONO_VARINFO (cfg, i)->live_range_end = cfg->code_len;
}
MONO_TIME_TRACK (mono_jit_stats.jit_gc_create_gc_map, mini_gc_create_gc_map (cfg));
MONO_TIME_TRACK (mono_jit_stats.jit_save_seq_point_info, mono_save_seq_point_info (cfg, cfg->jit_info));
if (!cfg->compile_aot)
mono_lldb_save_method_info (cfg);
if (cfg->verbose_level >= 2) {
char *id = mono_method_full_name (cfg->method, TRUE);
g_print ("\n*** ASM for %s ***\n", id);
mono_disassemble_code (cfg, cfg->native_code, cfg->code_len, id + 3);
g_print ("***\n\n");
g_free (id);
}
if (!cfg->compile_aot && !(flags & JIT_FLAG_DISCARD_RESULTS)) {
mono_jit_info_table_add (cfg->jit_info);
if (cfg->method->dynamic) {
MonoJitMemoryManager *jit_mm = (MonoJitMemoryManager*)cfg->jit_mm;
MonoJitDynamicMethodInfo *res;
jit_mm_lock (jit_mm);
g_assert (jit_mm->dynamic_code_hash);
res = (MonoJitDynamicMethodInfo *)g_hash_table_lookup (jit_mm->dynamic_code_hash, method);
jit_mm_unlock (jit_mm);
g_assert (res);
res->ji = cfg->jit_info;
}
mono_postprocess_patches_after_ji_publish (cfg);
}
#if 0
if (cfg->gsharedvt)
printf ("GSHAREDVT: %s\n", mono_method_full_name (cfg->method, TRUE));
#endif
/* collect statistics */
#ifndef DISABLE_PERFCOUNTERS
mono_atomic_inc_i32 (&mono_perfcounters->jit_methods);
mono_atomic_fetch_add_i32 (&mono_perfcounters->jit_bytes, header->code_size);
#endif
gint32 code_size_ratio = cfg->code_len;
mono_atomic_fetch_add_i32 (&mono_jit_stats.allocated_code_size, code_size_ratio);
mono_atomic_fetch_add_i32 (&mono_jit_stats.native_code_size, code_size_ratio);
/* FIXME: use an explicit function to read booleans */
if ((gboolean)mono_atomic_load_i32 ((gint32*)&mono_jit_stats.enabled)) {
if (code_size_ratio > mono_atomic_load_i32 (&mono_jit_stats.biggest_method_size)) {
mono_atomic_store_i32 (&mono_jit_stats.biggest_method_size, code_size_ratio);
			char *biggest_method = g_strdup_printf ("%s::%s", m_class_get_name (method->klass), method->name);
biggest_method = (char*)mono_atomic_xchg_ptr ((gpointer*)&mono_jit_stats.biggest_method, biggest_method);
g_free (biggest_method);
}
code_size_ratio = (code_size_ratio * 100) / header->code_size;
if (code_size_ratio > mono_atomic_load_i32 (&mono_jit_stats.max_code_size_ratio)) {
mono_atomic_store_i32 (&mono_jit_stats.max_code_size_ratio, code_size_ratio);
			char *max_ratio_method = g_strdup_printf ("%s::%s", m_class_get_name (method->klass), method->name);
max_ratio_method = (char*)mono_atomic_xchg_ptr ((gpointer*)&mono_jit_stats.max_ratio_method, max_ratio_method);
g_free (max_ratio_method);
}
}
if (MONO_METHOD_COMPILE_END_ENABLED ())
MONO_PROBE_METHOD_COMPILE_END (method, TRUE);
mono_cfg_dump_close_group (cfg);
return cfg;
}
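/*
 * mini_class_has_reference_variant_generic_argument:
 *
 *   Return whether KLASS is instantiated with a reference type for at least one of its
 * variant (covariant or contravariant) generic parameters.
 */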
gboolean
mini_class_has_reference_variant_generic_argument (MonoCompile *cfg, MonoClass *klass, int context_used)
{
int i;
MonoGenericContainer *container;
MonoGenericInst *ginst;
if (mono_class_is_ginst (klass)) {
container = mono_class_get_generic_container (mono_class_get_generic_class (klass)->container_class);
ginst = mono_class_get_generic_class (klass)->context.class_inst;
} else if (mono_class_is_gtd (klass) && context_used) {
container = mono_class_get_generic_container (klass);
ginst = container->context.class_inst;
} else {
return FALSE;
}
for (i = 0; i < container->type_argc; ++i) {
MonoType *type;
if (!(mono_generic_container_get_param_info (container, i)->flags & (MONO_GEN_PARAM_VARIANT|MONO_GEN_PARAM_COVARIANT)))
continue;
type = ginst->type_argv [i];
if (mini_type_is_reference (type))
return TRUE;
}
return FALSE;
}
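/*
 * mono_cfg_add_try_hole:
 *
 *   Record a hole starting at START inside BB in the native code range of CLAUSE's
 * try block. The holes are emitted into the method's hole table by create_jit_info ().
 */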
void
mono_cfg_add_try_hole (MonoCompile *cfg, MonoExceptionClause *clause, guint8 *start, MonoBasicBlock *bb)
{
TryBlockHole *hole = (TryBlockHole *)mono_mempool_alloc (cfg->mempool, sizeof (TryBlockHole));
hole->clause = clause;
hole->start_offset = start - cfg->native_code;
hole->basic_block = bb;
cfg->try_block_holes = g_slist_append_mempool (cfg->mempool, cfg->try_block_holes, hole);
}
void
mono_cfg_set_exception (MonoCompile *cfg, MonoExceptionType type)
{
cfg->exception_type = type;
}
/* Assumes ownership of the MSG argument */
void
mono_cfg_set_exception_invalid_program (MonoCompile *cfg, char *msg)
{
mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
mono_error_set_generic_error (cfg->error, "System", "InvalidProgramException", "%s", msg);
}
#endif /* DISABLE_JIT */
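/*
 * mono_time_track_start:
 *
 *   Return a start timestamp in 100ns ticks, to be passed to mono_time_track_end ().
 */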
gint64
mono_time_track_start (void)
{
return mono_100ns_ticks ();
}
/*
* mono_time_track_end:
*
 * Uses UnlockedAdd64 () to update \param time.
 */
void
mono_time_track_end (gint64 *time, gint64 start)
{
UnlockedAdd64 (time, mono_100ns_ticks () - start);
}
/*
* mono_update_jit_stats:
*
* Only call this function in locked environments to avoid data races.
*/
MONO_NO_SANITIZE_THREAD
void
mono_update_jit_stats (MonoCompile *cfg)
{
mono_jit_stats.allocate_var += cfg->stat_allocate_var;
mono_jit_stats.locals_stack_size += cfg->stat_locals_stack_size;
mono_jit_stats.basic_blocks += cfg->stat_basic_blocks;
mono_jit_stats.max_basic_blocks = MAX (cfg->stat_basic_blocks, mono_jit_stats.max_basic_blocks);
mono_jit_stats.cil_code_size += cfg->stat_cil_code_size;
mono_jit_stats.regvars += cfg->stat_n_regvars;
mono_jit_stats.inlineable_methods += cfg->stat_inlineable_methods;
mono_jit_stats.inlined_methods += cfg->stat_inlined_methods;
mono_jit_stats.code_reallocs += cfg->stat_code_reallocs;
}
/*
* mono_jit_compile_method_inner:
*
* Main entry point for the JIT.
*/
gpointer
mono_jit_compile_method_inner (MonoMethod *method, int opt, MonoError *error)
{
MonoCompile *cfg;
gpointer code = NULL;
MonoJitInfo *jinfo, *info;
MonoVTable *vtable;
MonoException *ex = NULL;
gint64 start;
MonoMethod *prof_method, *shared;
error_init (error);
start = mono_time_track_start ();
cfg = mini_method_compile (method, opt, JIT_FLAG_RUN_CCTORS, 0, -1);
	gint64 jit_time = 0;
mono_time_track_end (&jit_time, start);
UnlockedAdd64 (&mono_jit_stats.jit_time, jit_time);
prof_method = cfg->method;
switch (cfg->exception_type) {
case MONO_EXCEPTION_NONE:
break;
case MONO_EXCEPTION_TYPE_LOAD:
case MONO_EXCEPTION_MISSING_FIELD:
case MONO_EXCEPTION_MISSING_METHOD:
case MONO_EXCEPTION_FILE_NOT_FOUND:
case MONO_EXCEPTION_BAD_IMAGE:
case MONO_EXCEPTION_INVALID_PROGRAM: {
/* Throw a type load exception if needed */
if (cfg->exception_ptr) {
ex = mono_class_get_exception_for_failure ((MonoClass *)cfg->exception_ptr);
} else {
if (cfg->exception_type == MONO_EXCEPTION_MISSING_FIELD)
ex = mono_exception_from_name_msg (mono_defaults.corlib, "System", "MissingFieldException", cfg->exception_message);
else if (cfg->exception_type == MONO_EXCEPTION_MISSING_METHOD)
ex = mono_exception_from_name_msg (mono_defaults.corlib, "System", "MissingMethodException", cfg->exception_message);
else if (cfg->exception_type == MONO_EXCEPTION_TYPE_LOAD)
ex = mono_exception_from_name_msg (mono_defaults.corlib, "System", "TypeLoadException", cfg->exception_message);
else if (cfg->exception_type == MONO_EXCEPTION_FILE_NOT_FOUND)
ex = mono_exception_from_name_msg (mono_defaults.corlib, "System.IO", "FileNotFoundException", cfg->exception_message);
else if (cfg->exception_type == MONO_EXCEPTION_BAD_IMAGE)
ex = mono_get_exception_bad_image_format (cfg->exception_message);
else if (cfg->exception_type == MONO_EXCEPTION_INVALID_PROGRAM)
ex = mono_exception_from_name_msg (mono_defaults.corlib, "System", "InvalidProgramException", cfg->exception_message);
else
g_assert_not_reached ();
}
break;
}
case MONO_EXCEPTION_MONO_ERROR:
// FIXME: MonoError has no copy ctor
g_assert (!is_ok (cfg->error));
ex = mono_error_convert_to_exception (cfg->error);
break;
default:
g_assert_not_reached ();
}
if (ex) {
MONO_PROFILER_RAISE (jit_failed, (method));
mono_destroy_compile (cfg);
mono_error_set_exception_instance (error, ex);
return NULL;
}
if (mono_method_is_generic_sharable (method, FALSE)) {
shared = mini_get_shared_method_full (method, SHARE_MODE_NONE, error);
if (!is_ok (error)) {
MONO_PROFILER_RAISE (jit_failed, (method));
mono_destroy_compile (cfg);
return NULL;
}
} else {
shared = NULL;
}
mono_loader_lock ();
if (mono_stats_method_desc && mono_method_desc_full_match (mono_stats_method_desc, method)) {
g_printf ("Printing runtime stats at method: %s\n", mono_method_get_full_name (method));
mono_runtime_print_stats ();
}
/* Check if some other thread already did the job. In this case, we can
discard the code this thread generated. */
info = mini_lookup_method (method, shared);
if (info) {
code = info->code_start;
discarded_code ++;
discarded_jit_time += jit_time;
}
if (code == NULL) {
MonoJitMemoryManager *jit_mm = (MonoJitMemoryManager*)cfg->jit_mm;
		/* The lookup + insert is atomic since this is done inside the loader lock */
jit_code_hash_lock (jit_mm);
mono_internal_hash_table_insert (&jit_mm->jit_code_hash, cfg->jit_info->d.method, cfg->jit_info);
jit_code_hash_unlock (jit_mm);
code = cfg->native_code;
if (cfg->gshared && mono_method_is_generic_sharable (method, FALSE))
mono_atomic_inc_i32 (&mono_stats.generics_shared_methods);
if (cfg->gsharedvt)
mono_atomic_inc_i32 (&mono_stats.gsharedvt_methods);
}
jinfo = cfg->jit_info;
/*
* Update global stats while holding a lock, instead of doing many
* mono_atomic_inc_i32 operations during JITting.
*/
mono_update_jit_stats (cfg);
mono_destroy_compile (cfg);
mini_patch_llvm_jit_callees (method, code);
#ifndef DISABLE_JIT
mono_emit_jit_map (jinfo);
mono_emit_jit_dump (jinfo, code);
#endif
mono_loader_unlock ();
if (!is_ok (error))
return NULL;
vtable = mono_class_vtable_checked (method->klass, error);
return_val_if_nok (error, NULL);
if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
if (mono_marshal_method_from_wrapper (method)) {
/* Native func wrappers have no method */
/* The profiler doesn't know about wrappers, so pass the original icall method */
MONO_PROFILER_RAISE (jit_done, (mono_marshal_method_from_wrapper (method), jinfo));
}
}
MONO_PROFILER_RAISE (jit_done, (method, jinfo));
if (prof_method != method)
MONO_PROFILER_RAISE (jit_done, (prof_method, jinfo));
if (!mono_runtime_class_init_full (vtable, error))
return NULL;
return MINI_ADDR_TO_FTNPTR (code);
}
/*
* mini_get_underlying_type:
*
* Return the type the JIT will use during compilation.
* Handles: byref, enums, native types, bool/char, ref types, generic sharing.
* For gsharedvt types, it will return the original VAR/MVAR.
*/
MonoType*
mini_get_underlying_type (MonoType *type)
{
return mini_type_get_underlying_type (type);
}
void
mini_jit_init (void)
{
mono_os_mutex_init_recursive (&jit_mutex);
#ifndef DISABLE_JIT
mono_counters_register ("Discarded method code", MONO_COUNTER_JIT | MONO_COUNTER_INT, &discarded_code);
mono_counters_register ("Time spent JITting discarded code", MONO_COUNTER_JIT | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &discarded_jit_time);
mono_counters_register ("Try holes memory size", MONO_COUNTER_JIT | MONO_COUNTER_INT, &jinfo_try_holes_size);
mono_counters_register ("JIT/method_to_ir", MONO_COUNTER_JIT | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &mono_jit_stats.jit_method_to_ir);
mono_counters_register ("JIT/liveness_handle_exception_clauses", MONO_COUNTER_JIT | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &mono_jit_stats.jit_liveness_handle_exception_clauses);
mono_counters_register ("JIT/handle_out_of_line_bblock", MONO_COUNTER_JIT | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &mono_jit_stats.jit_handle_out_of_line_bblock);
mono_counters_register ("JIT/decompose_long_opts", MONO_COUNTER_JIT | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &mono_jit_stats.jit_decompose_long_opts);
mono_counters_register ("JIT/decompose_typechecks", MONO_COUNTER_JIT | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &mono_jit_stats.jit_decompose_typechecks);
mono_counters_register ("JIT/local_cprop", MONO_COUNTER_JIT | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &mono_jit_stats.jit_local_cprop);
mono_counters_register ("JIT/local_emulate_ops", MONO_COUNTER_JIT | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &mono_jit_stats.jit_local_emulate_ops);
mono_counters_register ("JIT/optimize_branches", MONO_COUNTER_JIT | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &mono_jit_stats.jit_optimize_branches);
mono_counters_register ("JIT/handle_global_vregs", MONO_COUNTER_JIT | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &mono_jit_stats.jit_handle_global_vregs);
mono_counters_register ("JIT/local_deadce", MONO_COUNTER_JIT | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &mono_jit_stats.jit_local_deadce);
mono_counters_register ("JIT/local_alias_analysis", MONO_COUNTER_JIT | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &mono_jit_stats.jit_local_alias_analysis);
mono_counters_register ("JIT/if_conversion", MONO_COUNTER_JIT | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &mono_jit_stats.jit_if_conversion);
mono_counters_register ("JIT/bb_ordering", MONO_COUNTER_JIT | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &mono_jit_stats.jit_bb_ordering);
mono_counters_register ("JIT/compile_dominator_info", MONO_COUNTER_JIT | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &mono_jit_stats.jit_compile_dominator_info);
mono_counters_register ("JIT/compute_natural_loops", MONO_COUNTER_JIT | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &mono_jit_stats.jit_compute_natural_loops);
mono_counters_register ("JIT/insert_safepoints", MONO_COUNTER_JIT | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &mono_jit_stats.jit_insert_safepoints);
mono_counters_register ("JIT/ssa_compute", MONO_COUNTER_JIT | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &mono_jit_stats.jit_ssa_compute);
mono_counters_register ("JIT/ssa_cprop", MONO_COUNTER_JIT | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &mono_jit_stats.jit_ssa_cprop);
mono_counters_register ("JIT/ssa_deadce", MONO_COUNTER_JIT | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &mono_jit_stats.jit_ssa_deadce);
mono_counters_register ("JIT/perform_abc_removal", MONO_COUNTER_JIT | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &mono_jit_stats.jit_perform_abc_removal);
mono_counters_register ("JIT/ssa_remove", MONO_COUNTER_JIT | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &mono_jit_stats.jit_ssa_remove);
mono_counters_register ("JIT/local_cprop2", MONO_COUNTER_JIT | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &mono_jit_stats.jit_local_cprop2);
mono_counters_register ("JIT/handle_global_vregs2", MONO_COUNTER_JIT | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &mono_jit_stats.jit_handle_global_vregs2);
mono_counters_register ("JIT/local_deadce2", MONO_COUNTER_JIT | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &mono_jit_stats.jit_local_deadce2);
mono_counters_register ("JIT/optimize_branches2", MONO_COUNTER_JIT | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &mono_jit_stats.jit_optimize_branches2);
mono_counters_register ("JIT/decompose_vtype_opts", MONO_COUNTER_JIT | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &mono_jit_stats.jit_decompose_vtype_opts);
mono_counters_register ("JIT/decompose_array_access_opts", MONO_COUNTER_JIT | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &mono_jit_stats.jit_decompose_array_access_opts);
mono_counters_register ("JIT/liveness_handle_exception_clauses2", MONO_COUNTER_JIT | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &mono_jit_stats.jit_liveness_handle_exception_clauses2);
mono_counters_register ("JIT/analyze_liveness", MONO_COUNTER_JIT | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &mono_jit_stats.jit_analyze_liveness);
mono_counters_register ("JIT/linear_scan", MONO_COUNTER_JIT | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &mono_jit_stats.jit_linear_scan);
mono_counters_register ("JIT/arch_allocate_vars", MONO_COUNTER_JIT | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &mono_jit_stats.jit_arch_allocate_vars);
mono_counters_register ("JIT/spill_global_var", MONO_COUNTER_JIT | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &mono_jit_stats.jit_spill_global_vars);
mono_counters_register ("JIT/local_cprop3", MONO_COUNTER_JIT | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &mono_jit_stats.jit_local_cprop3);
mono_counters_register ("JIT/local_deadce3", MONO_COUNTER_JIT | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &mono_jit_stats.jit_local_deadce3);
mono_counters_register ("JIT/codegen", MONO_COUNTER_JIT | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &mono_jit_stats.jit_codegen);
mono_counters_register ("JIT/create_jit_info", MONO_COUNTER_JIT | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &mono_jit_stats.jit_create_jit_info);
mono_counters_register ("JIT/gc_create_gc_map", MONO_COUNTER_JIT | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &mono_jit_stats.jit_gc_create_gc_map);
mono_counters_register ("JIT/save_seq_point_info", MONO_COUNTER_JIT | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &mono_jit_stats.jit_save_seq_point_info);
mono_counters_register ("Total time spent JITting", MONO_COUNTER_JIT | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &mono_jit_stats.jit_time);
mono_counters_register ("Basic blocks", MONO_COUNTER_JIT | MONO_COUNTER_INT, &mono_jit_stats.basic_blocks);
mono_counters_register ("Max basic blocks", MONO_COUNTER_JIT | MONO_COUNTER_INT, &mono_jit_stats.max_basic_blocks);
mono_counters_register ("Allocated vars", MONO_COUNTER_JIT | MONO_COUNTER_INT, &mono_jit_stats.allocate_var);
mono_counters_register ("Code reallocs", MONO_COUNTER_JIT | MONO_COUNTER_INT, &mono_jit_stats.code_reallocs);
mono_counters_register ("Allocated code size", MONO_COUNTER_JIT | MONO_COUNTER_INT, &mono_jit_stats.allocated_code_size);
mono_counters_register ("Allocated seq points size", MONO_COUNTER_JIT | MONO_COUNTER_INT, &mono_jit_stats.allocated_seq_points_size);
mono_counters_register ("Inlineable methods", MONO_COUNTER_JIT | MONO_COUNTER_INT, &mono_jit_stats.inlineable_methods);
mono_counters_register ("Inlined methods", MONO_COUNTER_JIT | MONO_COUNTER_INT, &mono_jit_stats.inlined_methods);
mono_counters_register ("Regvars", MONO_COUNTER_JIT | MONO_COUNTER_INT, &mono_jit_stats.regvars);
mono_counters_register ("Locals stack size", MONO_COUNTER_JIT | MONO_COUNTER_INT, &mono_jit_stats.locals_stack_size);
mono_counters_register ("Method cache lookups", MONO_COUNTER_JIT | MONO_COUNTER_INT, &mono_jit_stats.methods_lookups);
mono_counters_register ("Compiled CIL code size", MONO_COUNTER_JIT | MONO_COUNTER_INT, &mono_jit_stats.cil_code_size);
mono_counters_register ("Native code size", MONO_COUNTER_JIT | MONO_COUNTER_INT, &mono_jit_stats.native_code_size);
mono_counters_register ("Aliases found", MONO_COUNTER_JIT | MONO_COUNTER_INT, &mono_jit_stats.alias_found);
mono_counters_register ("Aliases eliminated", MONO_COUNTER_JIT | MONO_COUNTER_INT, &mono_jit_stats.alias_removed);
mono_counters_register ("Aliased loads eliminated", MONO_COUNTER_JIT | MONO_COUNTER_INT, &mono_jit_stats.loads_eliminated);
mono_counters_register ("Aliased stores eliminated", MONO_COUNTER_JIT | MONO_COUNTER_INT, &mono_jit_stats.stores_eliminated);
mono_counters_register ("Optimized immediate divisions", MONO_COUNTER_JIT | MONO_COUNTER_INT, &mono_jit_stats.optimized_divisions);
current_backend = g_new0 (MonoBackend, 1);
init_backend (current_backend);
#endif
}
#ifndef ENABLE_LLVM
void
mono_llvm_emit_aot_file_info (MonoAotFileInfo *info, gboolean has_jitted_code)
{
g_assert_not_reached ();
}
gpointer
mono_llvm_emit_aot_data (const char *symbol, guint8 *data, int data_len)
{
g_assert_not_reached ();
}
gpointer
mono_llvm_emit_aot_data_aligned (const char *symbol, guint8 *data, int data_len, int align)
{
g_assert_not_reached ();
}
#endif
#if !defined(ENABLE_LLVM_RUNTIME) && !defined(ENABLE_LLVM)
void
mono_llvm_cpp_throw_exception (void)
{
g_assert_not_reached ();
}
void
mono_llvm_cpp_catch_exception (MonoLLVMInvokeCallback cb, gpointer arg, gboolean *out_thrown)
{
g_assert_not_reached ();
}
#endif
#ifdef DISABLE_JIT
MonoCompile*
mini_method_compile (MonoMethod *method, guint32 opts, JitFlags flags, int parts, int aot_method_index)
{
g_assert_not_reached ();
return NULL;
}
void
mono_destroy_compile (MonoCompile *cfg)
{
g_assert_not_reached ();
}
void
mono_add_patch_info (MonoCompile *cfg, int ip, MonoJumpInfoType type, gconstpointer target)
{
g_assert_not_reached ();
}
#else // DISABLE_JIT
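/*
 * mini_realloc_code_slow:
 *
 * Grow the native code buffer of CFG until it has room for SIZE more bytes
 * plus some slack, then return a pointer to the current end of the buffer.
 */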
guint8*
mini_realloc_code_slow (MonoCompile *cfg, int size)
{
const int EXTRA_CODE_SPACE = 16;
if (cfg->code_len + size > (cfg->code_size - EXTRA_CODE_SPACE)) {
while (cfg->code_len + size > (cfg->code_size - EXTRA_CODE_SPACE))
cfg->code_size = cfg->code_size * 2 + EXTRA_CODE_SPACE;
cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
cfg->stat_code_reallocs++;
}
return cfg->native_code + cfg->code_len;
}
#endif /* DISABLE_JIT */
gboolean
mini_class_is_system_array (MonoClass *klass)
{
return m_class_get_parent (klass) == mono_defaults.array_class;
}
/*
* mono_target_pagesize:
*
 * Query the page size used to determine whether an implicit NRE check can be used.
*/
int
mono_target_pagesize (void)
{
	/* We could query the system's pagesize via mono_pagesize (), but there
	 * are pitfalls: sysconf (3) is called on some POSIX-like systems, and per
* POSIX.1-2008 this function doesn't have to be async-safe. Since this
* function can be called from a signal handler, we simplify things by
* using 4k on all targets. Implicit null-checks with an offset larger than
* 4k are _very_ uncommon, so we don't mind emitting an explicit null-check
* for those cases.
*/
return 4 * 1024;
}
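/*
 * mini_get_cpu_features:
 *
 * Return the set of CPU features available to the generated code: the features
 * detected on the current CPU (in JIT mode, or in AOT mode with use_current_cpu),
 * plus the architectural baseline, adjusted by the -mattr command line option.
 */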
MonoCPUFeatures
mini_get_cpu_features (MonoCompile* cfg)
{
MonoCPUFeatures features = (MonoCPUFeatures)0;
#if !defined(MONO_CROSS_COMPILE)
if (!cfg->compile_aot || cfg->use_current_cpu) {
// detect current CPU features if we are in JIT mode or AOT with use_current_cpu flag.
#if defined(ENABLE_LLVM)
features = mono_llvm_get_cpu_features (); // llvm has a nice built-in API to detect features
#elif defined(TARGET_AMD64) || defined(TARGET_X86)
features = mono_arch_get_cpu_features ();
#endif
}
#endif
#if defined(TARGET_ARM64)
// All Arm64 devices have this set
features |= MONO_CPU_ARM64_BASE;
// This is a standard part of ARMv8-A; see A1.5 in "ARM
// Architecture Reference Manual ARMv8, for ARMv8-A
// architecture profile"
features |= MONO_CPU_ARM64_NEON;
#endif
// apply parameters passed via -mattr
return (features | mono_cpu_features_enabled) & ~mono_cpu_features_disabled;
}
/**
* \file
* The new Mono code generator.
*
* Authors:
* Paolo Molaro ([email protected])
* Dietmar Maurer ([email protected])
*
* Copyright 2002-2003 Ximian, Inc.
* Copyright 2003-2010 Novell, Inc.
* Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
* Licensed under the MIT license. See LICENSE file in the project root for full license information.
*/
#include <config.h>
#ifdef HAVE_ALLOCA_H
#include <alloca.h>
#endif
#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif
#include <math.h>
#ifdef HAVE_SYS_TIME_H
#include <sys/time.h>
#endif
#include <mono/utils/memcheck.h>
#include <mono/metadata/assembly.h>
#include <mono/metadata/loader.h>
#include <mono/metadata/tabledefs.h>
#include <mono/metadata/class.h>
#include <mono/metadata/object.h>
#include <mono/metadata/tokentype.h>
#include <mono/metadata/tabledefs.h>
#include <mono/metadata/threads.h>
#include <mono/metadata/appdomain.h>
#include <mono/metadata/debug-helpers.h>
#include <mono/metadata/profiler-private.h>
#include <mono/metadata/mono-config.h>
#include <mono/metadata/environment.h>
#include <mono/metadata/mono-debug.h>
#include <mono/metadata/gc-internals.h>
#include <mono/metadata/threads-types.h>
#include <mono/metadata/verify.h>
#include <mono/metadata/mempool-internals.h>
#include <mono/metadata/runtime.h>
#include <mono/metadata/attrdefs.h>
#include <mono/utils/mono-math.h>
#include <mono/utils/mono-compiler.h>
#include <mono/utils/mono-counters.h>
#include <mono/utils/mono-error-internals.h>
#include <mono/utils/mono-logger-internals.h>
#include <mono/utils/mono-mmap.h>
#include <mono/utils/mono-path.h>
#include <mono/utils/mono-tls.h>
#include <mono/utils/mono-hwcap.h>
#include <mono/utils/dtrace.h>
#include <mono/utils/mono-threads.h>
#include <mono/utils/mono-threads-coop.h>
#include <mono/utils/unlocked.h>
#include <mono/utils/mono-time.h>
#include "mini.h"
#include "seq-points.h"
#include <string.h>
#include <ctype.h>
#include "trace.h"
#include "ir-emit.h"
#include "jit-icalls.h"
#include "mini-gc.h"
#include "llvm-runtime.h"
#include "mini-llvm.h"
#include "lldb.h"
#include "aot-runtime.h"
#include "mini-runtime.h"
MonoCallSpec *mono_jit_trace_calls;
MonoMethodDesc *mono_inject_async_exc_method;
int mono_inject_async_exc_pos;
MonoMethodDesc *mono_break_at_bb_method;
int mono_break_at_bb_bb_num;
gboolean mono_do_x86_stack_align = TRUE;
/* Counters */
static guint32 discarded_code;
static gint64 discarded_jit_time;
#define mono_jit_lock() mono_os_mutex_lock (&jit_mutex)
#define mono_jit_unlock() mono_os_mutex_unlock (&jit_mutex)
static mono_mutex_t jit_mutex;
#ifndef DISABLE_JIT
static guint32 jinfo_try_holes_size;
static MonoBackend *current_backend;
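/*
 * mono_realloc_native_code:
 *
 * Reallocate the native code buffer of CFG to its current code_size and return
 * the new buffer.
 */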
gpointer
mono_realloc_native_code (MonoCompile *cfg)
{
return g_realloc (cfg->native_code, cfg->code_size);
}
typedef struct {
MonoExceptionClause *clause;
MonoBasicBlock *basic_block;
int start_offset;
} TryBlockHole;
/**
* mono_emit_unwind_op:
*
* Add an unwind op with the given parameters for the list of unwind ops stored in
* cfg->unwind_ops.
*/
void
mono_emit_unwind_op (MonoCompile *cfg, int when, int tag, int reg, int val)
{
MonoUnwindOp *op = (MonoUnwindOp *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoUnwindOp));
op->op = tag;
op->reg = reg;
op->val = val;
op->when = when;
cfg->unwind_ops = g_slist_append_mempool (cfg->mempool, cfg->unwind_ops, op);
if (cfg->verbose_level > 1) {
switch (tag) {
case DW_CFA_def_cfa:
printf ("CFA: [%x] def_cfa: %s+0x%x\n", when, mono_arch_regname (reg), val);
break;
case DW_CFA_def_cfa_register:
printf ("CFA: [%x] def_cfa_reg: %s\n", when, mono_arch_regname (reg));
break;
case DW_CFA_def_cfa_offset:
printf ("CFA: [%x] def_cfa_offset: 0x%x\n", when, val);
break;
case DW_CFA_offset:
printf ("CFA: [%x] offset: %s at cfa-0x%x\n", when, mono_arch_regname (reg), -val);
break;
}
}
}
/**
* mono_unlink_bblock:
*
* Unlink two basic blocks.
*/
void
mono_unlink_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
{
int i, pos;
gboolean found;
found = FALSE;
for (i = 0; i < from->out_count; ++i) {
if (to == from->out_bb [i]) {
found = TRUE;
break;
}
}
if (found) {
pos = 0;
for (i = 0; i < from->out_count; ++i) {
if (from->out_bb [i] != to)
from->out_bb [pos ++] = from->out_bb [i];
}
g_assert (pos == from->out_count - 1);
from->out_count--;
}
found = FALSE;
for (i = 0; i < to->in_count; ++i) {
if (from == to->in_bb [i]) {
found = TRUE;
break;
}
}
if (found) {
pos = 0;
for (i = 0; i < to->in_count; ++i) {
if (to->in_bb [i] != from)
to->in_bb [pos ++] = to->in_bb [i];
}
g_assert (pos == to->in_count - 1);
to->in_count--;
}
}
/*
* mono_bblocks_linked:
*
 * Return whether BB1 and BB2 are linked in the CFG.
*/
gboolean
mono_bblocks_linked (MonoBasicBlock *bb1, MonoBasicBlock *bb2)
{
int i;
for (i = 0; i < bb1->out_count; ++i) {
if (bb1->out_bb [i] == bb2)
return TRUE;
}
return FALSE;
}
static int
mono_find_block_region_notry (MonoCompile *cfg, int offset)
{
MonoMethodHeader *header = cfg->header;
MonoExceptionClause *clause;
int i;
for (i = 0; i < header->num_clauses; ++i) {
clause = &header->clauses [i];
if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
(offset < (clause->handler_offset)))
return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
else
return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
}
}
return -1;
}
/*
* mono_get_block_region_notry:
*
* Return the region corresponding to REGION, ignoring try clauses nested inside
* finally clauses.
*/
int
mono_get_block_region_notry (MonoCompile *cfg, int region)
{
if ((region & (0xf << 4)) == MONO_REGION_TRY) {
MonoMethodHeader *header = cfg->header;
/*
* This can happen if a try clause is nested inside a finally clause.
*/
int clause_index = (region >> 8) - 1;
g_assert (clause_index >= 0 && clause_index < header->num_clauses);
region = mono_find_block_region_notry (cfg, header->clauses [clause_index].try_offset);
}
return region;
}
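/*
 * mono_find_spvar_for_region:
 *
 * Return the stack pointer save variable associated with REGION, or NULL if
 * there is none.
 */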
MonoInst *
mono_find_spvar_for_region (MonoCompile *cfg, int region)
{
region = mono_get_block_region_notry (cfg, region);
return (MonoInst *)g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
}
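/*
 * df_visit:
 *
 * Visit the basic blocks reachable from START depth-first, assigning
 * depth-first numbers and recording each block in ARRAY at its dfn index.
 */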
static void
df_visit (MonoBasicBlock *start, int *dfn, MonoBasicBlock **array)
{
int i;
array [*dfn] = start;
/* g_print ("visit %d at %p (BB%ld)\n", *dfn, start->cil_code, start->block_num); */
for (i = 0; i < start->out_count; ++i) {
if (start->out_bb [i]->dfn)
continue;
(*dfn)++;
start->out_bb [i]->dfn = *dfn;
start->out_bb [i]->df_parent = start;
array [*dfn] = start->out_bb [i];
df_visit (start->out_bb [i], dfn, array);
}
}
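/*
 * mono_reverse_branch_op:
 *
 * Return the branch opcode which tests the logical negation of the condition
 * tested by OPCODE, e.g. CEE_BEQ -> CEE_BNE_UN.
 */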
guint32
mono_reverse_branch_op (guint32 opcode)
{
static const int reverse_map [] = {
CEE_BNE_UN, CEE_BLT, CEE_BLE, CEE_BGT, CEE_BGE,
CEE_BEQ, CEE_BLT_UN, CEE_BLE_UN, CEE_BGT_UN, CEE_BGE_UN
};
static const int reverse_fmap [] = {
OP_FBNE_UN, OP_FBLT, OP_FBLE, OP_FBGT, OP_FBGE,
OP_FBEQ, OP_FBLT_UN, OP_FBLE_UN, OP_FBGT_UN, OP_FBGE_UN
};
static const int reverse_lmap [] = {
OP_LBNE_UN, OP_LBLT, OP_LBLE, OP_LBGT, OP_LBGE,
OP_LBEQ, OP_LBLT_UN, OP_LBLE_UN, OP_LBGT_UN, OP_LBGE_UN
};
static const int reverse_imap [] = {
OP_IBNE_UN, OP_IBLT, OP_IBLE, OP_IBGT, OP_IBGE,
OP_IBEQ, OP_IBLT_UN, OP_IBLE_UN, OP_IBGT_UN, OP_IBGE_UN
};
if (opcode >= CEE_BEQ && opcode <= CEE_BLT_UN) {
opcode = reverse_map [opcode - CEE_BEQ];
} else if (opcode >= OP_FBEQ && opcode <= OP_FBLT_UN) {
opcode = reverse_fmap [opcode - OP_FBEQ];
} else if (opcode >= OP_LBEQ && opcode <= OP_LBLT_UN) {
opcode = reverse_lmap [opcode - OP_LBEQ];
} else if (opcode >= OP_IBEQ && opcode <= OP_IBLT_UN) {
opcode = reverse_imap [opcode - OP_IBEQ];
} else
g_assert_not_reached ();
return opcode;
}
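/*
 * mono_type_to_store_membase:
 *
 * Return the OP_STORE*_MEMBASE_REG opcode to use for storing a value of type
 * TYPE to memory.
 */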
guint
mono_type_to_store_membase (MonoCompile *cfg, MonoType *type)
{
type = mini_get_underlying_type (type);
handle_enum:
switch (type->type) {
case MONO_TYPE_I1:
case MONO_TYPE_U1:
return OP_STOREI1_MEMBASE_REG;
case MONO_TYPE_I2:
case MONO_TYPE_U2:
return OP_STOREI2_MEMBASE_REG;
case MONO_TYPE_I4:
case MONO_TYPE_U4:
return OP_STOREI4_MEMBASE_REG;
case MONO_TYPE_I:
case MONO_TYPE_U:
case MONO_TYPE_PTR:
case MONO_TYPE_FNPTR:
return OP_STORE_MEMBASE_REG;
case MONO_TYPE_CLASS:
case MONO_TYPE_STRING:
case MONO_TYPE_OBJECT:
case MONO_TYPE_SZARRAY:
case MONO_TYPE_ARRAY:
return OP_STORE_MEMBASE_REG;
case MONO_TYPE_I8:
case MONO_TYPE_U8:
return OP_STOREI8_MEMBASE_REG;
case MONO_TYPE_R4:
return OP_STORER4_MEMBASE_REG;
case MONO_TYPE_R8:
return OP_STORER8_MEMBASE_REG;
case MONO_TYPE_VALUETYPE:
if (m_class_is_enumtype (type->data.klass)) {
type = mono_class_enum_basetype_internal (type->data.klass);
goto handle_enum;
}
if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type_internal (type)))
return OP_STOREX_MEMBASE;
return OP_STOREV_MEMBASE;
case MONO_TYPE_TYPEDBYREF:
return OP_STOREV_MEMBASE;
case MONO_TYPE_GENERICINST:
if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type_internal (type)))
return OP_STOREX_MEMBASE;
type = m_class_get_byval_arg (type->data.generic_class->container_class);
goto handle_enum;
case MONO_TYPE_VAR:
case MONO_TYPE_MVAR:
g_assert (mini_type_var_is_vt (type));
return OP_STOREV_MEMBASE;
default:
g_error ("unknown type 0x%02x in type_to_store_membase", type->type);
}
return -1;
}
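/*
 * mono_type_to_load_membase:
 *
 * Return the OP_LOAD*_MEMBASE opcode to use for loading a value of type TYPE
 * from memory.
 */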
guint
mono_type_to_load_membase (MonoCompile *cfg, MonoType *type)
{
type = mini_get_underlying_type (type);
switch (type->type) {
case MONO_TYPE_I1:
return OP_LOADI1_MEMBASE;
case MONO_TYPE_U1:
return OP_LOADU1_MEMBASE;
case MONO_TYPE_I2:
return OP_LOADI2_MEMBASE;
case MONO_TYPE_U2:
return OP_LOADU2_MEMBASE;
case MONO_TYPE_I4:
return OP_LOADI4_MEMBASE;
case MONO_TYPE_U4:
return OP_LOADU4_MEMBASE;
case MONO_TYPE_I:
case MONO_TYPE_U:
case MONO_TYPE_PTR:
case MONO_TYPE_FNPTR:
return OP_LOAD_MEMBASE;
case MONO_TYPE_CLASS:
case MONO_TYPE_STRING:
case MONO_TYPE_OBJECT:
case MONO_TYPE_SZARRAY:
case MONO_TYPE_ARRAY:
return OP_LOAD_MEMBASE;
case MONO_TYPE_I8:
case MONO_TYPE_U8:
return OP_LOADI8_MEMBASE;
case MONO_TYPE_R4:
return OP_LOADR4_MEMBASE;
case MONO_TYPE_R8:
return OP_LOADR8_MEMBASE;
case MONO_TYPE_VALUETYPE:
if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type_internal (type)))
return OP_LOADX_MEMBASE;
case MONO_TYPE_TYPEDBYREF:
return OP_LOADV_MEMBASE;
case MONO_TYPE_GENERICINST:
if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type_internal (type)))
return OP_LOADX_MEMBASE;
if (mono_type_generic_inst_is_valuetype (type))
return OP_LOADV_MEMBASE;
else
return OP_LOAD_MEMBASE;
break;
case MONO_TYPE_VAR:
case MONO_TYPE_MVAR:
g_assert (cfg->gshared);
g_assert (mini_type_var_is_vt (type));
return OP_LOADV_MEMBASE;
default:
g_error ("unknown type 0x%02x in type_to_load_membase", type->type);
}
return -1;
}
guint
mini_type_to_stind (MonoCompile* cfg, MonoType *type)
{
type = mini_get_underlying_type (type);
if (cfg->gshared && !m_type_is_byref (type) && (type->type == MONO_TYPE_VAR || type->type == MONO_TYPE_MVAR)) {
g_assert (mini_type_var_is_vt (type));
return CEE_STOBJ;
}
return mono_type_to_stind (type);
}
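/*
 * mono_op_imm_to_op:
 *
 * Return the register variant of the immediate opcode OPCODE, or -1 if there
 * is none.
 */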
int
mono_op_imm_to_op (int opcode)
{
switch (opcode) {
case OP_ADD_IMM:
#if SIZEOF_REGISTER == 4
return OP_IADD;
#else
return OP_LADD;
#endif
case OP_IADD_IMM:
return OP_IADD;
case OP_LADD_IMM:
return OP_LADD;
case OP_ISUB_IMM:
return OP_ISUB;
case OP_LSUB_IMM:
return OP_LSUB;
case OP_IMUL_IMM:
return OP_IMUL;
case OP_LMUL_IMM:
return OP_LMUL;
case OP_AND_IMM:
#if SIZEOF_REGISTER == 4
return OP_IAND;
#else
return OP_LAND;
#endif
case OP_OR_IMM:
#if SIZEOF_REGISTER == 4
return OP_IOR;
#else
return OP_LOR;
#endif
case OP_XOR_IMM:
#if SIZEOF_REGISTER == 4
return OP_IXOR;
#else
return OP_LXOR;
#endif
case OP_IAND_IMM:
return OP_IAND;
case OP_LAND_IMM:
return OP_LAND;
case OP_IOR_IMM:
return OP_IOR;
case OP_LOR_IMM:
return OP_LOR;
case OP_IXOR_IMM:
return OP_IXOR;
case OP_LXOR_IMM:
return OP_LXOR;
case OP_ISHL_IMM:
return OP_ISHL;
case OP_LSHL_IMM:
return OP_LSHL;
case OP_ISHR_IMM:
return OP_ISHR;
case OP_LSHR_IMM:
return OP_LSHR;
case OP_ISHR_UN_IMM:
return OP_ISHR_UN;
case OP_LSHR_UN_IMM:
return OP_LSHR_UN;
case OP_IDIV_IMM:
return OP_IDIV;
case OP_LDIV_IMM:
return OP_LDIV;
case OP_IDIV_UN_IMM:
return OP_IDIV_UN;
case OP_LDIV_UN_IMM:
return OP_LDIV_UN;
case OP_IREM_UN_IMM:
return OP_IREM_UN;
case OP_LREM_UN_IMM:
return OP_LREM_UN;
case OP_IREM_IMM:
return OP_IREM;
case OP_LREM_IMM:
return OP_LREM;
case OP_DIV_IMM:
#if SIZEOF_REGISTER == 4
return OP_IDIV;
#else
return OP_LDIV;
#endif
case OP_REM_IMM:
#if SIZEOF_REGISTER == 4
return OP_IREM;
#else
return OP_LREM;
#endif
case OP_ADDCC_IMM:
return OP_ADDCC;
case OP_ADC_IMM:
return OP_ADC;
case OP_SUBCC_IMM:
return OP_SUBCC;
case OP_SBB_IMM:
return OP_SBB;
case OP_IADC_IMM:
return OP_IADC;
case OP_ISBB_IMM:
return OP_ISBB;
case OP_COMPARE_IMM:
return OP_COMPARE;
case OP_ICOMPARE_IMM:
return OP_ICOMPARE;
case OP_LOCALLOC_IMM:
return OP_LOCALLOC;
}
return -1;
}
/*
* mono_decompose_op_imm:
*
 * Replace the OP_.._IMM INS with its non-IMM variant.
*/
void
mono_decompose_op_imm (MonoCompile *cfg, MonoBasicBlock *bb, MonoInst *ins)
{
int opcode2 = mono_op_imm_to_op (ins->opcode);
MonoInst *temp;
guint32 dreg;
const char *spec = INS_INFO (ins->opcode);
if (spec [MONO_INST_SRC2] == 'l') {
dreg = mono_alloc_lreg (cfg);
/* Load the 64bit constant using decomposed ops */
MONO_INST_NEW (cfg, temp, OP_ICONST);
temp->inst_c0 = ins_get_l_low (ins);
temp->dreg = MONO_LVREG_LS (dreg);
mono_bblock_insert_before_ins (bb, ins, temp);
MONO_INST_NEW (cfg, temp, OP_ICONST);
temp->inst_c0 = ins_get_l_high (ins);
temp->dreg = MONO_LVREG_MS (dreg);
} else {
dreg = mono_alloc_ireg (cfg);
MONO_INST_NEW (cfg, temp, OP_ICONST);
temp->inst_c0 = ins->inst_imm;
temp->dreg = dreg;
}
mono_bblock_insert_before_ins (bb, ins, temp);
if (opcode2 == -1)
g_error ("mono_op_imm_to_op failed for %s\n", mono_inst_name (ins->opcode));
ins->opcode = opcode2;
if (ins->opcode == OP_LOCALLOC)
ins->sreg1 = dreg;
else
ins->sreg2 = dreg;
bb->max_vreg = MAX (bb->max_vreg, cfg->next_vreg);
}
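/*
 * set_vreg_to_inst:
 *
 * Record INST as the variable backed by virtual register VREG, growing the
 * cfg->vreg_to_inst mapping as needed.
 */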
static void
set_vreg_to_inst (MonoCompile *cfg, int vreg, MonoInst *inst)
{
if (vreg >= cfg->vreg_to_inst_len) {
MonoInst **tmp = cfg->vreg_to_inst;
int size = cfg->vreg_to_inst_len;
while (vreg >= cfg->vreg_to_inst_len)
cfg->vreg_to_inst_len = cfg->vreg_to_inst_len ? cfg->vreg_to_inst_len * 2 : 32;
cfg->vreg_to_inst = (MonoInst **)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * cfg->vreg_to_inst_len);
if (size)
memcpy (cfg->vreg_to_inst, tmp, size * sizeof (MonoInst*));
}
cfg->vreg_to_inst [vreg] = inst;
}
#define mono_type_is_long(type) (!m_type_is_byref (type) && ((mono_type_get_underlying_type (type)->type == MONO_TYPE_I8) || (mono_type_get_underlying_type (type)->type == MONO_TYPE_U8)))
#define mono_type_is_float(type) (!m_type_is_byref (type) && (((type)->type == MONO_TYPE_R8) || ((type)->type == MONO_TYPE_R4)))
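/*
 * mono_compile_create_var_for_vreg:
 *
 * Create a new variable of type TYPE and opcode OPCODE (OP_LOCAL/OP_ARG) backed
 * by the virtual register VREG. On 32 bit platforms, long (and, under soft
 * float, floating point) variables also get dummy variables for their two
 * component vregs.
 */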
MonoInst*
mono_compile_create_var_for_vreg (MonoCompile *cfg, MonoType *type, int opcode, int vreg)
{
MonoInst *inst;
int num = cfg->num_varinfo;
gboolean regpair;
type = mini_get_underlying_type (type);
if ((num + 1) >= cfg->varinfo_count) {
int orig_count = cfg->varinfo_count;
cfg->varinfo_count = cfg->varinfo_count ? (cfg->varinfo_count * 2) : 32;
cfg->varinfo = (MonoInst **)g_realloc (cfg->varinfo, sizeof (MonoInst*) * cfg->varinfo_count);
cfg->vars = (MonoMethodVar *)g_realloc (cfg->vars, sizeof (MonoMethodVar) * cfg->varinfo_count);
memset (&cfg->vars [orig_count], 0, (cfg->varinfo_count - orig_count) * sizeof (MonoMethodVar));
}
cfg->stat_allocate_var++;
MONO_INST_NEW (cfg, inst, opcode);
inst->inst_c0 = num;
inst->inst_vtype = type;
inst->klass = mono_class_from_mono_type_internal (type);
mini_type_to_eval_stack_type (cfg, type, inst);
/* if set to 1 the variable is native */
inst->backend.is_pinvoke = 0;
inst->dreg = vreg;
if (mono_class_has_failure (inst->klass))
mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
if (cfg->compute_gc_maps) {
if (m_type_is_byref (type)) {
mono_mark_vreg_as_mp (cfg, vreg);
} else {
if ((MONO_TYPE_ISSTRUCT (type) && m_class_has_references (inst->klass)) || mini_type_is_reference (type)) {
inst->flags |= MONO_INST_GC_TRACK;
mono_mark_vreg_as_ref (cfg, vreg);
}
}
}
#ifdef TARGET_WASM
if (mini_type_is_reference (type))
mono_mark_vreg_as_ref (cfg, vreg);
#endif
cfg->varinfo [num] = inst;
cfg->vars [num].idx = num;
cfg->vars [num].vreg = vreg;
cfg->vars [num].range.first_use.pos.bid = 0xffff;
cfg->vars [num].reg = -1;
if (vreg != -1)
set_vreg_to_inst (cfg, vreg, inst);
#if SIZEOF_REGISTER == 4
if (mono_arch_is_soft_float ()) {
regpair = mono_type_is_long (type) || mono_type_is_float (type);
} else {
regpair = mono_type_is_long (type);
}
#else
regpair = FALSE;
#endif
if (regpair) {
MonoInst *tree;
/*
* These two cannot be allocated using create_var_for_vreg since that would
	 * put them into the cfg->varinfo array, confusing many parts of the JIT.
*/
/*
* Set flags to VOLATILE so SSA skips it.
*/
if (cfg->verbose_level >= 4) {
printf (" Create LVAR R%d (R%d, R%d)\n", inst->dreg, MONO_LVREG_LS (inst->dreg), MONO_LVREG_MS (inst->dreg));
}
if (mono_arch_is_soft_float () && cfg->opt & MONO_OPT_SSA) {
if (mono_type_is_float (type))
inst->flags = MONO_INST_VOLATILE;
}
/* Allocate a dummy MonoInst for the first vreg */
MONO_INST_NEW (cfg, tree, OP_LOCAL);
tree->dreg = MONO_LVREG_LS (inst->dreg);
if (cfg->opt & MONO_OPT_SSA)
tree->flags = MONO_INST_VOLATILE;
tree->inst_c0 = num;
tree->type = STACK_I4;
tree->inst_vtype = mono_get_int32_type ();
tree->klass = mono_class_from_mono_type_internal (tree->inst_vtype);
set_vreg_to_inst (cfg, MONO_LVREG_LS (inst->dreg), tree);
/* Allocate a dummy MonoInst for the second vreg */
MONO_INST_NEW (cfg, tree, OP_LOCAL);
tree->dreg = MONO_LVREG_MS (inst->dreg);
if (cfg->opt & MONO_OPT_SSA)
tree->flags = MONO_INST_VOLATILE;
tree->inst_c0 = num;
tree->type = STACK_I4;
tree->inst_vtype = mono_get_int32_type ();
tree->klass = mono_class_from_mono_type_internal (tree->inst_vtype);
set_vreg_to_inst (cfg, MONO_LVREG_MS (inst->dreg), tree);
}
cfg->num_varinfo++;
if (cfg->verbose_level > 2)
g_print ("created temp %d (R%d) of type %s\n", num, vreg, mono_type_get_name (type));
return inst;
}
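/*
 * mono_compile_create_var:
 *
 * Same as mono_compile_create_var_for_vreg, but allocate a new virtual register
 * for the variable based on its type.
 */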
MonoInst*
mono_compile_create_var (MonoCompile *cfg, MonoType *type, int opcode)
{
int dreg;
if (type->type == MONO_TYPE_VALUETYPE && !m_type_is_byref (type)) {
MonoClass *klass = mono_class_from_mono_type_internal (type);
if (m_class_is_enumtype (klass) && m_class_get_image (klass) == mono_get_corlib () && !strcmp (m_class_get_name (klass), "StackCrawlMark")) {
if (!(cfg->method->flags & METHOD_ATTRIBUTE_REQSECOBJ))
g_error ("Method '%s' which contains a StackCrawlMark local variable must be decorated with [System.Security.DynamicSecurityMethod].", mono_method_get_full_name (cfg->method));
}
}
type = mini_get_underlying_type (type);
if (mono_type_is_long (type))
dreg = mono_alloc_dreg (cfg, STACK_I8);
else if (mono_arch_is_soft_float () && mono_type_is_float (type))
dreg = mono_alloc_dreg (cfg, STACK_R8);
else
/* All the others are unified */
dreg = mono_alloc_preg (cfg);
return mono_compile_create_var_for_vreg (cfg, type, opcode, dreg);
}
MonoInst*
mini_get_int_to_float_spill_area (MonoCompile *cfg)
{
#ifdef TARGET_X86
if (!cfg->iconv_raw_var) {
cfg->iconv_raw_var = mono_compile_create_var (cfg, mono_get_int32_type (), OP_LOCAL);
cfg->iconv_raw_var->flags |= MONO_INST_VOLATILE; /*FIXME, use the don't regalloc flag*/
}
return cfg->iconv_raw_var;
#else
return NULL;
#endif
}
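/*
 * mono_mark_vreg_as_ref:
 *
 * Mark the virtual register VREG as holding a GC reference, growing the
 * cfg->vreg_is_ref map as needed.
 */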
void
mono_mark_vreg_as_ref (MonoCompile *cfg, int vreg)
{
if (vreg >= cfg->vreg_is_ref_len) {
gboolean *tmp = cfg->vreg_is_ref;
int size = cfg->vreg_is_ref_len;
while (vreg >= cfg->vreg_is_ref_len)
cfg->vreg_is_ref_len = cfg->vreg_is_ref_len ? cfg->vreg_is_ref_len * 2 : 32;
cfg->vreg_is_ref = (gboolean *)mono_mempool_alloc0 (cfg->mempool, sizeof (gboolean) * cfg->vreg_is_ref_len);
if (size)
memcpy (cfg->vreg_is_ref, tmp, size * sizeof (gboolean));
}
cfg->vreg_is_ref [vreg] = TRUE;
}
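/*
 * mono_mark_vreg_as_mp:
 *
 * Mark the virtual register VREG as holding a managed pointer, growing the
 * cfg->vreg_is_mp map as needed.
 */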
void
mono_mark_vreg_as_mp (MonoCompile *cfg, int vreg)
{
if (vreg >= cfg->vreg_is_mp_len) {
gboolean *tmp = cfg->vreg_is_mp;
int size = cfg->vreg_is_mp_len;
while (vreg >= cfg->vreg_is_mp_len)
cfg->vreg_is_mp_len = cfg->vreg_is_mp_len ? cfg->vreg_is_mp_len * 2 : 32;
cfg->vreg_is_mp = (gboolean *)mono_mempool_alloc0 (cfg->mempool, sizeof (gboolean) * cfg->vreg_is_mp_len);
if (size)
memcpy (cfg->vreg_is_mp, tmp, size * sizeof (gboolean));
}
cfg->vreg_is_mp [vreg] = TRUE;
}
static MonoType*
type_from_stack_type (MonoInst *ins)
{
switch (ins->type) {
case STACK_I4: return mono_get_int32_type ();
case STACK_I8: return m_class_get_byval_arg (mono_defaults.int64_class);
case STACK_PTR: return mono_get_int_type ();
case STACK_R8: return m_class_get_byval_arg (mono_defaults.double_class);
case STACK_MP:
/*
		 * this 'if' used to be commented out for no documented reason, but
		 * commenting it out breaks #80235
*/
if (ins->klass)
return m_class_get_this_arg (ins->klass);
else
return mono_class_get_byref_type (mono_defaults.object_class);
case STACK_OBJ:
/* ins->klass may not be set for ldnull.
	 * Also, if we have a boxed valuetype, we want an object class,
	 * not the valuetype class.
*/
if (ins->klass && !m_class_is_valuetype (ins->klass))
return m_class_get_byval_arg (ins->klass);
return mono_get_object_type ();
case STACK_VTYPE: return m_class_get_byval_arg (ins->klass);
default:
g_error ("stack type %d to montype not handled\n", ins->type);
}
return NULL;
}
MonoType*
mono_type_from_stack_type (MonoInst *ins)
{
return type_from_stack_type (ins);
}
/*
* mono_add_ins_to_end:
*
* Same as MONO_ADD_INS, but add INST before any branches at the end of BB.
*/
void
mono_add_ins_to_end (MonoBasicBlock *bb, MonoInst *inst)
{
int opcode;
if (!bb->code) {
MONO_ADD_INS (bb, inst);
return;
}
switch (bb->last_ins->opcode) {
case OP_BR:
case OP_BR_REG:
case CEE_BEQ:
case CEE_BGE:
case CEE_BGT:
case CEE_BLE:
case CEE_BLT:
case CEE_BNE_UN:
case CEE_BGE_UN:
case CEE_BGT_UN:
case CEE_BLE_UN:
case CEE_BLT_UN:
case OP_SWITCH:
mono_bblock_insert_before_ins (bb, bb->last_ins, inst);
break;
default:
if (MONO_IS_COND_BRANCH_OP (bb->last_ins)) {
/* Need to insert the ins before the compare */
if (bb->code == bb->last_ins) {
mono_bblock_insert_before_ins (bb, bb->last_ins, inst);
return;
}
if (bb->code->next == bb->last_ins) {
/* Only two instructions */
opcode = bb->code->opcode;
if ((opcode == OP_COMPARE) || (opcode == OP_COMPARE_IMM) || (opcode == OP_ICOMPARE) || (opcode == OP_ICOMPARE_IMM) || (opcode == OP_FCOMPARE) || (opcode == OP_LCOMPARE) || (opcode == OP_LCOMPARE_IMM) || (opcode == OP_RCOMPARE)) {
/* NEW IR */
mono_bblock_insert_before_ins (bb, bb->code, inst);
} else {
mono_bblock_insert_before_ins (bb, bb->last_ins, inst);
}
} else {
opcode = bb->last_ins->prev->opcode;
if ((opcode == OP_COMPARE) || (opcode == OP_COMPARE_IMM) || (opcode == OP_ICOMPARE) || (opcode == OP_ICOMPARE_IMM) || (opcode == OP_FCOMPARE) || (opcode == OP_LCOMPARE) || (opcode == OP_LCOMPARE_IMM) || (opcode == OP_RCOMPARE)) {
/* NEW IR */
mono_bblock_insert_before_ins (bb, bb->last_ins->prev, inst);
} else {
mono_bblock_insert_before_ins (bb, bb->last_ins, inst);
}
}
}
else
MONO_ADD_INS (bb, inst);
break;
}
}
void
mono_create_jump_table (MonoCompile *cfg, MonoInst *label, MonoBasicBlock **bbs, int num_blocks)
{
MonoJumpInfo *ji = (MonoJumpInfo *)mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfo));
MonoJumpInfoBBTable *table;
table = (MonoJumpInfoBBTable *)mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
table->table = bbs;
table->table_size = num_blocks;
ji->ip.label = label;
ji->type = MONO_PATCH_INFO_SWITCH;
ji->data.table = table;
ji->next = cfg->patch_info;
cfg->patch_info = ji;
}
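/*
 * mini_assembly_can_skip_verification:
 *
 * Return whether verification can be skipped for METHOD's assembly. This is
 * never the case for non-dynamic wrappers or for corlib itself.
 */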
gboolean
mini_assembly_can_skip_verification (MonoMethod *method)
{
MonoAssembly *assembly = m_class_get_image (method->klass)->assembly;
if (method->wrapper_type != MONO_WRAPPER_NONE && method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD)
return FALSE;
if (assembly->image == mono_defaults.corlib)
return FALSE;
return mono_assembly_has_skip_verification (assembly);
}
typedef struct {
MonoClass *vtype;
GList *active, *inactive;
GSList *slots;
} StackSlotInfo;
static gint
compare_by_interval_start_pos_func (gconstpointer a, gconstpointer b)
{
MonoMethodVar *v1 = (MonoMethodVar*)a;
MonoMethodVar *v2 = (MonoMethodVar*)b;
if (v1 == v2)
return 0;
else if (v1->interval->range && v2->interval->range)
return v1->interval->range->from - v2->interval->range->from;
else if (v1->interval->range)
return -1;
else
return 1;
}
#if 0
#define LSCAN_DEBUG(a) do { a; } while (0)
#else
#define LSCAN_DEBUG(a) do { } while (0) /* non-empty to avoid warning */
#endif
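/*
 * mono_allocate_stack_slots2:
 *
 * Variant of mono_allocate_stack_slots which uses the precise liveness
 * intervals computed by the linear scan register allocator to share stack
 * slots between variables.
 */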
static gint32*
mono_allocate_stack_slots2 (MonoCompile *cfg, gboolean backward, guint32 *stack_size, guint32 *stack_align)
{
int i, slot, offset, size;
guint32 align;
MonoMethodVar *vmv;
MonoInst *inst;
gint32 *offsets;
GList *vars = NULL, *l, *unhandled;
StackSlotInfo *scalar_stack_slots, *vtype_stack_slots, *slot_info;
MonoType *t;
int nvtypes;
int vtype_stack_slots_size = 256;
gboolean reuse_slot;
LSCAN_DEBUG (printf ("Allocate Stack Slots 2 for %s:\n", mono_method_full_name (cfg->method, TRUE)));
scalar_stack_slots = (StackSlotInfo *)mono_mempool_alloc0 (cfg->mempool, sizeof (StackSlotInfo) * MONO_TYPE_PINNED);
vtype_stack_slots = NULL;
nvtypes = 0;
offsets = (gint32 *)mono_mempool_alloc (cfg->mempool, sizeof (gint32) * cfg->num_varinfo);
for (i = 0; i < cfg->num_varinfo; ++i)
offsets [i] = -1;
for (i = cfg->locals_start; i < cfg->num_varinfo; i++) {
inst = cfg->varinfo [i];
vmv = MONO_VARINFO (cfg, i);
if ((inst->flags & MONO_INST_IS_DEAD) || inst->opcode == OP_REGVAR || inst->opcode == OP_REGOFFSET)
continue;
vars = g_list_prepend (vars, vmv);
}
vars = g_list_sort (vars, compare_by_interval_start_pos_func);
/* Sanity check */
/*
i = 0;
for (unhandled = vars; unhandled; unhandled = unhandled->next) {
MonoMethodVar *current = unhandled->data;
if (current->interval->range) {
g_assert (current->interval->range->from >= i);
i = current->interval->range->from;
}
}
*/
offset = 0;
*stack_align = 0;
for (unhandled = vars; unhandled; unhandled = unhandled->next) {
MonoMethodVar *current = (MonoMethodVar *)unhandled->data;
vmv = current;
inst = cfg->varinfo [vmv->idx];
t = mono_type_get_underlying_type (inst->inst_vtype);
if (cfg->gsharedvt && mini_is_gsharedvt_variable_type (t))
continue;
		/* inst->backend.is_pinvoke indicates native-sized value types; this is used by the
		 * pinvoke wrappers when they call functions returning structures */
if (inst->backend.is_pinvoke && MONO_TYPE_ISSTRUCT (t) && t->type != MONO_TYPE_TYPEDBYREF) {
size = mono_class_native_size (mono_class_from_mono_type_internal (t), &align);
}
else {
int ialign;
size = mini_type_stack_size (t, &ialign);
align = ialign;
if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type_internal (t)))
align = 16;
}
reuse_slot = TRUE;
if (cfg->disable_reuse_stack_slots)
reuse_slot = FALSE;
t = mini_get_underlying_type (t);
switch (t->type) {
case MONO_TYPE_GENERICINST:
if (!mono_type_generic_inst_is_valuetype (t)) {
slot_info = &scalar_stack_slots [t->type];
break;
}
/* Fall through */
case MONO_TYPE_VALUETYPE:
if (!vtype_stack_slots)
vtype_stack_slots = (StackSlotInfo *)mono_mempool_alloc0 (cfg->mempool, sizeof (StackSlotInfo) * vtype_stack_slots_size);
for (i = 0; i < nvtypes; ++i)
if (t->data.klass == vtype_stack_slots [i].vtype)
break;
if (i < nvtypes)
slot_info = &vtype_stack_slots [i];
else {
if (nvtypes == vtype_stack_slots_size) {
int new_slots_size = vtype_stack_slots_size * 2;
StackSlotInfo* new_slots = (StackSlotInfo *)mono_mempool_alloc0 (cfg->mempool, sizeof (StackSlotInfo) * new_slots_size);
memcpy (new_slots, vtype_stack_slots, sizeof (StackSlotInfo) * vtype_stack_slots_size);
vtype_stack_slots = new_slots;
vtype_stack_slots_size = new_slots_size;
}
vtype_stack_slots [nvtypes].vtype = t->data.klass;
slot_info = &vtype_stack_slots [nvtypes];
nvtypes ++;
}
if (cfg->disable_reuse_ref_stack_slots)
reuse_slot = FALSE;
break;
case MONO_TYPE_PTR:
case MONO_TYPE_I:
case MONO_TYPE_U:
#if TARGET_SIZEOF_VOID_P == 4
case MONO_TYPE_I4:
#else
case MONO_TYPE_I8:
#endif
if (cfg->disable_ref_noref_stack_slot_share) {
slot_info = &scalar_stack_slots [MONO_TYPE_I];
break;
}
/* Fall through */
case MONO_TYPE_CLASS:
case MONO_TYPE_OBJECT:
case MONO_TYPE_ARRAY:
case MONO_TYPE_SZARRAY:
case MONO_TYPE_STRING:
/* Share non-float stack slots of the same size */
slot_info = &scalar_stack_slots [MONO_TYPE_CLASS];
if (cfg->disable_reuse_ref_stack_slots)
reuse_slot = FALSE;
break;
default:
slot_info = &scalar_stack_slots [t->type];
}
slot = 0xffffff;
if (cfg->comp_done & MONO_COMP_LIVENESS) {
int pos;
gboolean changed;
//printf ("START %2d %08x %08x\n", vmv->idx, vmv->range.first_use.abs_pos, vmv->range.last_use.abs_pos);
if (!current->interval->range) {
if (inst->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT))
pos = ~0;
else {
/* Dead */
inst->flags |= MONO_INST_IS_DEAD;
continue;
}
}
else
pos = current->interval->range->from;
LSCAN_DEBUG (printf ("process R%d ", inst->dreg));
if (current->interval->range)
LSCAN_DEBUG (mono_linterval_print (current->interval));
LSCAN_DEBUG (printf ("\n"));
/* Check for intervals in active which expired or inactive */
changed = TRUE;
/* FIXME: Optimize this */
while (changed) {
changed = FALSE;
for (l = slot_info->active; l != NULL; l = l->next) {
MonoMethodVar *v = (MonoMethodVar*)l->data;
if (v->interval->last_range->to < pos) {
slot_info->active = g_list_delete_link (slot_info->active, l);
slot_info->slots = g_slist_prepend_mempool (cfg->mempool, slot_info->slots, GINT_TO_POINTER (offsets [v->idx]));
LSCAN_DEBUG (printf ("Interval R%d has expired, adding 0x%x to slots\n", cfg->varinfo [v->idx]->dreg, offsets [v->idx]));
changed = TRUE;
break;
}
else if (!mono_linterval_covers (v->interval, pos)) {
slot_info->inactive = g_list_append (slot_info->inactive, v);
slot_info->active = g_list_delete_link (slot_info->active, l);
LSCAN_DEBUG (printf ("Interval R%d became inactive\n", cfg->varinfo [v->idx]->dreg));
changed = TRUE;
break;
}
}
}
/* Check for intervals in inactive which expired or active */
changed = TRUE;
/* FIXME: Optimize this */
while (changed) {
changed = FALSE;
for (l = slot_info->inactive; l != NULL; l = l->next) {
MonoMethodVar *v = (MonoMethodVar*)l->data;
if (v->interval->last_range->to < pos) {
slot_info->inactive = g_list_delete_link (slot_info->inactive, l);
// FIXME: Enabling this seems to cause impossible to debug crashes
//slot_info->slots = g_slist_prepend_mempool (cfg->mempool, slot_info->slots, GINT_TO_POINTER (offsets [v->idx]));
LSCAN_DEBUG (printf ("Interval R%d has expired, adding 0x%x to slots\n", cfg->varinfo [v->idx]->dreg, offsets [v->idx]));
changed = TRUE;
break;
}
else if (mono_linterval_covers (v->interval, pos)) {
slot_info->active = g_list_append (slot_info->active, v);
slot_info->inactive = g_list_delete_link (slot_info->inactive, l);
LSCAN_DEBUG (printf ("\tInterval R%d became active\n", cfg->varinfo [v->idx]->dreg));
changed = TRUE;
break;
}
}
}
/*
* This also handles the case when the variable is used in an
* exception region, as liveness info is not computed there.
*/
/*
* FIXME: All valuetypes are marked as INDIRECT because of LDADDR
* opcodes.
*/
if (! (inst->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT))) {
if (slot_info->slots) {
slot = GPOINTER_TO_INT (slot_info->slots->data);
slot_info->slots = slot_info->slots->next;
}
/* FIXME: We might want to consider the inactive intervals as well if slot_info->slots is empty */
slot_info->active = mono_varlist_insert_sorted (cfg, slot_info->active, vmv, TRUE);
}
}
#if 0
{
static int count = 0;
count ++;
if (count == atoi (g_getenv ("COUNT3")))
printf ("LAST: %s\n", mono_method_full_name (cfg->method, TRUE));
if (count > atoi (g_getenv ("COUNT3")))
slot = 0xffffff;
else
mono_print_ins (inst);
}
#endif
LSCAN_DEBUG (printf ("R%d %s -> 0x%x\n", inst->dreg, mono_type_full_name (t), slot));
if (inst->flags & MONO_INST_LMF) {
size = MONO_ABI_SIZEOF (MonoLMF);
align = sizeof (target_mgreg_t);
reuse_slot = FALSE;
}
if (!reuse_slot)
slot = 0xffffff;
if (slot == 0xffffff) {
			/*
			 * Always align valuetypes to at least sizeof (target_mgreg_t) to allow more
			 * efficient copying (and to work around the fact that OP_MEMCPY
			 * and OP_MEMSET ignore alignment).
			 */
if (MONO_TYPE_ISSTRUCT (t)) {
align = MAX (align, sizeof (target_mgreg_t));
align = MAX (align, mono_class_min_align (mono_class_from_mono_type_internal (t)));
}
if (backward) {
offset += size;
offset += align - 1;
offset &= ~(align - 1);
slot = offset;
}
else {
offset += align - 1;
offset &= ~(align - 1);
slot = offset;
offset += size;
}
if (*stack_align == 0)
*stack_align = align;
}
offsets [vmv->idx] = slot;
}
g_list_free (vars);
for (i = 0; i < MONO_TYPE_PINNED; ++i) {
if (scalar_stack_slots [i].active)
g_list_free (scalar_stack_slots [i].active);
}
for (i = 0; i < nvtypes; ++i) {
if (vtype_stack_slots [i].active)
g_list_free (vtype_stack_slots [i].active);
}
cfg->stat_locals_stack_size += offset;
*stack_size = offset;
return offsets;
}
/*
* mono_allocate_stack_slots:
*
* Allocate stack slots for all non register allocated variables using a
* linear scan algorithm.
* Returns: an array of stack offsets.
* STACK_SIZE is set to the amount of stack space needed.
* STACK_ALIGN is set to the alignment needed by the locals area.
*/
gint32*
mono_allocate_stack_slots (MonoCompile *cfg, gboolean backward, guint32 *stack_size, guint32 *stack_align)
{
int i, slot, offset, size;
guint32 align;
MonoMethodVar *vmv;
MonoInst *inst;
gint32 *offsets;
GList *vars = NULL, *l;
StackSlotInfo *scalar_stack_slots, *vtype_stack_slots, *slot_info;
MonoType *t;
int nvtypes;
int vtype_stack_slots_size = 256;
gboolean reuse_slot;
if ((cfg->num_varinfo > 0) && MONO_VARINFO (cfg, 0)->interval)
return mono_allocate_stack_slots2 (cfg, backward, stack_size, stack_align);
scalar_stack_slots = (StackSlotInfo *)mono_mempool_alloc0 (cfg->mempool, sizeof (StackSlotInfo) * MONO_TYPE_PINNED);
vtype_stack_slots = NULL;
nvtypes = 0;
offsets = (gint32 *)mono_mempool_alloc (cfg->mempool, sizeof (gint32) * cfg->num_varinfo);
for (i = 0; i < cfg->num_varinfo; ++i)
offsets [i] = -1;
for (i = cfg->locals_start; i < cfg->num_varinfo; i++) {
inst = cfg->varinfo [i];
vmv = MONO_VARINFO (cfg, i);
if ((inst->flags & MONO_INST_IS_DEAD) || inst->opcode == OP_REGVAR || inst->opcode == OP_REGOFFSET)
continue;
vars = g_list_prepend (vars, vmv);
}
vars = mono_varlist_sort (cfg, vars, 0);
offset = 0;
*stack_align = sizeof (target_mgreg_t);
for (l = vars; l; l = l->next) {
vmv = (MonoMethodVar *)l->data;
inst = cfg->varinfo [vmv->idx];
t = mono_type_get_underlying_type (inst->inst_vtype);
if (cfg->gsharedvt && mini_is_gsharedvt_variable_type (t))
continue;
		/* inst->backend.is_pinvoke indicates native-sized value types; this is used by the
		 * pinvoke wrappers when they call functions returning structures */
if (inst->backend.is_pinvoke && MONO_TYPE_ISSTRUCT (t) && t->type != MONO_TYPE_TYPEDBYREF) {
size = mono_class_native_size (mono_class_from_mono_type_internal (t), &align);
} else {
int ialign;
size = mini_type_stack_size (t, &ialign);
align = ialign;
if (mono_class_has_failure (mono_class_from_mono_type_internal (t)))
mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type_internal (t)))
align = 16;
}
reuse_slot = TRUE;
if (cfg->disable_reuse_stack_slots)
reuse_slot = FALSE;
t = mini_get_underlying_type (t);
switch (t->type) {
case MONO_TYPE_GENERICINST:
if (!mono_type_generic_inst_is_valuetype (t)) {
slot_info = &scalar_stack_slots [t->type];
break;
}
/* Fall through */
case MONO_TYPE_VALUETYPE:
if (!vtype_stack_slots)
vtype_stack_slots = (StackSlotInfo *)mono_mempool_alloc0 (cfg->mempool, sizeof (StackSlotInfo) * vtype_stack_slots_size);
for (i = 0; i < nvtypes; ++i)
if (t->data.klass == vtype_stack_slots [i].vtype)
break;
if (i < nvtypes)
slot_info = &vtype_stack_slots [i];
else {
if (nvtypes == vtype_stack_slots_size) {
int new_slots_size = vtype_stack_slots_size * 2;
StackSlotInfo* new_slots = (StackSlotInfo *)mono_mempool_alloc0 (cfg->mempool, sizeof (StackSlotInfo) * new_slots_size);
memcpy (new_slots, vtype_stack_slots, sizeof (StackSlotInfo) * vtype_stack_slots_size);
vtype_stack_slots = new_slots;
vtype_stack_slots_size = new_slots_size;
}
vtype_stack_slots [nvtypes].vtype = t->data.klass;
slot_info = &vtype_stack_slots [nvtypes];
nvtypes ++;
}
if (cfg->disable_reuse_ref_stack_slots)
reuse_slot = FALSE;
break;
case MONO_TYPE_PTR:
case MONO_TYPE_I:
case MONO_TYPE_U:
#if TARGET_SIZEOF_VOID_P == 4
case MONO_TYPE_I4:
#else
case MONO_TYPE_I8:
#endif
if (cfg->disable_ref_noref_stack_slot_share) {
slot_info = &scalar_stack_slots [MONO_TYPE_I];
break;
}
/* Fall through */
case MONO_TYPE_CLASS:
case MONO_TYPE_OBJECT:
case MONO_TYPE_ARRAY:
case MONO_TYPE_SZARRAY:
case MONO_TYPE_STRING:
/* Share non-float stack slots of the same size */
slot_info = &scalar_stack_slots [MONO_TYPE_CLASS];
if (cfg->disable_reuse_ref_stack_slots)
reuse_slot = FALSE;
break;
case MONO_TYPE_VAR:
case MONO_TYPE_MVAR:
slot_info = &scalar_stack_slots [t->type];
break;
default:
slot_info = &scalar_stack_slots [t->type];
break;
}
slot = 0xffffff;
if (cfg->comp_done & MONO_COMP_LIVENESS) {
//printf ("START %2d %08x %08x\n", vmv->idx, vmv->range.first_use.abs_pos, vmv->range.last_use.abs_pos);
/* expire old intervals in active */
while (slot_info->active) {
MonoMethodVar *amv = (MonoMethodVar *)slot_info->active->data;
if (amv->range.last_use.abs_pos > vmv->range.first_use.abs_pos)
break;
//printf ("EXPIR %2d %08x %08x C%d R%d\n", amv->idx, amv->range.first_use.abs_pos, amv->range.last_use.abs_pos, amv->spill_costs, amv->reg);
slot_info->active = g_list_delete_link (slot_info->active, slot_info->active);
slot_info->slots = g_slist_prepend_mempool (cfg->mempool, slot_info->slots, GINT_TO_POINTER (offsets [amv->idx]));
}
/*
* This also handles the case when the variable is used in an
* exception region, as liveness info is not computed there.
*/
/*
* FIXME: All valuetypes are marked as INDIRECT because of LDADDR
* opcodes.
*/
if (! (inst->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT))) {
if (slot_info->slots) {
slot = GPOINTER_TO_INT (slot_info->slots->data);
slot_info->slots = slot_info->slots->next;
}
slot_info->active = mono_varlist_insert_sorted (cfg, slot_info->active, vmv, TRUE);
}
}
#if 0
{
static int count = 0;
count ++;
if (count == atoi (g_getenv ("COUNT")))
printf ("LAST: %s\n", mono_method_full_name (cfg->method, TRUE));
if (count > atoi (g_getenv ("COUNT")))
slot = 0xffffff;
else
mono_print_ins (inst);
}
#endif
if (inst->flags & MONO_INST_LMF) {
/*
* This variable represents a MonoLMF structure, which has no corresponding
* CLR type, so hard-code its size/alignment.
*/
size = MONO_ABI_SIZEOF (MonoLMF);
align = sizeof (target_mgreg_t);
reuse_slot = FALSE;
}
if (!reuse_slot)
slot = 0xffffff;
if (slot == 0xffffff) {
			/*
			 * Always align valuetypes to at least sizeof (target_mgreg_t) to allow more
			 * efficient copying (and to work around the fact that OP_MEMCPY
			 * and OP_MEMSET ignore alignment).
			 */
if (MONO_TYPE_ISSTRUCT (t)) {
align = MAX (align, sizeof (target_mgreg_t));
align = MAX (align, mono_class_min_align (mono_class_from_mono_type_internal (t)));
/*
* Align the size too so the code generated for passing vtypes in
* registers doesn't overwrite random locals.
*/
size = (size + (align - 1)) & ~(align -1);
}
if (backward) {
offset += size;
offset += align - 1;
offset &= ~(align - 1);
slot = offset;
}
else {
offset += align - 1;
offset &= ~(align - 1);
slot = offset;
offset += size;
}
*stack_align = MAX (*stack_align, align);
}
offsets [vmv->idx] = slot;
}
g_list_free (vars);
for (i = 0; i < MONO_TYPE_PINNED; ++i) {
if (scalar_stack_slots [i].active)
g_list_free (scalar_stack_slots [i].active);
}
for (i = 0; i < nvtypes; ++i) {
if (vtype_stack_slots [i].active)
g_list_free (vtype_stack_slots [i].active);
}
cfg->stat_locals_stack_size += offset;
*stack_size = offset;
return offsets;
}
#define EMUL_HIT_SHIFT 3
#define EMUL_HIT_MASK ((1 << EMUL_HIT_SHIFT) - 1)
/* small hit bitmap cache */
static mono_byte emul_opcode_hit_cache [(OP_LAST>>EMUL_HIT_SHIFT) + 1] = {0};
static short emul_opcode_num = 0;
static short emul_opcode_alloced = 0;
static short *emul_opcode_opcodes;
static MonoJitICallInfo **emul_opcode_map;
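/*
 * mono_find_jit_opcode_emulation:
 *
 * Return the JIT icall registered to emulate OPCODE, or NULL. A small bitmap
 * cache is checked first so the common miss case avoids the linear search.
 */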
MonoJitICallInfo *
mono_find_jit_opcode_emulation (int opcode)
{
g_assert (opcode >= 0 && opcode <= OP_LAST);
if (emul_opcode_hit_cache [opcode >> (EMUL_HIT_SHIFT + 3)] & (1 << (opcode & EMUL_HIT_MASK))) {
int i;
for (i = 0; i < emul_opcode_num; ++i) {
if (emul_opcode_opcodes [i] == opcode)
return emul_opcode_map [i];
}
}
return NULL;
}
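/*
 * mini_register_opcode_emulation:
 *
 * Register FUNC, described by INFO/NAME/SIG, as the emulation function for
 * OPCODE, growing the emulation tables and updating the hit cache.
 */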
void
mini_register_opcode_emulation (int opcode, MonoJitICallInfo *info, const char *name, MonoMethodSignature *sig, gpointer func, const char *symbol, gboolean no_wrapper)
{
g_assert (info);
g_assert (!sig->hasthis);
g_assert (sig->param_count < 3);
mono_register_jit_icall_info (info, func, name, sig, no_wrapper, symbol);
if (emul_opcode_num >= emul_opcode_alloced) {
int incr = emul_opcode_alloced? emul_opcode_alloced/2: 16;
emul_opcode_alloced += incr;
emul_opcode_map = (MonoJitICallInfo **)g_realloc (emul_opcode_map, sizeof (emul_opcode_map [0]) * emul_opcode_alloced);
emul_opcode_opcodes = (short *)g_realloc (emul_opcode_opcodes, sizeof (emul_opcode_opcodes [0]) * emul_opcode_alloced);
}
emul_opcode_map [emul_opcode_num] = info;
emul_opcode_opcodes [emul_opcode_num] = opcode;
emul_opcode_num++;
emul_opcode_hit_cache [opcode >> (EMUL_HIT_SHIFT + 3)] |= (1 << (opcode & EMUL_HIT_MASK));
}
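/*
 * print_dfn:
 *
 * Print the IR of CFG in depth-first block order, together with the
 * predecessors, successors and dominator information of each block.
 */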
static void
print_dfn (MonoCompile *cfg)
{
int i, j;
char *code;
MonoBasicBlock *bb;
MonoInst *c;
{
char *method_name = mono_method_full_name (cfg->method, TRUE);
g_print ("IR code for method %s\n", method_name);
g_free (method_name);
}
for (i = 0; i < cfg->num_bblocks; ++i) {
bb = cfg->bblocks [i];
/*if (bb->cil_code) {
char* code1, *code2;
code1 = mono_disasm_code_one (NULL, cfg->method, bb->cil_code, NULL);
if (bb->last_ins->cil_code)
code2 = mono_disasm_code_one (NULL, cfg->method, bb->last_ins->cil_code, NULL);
else
code2 = g_strdup ("");
code1 [strlen (code1) - 1] = 0;
code = g_strdup_printf ("%s -> %s", code1, code2);
g_free (code1);
g_free (code2);
} else*/
code = g_strdup ("\n");
g_print ("\nBB%d (%d) (len: %d): %s", bb->block_num, i, bb->cil_length, code);
MONO_BB_FOR_EACH_INS (bb, c) {
mono_print_ins_index (-1, c);
}
g_print ("\tprev:");
for (j = 0; j < bb->in_count; ++j) {
g_print (" BB%d", bb->in_bb [j]->block_num);
}
g_print ("\t\tsucc:");
for (j = 0; j < bb->out_count; ++j) {
g_print (" BB%d", bb->out_bb [j]->block_num);
}
g_print ("\n\tidom: BB%d\n", bb->idom? bb->idom->block_num: -1);
if (bb->idom)
g_assert (mono_bitset_test_fast (bb->dominators, bb->idom->dfn));
if (bb->dominators)
mono_blockset_print (cfg, bb->dominators, "\tdominators", bb->idom? bb->idom->dfn: -1);
if (bb->dfrontier)
mono_blockset_print (cfg, bb->dfrontier, "\tdfrontier", -1);
g_free (code);
}
g_print ("\n");
}
void
mono_bblock_add_inst (MonoBasicBlock *bb, MonoInst *inst)
{
MONO_ADD_INS (bb, inst);
}
void
mono_bblock_insert_after_ins (MonoBasicBlock *bb, MonoInst *ins, MonoInst *ins_to_insert)
{
if (ins == NULL) {
ins = bb->code;
bb->code = ins_to_insert;
/* Link with next */
ins_to_insert->next = ins;
if (ins)
ins->prev = ins_to_insert;
if (bb->last_ins == NULL)
bb->last_ins = ins_to_insert;
} else {
/* Link with next */
ins_to_insert->next = ins->next;
if (ins->next)
ins->next->prev = ins_to_insert;
/* Link with previous */
ins->next = ins_to_insert;
ins_to_insert->prev = ins;
if (bb->last_ins == ins)
bb->last_ins = ins_to_insert;
}
}
void
mono_bblock_insert_before_ins (MonoBasicBlock *bb, MonoInst *ins, MonoInst *ins_to_insert)
{
if (ins == NULL) {
ins = bb->code;
if (ins)
ins->prev = ins_to_insert;
bb->code = ins_to_insert;
ins_to_insert->next = ins;
if (bb->last_ins == NULL)
bb->last_ins = ins_to_insert;
} else {
/* Link with previous */
if (ins->prev)
ins->prev->next = ins_to_insert;
ins_to_insert->prev = ins->prev;
/* Link with next */
ins->prev = ins_to_insert;
ins_to_insert->next = ins;
if (bb->code == ins)
bb->code = ins_to_insert;
}
}
/*
* mono_verify_bblock:
*
* Verify that the next and prev pointers are consistent inside the instructions in BB.
*/
void
mono_verify_bblock (MonoBasicBlock *bb)
{
MonoInst *ins, *prev;
prev = NULL;
for (ins = bb->code; ins; ins = ins->next) {
g_assert (ins->prev == prev);
prev = ins;
}
if (bb->last_ins)
g_assert (!bb->last_ins->next);
}
/*
* mono_verify_cfg:
*
* Perform consistency checks on the JIT data structures and the IR
*/
void
mono_verify_cfg (MonoCompile *cfg)
{
MonoBasicBlock *bb;
for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
mono_verify_bblock (bb);
}
// This will free many fields in cfg to save memory. Note that this must be
// safe to call multiple times, i.e. it must be idempotent.
void
mono_empty_compile (MonoCompile *cfg)
{
mono_free_loop_info (cfg);
// These live in the mempool, and so must be freed
// first
for (GSList *l = cfg->headers_to_free; l; l = l->next) {
mono_metadata_free_mh ((MonoMethodHeader *)l->data);
}
cfg->headers_to_free = NULL;
if (cfg->mempool) {
//mono_mempool_stats (cfg->mempool);
mono_mempool_destroy (cfg->mempool);
cfg->mempool = NULL;
}
g_free (cfg->varinfo);
cfg->varinfo = NULL;
g_free (cfg->vars);
cfg->vars = NULL;
if (cfg->rs) {
mono_regstate_free (cfg->rs);
cfg->rs = NULL;
}
}
void
mono_destroy_compile (MonoCompile *cfg)
{
mono_empty_compile (cfg);
mono_metadata_free_mh (cfg->header);
g_hash_table_destroy (cfg->spvars);
g_hash_table_destroy (cfg->exvars);
g_list_free (cfg->ldstr_list);
g_hash_table_destroy (cfg->token_info_hash);
g_hash_table_destroy (cfg->abs_patches);
mono_debug_free_method (cfg);
g_free (cfg->varinfo);
g_free (cfg->vars);
g_free (cfg->exception_message);
g_free (cfg);
}
void
mono_add_patch_info (MonoCompile *cfg, int ip, MonoJumpInfoType type, gconstpointer target)
{
if (type == MONO_PATCH_INFO_NONE)
return;
MonoJumpInfo *ji = (MonoJumpInfo *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfo));
ji->ip.i = ip;
ji->type = type;
ji->data.target = target;
ji->next = cfg->patch_info;
cfg->patch_info = ji;
}
void
mono_add_patch_info_rel (MonoCompile *cfg, int ip, MonoJumpInfoType type, gconstpointer target, int relocation)
{
if (type == MONO_PATCH_INFO_NONE)
return;
MonoJumpInfo *ji = (MonoJumpInfo *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfo));
ji->ip.i = ip;
ji->type = type;
ji->relocation = relocation;
ji->data.target = target;
ji->next = cfg->patch_info;
cfg->patch_info = ji;
}
void
mono_remove_patch_info (MonoCompile *cfg, int ip)
{
MonoJumpInfo **ji = &cfg->patch_info;
while (*ji) {
if ((*ji)->ip.i == ip)
*ji = (*ji)->next;
else
ji = &((*ji)->next);
}
}
void
mono_add_seq_point (MonoCompile *cfg, MonoBasicBlock *bb, MonoInst *ins, int native_offset)
{
ins->inst_offset = native_offset;
g_ptr_array_add (cfg->seq_points, ins);
if (bb) {
bb->seq_points = g_slist_prepend_mempool (cfg->mempool, bb->seq_points, ins);
bb->last_seq_point = ins;
}
}
void
mono_add_var_location (MonoCompile *cfg, MonoInst *var, gboolean is_reg, int reg, int offset, int from, int to)
{
MonoDwarfLocListEntry *entry = (MonoDwarfLocListEntry *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoDwarfLocListEntry));
if (is_reg)
g_assert (offset == 0);
entry->is_reg = is_reg;
entry->reg = reg;
entry->offset = offset;
entry->from = from;
entry->to = to;
if (var == cfg->args [0])
cfg->this_loclist = g_slist_append_mempool (cfg->mempool, cfg->this_loclist, entry);
else if (var == cfg->rgctx_var)
cfg->rgctx_loclist = g_slist_append_mempool (cfg->mempool, cfg->rgctx_loclist, entry);
}
static void
mono_apply_volatile (MonoInst *inst, MonoBitSet *set, gsize index)
{
inst->flags |= mono_bitset_test_safe (set, index) ? MONO_INST_VOLATILE : 0;
}
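/*
 * mono_compile_create_vars:
 *
 * Create the MonoInst variables for the return value, the arguments and the
 * locals of the method being compiled, plus arch specific variables.
 */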
static void
mono_compile_create_vars (MonoCompile *cfg)
{
MonoMethodSignature *sig;
MonoMethodHeader *header;
int i;
header = cfg->header;
sig = mono_method_signature_internal (cfg->method);
if (!MONO_TYPE_IS_VOID (sig->ret)) {
cfg->ret = mono_compile_create_var (cfg, sig->ret, OP_ARG);
/* Inhibit optimizations */
cfg->ret->flags |= MONO_INST_VOLATILE;
}
if (cfg->verbose_level > 2)
g_print ("creating vars\n");
cfg->args = (MonoInst **)mono_mempool_alloc0 (cfg->mempool, (sig->param_count + sig->hasthis) * sizeof (MonoInst*));
if (sig->hasthis) {
MonoInst* arg = mono_compile_create_var (cfg, m_class_get_this_arg (cfg->method->klass), OP_ARG);
mono_apply_volatile (arg, header->volatile_args, 0);
cfg->args [0] = arg;
cfg->this_arg = arg;
}
for (i = 0; i < sig->param_count; ++i) {
MonoInst* arg = mono_compile_create_var (cfg, sig->params [i], OP_ARG);
mono_apply_volatile (arg, header->volatile_args, i + sig->hasthis);
cfg->args [i + sig->hasthis] = arg;
}
if (cfg->verbose_level > 2) {
if (cfg->ret) {
printf ("\treturn : ");
mono_print_ins (cfg->ret);
}
if (sig->hasthis) {
printf ("\tthis: ");
mono_print_ins (cfg->args [0]);
}
for (i = 0; i < sig->param_count; ++i) {
printf ("\targ [%d]: ", i);
mono_print_ins (cfg->args [i + sig->hasthis]);
}
}
cfg->locals_start = cfg->num_varinfo;
cfg->locals = (MonoInst **)mono_mempool_alloc0 (cfg->mempool, header->num_locals * sizeof (MonoInst*));
if (cfg->verbose_level > 2)
g_print ("creating locals\n");
for (i = 0; i < header->num_locals; ++i) {
if (cfg->verbose_level > 2)
g_print ("\tlocal [%d]: ", i);
cfg->locals [i] = mono_compile_create_var (cfg, header->locals [i], OP_LOCAL);
mono_apply_volatile (cfg->locals [i], header->volatile_locals, i);
}
if (cfg->verbose_level > 2)
g_print ("locals done\n");
#ifdef ENABLE_LLVM
if (COMPILE_LLVM (cfg))
mono_llvm_create_vars (cfg);
else
mono_arch_create_vars (cfg);
#else
mono_arch_create_vars (cfg);
#endif
if (cfg->method->save_lmf && cfg->create_lmf_var) {
MonoInst *lmf_var = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL);
lmf_var->flags |= MONO_INST_VOLATILE;
lmf_var->flags |= MONO_INST_LMF;
cfg->lmf_var = lmf_var;
}
}
void
mono_print_code (MonoCompile *cfg, const char* msg)
{
MonoBasicBlock *bb;
for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
mono_print_bb (bb, msg);
}
static void
mono_postprocess_patches (MonoCompile *cfg)
{
MonoJumpInfo *patch_info;
int i;
for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
switch (patch_info->type) {
case MONO_PATCH_INFO_ABS: {
/*
* Change patches of type MONO_PATCH_INFO_ABS into patches describing the
* absolute address.
*/
if (cfg->abs_patches) {
MonoJumpInfo *abs_ji = (MonoJumpInfo *)g_hash_table_lookup (cfg->abs_patches, patch_info->data.target);
if (abs_ji) {
patch_info->type = abs_ji->type;
patch_info->data.target = abs_ji->data.target;
}
}
break;
}
case MONO_PATCH_INFO_SWITCH: {
gpointer *table;
if (cfg->method->dynamic) {
table = (void **)mono_code_manager_reserve (cfg->dynamic_info->code_mp, sizeof (gpointer) * patch_info->data.table->table_size);
} else {
table = (void **)mono_mem_manager_code_reserve (cfg->mem_manager, sizeof (gpointer) * patch_info->data.table->table_size);
}
for (i = 0; i < patch_info->data.table->table_size; i++) {
/* Might be NULL if the switch is eliminated */
if (patch_info->data.table->table [i]) {
g_assert (patch_info->data.table->table [i]->native_offset);
table [i] = GINT_TO_POINTER (patch_info->data.table->table [i]->native_offset);
} else {
table [i] = NULL;
}
}
patch_info->data.table->table = (MonoBasicBlock**)table;
break;
}
default:
/* do nothing */
break;
}
}
}
/* These patches require the JitInfo of the compiled method to already be in place when they are used */
static void
mono_postprocess_patches_after_ji_publish (MonoCompile *cfg)
{
MonoJumpInfo *patch_info;
for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
switch (patch_info->type) {
case MONO_PATCH_INFO_METHOD_JUMP: {
unsigned char *ip = cfg->native_code + patch_info->ip.i;
mini_register_jump_site (patch_info->data.method, ip);
break;
}
default:
/* do nothing */
break;
}
}
}
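/*
 * mono_codegen:
 *
 * Emit native code for CFG: lower and register allocate each basic block,
 * emit the prolog, the code of the basic blocks and the epilog, copy the
 * result to its final location, then resolve and apply the patches.
 */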
void
mono_codegen (MonoCompile *cfg)
{
MonoBasicBlock *bb;
int max_epilog_size;
guint8 *code;
MonoMemoryManager *code_mem_manager = cfg->mem_manager;
guint unwindlen = 0;
for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
cfg->spill_count = 0;
/* we reuse dfn here */
/* bb->dfn = bb_count++; */
mono_arch_lowering_pass (cfg, bb);
if (cfg->opt & MONO_OPT_PEEPHOLE)
mono_arch_peephole_pass_1 (cfg, bb);
mono_local_regalloc (cfg, bb);
if (cfg->opt & MONO_OPT_PEEPHOLE)
mono_arch_peephole_pass_2 (cfg, bb);
if (cfg->gen_seq_points && !cfg->gen_sdb_seq_points)
mono_bb_deduplicate_op_il_seq_points (cfg, bb);
}
code = mono_arch_emit_prolog (cfg);
set_code_cursor (cfg, code);
cfg->prolog_end = cfg->code_len;
cfg->cfa_reg = cfg->cur_cfa_reg;
cfg->cfa_offset = cfg->cur_cfa_offset;
mono_debug_open_method (cfg);
/* emit code all basic blocks */
for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
bb->native_offset = cfg->code_len;
bb->real_native_offset = cfg->code_len;
//if ((bb == cfg->bb_entry) || !(bb->region == -1 && !bb->dfn))
mono_arch_output_basic_block (cfg, bb);
bb->native_length = cfg->code_len - bb->native_offset;
if (bb == cfg->bb_exit) {
cfg->epilog_begin = cfg->code_len;
mono_arch_emit_epilog (cfg);
cfg->epilog_end = cfg->code_len;
}
if (bb->clause_holes) {
GList *tmp;
for (tmp = bb->clause_holes; tmp; tmp = tmp->prev)
mono_cfg_add_try_hole (cfg, ((MonoLeaveClause *) tmp->data)->clause, cfg->native_code + bb->native_offset, bb);
}
}
mono_arch_emit_exceptions (cfg);
max_epilog_size = 0;
cfg->code_size = cfg->code_len + max_epilog_size;
/* fixme: align to MONO_ARCH_CODE_ALIGNMENT */
#ifdef MONO_ARCH_HAVE_UNWIND_TABLE
if (!cfg->compile_aot)
unwindlen = mono_arch_unwindinfo_init_method_unwind_info (cfg);
#endif
if (cfg->method->dynamic) {
/* Allocate the code into a separate memory pool so it can be freed */
cfg->dynamic_info = g_new0 (MonoJitDynamicMethodInfo, 1);
cfg->dynamic_info->code_mp = mono_code_manager_new_dynamic ();
MonoJitMemoryManager *jit_mm = (MonoJitMemoryManager*)cfg->jit_mm;
jit_mm_lock (jit_mm);
if (!jit_mm->dynamic_code_hash)
jit_mm->dynamic_code_hash = g_hash_table_new (NULL, NULL);
g_hash_table_insert (jit_mm->dynamic_code_hash, cfg->method, cfg->dynamic_info);
jit_mm_unlock (jit_mm);
code = (guint8 *)mono_code_manager_reserve (cfg->dynamic_info->code_mp, cfg->code_size + cfg->thunk_area + unwindlen);
} else {
code = (guint8 *)mono_mem_manager_code_reserve (code_mem_manager, cfg->code_size + cfg->thunk_area + unwindlen);
}
mono_codeman_enable_write ();
if (cfg->thunk_area) {
cfg->thunks_offset = cfg->code_size + unwindlen;
cfg->thunks = code + cfg->thunks_offset;
memset (cfg->thunks, 0, cfg->thunk_area);
}
g_assert (code);
memcpy (code, cfg->native_code, cfg->code_len);
g_free (cfg->native_code);
cfg->native_code = code;
code = cfg->native_code + cfg->code_len;
/* g_assert (((int)cfg->native_code & (MONO_ARCH_CODE_ALIGNMENT - 1)) == 0); */
mono_postprocess_patches (cfg);
#ifdef VALGRIND_JIT_REGISTER_MAP
if (valgrind_register){
char* nm = mono_method_full_name (cfg->method, TRUE);
VALGRIND_JIT_REGISTER_MAP (nm, cfg->native_code, cfg->native_code + cfg->code_len);
g_free (nm);
}
#endif
if (cfg->verbose_level > 0) {
char* nm = mono_method_get_full_name (cfg->method);
g_print ("Method %s emitted at %p to %p (code length %d)\n",
nm,
cfg->native_code, cfg->native_code + cfg->code_len, cfg->code_len);
g_free (nm);
}
{
gboolean is_generic = FALSE;
if (cfg->method->is_inflated || mono_method_get_generic_container (cfg->method) ||
mono_class_is_gtd (cfg->method->klass) || mono_class_is_ginst (cfg->method->klass)) {
is_generic = TRUE;
}
if (cfg->gshared)
g_assert (is_generic);
}
#ifdef MONO_ARCH_HAVE_SAVE_UNWIND_INFO
mono_arch_save_unwind_info (cfg);
#endif
{
MonoJumpInfo *ji;
gpointer target;
for (ji = cfg->patch_info; ji; ji = ji->next) {
if (cfg->compile_aot) {
switch (ji->type) {
case MONO_PATCH_INFO_BB:
case MONO_PATCH_INFO_LABEL:
break;
default:
/* No need to patch these */
continue;
}
}
if (ji->type == MONO_PATCH_INFO_NONE)
continue;
target = mono_resolve_patch_target (cfg->method, cfg->native_code, ji, cfg->run_cctors, cfg->error);
if (!is_ok (cfg->error)) {
mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
return;
}
mono_arch_patch_code_new (cfg, cfg->native_code, ji, target);
}
}
if (cfg->method->dynamic) {
mono_code_manager_commit (cfg->dynamic_info->code_mp, cfg->native_code, cfg->code_size, cfg->code_len);
} else {
mono_mem_manager_code_commit (code_mem_manager, cfg->native_code, cfg->code_size, cfg->code_len);
}
mono_codeman_disable_write ();
MONO_PROFILER_RAISE (jit_code_buffer, (cfg->native_code, cfg->code_len, MONO_PROFILER_CODE_BUFFER_METHOD, cfg->method));
mono_arch_flush_icache (cfg->native_code, cfg->code_len);
mono_debug_close_method (cfg);
#ifdef MONO_ARCH_HAVE_UNWIND_TABLE
if (!cfg->compile_aot)
mono_arch_unwindinfo_install_method_unwind_info (&cfg->arch.unwindinfo, cfg->native_code, cfg->code_len);
#endif
}
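/* Recursively mark BB and every bblock reachable from it with BB_VISITED */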
static void
compute_reachable (MonoBasicBlock *bb)
{
int i;
if (!(bb->flags & BB_VISITED)) {
bb->flags |= BB_VISITED;
for (i = 0; i < bb->out_count; ++i)
compute_reachable (bb->out_bb [i]);
}
}
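/*
 * mono_bb_ordering:
 *
 * Number the basic blocks in depth-first order. If some bblocks were not
 * reached by the visit, recompute reachability from the entry bblock and
 * the exception handlers and delete the code in unreachable bblocks, since
 * it may be inconsistent.
 */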
static void
mono_bb_ordering (MonoCompile *cfg)
{
int dfn = 0;
/* Depth-first ordering on basic blocks */
cfg->bblocks = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * (cfg->num_bblocks + 1));
cfg->max_block_num = cfg->num_bblocks;
df_visit (cfg->bb_entry, &dfn, cfg->bblocks);
#if defined(__GNUC__) && __GNUC__ == 7 && defined(__x86_64__)
/* workaround for an AMD specific issue that only happens on GCC 7 so far,
* for more information see https://github.com/mono/mono/issues/9298 */
mono_memory_barrier ();
#endif
g_assertf (cfg->num_bblocks >= dfn, "cfg->num_bblocks=%d, dfn=%d\n", cfg->num_bblocks, dfn);
if (cfg->num_bblocks != dfn + 1) {
MonoBasicBlock *bb;
cfg->num_bblocks = dfn + 1;
/* remove unreachable bblocks, since the code in them may be
 * inconsistent (accesses to dead variables, for example) */
for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
bb->flags &= ~BB_VISITED;
compute_reachable (cfg->bb_entry);
for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
if (bb->flags & BB_EXCEPTION_HANDLER)
compute_reachable (bb);
for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
if (!(bb->flags & BB_VISITED)) {
if (cfg->verbose_level > 1)
g_print ("found unreachable code in BB%d\n", bb->block_num);
bb->code = bb->last_ins = NULL;
while (bb->out_count)
mono_unlink_bblock (cfg, bb, bb->out_bb [0]);
}
}
for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
bb->flags &= ~BB_VISITED;
}
}
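/* Add an explicit OP_BR to bblocks which would otherwise fall through into an out-of-line bblock */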
static void
mono_handle_out_of_line_bblock (MonoCompile *cfg)
{
MonoBasicBlock *bb;
for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
if (bb->next_bb && bb->next_bb->out_of_line && bb->last_ins && !MONO_IS_BRANCH_OP (bb->last_ins)) {
MonoInst *ins;
MONO_INST_NEW (cfg, ins, OP_BR);
MONO_ADD_INS (bb, ins);
ins->inst_target_bb = bb->next_bb;
}
}
}
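/*
 * create_jit_info:
 *
 * Create the MonoJitInfo descriptor for the compiled method, filling in the
 * generic sharing info, the try block holes, the arch specific EH info and
 * the native offsets of the exception clauses.
 */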
static MonoJitInfo*
create_jit_info (MonoCompile *cfg, MonoMethod *method_to_compile)
{
GSList *tmp;
MonoMethodHeader *header;
MonoJitInfo *jinfo;
MonoJitInfoFlags flags = JIT_INFO_NONE;
int num_clauses, num_holes = 0;
guint32 stack_size = 0;
g_assert (method_to_compile == cfg->method);
header = cfg->header;
if (cfg->gshared)
flags |= JIT_INFO_HAS_GENERIC_JIT_INFO;
if (cfg->arch_eh_jit_info) {
MonoJitArgumentInfo *arg_info;
MonoMethodSignature *sig = mono_method_signature_internal (cfg->method_to_register);
/*
* This cannot be computed during stack walking, as
* mono_arch_get_argument_info () is not signal safe.
*/
arg_info = g_newa (MonoJitArgumentInfo, sig->param_count + 1);
stack_size = mono_arch_get_argument_info (sig, sig->param_count, arg_info);
if (stack_size)
flags |= JIT_INFO_HAS_ARCH_EH_INFO;
}
if (cfg->has_unwind_info_for_epilog && !(flags & JIT_INFO_HAS_ARCH_EH_INFO))
flags |= JIT_INFO_HAS_ARCH_EH_INFO;
if (cfg->thunk_area)
flags |= JIT_INFO_HAS_THUNK_INFO;
if (cfg->try_block_holes) {
for (tmp = cfg->try_block_holes; tmp; tmp = tmp->next) {
TryBlockHole *hole = (TryBlockHole *)tmp->data;
MonoExceptionClause *ec = hole->clause;
int hole_end = hole->basic_block->native_offset + hole->basic_block->native_length;
MonoBasicBlock *clause_last_bb = cfg->cil_offset_to_bb [ec->try_offset + ec->try_len];
g_assert (clause_last_bb);
/* Holes at the end of a try region can be represented by simply reducing the size of the block itself. */
if (clause_last_bb->native_offset != hole_end)
++num_holes;
}
if (num_holes)
flags |= JIT_INFO_HAS_TRY_BLOCK_HOLES;
if (G_UNLIKELY (cfg->verbose_level >= 4))
printf ("Number of try block holes %d\n", num_holes);
}
if (COMPILE_LLVM (cfg)) {
num_clauses = cfg->llvm_ex_info_len;
} else {
num_clauses = header->num_clauses;
int dead_clauses = 0;
for (int i = 0; i < header->num_clauses; ++i)
if (cfg->clause_is_dead [i])
dead_clauses ++;
num_clauses -= dead_clauses;
}
if (cfg->method->dynamic)
jinfo = (MonoJitInfo *)g_malloc0 (mono_jit_info_size (flags, num_clauses, num_holes));
else
jinfo = (MonoJitInfo *)mono_mem_manager_alloc0 (cfg->mem_manager, mono_jit_info_size (flags, num_clauses, num_holes));
jinfo_try_holes_size += num_holes * sizeof (MonoTryBlockHoleJitInfo);
mono_jit_info_init (jinfo, cfg->method_to_register, cfg->native_code, cfg->code_len, flags, num_clauses, num_holes);
if (COMPILE_LLVM (cfg))
jinfo->from_llvm = TRUE;
if (cfg->gshared) {
MonoInst *inst;
MonoGenericJitInfo *gi;
GSList *loclist = NULL;
gi = mono_jit_info_get_generic_jit_info (jinfo);
g_assert (gi);
if (cfg->method->dynamic)
gi->generic_sharing_context = g_new0 (MonoGenericSharingContext, 1);
else
gi->generic_sharing_context = (MonoGenericSharingContext *)mono_mem_manager_alloc0 (cfg->mem_manager, sizeof (MonoGenericSharingContext));
mini_init_gsctx (NULL, cfg->gsctx_context, gi->generic_sharing_context);
if ((method_to_compile->flags & METHOD_ATTRIBUTE_STATIC) ||
mini_method_get_context (method_to_compile)->method_inst ||
m_class_is_valuetype (method_to_compile->klass)) {
g_assert (cfg->rgctx_var);
}
gi->has_this = 1;
if ((method_to_compile->flags & METHOD_ATTRIBUTE_STATIC) ||
mini_method_get_context (method_to_compile)->method_inst ||
m_class_is_valuetype (method_to_compile->klass)) {
inst = cfg->rgctx_var;
if (!COMPILE_LLVM (cfg))
g_assert (inst->opcode == OP_REGOFFSET);
loclist = cfg->rgctx_loclist;
} else {
inst = cfg->args [0];
loclist = cfg->this_loclist;
}
if (loclist) {
/* Needed to handle async exceptions */
GSList *l;
int i;
gi->nlocs = g_slist_length (loclist);
if (cfg->method->dynamic)
gi->locations = (MonoDwarfLocListEntry *)g_malloc0 (gi->nlocs * sizeof (MonoDwarfLocListEntry));
else
gi->locations = (MonoDwarfLocListEntry *)mono_mem_manager_alloc0 (cfg->mem_manager, gi->nlocs * sizeof (MonoDwarfLocListEntry));
i = 0;
for (l = loclist; l; l = l->next) {
memcpy (&(gi->locations [i]), l->data, sizeof (MonoDwarfLocListEntry));
i ++;
}
}
if (COMPILE_LLVM (cfg)) {
g_assert (cfg->llvm_this_reg != -1);
gi->this_in_reg = 0;
gi->this_reg = cfg->llvm_this_reg;
gi->this_offset = cfg->llvm_this_offset;
} else if (inst->opcode == OP_REGVAR) {
gi->this_in_reg = 1;
gi->this_reg = inst->dreg;
} else {
g_assert (inst->opcode == OP_REGOFFSET);
#ifdef TARGET_X86
g_assert (inst->inst_basereg == X86_EBP);
#elif defined(TARGET_AMD64)
g_assert (inst->inst_basereg == X86_EBP || inst->inst_basereg == X86_ESP);
#endif
g_assert (inst->inst_offset >= G_MININT32 && inst->inst_offset <= G_MAXINT32);
gi->this_in_reg = 0;
gi->this_reg = inst->inst_basereg;
gi->this_offset = inst->inst_offset;
}
}
if (num_holes) {
MonoTryBlockHoleTableJitInfo *table;
int i;
table = mono_jit_info_get_try_block_hole_table_info (jinfo);
table->num_holes = (guint16)num_holes;
i = 0;
for (tmp = cfg->try_block_holes; tmp; tmp = tmp->next) {
guint32 start_bb_offset;
MonoTryBlockHoleJitInfo *hole;
TryBlockHole *hole_data = (TryBlockHole *)tmp->data;
MonoExceptionClause *ec = hole_data->clause;
int hole_end = hole_data->basic_block->native_offset + hole_data->basic_block->native_length;
MonoBasicBlock *clause_last_bb = cfg->cil_offset_to_bb [ec->try_offset + ec->try_len];
g_assert (clause_last_bb);
/* Holes at the end of a try region can be represented by simply reducing the size of the block itself. */
if (clause_last_bb->native_offset == hole_end)
continue;
start_bb_offset = hole_data->start_offset - hole_data->basic_block->native_offset;
hole = &table->holes [i++];
hole->clause = hole_data->clause - &header->clauses [0];
hole->offset = (guint32)hole_data->start_offset;
hole->length = (guint16)(hole_data->basic_block->native_length - start_bb_offset);
if (G_UNLIKELY (cfg->verbose_level >= 4))
printf ("\tTry block hole at eh clause %d offset %x length %x\n", hole->clause, hole->offset, hole->length);
}
g_assert (i == num_holes);
}
if (jinfo->has_arch_eh_info) {
MonoArchEHJitInfo *info;
info = mono_jit_info_get_arch_eh_info (jinfo);
info->stack_size = stack_size;
}
if (cfg->thunk_area) {
MonoThunkJitInfo *info;
info = mono_jit_info_get_thunk_info (jinfo);
info->thunks_offset = cfg->thunks_offset;
info->thunks_size = cfg->thunk_area;
}
if (COMPILE_LLVM (cfg)) {
if (num_clauses)
memcpy (&jinfo->clauses [0], &cfg->llvm_ex_info [0], num_clauses * sizeof (MonoJitExceptionInfo));
} else {
int eindex = 0;
for (int i = 0; i < header->num_clauses; i++) {
MonoExceptionClause *ec = &header->clauses [i];
MonoJitExceptionInfo *ei = &jinfo->clauses [eindex];
MonoBasicBlock *tblock;
MonoInst *exvar;
if (cfg->clause_is_dead [i])
continue;
eindex ++;
ei->flags = ec->flags;
if (G_UNLIKELY (cfg->verbose_level >= 4))
printf ("IL clause: try 0x%x-0x%x handler 0x%x-0x%x filter 0x%x\n", ec->try_offset, ec->try_offset + ec->try_len, ec->handler_offset, ec->handler_offset + ec->handler_len, ec->flags == MONO_EXCEPTION_CLAUSE_FILTER ? ec->data.filter_offset : 0);
exvar = mono_find_exvar_for_offset (cfg, ec->handler_offset);
ei->exvar_offset = exvar ? exvar->inst_offset : 0;
if (ei->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
tblock = cfg->cil_offset_to_bb [ec->data.filter_offset];
g_assert (tblock);
ei->data.filter = cfg->native_code + tblock->native_offset;
} else {
ei->data.catch_class = ec->data.catch_class;
}
tblock = cfg->cil_offset_to_bb [ec->try_offset];
g_assert (tblock);
g_assert (tblock->native_offset);
ei->try_start = cfg->native_code + tblock->native_offset;
if (tblock->extend_try_block) {
/*
* Extend the try block backwards to include parts of the previous call
* instruction.
*/
ei->try_start = (guint8*)ei->try_start - cfg->backend->monitor_enter_adjustment;
}
if (ec->try_offset + ec->try_len < header->code_size)
tblock = cfg->cil_offset_to_bb [ec->try_offset + ec->try_len];
else
tblock = cfg->bb_exit;
if (G_UNLIKELY (cfg->verbose_level >= 4))
printf ("looking for end of try [%d, %d] -> %p (code size %d)\n", ec->try_offset, ec->try_len, tblock, header->code_size);
g_assert (tblock);
if (!tblock->native_offset) {
int j, end;
for (j = ec->try_offset + ec->try_len, end = ec->try_offset; j >= end; --j) {
MonoBasicBlock *bb = cfg->cil_offset_to_bb [j];
if (bb && bb->native_offset) {
tblock = bb;
break;
}
}
}
ei->try_end = cfg->native_code + tblock->native_offset;
g_assert (tblock->native_offset);
tblock = cfg->cil_offset_to_bb [ec->handler_offset];
g_assert (tblock);
ei->handler_start = cfg->native_code + tblock->native_offset;
for (tmp = cfg->try_block_holes; tmp; tmp = tmp->next) {
TryBlockHole *hole = (TryBlockHole *)tmp->data;
gpointer hole_end = cfg->native_code + (hole->basic_block->native_offset + hole->basic_block->native_length);
if (hole->clause == ec && hole_end == ei->try_end) {
if (G_UNLIKELY (cfg->verbose_level >= 4))
printf ("\tShortening try block %d from %x to %x\n", i, (int)((guint8*)ei->try_end - cfg->native_code), hole->start_offset);
ei->try_end = cfg->native_code + hole->start_offset;
break;
}
}
if (ec->flags == MONO_EXCEPTION_CLAUSE_FINALLY) {
int end_offset;
if (ec->handler_offset + ec->handler_len < header->code_size) {
tblock = cfg->cil_offset_to_bb [ec->handler_offset + ec->handler_len];
if (tblock->native_offset) {
end_offset = tblock->native_offset;
} else {
int j, end;
for (j = ec->handler_offset + ec->handler_len, end = ec->handler_offset; j >= end; --j) {
MonoBasicBlock *bb = cfg->cil_offset_to_bb [j];
if (bb && bb->native_offset) {
tblock = bb;
break;
}
}
end_offset = tblock->native_offset + tblock->native_length;
}
} else {
end_offset = cfg->epilog_begin;
}
ei->data.handler_end = cfg->native_code + end_offset;
}
/* Keep try_start/end non-authenticated, they are never branched to */
//ei->try_start = MINI_ADDR_TO_FTNPTR (ei->try_start);
//ei->try_end = MINI_ADDR_TO_FTNPTR (ei->try_end);
ei->handler_start = MINI_ADDR_TO_FTNPTR (ei->handler_start);
if (ei->flags == MONO_EXCEPTION_CLAUSE_FILTER)
ei->data.filter = MINI_ADDR_TO_FTNPTR (ei->data.filter);
else if (ei->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
ei->data.handler_end = MINI_ADDR_TO_FTNPTR (ei->data.handler_end);
}
}
if (G_UNLIKELY (cfg->verbose_level >= 4)) {
int i;
for (i = 0; i < jinfo->num_clauses; i++) {
MonoJitExceptionInfo *ei = &jinfo->clauses [i];
int start = (guint8*)ei->try_start - cfg->native_code;
int end = (guint8*)ei->try_end - cfg->native_code;
int handler = (guint8*)ei->handler_start - cfg->native_code;
int handler_end = (guint8*)ei->data.handler_end - cfg->native_code;
printf ("JitInfo EH clause %d flags %x try %x-%x handler %x-%x\n", i, ei->flags, start, end, handler, handler_end);
}
}
if (cfg->encoded_unwind_ops) {
/* Generated by LLVM */
jinfo->unwind_info = mono_cache_unwind_info (cfg->encoded_unwind_ops, cfg->encoded_unwind_ops_len);
g_free (cfg->encoded_unwind_ops);
} else if (cfg->unwind_ops) {
guint32 info_len;
guint8 *unwind_info = mono_unwind_ops_encode (cfg->unwind_ops, &info_len);
guint32 unwind_desc;
unwind_desc = mono_cache_unwind_info (unwind_info, info_len);
if (cfg->has_unwind_info_for_epilog) {
MonoArchEHJitInfo *info;
info = mono_jit_info_get_arch_eh_info (jinfo);
g_assert (info);
info->epilog_size = cfg->code_len - cfg->epilog_begin;
}
jinfo->unwind_info = unwind_desc;
g_free (unwind_info);
} else {
jinfo->unwind_info = cfg->used_int_regs;
}
return jinfo;
}
/* Return whether METHOD is a gsharedvt method */
static gboolean
is_gsharedvt_method (MonoMethod *method)
{
MonoGenericContext *context;
MonoGenericInst *inst;
int i;
if (!method->is_inflated)
return FALSE;
context = mono_method_get_context (method);
inst = context->class_inst;
if (inst) {
for (i = 0; i < inst->type_argc; ++i)
if (mini_is_gsharedvt_gparam (inst->type_argv [i]))
return TRUE;
}
inst = context->method_inst;
if (inst) {
for (i = 0; i < inst->type_argc; ++i)
if (mini_is_gsharedvt_gparam (inst->type_argv [i]))
return TRUE;
}
return FALSE;
}
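/* Return whether METHOD is inflated with a generic context containing open type variables */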
static gboolean
is_open_method (MonoMethod *method)
{
MonoGenericContext *context;
if (!method->is_inflated)
return FALSE;
context = mono_method_get_context (method);
if (context->class_inst && context->class_inst->is_open)
return TRUE;
if (context->method_inst && context->method_inst->is_open)
return TRUE;
return FALSE;
}
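/* Insert an OP_NOP into every empty bblock, as a workaround for the graph dumper, which can't handle empty bblocks */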
static void
mono_insert_nop_in_empty_bb (MonoCompile *cfg)
{
MonoBasicBlock *bb;
for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
if (bb->code)
continue;
MonoInst *nop;
MONO_INST_NEW (cfg, nop, OP_NOP);
MONO_ADD_INS (bb, nop);
}
}
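/*
 * insert_safepoint:
 *
 * Insert a GC safe point into BBLOCK: load the polling address and emit an
 * OP_GC_SAFE_POINT on it. In handler bblocks the safe point goes after the
 * OP_START_HANDLER/OP_GET_EX_OBJ opcodes which have to come first, in the
 * entry bblock it goes at the end, and in other bblocks at the start.
 */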
static void
insert_safepoint (MonoCompile *cfg, MonoBasicBlock *bblock)
{
MonoInst *poll_addr, *ins;
if (cfg->disable_gc_safe_points)
return;
if (cfg->verbose_level > 1)
printf ("ADDING SAFE POINT TO BB %d\n", bblock->block_num);
g_assert (mini_safepoints_enabled ());
NEW_AOTCONST (cfg, poll_addr, MONO_PATCH_INFO_GC_SAFE_POINT_FLAG, (gpointer)&mono_polling_required);
MONO_INST_NEW (cfg, ins, OP_GC_SAFE_POINT);
ins->sreg1 = poll_addr->dreg;
if (bblock->flags & BB_EXCEPTION_HANDLER) {
MonoInst *eh_op = bblock->code;
if (eh_op && eh_op->opcode != OP_START_HANDLER && eh_op->opcode != OP_GET_EX_OBJ) {
eh_op = NULL;
} else {
MonoInst *next_eh_op = eh_op ? eh_op->next : NULL;
// skip all EH related ops
while (next_eh_op && (next_eh_op->opcode == OP_START_HANDLER || next_eh_op->opcode == OP_GET_EX_OBJ)) {
eh_op = next_eh_op;
next_eh_op = eh_op->next;
}
}
mono_bblock_insert_after_ins (bblock, eh_op, poll_addr);
mono_bblock_insert_after_ins (bblock, poll_addr, ins);
} else if (bblock == cfg->bb_entry) {
mono_bblock_insert_after_ins (bblock, bblock->last_ins, poll_addr);
mono_bblock_insert_after_ins (bblock, poll_addr, ins);
} else {
mono_bblock_insert_before_ins (bblock, NULL, poll_addr);
mono_bblock_insert_after_ins (bblock, poll_addr, ins);
}
}
/*
 * This code inserts safepoints into managed code at important code paths.
 * Those are:
 * - the first basic block
 * - landing BBs for exception handlers
 * - loop body starts
 */
static void
insert_safepoints (MonoCompile *cfg)
{
MonoBasicBlock *bb;
g_assert (mini_safepoints_enabled ());
if (COMPILE_LLVM (cfg)) {
if (!cfg->llvm_only) {
/* We rely on LLVM's safepoints insertion capabilities. */
if (cfg->verbose_level > 1)
printf ("SKIPPING SAFEPOINTS for code compiled with LLVM\n");
return;
}
}
if (cfg->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
WrapperInfo *info = mono_marshal_get_wrapper_info (cfg->method);
/* These wrappers are called from the wrapper for the polling function, leading to potential stack overflow */
if (info && info->subtype == WRAPPER_SUBTYPE_ICALL_WRAPPER &&
(info->d.icall.jit_icall_id == MONO_JIT_ICALL_mono_threads_state_poll ||
info->d.icall.jit_icall_id == MONO_JIT_ICALL_mono_thread_interruption_checkpoint ||
info->d.icall.jit_icall_id == MONO_JIT_ICALL_mono_threads_exit_gc_safe_region_unbalanced)) {
if (cfg->verbose_level > 1)
printf ("SKIPPING SAFEPOINTS for the polling function icall\n");
return;
}
}
if (cfg->method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED) {
if (cfg->verbose_level > 1)
printf ("SKIPPING SAFEPOINTS for native-to-managed wrappers.\n");
return;
}
if (cfg->method->wrapper_type == MONO_WRAPPER_OTHER) {
WrapperInfo *info = mono_marshal_get_wrapper_info (cfg->method);
if (info && (info->subtype == WRAPPER_SUBTYPE_INTERP_IN || info->subtype == WRAPPER_SUBTYPE_INTERP_LMF)) {
/* These wrappers shouldn't do any icalls */
if (cfg->verbose_level > 1)
printf ("SKIPPING SAFEPOINTS for interp-in wrappers.\n");
return;
}
}
if (cfg->method->wrapper_type == MONO_WRAPPER_WRITE_BARRIER) {
if (cfg->verbose_level > 1)
printf ("SKIPPING SAFEPOINTS for write barrier wrappers.\n");
return;
}
if (cfg->verbose_level > 1)
printf ("INSERTING SAFEPOINTS\n");
if (cfg->verbose_level > 2)
mono_print_code (cfg, "BEFORE SAFEPOINTS");
/* If the method contains
 * (1) no calls (so it's a leaf method)
 * (2) and no loops
 * we can skip the GC safepoint on method entry. */
gboolean requires_safepoint = cfg->has_calls;
for (bb = cfg->bb_entry->next_bb; bb; bb = bb->next_bb) {
if (bb->loop_body_start || (bb->flags & BB_EXCEPTION_HANDLER)) {
requires_safepoint = TRUE;
insert_safepoint (cfg, bb);
}
}
if (requires_safepoint)
insert_safepoint (cfg, cfg->bb_entry);
if (cfg->verbose_level > 2)
mono_print_code (cfg, "AFTER SAFEPOINTS");
}
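/*
 * mono_insert_branches_between_bblocks:
 *
 * Make control flow between non-consecutive bblocks explicit: a conditional
 * branch whose true target is the next bblock is inverted (only for non-fp
 * branches, since inverting fp branches is not safe in the presence of
 * NaNs), otherwise an unconditional OP_BR to the false target is appended.
 */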
static void
mono_insert_branches_between_bblocks (MonoCompile *cfg)
{
MonoBasicBlock *bb;
/* Add branches between non-consecutive bblocks */
for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
if (bb->last_ins && MONO_IS_COND_BRANCH_OP (bb->last_ins) &&
bb->last_ins->inst_false_bb && bb->next_bb != bb->last_ins->inst_false_bb) {
/* we are careful when inverting, since bugs like #59580
* could show up when dealing with NaNs.
*/
if (MONO_IS_COND_BRANCH_NOFP(bb->last_ins) && bb->next_bb == bb->last_ins->inst_true_bb) {
MonoBasicBlock *tmp = bb->last_ins->inst_true_bb;
bb->last_ins->inst_true_bb = bb->last_ins->inst_false_bb;
bb->last_ins->inst_false_bb = tmp;
bb->last_ins->opcode = mono_reverse_branch_op (bb->last_ins->opcode);
} else {
MonoInst *inst = (MonoInst *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst));
inst->opcode = OP_BR;
inst->inst_target_bb = bb->last_ins->inst_false_bb;
mono_bblock_add_inst (bb, inst);
}
}
}
if (cfg->verbose_level >= 4) {
for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
MonoInst *tree = bb->code;
g_print ("DUMP BLOCK %d:\n", bb->block_num);
if (!tree)
continue;
for (; tree; tree = tree->next) {
mono_print_ins_index (-1, tree);
}
}
}
/* FIXME: */
for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
bb->max_vreg = cfg->next_vreg;
}
}
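/*
 * remove_empty_finally_pass:
 *
 * Remove finally clauses whose handler has no side effects, nullifying
 * their OP_START_HANDLER/OP_ENDFINALLY opcodes and the OP_CALL_HANDLER
 * opcodes which invoke them. Currently only done in llvm-only mode.
 */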
static G_GNUC_UNUSED void
remove_empty_finally_pass (MonoCompile *cfg)
{
MonoBasicBlock *bb;
MonoInst *ins;
gboolean remove_call_handler = FALSE;
// FIXME: other configurations
if (!cfg->llvm_only)
return;
for (int i = 0; i < cfg->header->num_clauses; ++i) {
MonoExceptionClause *clause = &cfg->header->clauses [i];
if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY) {
MonoInst *first, *last;
bb = cfg->cil_offset_to_bb [clause->handler_offset];
g_assert (bb);
/* Support only 1 bb for now */
first = mono_bb_first_inst (bb, 0);
if (first->opcode != OP_START_HANDLER)
break;
gboolean empty = TRUE;
while (TRUE) {
if (bb->out_count > 1) {
empty = FALSE;
break;
}
if (bb->flags & BB_HAS_SIDE_EFFECTS) {
empty = FALSE;
break;
}
if (bb->out_count == 0)
break;
if (mono_bb_last_inst (bb, 0)->opcode == OP_ENDFINALLY)
break;
bb = bb->out_bb [0];
}
if (empty) {
/*
* Avoid doing this in nested clauses, because it might mess up the EH code generated by
* the llvm backend.
*/
for (int j = 0; j < cfg->header->num_clauses; ++j) {
MonoExceptionClause *clause2 = &cfg->header->clauses [j];
if (i != j && MONO_OFFSET_IN_CLAUSE (clause2, clause->handler_offset))
empty = FALSE;
}
}
if (empty) {
/* Nullify OP_START_HANDLER */
NULLIFY_INS (first);
last = mono_bb_last_inst (bb, 0);
if (last->opcode == OP_ENDFINALLY)
NULLIFY_INS (last);
if (cfg->verbose_level > 1)
g_print ("removed empty finally clause %d.\n", i);
/* Mark the handler bb as not used anymore */
bb = cfg->cil_offset_to_bb [clause->handler_offset];
bb->flags &= ~BB_EXCEPTION_HANDLER;
cfg->clause_is_dead [i] = TRUE;
remove_call_handler = TRUE;
}
}
}
if (remove_call_handler) {
/* Remove OP_CALL_HANDLER opcodes pointing to the removed finally blocks */
for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
MONO_BB_FOR_EACH_INS (bb, ins) {
if (ins->opcode == OP_CALL_HANDLER && ins->inst_target_bb && !(ins->inst_target_bb->flags & BB_EXCEPTION_HANDLER)) {
NULLIFY_INS (ins);
for (MonoInst *ins2 = ins->next; ins2; ins2 = ins2->next)
NULLIFY_INS (ins2);
break;
}
}
}
}
}
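/* Fill out BACKEND with the capabilities of the architecture mono was compiled for, based on the MONO_ARCH_ defines */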
static void
init_backend (MonoBackend *backend)
{
#ifdef MONO_ARCH_NEED_GOT_VAR
backend->need_got_var = 1;
#endif
#ifdef MONO_ARCH_HAVE_CARD_TABLE_WBARRIER
backend->have_card_table_wb = 1;
#endif
#ifdef MONO_ARCH_HAVE_OP_GENERIC_CLASS_INIT
backend->have_op_generic_class_init = 1;
#endif
#ifdef MONO_ARCH_EMULATE_MUL_DIV
backend->emulate_mul_div = 1;
#endif
#ifdef MONO_ARCH_EMULATE_DIV
backend->emulate_div = 1;
#endif
#if !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
backend->emulate_long_shift_opts = 1;
#endif
#ifdef MONO_ARCH_HAVE_OBJC_GET_SELECTOR
backend->have_objc_get_selector = 1;
#endif
#ifdef MONO_ARCH_HAVE_GENERALIZED_IMT_TRAMPOLINE
backend->have_generalized_imt_trampoline = 1;
#endif
#ifdef MONO_ARCH_GSHARED_SUPPORTED
backend->gshared_supported = 1;
#endif
if (MONO_ARCH_USE_FPSTACK)
backend->use_fpstack = 1;
// Does the ABI have a volatile non-parameter register, so tailcall
// can pass context to generics or interfaces?
backend->have_volatile_non_param_register = MONO_ARCH_HAVE_VOLATILE_NON_PARAM_REGISTER;
#ifdef MONO_ARCH_HAVE_OP_TAILCALL_MEMBASE
backend->have_op_tailcall_membase = 1;
#endif
#ifdef MONO_ARCH_HAVE_OP_TAILCALL_REG
backend->have_op_tailcall_reg = 1;
#endif
#ifndef MONO_ARCH_MONITOR_ENTER_ADJUSTMENT
backend->monitor_enter_adjustment = 1;
#else
backend->monitor_enter_adjustment = MONO_ARCH_MONITOR_ENTER_ADJUSTMENT;
#endif
#if defined(MONO_ARCH_ILP32)
backend->ilp32 = 1;
#endif
#ifdef MONO_ARCH_NEED_DIV_CHECK
backend->need_div_check = 1;
#endif
#ifdef NO_UNALIGNED_ACCESS
backend->no_unaligned_access = 1;
#endif
#ifdef MONO_ARCH_DYN_CALL_PARAM_AREA
backend->dyn_call_param_area = MONO_ARCH_DYN_CALL_PARAM_AREA;
#endif
#ifdef MONO_ARCH_NO_DIV_WITH_MUL
backend->disable_div_with_mul = 1;
#endif
#ifdef MONO_ARCH_EXPLICIT_NULL_CHECKS
backend->explicit_null_checks = 1;
#endif
#ifdef MONO_ARCH_HAVE_OPTIMIZED_DIV
backend->optimized_div = 1;
#endif
#ifdef MONO_ARCH_FORCE_FLOAT32
backend->force_float32 = 1;
#endif
}
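/* Return whether SIMD intrinsics can be used when compiling CFG */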
static gboolean
is_simd_supported (MonoCompile *cfg)
{
#ifdef DISABLE_SIMD
return FALSE;
#endif
// FIXME: Clean this up
#ifdef TARGET_WASM
if ((mini_get_cpu_features (cfg) & MONO_CPU_WASM_SIMD) == 0)
return FALSE;
#else
if (cfg->llvm_only)
return FALSE;
#endif
return TRUE;
}
/* Determine how an rgctx is passed to a method */
MonoRgctxAccess
mini_get_rgctx_access_for_method (MonoMethod *method)
{
/* gshared dim methods use an mrgctx */
if (mini_method_is_default_method (method))
return MONO_RGCTX_ACCESS_MRGCTX;
if (mono_method_get_context (method)->method_inst)
return MONO_RGCTX_ACCESS_MRGCTX;
if (method->flags & METHOD_ATTRIBUTE_STATIC || m_class_is_valuetype (method->klass))
return MONO_RGCTX_ACCESS_VTABLE;
return MONO_RGCTX_ACCESS_THIS;
}
/*
* mini_method_compile:
* @method: the method to compile
* @opts: the optimization flags to use
* @flags: compilation flags
 * @parts: debug flag
 * @aot_method_index: the index of the method in the AOT image, in AOT mode
 *
 * Returns: a MonoCompile* pointer. Caller must check the exception_type
 * field in the returned struct to see if compilation succeeded.
*/
MonoCompile*
mini_method_compile (MonoMethod *method, guint32 opts, JitFlags flags, int parts, int aot_method_index)
{
MonoMethodHeader *header;
MonoMethodSignature *sig;
MonoCompile *cfg;
int i;
gboolean try_generic_shared, try_llvm = FALSE;
MonoMethod *method_to_compile, *method_to_register;
gboolean method_is_gshared = FALSE;
gboolean run_cctors = (flags & JIT_FLAG_RUN_CCTORS) ? 1 : 0;
gboolean compile_aot = (flags & JIT_FLAG_AOT) ? 1 : 0;
gboolean full_aot = (flags & JIT_FLAG_FULL_AOT) ? 1 : 0;
gboolean disable_direct_icalls = (flags & JIT_FLAG_NO_DIRECT_ICALLS) ? 1 : 0;
gboolean gsharedvt_method = FALSE;
gboolean interp_entry_only = FALSE;
#ifdef ENABLE_LLVM
gboolean llvm = (flags & JIT_FLAG_LLVM) ? 1 : 0;
#endif
static gboolean verbose_method_inited;
static char **verbose_method_names;
mono_atomic_inc_i32 (&mono_jit_stats.methods_compiled);
MONO_PROFILER_RAISE (jit_begin, (method));
if (MONO_METHOD_COMPILE_BEGIN_ENABLED ())
MONO_PROBE_METHOD_COMPILE_BEGIN (method);
gsharedvt_method = is_gsharedvt_method (method);
/*
* In AOT mode, method can be the following:
* - a gsharedvt method.
* - a method inflated with type parameters. This is for ref/partial sharing.
* - a method inflated with concrete types.
*/
if (compile_aot) {
if (is_open_method (method)) {
try_generic_shared = TRUE;
method_is_gshared = TRUE;
} else {
try_generic_shared = FALSE;
}
g_assert (opts & MONO_OPT_GSHARED);
} else {
try_generic_shared = mono_class_generic_sharing_enabled (method->klass) &&
(opts & MONO_OPT_GSHARED) && mono_method_is_generic_sharable_full (method, FALSE, FALSE, FALSE);
if (mini_is_gsharedvt_sharable_method (method)) {
/*
if (!mono_debug_count ())
try_generic_shared = FALSE;
*/
}
}
/*
if (try_generic_shared && !mono_debug_count ())
try_generic_shared = FALSE;
*/
if (opts & MONO_OPT_GSHARED) {
if (try_generic_shared)
mono_atomic_inc_i32 (&mono_stats.generics_sharable_methods);
else if (mono_method_is_generic_impl (method))
mono_atomic_inc_i32 (&mono_stats.generics_unsharable_methods);
}
#ifdef ENABLE_LLVM
try_llvm = mono_use_llvm || llvm;
#endif
#ifndef MONO_ARCH_FLOAT32_SUPPORTED
opts &= ~MONO_OPT_FLOAT32;
#endif
if (current_backend->force_float32)
/* Force float32 mode on newer platforms */
opts |= MONO_OPT_FLOAT32;
restart_compile:
if (method_is_gshared) {
method_to_compile = method;
} else {
if (try_generic_shared) {
ERROR_DECL (error);
method_to_compile = mini_get_shared_method_full (method, SHARE_MODE_NONE, error);
mono_error_assert_ok (error);
} else {
method_to_compile = method;
}
}
cfg = g_new0 (MonoCompile, 1);
cfg->method = method_to_compile;
cfg->mempool = mono_mempool_new ();
cfg->opt = opts;
cfg->run_cctors = run_cctors;
cfg->verbose_level = mini_verbose;
cfg->compile_aot = compile_aot;
cfg->full_aot = full_aot;
cfg->disable_omit_fp = mini_debug_options.disable_omit_fp;
cfg->skip_visibility = method->skip_visibility;
cfg->orig_method = method;
cfg->gen_seq_points = !mini_debug_options.no_seq_points_compact_data || mini_debug_options.gen_sdb_seq_points;
cfg->gen_sdb_seq_points = mini_debug_options.gen_sdb_seq_points;
cfg->llvm_only = (flags & JIT_FLAG_LLVM_ONLY) != 0;
cfg->interp = (flags & JIT_FLAG_INTERP) != 0;
cfg->use_current_cpu = (flags & JIT_FLAG_USE_CURRENT_CPU) != 0;
cfg->self_init = (flags & JIT_FLAG_SELF_INIT) != 0;
cfg->code_exec_only = (flags & JIT_FLAG_CODE_EXEC_ONLY) != 0;
cfg->backend = current_backend;
cfg->jit_mm = jit_mm_for_method (cfg->method);
cfg->mem_manager = m_method_get_mem_manager (cfg->method);
if (cfg->method->wrapper_type == MONO_WRAPPER_ALLOC) {
/* We can't have seq points inside gc critical regions */
cfg->gen_seq_points = FALSE;
cfg->gen_sdb_seq_points = FALSE;
}
/* coop requires loop detection to happen */
if (mini_safepoints_enabled ())
cfg->opt |= MONO_OPT_LOOP;
cfg->disable_llvm_implicit_null_checks = mini_debug_options.llvm_disable_implicit_null_checks;
if (cfg->backend->explicit_null_checks || mini_debug_options.explicit_null_checks) {
/* some platforms have null pages, so we can't SIGSEGV */
cfg->explicit_null_checks = TRUE;
cfg->disable_llvm_implicit_null_checks = TRUE;
} else {
cfg->explicit_null_checks = flags & JIT_FLAG_EXPLICIT_NULL_CHECKS;
}
cfg->soft_breakpoints = mini_debug_options.soft_breakpoints;
cfg->check_pinvoke_callconv = mini_debug_options.check_pinvoke_callconv;
cfg->disable_direct_icalls = disable_direct_icalls;
cfg->direct_pinvoke = (flags & JIT_FLAG_DIRECT_PINVOKE) != 0;
cfg->interp_entry_only = interp_entry_only;
if (try_generic_shared)
cfg->gshared = TRUE;
if (cfg->gshared)
cfg->rgctx_access = mini_get_rgctx_access_for_method (cfg->method);
cfg->compile_llvm = try_llvm;
cfg->token_info_hash = g_hash_table_new (NULL, NULL);
if (cfg->compile_aot)
cfg->method_index = aot_method_index;
if (cfg->compile_llvm)
cfg->explicit_null_checks = TRUE;
if (cfg->explicit_null_checks && method->wrapper_type == MONO_WRAPPER_OTHER &&
(mono_marshal_get_wrapper_info (method)->subtype == WRAPPER_SUBTYPE_GSHAREDVT_IN_SIG ||
mono_marshal_get_wrapper_info (method)->subtype == WRAPPER_SUBTYPE_GSHAREDVT_OUT_SIG)) {
/* These wrappers contain loads/stores which can't fail */
cfg->explicit_null_checks = FALSE;
}
/*
if (!mono_debug_count ())
cfg->opt &= ~MONO_OPT_FLOAT32;
*/
if (!is_simd_supported (cfg))
cfg->opt &= ~MONO_OPT_SIMD;
cfg->r4fp = (cfg->opt & MONO_OPT_FLOAT32) ? 1 : 0;
cfg->r4_stack_type = cfg->r4fp ? STACK_R4 : STACK_R8;
if (cfg->gen_seq_points)
cfg->seq_points = g_ptr_array_new ();
cfg->error = (MonoError*)&cfg->error_value;
error_init (cfg->error);
if (cfg->compile_aot && !try_generic_shared && (method->is_generic || mono_class_is_gtd (method->klass) || method_is_gshared)) {
cfg->exception_type = MONO_EXCEPTION_GENERIC_SHARING_FAILED;
return cfg;
}
if (cfg->gshared && (gsharedvt_method || mini_is_gsharedvt_sharable_method (method))) {
MonoMethodInflated *inflated;
MonoGenericContext *context;
if (gsharedvt_method) {
g_assert (method->is_inflated);
inflated = (MonoMethodInflated*)method;
context = &inflated->context;
/* We are compiling a gsharedvt method directly */
g_assert (compile_aot);
} else {
g_assert (method_to_compile->is_inflated);
inflated = (MonoMethodInflated*)method_to_compile;
context = &inflated->context;
}
mini_init_gsctx (cfg->mempool, context, &cfg->gsctx);
cfg->gsctx_context = context;
cfg->gsharedvt = TRUE;
if (!cfg->llvm_only) {
cfg->disable_llvm = TRUE;
cfg->exception_message = g_strdup ("gsharedvt");
}
}
if (cfg->gshared) {
method_to_register = method_to_compile;
} else {
g_assert (method == method_to_compile);
method_to_register = method;
}
cfg->method_to_register = method_to_register;
ERROR_DECL (err);
sig = mono_method_signature_checked (cfg->method, err);
if (!sig) {
cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
cfg->exception_message = g_strdup (mono_error_get_message (err));
mono_error_cleanup (err);
if (MONO_METHOD_COMPILE_END_ENABLED ())
MONO_PROBE_METHOD_COMPILE_END (method, FALSE);
return cfg;
}
header = cfg->header = mono_method_get_header_checked (cfg->method, cfg->error);
if (!header) {
mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
if (MONO_METHOD_COMPILE_END_ENABLED ())
MONO_PROBE_METHOD_COMPILE_END (method, FALSE);
return cfg;
}
if (cfg->llvm_only && cfg->interp && !cfg->interp_entry_only && header->num_clauses) {
cfg->deopt = TRUE;
/* Can't reconstruct inlined state */
cfg->disable_inline = TRUE;
}
#ifdef ENABLE_LLVM
{
static gboolean inited;
if (!inited)
inited = TRUE;
/*
* Check for methods which cannot be compiled by LLVM early, to avoid
* the extra compilation pass.
*/
if (COMPILE_LLVM (cfg)) {
mono_llvm_check_method_supported (cfg);
if (cfg->disable_llvm) {
if (cfg->verbose_level > 0) {
//nm = mono_method_full_name (cfg->method, TRUE);
printf ("LLVM failed for '%s.%s': %s\n", m_class_get_name (method->klass), method->name, cfg->exception_message);
//g_free (nm);
}
if (cfg->llvm_only) {
g_free (cfg->exception_message);
cfg->disable_aot = TRUE;
return cfg;
}
mono_destroy_compile (cfg);
try_llvm = FALSE;
goto restart_compile;
}
}
}
#endif
cfg->prof_flags = mono_profiler_get_call_instrumentation_flags (cfg->method);
cfg->prof_coverage = mono_profiler_coverage_instrumentation_enabled (cfg->method);
gboolean trace = mono_jit_trace_calls != NULL && mono_trace_eval (cfg->method);
if (trace)
cfg->prof_flags = (MonoProfilerCallInstrumentationFlags)(
MONO_PROFILER_CALL_INSTRUMENTATION_ENTER | MONO_PROFILER_CALL_INSTRUMENTATION_ENTER_CONTEXT |
MONO_PROFILER_CALL_INSTRUMENTATION_LEAVE | MONO_PROFILER_CALL_INSTRUMENTATION_LEAVE_CONTEXT);
/* The debugger has no liveness information, so avoid sharing registers/stack slots */
if (mini_debug_options.mdb_optimizations || MONO_CFG_PROFILE_CALL_CONTEXT (cfg)) {
cfg->disable_reuse_registers = TRUE;
cfg->disable_reuse_stack_slots = TRUE;
/*
 * This decreases the chance that the debugger will read registers/stack slots which are
* not yet initialized.
*/
cfg->disable_initlocals_opt = TRUE;
cfg->extend_live_ranges = TRUE;
/* The debugger needs all locals to be on the stack or in a global register */
cfg->disable_vreg_to_lvreg = TRUE;
/* Don't remove unused variables when running inside the debugger since the user
* may still want to view them. */
cfg->disable_deadce_vars = TRUE;
cfg->opt &= ~MONO_OPT_DEADCE;
cfg->opt &= ~MONO_OPT_INLINE;
cfg->opt &= ~MONO_OPT_COPYPROP;
cfg->opt &= ~MONO_OPT_CONSPROP;
/* This is needed for the soft debugger, which doesn't like code after the epilog */
cfg->disable_out_of_line_bblocks = TRUE;
}
mini_gc_init_cfg (cfg);
if (method->wrapper_type == MONO_WRAPPER_OTHER) {
WrapperInfo *info = mono_marshal_get_wrapper_info (method);
if ((info && (info->subtype == WRAPPER_SUBTYPE_GSHAREDVT_IN_SIG || info->subtype == WRAPPER_SUBTYPE_GSHAREDVT_OUT_SIG))) {
cfg->disable_gc_safe_points = TRUE;
/* This is safe, these wrappers only store to the stack */
cfg->gen_write_barriers = FALSE;
}
}
if (COMPILE_LLVM (cfg)) {
cfg->opt |= MONO_OPT_ABCREM;
}
if (!verbose_method_inited) {
char *env = g_getenv ("MONO_VERBOSE_METHOD");
if (env != NULL)
verbose_method_names = g_strsplit (env, ";", -1);
verbose_method_inited = TRUE;
}
if (verbose_method_names) {
int i;
for (i = 0; verbose_method_names [i] != NULL; i++){
const char *name = verbose_method_names [i];
if ((strchr (name, '.') > name) || strchr (name, ':') || strchr (name, '*')) {
MonoMethodDesc *desc;
desc = mono_method_desc_new (name, TRUE);
if (desc) {
if (mono_method_desc_full_match (desc, cfg->method)) {
cfg->verbose_level = 4;
}
mono_method_desc_free (desc);
}
} else {
if (strcmp (cfg->method->name, name) == 0)
cfg->verbose_level = 4;
}
}
}
cfg->intvars = (guint16 *)mono_mempool_alloc0 (cfg->mempool, sizeof (guint16) * STACK_MAX * header->max_stack);
if (cfg->verbose_level > 0) {
char *method_name;
method_name = mono_method_get_full_name (method);
g_print ("converting %s%s%s%smethod %s\n", COMPILE_LLVM (cfg) ? "llvm " : "", cfg->gsharedvt ? "gsharedvt " : "", (cfg->gshared && !cfg->gsharedvt) ? "gshared " : "", cfg->interp_entry_only ? "interp only " : "", method_name);
/*
if (COMPILE_LLVM (cfg))
g_print ("converting llvm method %s\n", method_name = mono_method_full_name (method, TRUE));
else if (cfg->gsharedvt)
g_print ("converting gsharedvt method %s\n", method_name = mono_method_full_name (method_to_compile, TRUE));
else if (cfg->gshared)
g_print ("converting shared method %s\n", method_name = mono_method_full_name (method_to_compile, TRUE));
else
g_print ("converting method %s\n", method_name = mono_method_full_name (method, TRUE));
*/
g_free (method_name);
}
if (cfg->opt & MONO_OPT_ABCREM)
cfg->opt |= MONO_OPT_SSA;
cfg->rs = mono_regstate_new ();
cfg->next_vreg = cfg->rs->next_vreg;
/* FIXME: Fix SSA to handle branches inside bblocks */
if (cfg->opt & MONO_OPT_SSA)
cfg->enable_extended_bblocks = FALSE;
/*
* FIXME: This confuses liveness analysis because variables which are assigned after
* a branch inside a bblock become part of the kill set, even though the assignment
* might not get executed. This causes the optimize_initlocals pass to delete some
* assignments which are needed.
* Also, the mono_if_conversion pass needs to be modified to recognize the code
* created by this.
*/
//cfg->enable_extended_bblocks = TRUE;
/*
* create MonoInst* which represents arguments and local variables
*/
mono_compile_create_vars (cfg);
mono_cfg_dump_create_context (cfg);
mono_cfg_dump_begin_group (cfg);
MONO_TIME_TRACK (mono_jit_stats.jit_method_to_ir, i = mono_method_to_ir (cfg, method_to_compile, NULL, NULL, NULL, NULL, 0, FALSE));
mono_cfg_dump_ir (cfg, "method-to-ir");
if (cfg->gdump_ctx != NULL) {
/* workaround for graph visualization, as it doesn't handle empty basic blocks properly */
mono_insert_nop_in_empty_bb (cfg);
mono_cfg_dump_ir (cfg, "mono_insert_nop_in_empty_bb");
}
if (i < 0) {
if (try_generic_shared && cfg->exception_type == MONO_EXCEPTION_GENERIC_SHARING_FAILED) {
if (compile_aot) {
if (MONO_METHOD_COMPILE_END_ENABLED ())
MONO_PROBE_METHOD_COMPILE_END (method, FALSE);
return cfg;
}
mono_destroy_compile (cfg);
try_generic_shared = FALSE;
goto restart_compile;
}
g_assert (cfg->exception_type != MONO_EXCEPTION_GENERIC_SHARING_FAILED);
if (MONO_METHOD_COMPILE_END_ENABLED ())
MONO_PROBE_METHOD_COMPILE_END (method, FALSE);
/* cfg contains the details of the failure, so let the caller cleanup */
return cfg;
}
cfg->stat_basic_blocks += cfg->num_bblocks;
if (COMPILE_LLVM (cfg)) {
MonoInst *ins;
/* The IR has to be in SSA form for LLVM */
cfg->opt |= MONO_OPT_SSA;
// FIXME:
if (cfg->ret) {
// Allow SSA on the result value
if (!cfg->interp_entry_only)
cfg->ret->flags &= ~MONO_INST_VOLATILE;
// Add an explicit return instruction referencing the return value
MONO_INST_NEW (cfg, ins, OP_SETRET);
ins->sreg1 = cfg->ret->dreg;
MONO_ADD_INS (cfg->bb_exit, ins);
}
cfg->opt &= ~MONO_OPT_LINEARS;
/* FIXME: */
cfg->opt &= ~MONO_OPT_BRANCH;
}
cfg->after_method_to_ir = TRUE;
/* TODO: remove this code once we have verified that the liveness for try/catch blocks
* works perfectly
*/
/*
* Currently, this can't be commented out since exception blocks are not
* processed during liveness analysis.
* It is also needed, because otherwise the local optimization passes would
* delete assignments in cases like this:
* r1 <- 1
* <something which throws>
* r1 <- 2
* This also allows SSA to be run on methods containing exception clauses, since
* SSA will ignore variables marked VOLATILE.
*/
MONO_TIME_TRACK (mono_jit_stats.jit_liveness_handle_exception_clauses, mono_liveness_handle_exception_clauses (cfg));
mono_cfg_dump_ir (cfg, "liveness_handle_exception_clauses");
MONO_TIME_TRACK (mono_jit_stats.jit_handle_out_of_line_bblock, mono_handle_out_of_line_bblock (cfg));
mono_cfg_dump_ir (cfg, "handle_out_of_line_bblock");
/*g_print ("numblocks = %d\n", cfg->num_bblocks);*/
if (!COMPILE_LLVM (cfg)) {
MONO_TIME_TRACK (mono_jit_stats.jit_decompose_long_opts, mono_decompose_long_opts (cfg));
mono_cfg_dump_ir (cfg, "decompose_long_opts");
}
/* Should be done before branch opts */
if (cfg->opt & (MONO_OPT_CONSPROP | MONO_OPT_COPYPROP)) {
MONO_TIME_TRACK (mono_jit_stats.jit_local_cprop, mono_local_cprop (cfg));
mono_cfg_dump_ir (cfg, "local_cprop");
}
if (cfg->flags & MONO_CFG_HAS_TYPE_CHECK) {
MONO_TIME_TRACK (mono_jit_stats.jit_decompose_typechecks, mono_decompose_typechecks (cfg));
if (cfg->gdump_ctx != NULL) {
/* workaround for graph visualization, as it doesn't handle empty basic blocks properly */
mono_insert_nop_in_empty_bb (cfg);
}
mono_cfg_dump_ir (cfg, "decompose_typechecks");
}
/*
* Should be done after cprop which can do strength reduction on
* some of these ops, after propagating immediates.
*/
if (cfg->has_emulated_ops) {
MONO_TIME_TRACK (mono_jit_stats.jit_local_emulate_ops, mono_local_emulate_ops (cfg));
mono_cfg_dump_ir (cfg, "local_emulate_ops");
}
if (cfg->opt & MONO_OPT_BRANCH) {
MONO_TIME_TRACK (mono_jit_stats.jit_optimize_branches, mono_optimize_branches (cfg));
mono_cfg_dump_ir (cfg, "optimize_branches");
}
/* This must be done _before_ global reg alloc and _after_ decompose */
MONO_TIME_TRACK (mono_jit_stats.jit_handle_global_vregs, mono_handle_global_vregs (cfg));
mono_cfg_dump_ir (cfg, "handle_global_vregs");
if (cfg->opt & MONO_OPT_DEADCE) {
MONO_TIME_TRACK (mono_jit_stats.jit_local_deadce, mono_local_deadce (cfg));
mono_cfg_dump_ir (cfg, "local_deadce");
}
if (cfg->opt & MONO_OPT_ALIAS_ANALYSIS) {
MONO_TIME_TRACK (mono_jit_stats.jit_local_alias_analysis, mono_local_alias_analysis (cfg));
mono_cfg_dump_ir (cfg, "local_alias_analysis");
}
/* Disable this for LLVM to make the IR easier to handle */
if (!COMPILE_LLVM (cfg)) {
MONO_TIME_TRACK (mono_jit_stats.jit_if_conversion, mono_if_conversion (cfg));
mono_cfg_dump_ir (cfg, "if_conversion");
}
remove_empty_finally_pass (cfg);
if (cfg->llvm_only && cfg->interp && !cfg->method->wrapper_type && !interp_entry_only && !cfg->deopt) {
/* Disable llvm if there are still finally clauses left */
for (int i = 0; i < cfg->header->num_clauses; ++i) {
MonoExceptionClause *clause = &header->clauses [i];
if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY && !cfg->clause_is_dead [i]) {
cfg->exception_message = g_strdup ("finally clause.");
cfg->disable_llvm = TRUE;
break;
}
}
}
mono_threads_safepoint ();
MONO_TIME_TRACK (mono_jit_stats.jit_bb_ordering, mono_bb_ordering (cfg));
mono_cfg_dump_ir (cfg, "bb_ordering");
if (((cfg->num_varinfo > 2000) || (cfg->num_bblocks > 1000)) && !cfg->compile_aot) {
/*
* we disable some optimizations if there are too many variables
* because JIT time may become too expensive. The actual number needs
* to be tweaked and eventually the non-linear algorithms should be fixed.
*/
cfg->opt &= ~ (MONO_OPT_LINEARS | MONO_OPT_COPYPROP | MONO_OPT_CONSPROP);
cfg->disable_ssa = TRUE;
}
if (cfg->num_varinfo > 10000 && !cfg->llvm_only)
/* Disable SSA (and therefore llvm) for overly complex methods */
cfg->disable_ssa = TRUE;
if (cfg->opt & MONO_OPT_LOOP) {
MONO_TIME_TRACK (mono_jit_stats.jit_compile_dominator_info, mono_compile_dominator_info (cfg, MONO_COMP_DOM | MONO_COMP_IDOM));
MONO_TIME_TRACK (mono_jit_stats.jit_compute_natural_loops, mono_compute_natural_loops (cfg));
}
if (mono_threads_are_safepoints_enabled ()) {
MONO_TIME_TRACK (mono_jit_stats.jit_insert_safepoints, insert_safepoints (cfg));
mono_cfg_dump_ir (cfg, "insert_safepoints");
}
/* after method_to_ir */
if (parts == 1) {
if (MONO_METHOD_COMPILE_END_ENABLED ())
MONO_PROBE_METHOD_COMPILE_END (method, TRUE);
return cfg;
}
/*
if (header->num_clauses)
cfg->disable_ssa = TRUE;
*/
//#define DEBUGSSA "logic_run"
//#define DEBUGSSA_CLASS "Tests"
#ifdef DEBUGSSA
if (!cfg->disable_ssa) {
mono_local_cprop (cfg);
#ifndef DISABLE_SSA
mono_ssa_compute (cfg);
#endif
}
#else
if (cfg->opt & MONO_OPT_SSA) {
if (!(cfg->comp_done & MONO_COMP_SSA) && !cfg->disable_ssa) {
#ifndef DISABLE_SSA
MONO_TIME_TRACK (mono_jit_stats.jit_ssa_compute, mono_ssa_compute (cfg));
mono_cfg_dump_ir (cfg, "ssa_compute");
#endif
if (cfg->verbose_level >= 2) {
print_dfn (cfg);
}
}
}
#endif
/* after SSA translation */
if (parts == 2) {
if (MONO_METHOD_COMPILE_END_ENABLED ())
MONO_PROBE_METHOD_COMPILE_END (method, TRUE);
return cfg;
}
if ((cfg->opt & MONO_OPT_CONSPROP) || (cfg->opt & MONO_OPT_COPYPROP)) {
if (cfg->comp_done & MONO_COMP_SSA && !COMPILE_LLVM (cfg)) {
#ifndef DISABLE_SSA
MONO_TIME_TRACK (mono_jit_stats.jit_ssa_cprop, mono_ssa_cprop (cfg));
mono_cfg_dump_ir (cfg, "ssa_cprop");
#endif
}
}
#ifndef DISABLE_SSA
if (cfg->comp_done & MONO_COMP_SSA && !COMPILE_LLVM (cfg)) {
//mono_ssa_strength_reduction (cfg);
if (cfg->opt & MONO_OPT_DEADCE) {
MONO_TIME_TRACK (mono_jit_stats.jit_ssa_deadce, mono_ssa_deadce (cfg));
mono_cfg_dump_ir (cfg, "ssa_deadce");
}
if ((cfg->flags & (MONO_CFG_HAS_LDELEMA|MONO_CFG_HAS_CHECK_THIS)) && (cfg->opt & MONO_OPT_ABCREM)) {
MONO_TIME_TRACK (mono_jit_stats.jit_perform_abc_removal, mono_perform_abc_removal (cfg));
mono_cfg_dump_ir (cfg, "perform_abc_removal");
}
MONO_TIME_TRACK (mono_jit_stats.jit_ssa_remove, mono_ssa_remove (cfg));
mono_cfg_dump_ir (cfg, "ssa_remove");
MONO_TIME_TRACK (mono_jit_stats.jit_local_cprop2, mono_local_cprop (cfg));
mono_cfg_dump_ir (cfg, "local_cprop2");
MONO_TIME_TRACK (mono_jit_stats.jit_handle_global_vregs2, mono_handle_global_vregs (cfg));
mono_cfg_dump_ir (cfg, "handle_global_vregs2");
if (cfg->opt & MONO_OPT_DEADCE) {
MONO_TIME_TRACK (mono_jit_stats.jit_local_deadce2, mono_local_deadce (cfg));
mono_cfg_dump_ir (cfg, "local_deadce2");
}
if (cfg->opt & MONO_OPT_BRANCH) {
MONO_TIME_TRACK (mono_jit_stats.jit_optimize_branches2, mono_optimize_branches (cfg));
mono_cfg_dump_ir (cfg, "optimize_branches2");
}
}
#endif
if (cfg->comp_done & MONO_COMP_SSA && COMPILE_LLVM (cfg)) {
mono_ssa_loop_invariant_code_motion (cfg);
mono_cfg_dump_ir (cfg, "loop_invariant_code_motion");
/* This removes MONO_INST_FAULT flags too so perform it unconditionally */
if (cfg->opt & MONO_OPT_ABCREM) {
mono_perform_abc_removal (cfg);
mono_cfg_dump_ir (cfg, "abc_removal");
}
}
/* after SSA removal */
if (parts == 3) {
if (MONO_METHOD_COMPILE_END_ENABLED ())
MONO_PROBE_METHOD_COMPILE_END (method, TRUE);
return cfg;
}
if (cfg->llvm_only && cfg->gsharedvt)
mono_ssa_remove_gsharedvt (cfg);
#ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
if (COMPILE_SOFT_FLOAT (cfg))
mono_decompose_soft_float (cfg);
#endif
MONO_TIME_TRACK (mono_jit_stats.jit_decompose_vtype_opts, mono_decompose_vtype_opts (cfg));
if (cfg->flags & MONO_CFG_NEEDS_DECOMPOSE) {
MONO_TIME_TRACK (mono_jit_stats.jit_decompose_array_access_opts, mono_decompose_array_access_opts (cfg));
mono_cfg_dump_ir (cfg, "decompose_array_access_opts");
}
if (cfg->got_var) {
#ifndef MONO_ARCH_GOT_REG
GList *regs;
#endif
int got_reg;
g_assert (cfg->got_var_allocated);
/*
 * Always allocate the GOT var to a register, because keeping it
 * in memory will increase the number of live temporaries in some
 * code created by inssel.brg, leading to the well-known
 * spills+branches problem. Testcase: mcs crash in
* System.MonoCustomAttrs:GetCustomAttributes.
*/
#ifdef MONO_ARCH_GOT_REG
got_reg = MONO_ARCH_GOT_REG;
#else
regs = mono_arch_get_global_int_regs (cfg);
g_assert (regs);
got_reg = GPOINTER_TO_INT (regs->data);
g_list_free (regs);
#endif
cfg->got_var->opcode = OP_REGVAR;
cfg->got_var->dreg = got_reg;
cfg->used_int_regs |= 1LL << cfg->got_var->dreg;
}
/*
* Have to call this again to process variables added since the first call.
*/
MONO_TIME_TRACK(mono_jit_stats.jit_liveness_handle_exception_clauses2, mono_liveness_handle_exception_clauses (cfg));
if (cfg->opt & MONO_OPT_LINEARS) {
GList *vars, *regs, *l;
/* FIXME: maybe we can avoid computing liveness here if it was already computed? */
cfg->comp_done &= ~MONO_COMP_LIVENESS;
if (!(cfg->comp_done & MONO_COMP_LIVENESS))
MONO_TIME_TRACK (mono_jit_stats.jit_analyze_liveness, mono_analyze_liveness (cfg));
if ((vars = mono_arch_get_allocatable_int_vars (cfg))) {
regs = mono_arch_get_global_int_regs (cfg);
/* Remove the reg reserved for holding the GOT address */
if (cfg->got_var) {
for (l = regs; l; l = l->next) {
if (GPOINTER_TO_UINT (l->data) == cfg->got_var->dreg) {
regs = g_list_delete_link (regs, l);
break;
}
}
}
MONO_TIME_TRACK (mono_jit_stats.jit_linear_scan, mono_linear_scan (cfg, vars, regs, &cfg->used_int_regs));
mono_cfg_dump_ir (cfg, "linear_scan");
}
}
//mono_print_code (cfg, "");
//print_dfn (cfg);
/* variables are allocated after decompose, since decompose could create temps */
if (!COMPILE_LLVM (cfg)) {
MONO_TIME_TRACK (mono_jit_stats.jit_arch_allocate_vars, mono_arch_allocate_vars (cfg));
mono_cfg_dump_ir (cfg, "arch_allocate_vars");
if (cfg->exception_type)
return cfg;
}
if (cfg->gsharedvt)
mono_allocate_gsharedvt_vars (cfg);
if (!COMPILE_LLVM (cfg)) {
gboolean need_local_opts;
MONO_TIME_TRACK (mono_jit_stats.jit_spill_global_vars, mono_spill_global_vars (cfg, &need_local_opts));
mono_cfg_dump_ir (cfg, "spill_global_vars");
if (need_local_opts || cfg->compile_aot) {
/* To optimize code created by spill_global_vars */
MONO_TIME_TRACK (mono_jit_stats.jit_local_cprop3, mono_local_cprop (cfg));
if (cfg->opt & MONO_OPT_DEADCE)
MONO_TIME_TRACK (mono_jit_stats.jit_local_deadce3, mono_local_deadce (cfg));
mono_cfg_dump_ir (cfg, "needs_local_opts");
}
}
mono_insert_branches_between_bblocks (cfg);
if (COMPILE_LLVM (cfg)) {
#ifdef ENABLE_LLVM
char *nm;
/* The IR has to be in SSA form for LLVM */
if (!(cfg->comp_done & MONO_COMP_SSA)) {
cfg->exception_message = g_strdup ("SSA disabled.");
cfg->disable_llvm = TRUE;
}
if (cfg->flags & MONO_CFG_NEEDS_DECOMPOSE)
mono_decompose_array_access_opts (cfg);
if (!cfg->disable_llvm)
mono_llvm_emit_method (cfg);
if (cfg->disable_llvm) {
if (cfg->verbose_level > 0) {
//nm = mono_method_full_name (cfg->method, TRUE);
printf ("LLVM failed for '%s.%s': %s\n", m_class_get_name (method->klass), method->name, cfg->exception_message);
//g_free (nm);
}
if (cfg->llvm_only && cfg->interp && !interp_entry_only) {
// If interp support is enabled, restart compilation, generating interp entry code only
interp_entry_only = TRUE;
mono_destroy_compile (cfg);
goto restart_compile;
}
if (cfg->llvm_only) {
cfg->disable_aot = TRUE;
return cfg;
}
mono_destroy_compile (cfg);
try_llvm = FALSE;
goto restart_compile;
}
if (cfg->verbose_level > 0 && !cfg->compile_aot) {
nm = mono_method_get_full_name (cfg->method);
g_print ("LLVM Method %s emitted at %p to %p (code length %d)\n",
nm,
cfg->native_code, cfg->native_code + cfg->code_len, cfg->code_len);
g_free (nm);
}
#endif
} else {
MONO_TIME_TRACK (mono_jit_stats.jit_codegen, mono_codegen (cfg));
mono_cfg_dump_ir (cfg, "codegen");
if (cfg->exception_type)
return cfg;
}
if (COMPILE_LLVM (cfg))
mono_atomic_inc_i32 (&mono_jit_stats.methods_with_llvm);
else
mono_atomic_inc_i32 (&mono_jit_stats.methods_without_llvm);
MONO_TIME_TRACK (mono_jit_stats.jit_create_jit_info, cfg->jit_info = create_jit_info (cfg, method_to_compile));
if (cfg->extend_live_ranges) {
/* Extend live ranges to cover the whole method */
for (i = 0; i < cfg->num_varinfo; ++i)
MONO_VARINFO (cfg, i)->live_range_end = cfg->code_len;
}
MONO_TIME_TRACK (mono_jit_stats.jit_gc_create_gc_map, mini_gc_create_gc_map (cfg));
MONO_TIME_TRACK (mono_jit_stats.jit_save_seq_point_info, mono_save_seq_point_info (cfg, cfg->jit_info));
if (!cfg->compile_aot)
mono_lldb_save_method_info (cfg);
if (cfg->verbose_level >= 2) {
char *id = mono_method_full_name (cfg->method, TRUE);
g_print ("\n*** ASM for %s ***\n", id);
mono_disassemble_code (cfg, cfg->native_code, cfg->code_len, id + 3);
g_print ("***\n\n");
g_free (id);
}
if (!cfg->compile_aot && !(flags & JIT_FLAG_DISCARD_RESULTS)) {
mono_jit_info_table_add (cfg->jit_info);
if (cfg->method->dynamic) {
MonoJitMemoryManager *jit_mm = (MonoJitMemoryManager*)cfg->jit_mm;
MonoJitDynamicMethodInfo *res;
jit_mm_lock (jit_mm);
g_assert (jit_mm->dynamic_code_hash);
res = (MonoJitDynamicMethodInfo *)g_hash_table_lookup (jit_mm->dynamic_code_hash, method);
jit_mm_unlock (jit_mm);
g_assert (res);
res->ji = cfg->jit_info;
}
mono_postprocess_patches_after_ji_publish (cfg);
}
#if 0
if (cfg->gsharedvt)
printf ("GSHAREDVT: %s\n", mono_method_full_name (cfg->method, TRUE));
#endif
/* collect statistics */
#ifndef DISABLE_PERFCOUNTERS
mono_atomic_inc_i32 (&mono_perfcounters->jit_methods);
mono_atomic_fetch_add_i32 (&mono_perfcounters->jit_bytes, header->code_size);
#endif
gint32 code_size_ratio = cfg->code_len;
mono_atomic_fetch_add_i32 (&mono_jit_stats.allocated_code_size, code_size_ratio);
mono_atomic_fetch_add_i32 (&mono_jit_stats.native_code_size, code_size_ratio);
/* FIXME: use an explicit function to read booleans */
if ((gboolean)mono_atomic_load_i32 ((gint32*)&mono_jit_stats.enabled)) {
if (code_size_ratio > mono_atomic_load_i32 (&mono_jit_stats.biggest_method_size)) {
mono_atomic_store_i32 (&mono_jit_stats.biggest_method_size, code_size_ratio);
char *biggest_method = g_strdup_printf ("%s::%s", m_class_get_name (method->klass), method->name);
biggest_method = (char*)mono_atomic_xchg_ptr ((gpointer*)&mono_jit_stats.biggest_method, biggest_method);
g_free (biggest_method);
}
code_size_ratio = (code_size_ratio * 100) / header->code_size;
if (code_size_ratio > mono_atomic_load_i32 (&mono_jit_stats.max_code_size_ratio)) {
mono_atomic_store_i32 (&mono_jit_stats.max_code_size_ratio, code_size_ratio);
char *max_ratio_method = g_strdup_printf ("%s::%s", m_class_get_name (method->klass), method->name);
max_ratio_method = (char*)mono_atomic_xchg_ptr ((gpointer*)&mono_jit_stats.max_ratio_method, max_ratio_method);
g_free (max_ratio_method);
}
}
if (MONO_METHOD_COMPILE_END_ENABLED ())
MONO_PROBE_METHOD_COMPILE_END (method, TRUE);
mono_cfg_dump_close_group (cfg);
return cfg;
}
gboolean
mini_class_has_reference_variant_generic_argument (MonoCompile *cfg, MonoClass *klass, int context_used)
{
int i;
MonoGenericContainer *container;
MonoGenericInst *ginst;
if (mono_class_is_ginst (klass)) {
container = mono_class_get_generic_container (mono_class_get_generic_class (klass)->container_class);
ginst = mono_class_get_generic_class (klass)->context.class_inst;
} else if (mono_class_is_gtd (klass) && context_used) {
container = mono_class_get_generic_container (klass);
ginst = container->context.class_inst;
} else {
return FALSE;
}
for (i = 0; i < container->type_argc; ++i) {
MonoType *type;
if (!(mono_generic_container_get_param_info (container, i)->flags & (MONO_GEN_PARAM_VARIANT|MONO_GEN_PARAM_COVARIANT)))
continue;
type = ginst->type_argv [i];
if (mini_type_is_reference (type))
return TRUE;
}
return FALSE;
}
void
mono_cfg_add_try_hole (MonoCompile *cfg, MonoExceptionClause *clause, guint8 *start, MonoBasicBlock *bb)
{
TryBlockHole *hole = (TryBlockHole *)mono_mempool_alloc (cfg->mempool, sizeof (TryBlockHole));
hole->clause = clause;
hole->start_offset = start - cfg->native_code;
hole->basic_block = bb;
cfg->try_block_holes = g_slist_append_mempool (cfg->mempool, cfg->try_block_holes, hole);
}
void
mono_cfg_set_exception (MonoCompile *cfg, MonoExceptionType type)
{
cfg->exception_type = type;
}
/* Assumes ownership of the MSG argument */
void
mono_cfg_set_exception_invalid_program (MonoCompile *cfg, char *msg)
{
mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
mono_error_set_generic_error (cfg->error, "System", "InvalidProgramException", "%s", msg);
}
#endif /* DISABLE_JIT */
gint64 mono_time_track_start ()
{
return mono_100ns_ticks ();
}
/*
* mono_time_track_end:
*
* Uses UnlockedAdd64 () to update \param time.
*/
void mono_time_track_end (gint64 *time, gint64 start)
{
UnlockedAdd64 (time, mono_100ns_ticks () - start);
}
/*
* mono_update_jit_stats:
*
* Only call this function in locked environments to avoid data races.
*/
MONO_NO_SANITIZE_THREAD
void
mono_update_jit_stats (MonoCompile *cfg)
{
mono_jit_stats.allocate_var += cfg->stat_allocate_var;
mono_jit_stats.locals_stack_size += cfg->stat_locals_stack_size;
mono_jit_stats.basic_blocks += cfg->stat_basic_blocks;
mono_jit_stats.max_basic_blocks = MAX (cfg->stat_basic_blocks, mono_jit_stats.max_basic_blocks);
mono_jit_stats.cil_code_size += cfg->stat_cil_code_size;
mono_jit_stats.regvars += cfg->stat_n_regvars;
mono_jit_stats.inlineable_methods += cfg->stat_inlineable_methods;
mono_jit_stats.inlined_methods += cfg->stat_inlined_methods;
mono_jit_stats.code_reallocs += cfg->stat_code_reallocs;
}
/*
* mono_jit_compile_method_inner:
*
* Main entry point for the JIT.
*/
gpointer
mono_jit_compile_method_inner (MonoMethod *method, int opt, MonoError *error)
{
MonoCompile *cfg;
gpointer code = NULL;
MonoJitInfo *jinfo, *info;
MonoVTable *vtable;
MonoException *ex = NULL;
gint64 start;
MonoMethod *prof_method, *shared;
error_init (error);
start = mono_time_track_start ();
cfg = mini_method_compile (method, opt, JIT_FLAG_RUN_CCTORS, 0, -1);
gint64 jit_time = 0;
mono_time_track_end (&jit_time, start);
UnlockedAdd64 (&mono_jit_stats.jit_time, jit_time);
prof_method = cfg->method;
switch (cfg->exception_type) {
case MONO_EXCEPTION_NONE:
break;
case MONO_EXCEPTION_TYPE_LOAD:
case MONO_EXCEPTION_MISSING_FIELD:
case MONO_EXCEPTION_MISSING_METHOD:
case MONO_EXCEPTION_FILE_NOT_FOUND:
case MONO_EXCEPTION_BAD_IMAGE:
case MONO_EXCEPTION_INVALID_PROGRAM: {
/* Throw a type load exception if needed */
if (cfg->exception_ptr) {
ex = mono_class_get_exception_for_failure ((MonoClass *)cfg->exception_ptr);
} else {
if (cfg->exception_type == MONO_EXCEPTION_MISSING_FIELD)
ex = mono_exception_from_name_msg (mono_defaults.corlib, "System", "MissingFieldException", cfg->exception_message);
else if (cfg->exception_type == MONO_EXCEPTION_MISSING_METHOD)
ex = mono_exception_from_name_msg (mono_defaults.corlib, "System", "MissingMethodException", cfg->exception_message);
else if (cfg->exception_type == MONO_EXCEPTION_TYPE_LOAD)
ex = mono_exception_from_name_msg (mono_defaults.corlib, "System", "TypeLoadException", cfg->exception_message);
else if (cfg->exception_type == MONO_EXCEPTION_FILE_NOT_FOUND)
ex = mono_exception_from_name_msg (mono_defaults.corlib, "System.IO", "FileNotFoundException", cfg->exception_message);
else if (cfg->exception_type == MONO_EXCEPTION_BAD_IMAGE)
ex = mono_get_exception_bad_image_format (cfg->exception_message);
else if (cfg->exception_type == MONO_EXCEPTION_INVALID_PROGRAM)
ex = mono_exception_from_name_msg (mono_defaults.corlib, "System", "InvalidProgramException", cfg->exception_message);
else
g_assert_not_reached ();
}
break;
}
case MONO_EXCEPTION_MONO_ERROR:
// FIXME: MonoError has no copy ctor
g_assert (!is_ok (cfg->error));
ex = mono_error_convert_to_exception (cfg->error);
break;
default:
g_assert_not_reached ();
}
if (ex) {
MONO_PROFILER_RAISE (jit_failed, (method));
mono_destroy_compile (cfg);
mono_error_set_exception_instance (error, ex);
return NULL;
}
if (mono_method_is_generic_sharable (method, FALSE)) {
shared = mini_get_shared_method_full (method, SHARE_MODE_NONE, error);
if (!is_ok (error)) {
MONO_PROFILER_RAISE (jit_failed, (method));
mono_destroy_compile (cfg);
return NULL;
}
} else {
shared = NULL;
}
mono_loader_lock ();
if (mono_stats_method_desc && mono_method_desc_full_match (mono_stats_method_desc, method)) {
g_printf ("Printing runtime stats at method: %s\n", mono_method_get_full_name (method));
mono_runtime_print_stats ();
}
/* Check if some other thread already did the job. In this case, we can
discard the code this thread generated. */
info = mini_lookup_method (method, shared);
if (info) {
code = info->code_start;
discarded_code ++;
discarded_jit_time += jit_time;
}
if (code == NULL) {
MonoJitMemoryManager *jit_mm = (MonoJitMemoryManager*)cfg->jit_mm;
/* The lookup + insert is atomic since this is done inside the JIT code hash lock */
jit_code_hash_lock (jit_mm);
mono_internal_hash_table_insert (&jit_mm->jit_code_hash, cfg->jit_info->d.method, cfg->jit_info);
jit_code_hash_unlock (jit_mm);
code = cfg->native_code;
if (cfg->gshared && mono_method_is_generic_sharable (method, FALSE))
mono_atomic_inc_i32 (&mono_stats.generics_shared_methods);
if (cfg->gsharedvt)
mono_atomic_inc_i32 (&mono_stats.gsharedvt_methods);
}
jinfo = cfg->jit_info;
/*
* Update global stats while holding a lock, instead of doing many
* mono_atomic_inc_i32 operations during JITting.
*/
mono_update_jit_stats (cfg);
mono_destroy_compile (cfg);
mini_patch_llvm_jit_callees (method, code);
#ifndef DISABLE_JIT
mono_emit_jit_map (jinfo);
mono_emit_jit_dump (jinfo, code);
#endif
mono_loader_unlock ();
if (!is_ok (error))
return NULL;
vtable = mono_class_vtable_checked (method->klass, error);
return_val_if_nok (error, NULL);
if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
if (mono_marshal_method_from_wrapper (method)) {
/* Native func wrappers have no method */
/* The profiler doesn't know about wrappers, so pass the original icall method */
MONO_PROFILER_RAISE (jit_done, (mono_marshal_method_from_wrapper (method), jinfo));
}
}
MONO_PROFILER_RAISE (jit_done, (method, jinfo));
if (prof_method != method)
MONO_PROFILER_RAISE (jit_done, (prof_method, jinfo));
if (!mono_runtime_class_init_full (vtable, error))
return NULL;
return MINI_ADDR_TO_FTNPTR (code);
}
/*
* mini_get_underlying_type:
*
* Return the type the JIT will use during compilation.
* Handles: byref, enums, native types, bool/char, ref types, generic sharing.
* For gsharedvt types, it will return the original VAR/MVAR.
*/
MonoType*
mini_get_underlying_type (MonoType *type)
{
return mini_type_get_underlying_type (type);
}
void
mini_jit_init (void)
{
mono_os_mutex_init_recursive (&jit_mutex);
#ifndef DISABLE_JIT
mono_counters_register ("Discarded method code", MONO_COUNTER_JIT | MONO_COUNTER_INT, &discarded_code);
mono_counters_register ("Time spent JITting discarded code", MONO_COUNTER_JIT | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &discarded_jit_time);
mono_counters_register ("Try holes memory size", MONO_COUNTER_JIT | MONO_COUNTER_INT, &jinfo_try_holes_size);
mono_counters_register ("JIT/method_to_ir", MONO_COUNTER_JIT | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &mono_jit_stats.jit_method_to_ir);
mono_counters_register ("JIT/liveness_handle_exception_clauses", MONO_COUNTER_JIT | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &mono_jit_stats.jit_liveness_handle_exception_clauses);
mono_counters_register ("JIT/handle_out_of_line_bblock", MONO_COUNTER_JIT | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &mono_jit_stats.jit_handle_out_of_line_bblock);
mono_counters_register ("JIT/decompose_long_opts", MONO_COUNTER_JIT | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &mono_jit_stats.jit_decompose_long_opts);
mono_counters_register ("JIT/decompose_typechecks", MONO_COUNTER_JIT | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &mono_jit_stats.jit_decompose_typechecks);
mono_counters_register ("JIT/local_cprop", MONO_COUNTER_JIT | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &mono_jit_stats.jit_local_cprop);
mono_counters_register ("JIT/local_emulate_ops", MONO_COUNTER_JIT | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &mono_jit_stats.jit_local_emulate_ops);
mono_counters_register ("JIT/optimize_branches", MONO_COUNTER_JIT | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &mono_jit_stats.jit_optimize_branches);
mono_counters_register ("JIT/handle_global_vregs", MONO_COUNTER_JIT | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &mono_jit_stats.jit_handle_global_vregs);
mono_counters_register ("JIT/local_deadce", MONO_COUNTER_JIT | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &mono_jit_stats.jit_local_deadce);
mono_counters_register ("JIT/local_alias_analysis", MONO_COUNTER_JIT | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &mono_jit_stats.jit_local_alias_analysis);
mono_counters_register ("JIT/if_conversion", MONO_COUNTER_JIT | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &mono_jit_stats.jit_if_conversion);
mono_counters_register ("JIT/bb_ordering", MONO_COUNTER_JIT | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &mono_jit_stats.jit_bb_ordering);
mono_counters_register ("JIT/compile_dominator_info", MONO_COUNTER_JIT | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &mono_jit_stats.jit_compile_dominator_info);
mono_counters_register ("JIT/compute_natural_loops", MONO_COUNTER_JIT | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &mono_jit_stats.jit_compute_natural_loops);
mono_counters_register ("JIT/insert_safepoints", MONO_COUNTER_JIT | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &mono_jit_stats.jit_insert_safepoints);
mono_counters_register ("JIT/ssa_compute", MONO_COUNTER_JIT | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &mono_jit_stats.jit_ssa_compute);
mono_counters_register ("JIT/ssa_cprop", MONO_COUNTER_JIT | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &mono_jit_stats.jit_ssa_cprop);
mono_counters_register ("JIT/ssa_deadce", MONO_COUNTER_JIT | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &mono_jit_stats.jit_ssa_deadce);
mono_counters_register ("JIT/perform_abc_removal", MONO_COUNTER_JIT | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &mono_jit_stats.jit_perform_abc_removal);
mono_counters_register ("JIT/ssa_remove", MONO_COUNTER_JIT | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &mono_jit_stats.jit_ssa_remove);
mono_counters_register ("JIT/local_cprop2", MONO_COUNTER_JIT | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &mono_jit_stats.jit_local_cprop2);
mono_counters_register ("JIT/handle_global_vregs2", MONO_COUNTER_JIT | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &mono_jit_stats.jit_handle_global_vregs2);
mono_counters_register ("JIT/local_deadce2", MONO_COUNTER_JIT | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &mono_jit_stats.jit_local_deadce2);
mono_counters_register ("JIT/optimize_branches2", MONO_COUNTER_JIT | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &mono_jit_stats.jit_optimize_branches2);
mono_counters_register ("JIT/decompose_vtype_opts", MONO_COUNTER_JIT | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &mono_jit_stats.jit_decompose_vtype_opts);
mono_counters_register ("JIT/decompose_array_access_opts", MONO_COUNTER_JIT | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &mono_jit_stats.jit_decompose_array_access_opts);
mono_counters_register ("JIT/liveness_handle_exception_clauses2", MONO_COUNTER_JIT | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &mono_jit_stats.jit_liveness_handle_exception_clauses2);
mono_counters_register ("JIT/analyze_liveness", MONO_COUNTER_JIT | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &mono_jit_stats.jit_analyze_liveness);
mono_counters_register ("JIT/linear_scan", MONO_COUNTER_JIT | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &mono_jit_stats.jit_linear_scan);
mono_counters_register ("JIT/arch_allocate_vars", MONO_COUNTER_JIT | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &mono_jit_stats.jit_arch_allocate_vars);
mono_counters_register ("JIT/spill_global_var", MONO_COUNTER_JIT | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &mono_jit_stats.jit_spill_global_vars);
mono_counters_register ("JIT/local_cprop3", MONO_COUNTER_JIT | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &mono_jit_stats.jit_local_cprop3);
mono_counters_register ("JIT/local_deadce3", MONO_COUNTER_JIT | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &mono_jit_stats.jit_local_deadce3);
mono_counters_register ("JIT/codegen", MONO_COUNTER_JIT | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &mono_jit_stats.jit_codegen);
mono_counters_register ("JIT/create_jit_info", MONO_COUNTER_JIT | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &mono_jit_stats.jit_create_jit_info);
mono_counters_register ("JIT/gc_create_gc_map", MONO_COUNTER_JIT | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &mono_jit_stats.jit_gc_create_gc_map);
mono_counters_register ("JIT/save_seq_point_info", MONO_COUNTER_JIT | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &mono_jit_stats.jit_save_seq_point_info);
mono_counters_register ("Total time spent JITting", MONO_COUNTER_JIT | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &mono_jit_stats.jit_time);
mono_counters_register ("Basic blocks", MONO_COUNTER_JIT | MONO_COUNTER_INT, &mono_jit_stats.basic_blocks);
mono_counters_register ("Max basic blocks", MONO_COUNTER_JIT | MONO_COUNTER_INT, &mono_jit_stats.max_basic_blocks);
mono_counters_register ("Allocated vars", MONO_COUNTER_JIT | MONO_COUNTER_INT, &mono_jit_stats.allocate_var);
mono_counters_register ("Code reallocs", MONO_COUNTER_JIT | MONO_COUNTER_INT, &mono_jit_stats.code_reallocs);
mono_counters_register ("Allocated code size", MONO_COUNTER_JIT | MONO_COUNTER_INT, &mono_jit_stats.allocated_code_size);
mono_counters_register ("Allocated seq points size", MONO_COUNTER_JIT | MONO_COUNTER_INT, &mono_jit_stats.allocated_seq_points_size);
mono_counters_register ("Inlineable methods", MONO_COUNTER_JIT | MONO_COUNTER_INT, &mono_jit_stats.inlineable_methods);
mono_counters_register ("Inlined methods", MONO_COUNTER_JIT | MONO_COUNTER_INT, &mono_jit_stats.inlined_methods);
mono_counters_register ("Regvars", MONO_COUNTER_JIT | MONO_COUNTER_INT, &mono_jit_stats.regvars);
mono_counters_register ("Locals stack size", MONO_COUNTER_JIT | MONO_COUNTER_INT, &mono_jit_stats.locals_stack_size);
mono_counters_register ("Method cache lookups", MONO_COUNTER_JIT | MONO_COUNTER_INT, &mono_jit_stats.methods_lookups);
mono_counters_register ("Compiled CIL code size", MONO_COUNTER_JIT | MONO_COUNTER_INT, &mono_jit_stats.cil_code_size);
mono_counters_register ("Native code size", MONO_COUNTER_JIT | MONO_COUNTER_INT, &mono_jit_stats.native_code_size);
mono_counters_register ("Aliases found", MONO_COUNTER_JIT | MONO_COUNTER_INT, &mono_jit_stats.alias_found);
mono_counters_register ("Aliases eliminated", MONO_COUNTER_JIT | MONO_COUNTER_INT, &mono_jit_stats.alias_removed);
mono_counters_register ("Aliased loads eliminated", MONO_COUNTER_JIT | MONO_COUNTER_INT, &mono_jit_stats.loads_eliminated);
mono_counters_register ("Aliased stores eliminated", MONO_COUNTER_JIT | MONO_COUNTER_INT, &mono_jit_stats.stores_eliminated);
mono_counters_register ("Optimized immediate divisions", MONO_COUNTER_JIT | MONO_COUNTER_INT, &mono_jit_stats.optimized_divisions);
current_backend = g_new0 (MonoBackend, 1);
init_backend (current_backend);
#endif
}
#ifndef ENABLE_LLVM
void
mono_llvm_emit_aot_file_info (MonoAotFileInfo *info, gboolean has_jitted_code)
{
g_assert_not_reached ();
}
gpointer
mono_llvm_emit_aot_data (const char *symbol, guint8 *data, int data_len)
{
g_assert_not_reached ();
}
gpointer
mono_llvm_emit_aot_data_aligned (const char *symbol, guint8 *data, int data_len, int align)
{
g_assert_not_reached ();
}
#endif
#if !defined(ENABLE_LLVM_RUNTIME) && !defined(ENABLE_LLVM)
void
mono_llvm_cpp_throw_exception (void)
{
g_assert_not_reached ();
}
void
mono_llvm_cpp_catch_exception (MonoLLVMInvokeCallback cb, gpointer arg, gboolean *out_thrown)
{
g_assert_not_reached ();
}
#endif
#ifdef DISABLE_JIT
MonoCompile*
mini_method_compile (MonoMethod *method, guint32 opts, JitFlags flags, int parts, int aot_method_index)
{
g_assert_not_reached ();
return NULL;
}
void
mono_destroy_compile (MonoCompile *cfg)
{
g_assert_not_reached ();
}
void
mono_add_patch_info (MonoCompile *cfg, int ip, MonoJumpInfoType type, gconstpointer target)
{
g_assert_not_reached ();
}
#else // DISABLE_JIT
guint8*
mini_realloc_code_slow (MonoCompile *cfg, int size)
{
const int EXTRA_CODE_SPACE = 16;
if (cfg->code_len + size > (cfg->code_size - EXTRA_CODE_SPACE)) {
while (cfg->code_len + size > (cfg->code_size - EXTRA_CODE_SPACE))
cfg->code_size = cfg->code_size * 2 + EXTRA_CODE_SPACE;
cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
cfg->stat_code_reallocs++;
}
return cfg->native_code + cfg->code_len;
}
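/*
 * Worked example (illustrative only, not part of the original file): with
 * code_size == 64, code_len == 60 and a request for 16 bytes, the condition
 * 60 + 16 > 64 - 16 holds, so the loop grows code_size to 64 * 2 + 16 = 144;
 * 76 > 144 - 16 is then false, the buffer is reallocated to 144 bytes and
 * the current write cursor is returned.
 */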
#endif /* DISABLE_JIT */
gboolean
mini_class_is_system_array (MonoClass *klass)
{
return m_class_get_parent (klass) == mono_defaults.array_class;
}
/*
* mono_target_pagesize:
*
* Query the page size used to determine whether an implicit null-reference check can be used.
*/
int
mono_target_pagesize (void)
{
/* We could query the system's pagesize via mono_pagesize (), however there
* are pitfalls: sysconf (3) is called on some POSIX-like systems, and per
* POSIX.1-2008 this function doesn't have to be async-safe. Since this
* function can be called from a signal handler, we simplify things by
* using 4k on all targets. Implicit null-checks with an offset larger than
* 4k are _very_ uncommon, so we don't mind emitting an explicit null-check
* for those cases.
*/
return 4 * 1024;
}
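/*
 * Illustrative sketch (not part of the original file): a backend would use
 * this value to decide whether a load at a given offset can rely on the
 * hardware fault for its null check:
 *
 *   gboolean can_use_implicit_nullcheck = offset < mono_target_pagesize ();
 */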
MonoCPUFeatures
mini_get_cpu_features (MonoCompile* cfg)
{
MonoCPUFeatures features = (MonoCPUFeatures)0;
#if !defined(MONO_CROSS_COMPILE)
if (!cfg->compile_aot || cfg->use_current_cpu) {
// detect current CPU features if we are in JIT mode or AOT with use_current_cpu flag.
#if defined(ENABLE_LLVM)
features = mono_llvm_get_cpu_features (); // llvm has a nice built-in API to detect features
#elif defined(TARGET_AMD64) || defined(TARGET_X86)
features = mono_arch_get_cpu_features ();
#endif
}
#endif
#if defined(TARGET_ARM64)
// All Arm64 devices have this set
features |= MONO_CPU_ARM64_BASE;
// This is a standard part of ARMv8-A; see A1.5 in "ARM
// Architecture Reference Manual ARMv8, for ARMv8-A
// architecture profile"
features |= MONO_CPU_ARM64_NEON;
#endif
// apply parameters passed via -mattr
return (features | mono_cpu_features_enabled) & ~mono_cpu_features_disabled;
}
| 1 |
dotnet/runtime | 66,006 | Revert "[mono][jit] Remove support for -O=-float32, i.e. treating r4 values a…" | Reverts this as x86 still uses it.
| vargaz | 2022-03-01T15:15:53Z | 2022-03-01T20:09:47Z | af71404d9a3b38e63408af47cc847ac1a1fcf4e1 | 2dd232a53c38ac874b15fe504df275b660988294 | Revert "[mono][jit] Remove support for -O=-float32, i.e. treating r4 values a…". Reverts this as x86 still uses it.
| ./src/mono/mono/mini/mini.h | /**
* \file
* Copyright 2002-2003 Ximian Inc
* Copyright 2003-2011 Novell Inc
* Copyright 2011 Xamarin Inc
* Licensed under the MIT license. See LICENSE file in the project root for full license information.
*/
#ifndef __MONO_MINI_H__
#define __MONO_MINI_H__
#include "config.h"
#include <glib.h>
#include <signal.h>
#ifdef HAVE_SYS_TYPES_H
#include <sys/types.h>
#endif
#include <mono/utils/mono-forward-internal.h>
#include <mono/metadata/loader.h>
#include <mono/metadata/mempool.h>
#include <mono/utils/monobitset.h>
#include <mono/metadata/class.h>
#include <mono/metadata/object.h>
#include <mono/metadata/opcodes.h>
#include <mono/metadata/tabledefs.h>
#include <mono/metadata/domain-internals.h>
#include "mono/metadata/class-internals.h"
#include "mono/metadata/class-init.h"
#include "mono/metadata/object-internals.h"
#include <mono/metadata/profiler-private.h>
#include <mono/metadata/debug-helpers.h>
#include <mono/metadata/abi-details.h>
#include <mono/metadata/jit-info.h>
#include <mono/utils/mono-compiler.h>
#include <mono/utils/mono-machine.h>
#include <mono/utils/mono-stack-unwinding.h>
#include <mono/utils/mono-threads.h>
#include <mono/utils/mono-threads-coop.h>
#include <mono/utils/mono-tls.h>
#include <mono/utils/atomic.h>
#include <mono/utils/mono-jemalloc.h>
#include <mono/utils/mono-conc-hashtable.h>
#include <mono/utils/mono-signal-handler.h>
#include <mono/utils/ftnptr.h>
#include <mono/metadata/icalls.h>
// Forward declare so that mini-*.h can have pointers to them.
// CallInfo is presently architecture specific.
typedef struct MonoInst MonoInst;
typedef struct CallInfo CallInfo;
typedef struct SeqPointInfo SeqPointInfo;
#include "mini-arch.h"
#include "regalloc.h"
#include "mini-unwind.h"
#include <mono/jit/jit.h>
#include "cfgdump.h"
#include "tiered.h"
#include "mono/metadata/tabledefs.h"
#include "mono/metadata/marshal.h"
#include "mono/metadata/exception.h"
#include "mono/metadata/callspec.h"
#include "mono/metadata/icall-signatures.h"
/*
* The mini code should not have any compile time dependencies on the GC being used, so the same object file from mini/
* can be linked into both mono and mono-sgen.
*/
#if !defined(MONO_DLL_EXPORT) || !defined(_MSC_VER)
#if defined(HAVE_BOEHM_GC) || defined(HAVE_SGEN_GC)
#error "The code in mini/ should not depend on these defines."
#endif
#endif
#ifndef __GNUC__
/*#define __alignof__(a) sizeof(a)*/
#define __alignof__(type) G_STRUCT_OFFSET(struct { char c; type x; }, x)
#endif
#if DISABLE_LOGGING
#define MINI_DEBUG(level,limit,code)
#else
#define MINI_DEBUG(level,limit,code) do {if (G_UNLIKELY ((level) >= (limit))) code} while (0)
#endif
#if !defined(DISABLE_TASKLETS) && defined(MONO_ARCH_SUPPORT_TASKLETS)
#if defined(__GNUC__)
#define MONO_SUPPORT_TASKLETS 1
#elif defined(HOST_WIN32)
#define MONO_SUPPORT_TASKLETS 1
// Replace some gnu intrinsics needed for tasklets with MSVC equivalents.
#define __builtin_extract_return_addr(x) x
#define __builtin_return_address(x) _ReturnAddress()
#define __builtin_frame_address(x) _AddressOfReturnAddress()
#endif
#endif
#if ENABLE_LLVM
#define COMPILE_LLVM(cfg) ((cfg)->compile_llvm)
#define LLVM_ENABLED TRUE
#else
#define COMPILE_LLVM(cfg) (0)
#define LLVM_ENABLED FALSE
#endif
#ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
#define COMPILE_SOFT_FLOAT(cfg) (!COMPILE_LLVM ((cfg)) && mono_arch_is_soft_float ())
#else
#define COMPILE_SOFT_FLOAT(cfg) (0)
#endif
#define NOT_IMPLEMENTED do { g_assert_not_reached (); } while (0)
/* for 32 bit systems */
#if G_BYTE_ORDER == G_LITTLE_ENDIAN
#define MINI_LS_WORD_IDX 0
#define MINI_MS_WORD_IDX 1
#else
#define MINI_LS_WORD_IDX 1
#define MINI_MS_WORD_IDX 0
#endif
#define MINI_LS_WORD_OFFSET (MINI_LS_WORD_IDX * 4)
#define MINI_MS_WORD_OFFSET (MINI_MS_WORD_IDX * 4)
#define MONO_LVREG_LS(lvreg) ((lvreg) + 1)
#define MONO_LVREG_MS(lvreg) ((lvreg) + 2)
#ifndef DISABLE_AOT
#define MONO_USE_AOT_COMPILER
#endif
//TODO: This is x86/amd64 specific.
#define mono_simd_shuffle_mask(a,b,c,d) ((a) | ((b) << 2) | ((c) << 4) | ((d) << 6))
/* Remap printf to g_print (we use a mix of these in the mini code) */
#ifdef HOST_ANDROID
#define printf g_print
#endif
#define MONO_TYPE_IS_PRIMITIVE(t) ((!m_type_is_byref ((t)) && ((((t)->type >= MONO_TYPE_BOOLEAN && (t)->type <= MONO_TYPE_R8) || ((t)->type >= MONO_TYPE_I && (t)->type <= MONO_TYPE_U)))))
#define MONO_TYPE_IS_VECTOR_PRIMITIVE(t) ((!m_type_is_byref ((t)) && ((((t)->type >= MONO_TYPE_I1 && (t)->type <= MONO_TYPE_R8) || ((t)->type >= MONO_TYPE_I && (t)->type <= MONO_TYPE_U)))))
//XXX this ignores whether t is byref
#define MONO_TYPE_IS_PRIMITIVE_SCALAR(t) ((((((t)->type >= MONO_TYPE_BOOLEAN && (t)->type <= MONO_TYPE_U8) || ((t)->type >= MONO_TYPE_I && (t)->type <= MONO_TYPE_U)))))
typedef struct
{
MonoClass *klass;
MonoMethod *method;
} MonoClassMethodPair;
typedef struct
{
MonoClass *klass;
MonoMethod *method;
gboolean is_virtual;
} MonoDelegateClassMethodPair;
typedef struct {
MonoJitInfo *ji;
MonoCodeManager *code_mp;
} MonoJitDynamicMethodInfo;
/* An extension of MonoGenericParamFull used in generic sharing */
typedef struct {
MonoGenericParamFull param;
MonoGenericParam *parent;
} MonoGSharedGenericParam;
/* Contains a list of ips which need to be patched when a method is compiled */
typedef struct {
GSList *list;
} MonoJumpList;
/* Arch-specific */
typedef struct {
int dummy;
} MonoDynCallInfo;
typedef struct {
guint32 index;
MonoExceptionClause *clause;
} MonoLeaveClause;
/*
* Information about a stack frame.
* FIXME This typedef exists only to avoid tons of code rewriting
*/
typedef MonoStackFrameInfo StackFrameInfo;
#if 0
#define mono_bitset_foreach_bit(set,b,n) \
for (b = 0; b < n; b++)\
if (mono_bitset_test_fast(set,b))
#else
#define mono_bitset_foreach_bit(set,b,n) \
for (b = mono_bitset_find_start (set); b < n && b >= 0; b = mono_bitset_find_first (set, b))
#endif
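/*
 * Illustrative usage sketch (not from the original file): since the macro
 * expands to a for-loop header, it is followed by a statement or block.
 * Here 'bb' and 'cfg' are assumed to be in scope:
 *
 *   int i;
 *   mono_bitset_foreach_bit (bb->live_in_set, i, cfg->num_varinfo) {
 *       printf ("var %d is live on entry\n", i);
 *   }
 */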
/*
* Pull the list of opcodes
*/
#define OPDEF(a,b,c,d,e,f,g,h,i,j) \
a = i,
enum {
#include "mono/cil/opcode.def"
CEE_LASTOP
};
#undef OPDEF
#define MONO_VARINFO(cfg,varnum) (&(cfg)->vars [varnum])
#define MONO_INST_NULLIFY_SREGS(dest) do { \
(dest)->sreg1 = (dest)->sreg2 = (dest)->sreg3 = -1; \
} while (0)
#define MONO_INST_NEW(cfg,dest,op) do { \
(dest) = (MonoInst *)mono_mempool_alloc0 ((cfg)->mempool, sizeof (MonoInst)); \
(dest)->opcode = (op); \
(dest)->dreg = -1; \
MONO_INST_NULLIFY_SREGS ((dest)); \
(dest)->cil_code = (cfg)->ip; \
} while (0)
#define MONO_INST_NEW_CALL(cfg,dest,op) do { \
(dest) = (MonoCallInst *)mono_mempool_alloc0 ((cfg)->mempool, sizeof (MonoCallInst)); \
(dest)->inst.opcode = (op); \
(dest)->inst.dreg = -1; \
MONO_INST_NULLIFY_SREGS (&(dest)->inst); \
(dest)->inst.cil_code = (cfg)->ip; \
} while (0)
#define MONO_ADD_INS(b,inst) do { \
if ((b)->last_ins) { \
(b)->last_ins->next = (inst); \
(inst)->prev = (b)->last_ins; \
(b)->last_ins = (inst); \
} else { \
(b)->code = (b)->last_ins = (inst); \
} \
} while (0)
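/*
 * Typical allocate-then-append pattern (illustrative sketch only; 'cfg' and
 * alloc_ireg () are assumed to exist in the surrounding JIT code):
 *
 *   MonoInst *ins;
 *   MONO_INST_NEW (cfg, ins, OP_ICONST);
 *   ins->dreg = alloc_ireg (cfg);
 *   ins->inst_c0 = 42;
 *   MONO_ADD_INS (cfg->cbb, ins);
 */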
#define NULLIFY_INS(ins) do { \
(ins)->opcode = OP_NOP; \
(ins)->dreg = -1; \
MONO_INST_NULLIFY_SREGS ((ins)); \
} while (0)
/* Remove INS from BB */
#define MONO_REMOVE_INS(bb,ins) do { \
if ((ins)->prev) \
(ins)->prev->next = (ins)->next; \
if ((ins)->next) \
(ins)->next->prev = (ins)->prev; \
if ((bb)->code == (ins)) \
(bb)->code = (ins)->next; \
if ((bb)->last_ins == (ins)) \
(bb)->last_ins = (ins)->prev; \
} while (0)
/* Remove INS from BB and nullify it */
#define MONO_DELETE_INS(bb,ins) do { \
MONO_REMOVE_INS ((bb), (ins)); \
NULLIFY_INS ((ins)); \
} while (0)
/*
* this is used to determine when some branch optimizations are possible: we exclude FP compares
* because they have weird semantics with NaNs.
*/
#define MONO_IS_COND_BRANCH_OP(ins) (((ins)->opcode >= OP_LBEQ && (ins)->opcode <= OP_LBLT_UN) || ((ins)->opcode >= OP_FBEQ && (ins)->opcode <= OP_FBLT_UN) || ((ins)->opcode >= OP_IBEQ && (ins)->opcode <= OP_IBLT_UN))
#define MONO_IS_COND_BRANCH_NOFP(ins) (MONO_IS_COND_BRANCH_OP(ins) && !(((ins)->opcode >= OP_FBEQ) && ((ins)->opcode <= OP_FBLT_UN)))
#define MONO_IS_BRANCH_OP(ins) (MONO_IS_COND_BRANCH_OP(ins) || ((ins)->opcode == OP_BR) || ((ins)->opcode == OP_BR_REG) || ((ins)->opcode == OP_SWITCH))
#define MONO_IS_COND_EXC(ins) ((((ins)->opcode >= OP_COND_EXC_EQ) && ((ins)->opcode <= OP_COND_EXC_LT_UN)) || (((ins)->opcode >= OP_COND_EXC_IEQ) && ((ins)->opcode <= OP_COND_EXC_ILT_UN)))
#define MONO_IS_SETCC(ins) ((((ins)->opcode >= OP_CEQ) && ((ins)->opcode <= OP_CLT_UN)) || (((ins)->opcode >= OP_ICEQ) && ((ins)->opcode <= OP_ICLE_UN)) || (((ins)->opcode >= OP_LCEQ) && ((ins)->opcode <= OP_LCLT_UN)) || (((ins)->opcode >= OP_FCEQ) && ((ins)->opcode <= OP_FCLT_UN)))
#define MONO_HAS_CUSTOM_EMULATION(ins) (((ins)->opcode >= OP_FBEQ && (ins)->opcode <= OP_FBLT_UN) || ((ins)->opcode >= OP_FCEQ && (ins)->opcode <= OP_FCLT_UN))
#define MONO_IS_LOAD_MEMBASE(ins) (((ins)->opcode >= OP_LOAD_MEMBASE && (ins)->opcode <= OP_LOADV_MEMBASE) || ((ins)->opcode >= OP_ATOMIC_LOAD_I1 && (ins)->opcode <= OP_ATOMIC_LOAD_R8))
#define MONO_IS_STORE_MEMBASE(ins) (((ins)->opcode >= OP_STORE_MEMBASE_REG && (ins)->opcode <= OP_STOREV_MEMBASE) || ((ins)->opcode >= OP_ATOMIC_STORE_I1 && (ins)->opcode <= OP_ATOMIC_STORE_R8))
#define MONO_IS_STORE_MEMINDEX(ins) (((ins)->opcode >= OP_STORE_MEMINDEX) && ((ins)->opcode <= OP_STORER8_MEMINDEX))
// This is internal because it is easily confused with any enum or integer.
#define MONO_IS_TAILCALL_OPCODE_INTERNAL(opcode) ((opcode) == OP_TAILCALL || (opcode) == OP_TAILCALL_MEMBASE || (opcode) == OP_TAILCALL_REG)
#define MONO_IS_TAILCALL_OPCODE(call) (MONO_IS_TAILCALL_OPCODE_INTERNAL (call->inst.opcode))
// OP_DYN_CALL is not a MonoCallInst
#define MONO_IS_CALL(ins) (((ins)->opcode >= OP_VOIDCALL && (ins)->opcode <= OP_VCALL2_MEMBASE) || \
MONO_IS_TAILCALL_OPCODE_INTERNAL ((ins)->opcode))
#define MONO_IS_JUMP_TABLE(ins) (((ins)->opcode == OP_JUMP_TABLE) ? TRUE : ((((ins)->opcode == OP_AOTCONST) && (ins->inst_i1 == (gpointer)MONO_PATCH_INFO_SWITCH)) ? TRUE : ((ins)->opcode == OP_SWITCH) ? TRUE : ((((ins)->opcode == OP_GOT_ENTRY) && ((ins)->inst_right->inst_i1 == (gpointer)MONO_PATCH_INFO_SWITCH)) ? TRUE : FALSE)))
#define MONO_JUMP_TABLE_FROM_INS(ins) (((ins)->opcode == OP_JUMP_TABLE) ? (ins)->inst_p0 : (((ins)->opcode == OP_AOTCONST) && (ins->inst_i1 == (gpointer)MONO_PATCH_INFO_SWITCH) ? (ins)->inst_p0 : (((ins)->opcode == OP_SWITCH) ? (ins)->inst_p0 : ((((ins)->opcode == OP_GOT_ENTRY) && ((ins)->inst_right->inst_i1 == (gpointer)MONO_PATCH_INFO_SWITCH)) ? (ins)->inst_right->inst_p0 : NULL))))
#define MONO_INS_HAS_NO_SIDE_EFFECT(ins) (mono_ins_no_side_effects ((ins)))
#define MONO_INS_IS_PCONST_NULL(ins) ((ins)->opcode == OP_PCONST && (ins)->inst_p0 == 0)
#define MONO_METHOD_IS_FINAL(m) (((m)->flags & METHOD_ATTRIBUTE_FINAL) || ((m)->klass && (mono_class_get_flags ((m)->klass) & TYPE_ATTRIBUTE_SEALED)))
/* Determine whether 'ins' represents a load of the 'this' argument */
#define MONO_CHECK_THIS(ins) (mono_method_signature_internal (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
#ifdef MONO_ARCH_SIMD_INTRINSICS
#define MONO_IS_PHI(ins) (((ins)->opcode == OP_PHI) || ((ins)->opcode == OP_FPHI) || ((ins)->opcode == OP_VPHI) || ((ins)->opcode == OP_XPHI))
#define MONO_IS_MOVE(ins) (((ins)->opcode == OP_MOVE) || ((ins)->opcode == OP_FMOVE) || ((ins)->opcode == OP_VMOVE) || ((ins)->opcode == OP_XMOVE) || ((ins)->opcode == OP_RMOVE))
#define MONO_IS_NON_FP_MOVE(ins) (((ins)->opcode == OP_MOVE) || ((ins)->opcode == OP_VMOVE) || ((ins)->opcode == OP_XMOVE))
#define MONO_IS_REAL_MOVE(ins) (((ins)->opcode == OP_MOVE) || ((ins)->opcode == OP_FMOVE) || ((ins)->opcode == OP_XMOVE) || ((ins)->opcode == OP_RMOVE))
#define MONO_IS_ZERO(ins) (((ins)->opcode == OP_VZERO) || ((ins)->opcode == OP_XZERO))
#ifdef TARGET_ARM64
/*
* SIMD is only supported on arm64 when using the LLVM backend. When not using
* the LLVM backend, treat SIMD datatypes as regular value types.
*/
#define MONO_CLASS_IS_SIMD(cfg, klass) (((cfg)->opt & MONO_OPT_SIMD) && COMPILE_LLVM (cfg) && m_class_is_simd_type (klass))
#else
#define MONO_CLASS_IS_SIMD(cfg, klass) (((cfg)->opt & MONO_OPT_SIMD) && m_class_is_simd_type (klass) && (COMPILE_LLVM (cfg) || mono_type_size (m_class_get_byval_arg (klass), NULL) == 16))
#endif
#else
#define MONO_IS_PHI(ins) (((ins)->opcode == OP_PHI) || ((ins)->opcode == OP_FPHI) || ((ins)->opcode == OP_VPHI))
#define MONO_IS_MOVE(ins) (((ins)->opcode == OP_MOVE) || ((ins)->opcode == OP_FMOVE) || ((ins)->opcode == OP_VMOVE) || ((ins)->opcode == OP_RMOVE))
#define MONO_IS_NON_FP_MOVE(ins) (((ins)->opcode == OP_MOVE) || ((ins)->opcode == OP_VMOVE))
/* A real MOVE is one that isn't decomposed, such as a VMOVE or LMOVE */
#define MONO_IS_REAL_MOVE(ins) (((ins)->opcode == OP_MOVE) || ((ins)->opcode == OP_FMOVE) || ((ins)->opcode == OP_RMOVE))
#define MONO_IS_ZERO(ins) ((ins)->opcode == OP_VZERO)
#define MONO_CLASS_IS_SIMD(cfg, klass) (0)
#endif
#if defined(TARGET_X86) || defined(TARGET_AMD64)
#define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
(dest)->dreg = alloc_ireg_mp ((cfg)); \
(dest)->sreg1 = (sr1); \
(dest)->sreg2 = (sr2); \
(dest)->inst_imm = (imm); \
(dest)->backend.shift_amount = (shift); \
MONO_ADD_INS ((cfg)->cbb, (dest)); \
} while (0)
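/*
 * Illustrative note (an assumption based on the operand names, not stated in
 * the original file): the emitted OP_X86_LEA computes
 * dreg = sreg1 + (sreg2 << shift) + imm, i.e. the x86
 * base + index*scale + displacement addressing form.
 */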
#endif
typedef struct MonoInstList MonoInstList;
typedef struct MonoCallInst MonoCallInst;
typedef struct MonoCallArgParm MonoCallArgParm;
typedef struct MonoMethodVar MonoMethodVar;
typedef struct MonoBasicBlock MonoBasicBlock;
typedef struct MonoSpillInfo MonoSpillInfo;
extern MonoCallSpec *mono_jit_trace_calls;
extern MonoMethodDesc *mono_inject_async_exc_method;
extern int mono_inject_async_exc_pos;
extern MonoMethodDesc *mono_break_at_bb_method;
extern int mono_break_at_bb_bb_num;
extern gboolean mono_do_x86_stack_align;
extern int mini_verbose;
extern int valgrind_register;
#define INS_INFO(opcode) (&mini_ins_info [((opcode) - OP_START - 1) * 4])
/* instruction description for use in regalloc/scheduling */
enum {
MONO_INST_DEST = 0,
MONO_INST_SRC1 = 1, /* we depend on the SRCs to be consecutive */
MONO_INST_SRC2 = 2,
MONO_INST_SRC3 = 3,
MONO_INST_LEN = 4,
MONO_INST_CLOB = 5,
/* Unused, commented out to reduce the size of the mdesc tables
MONO_INST_FLAGS,
MONO_INST_COST,
MONO_INST_DELAY,
MONO_INST_RES,
*/
MONO_INST_MAX = 6
};
typedef union MonoInstSpec { // instruction specification
struct {
char dest;
char src1;
char src2;
char src3;
unsigned char len;
char clob;
// char flags;
// char cost;
// char delay;
// char res;
};
struct {
char xdest;
char src [3];
unsigned char xlen;
char xclob;
};
char bytes[MONO_INST_MAX];
} MonoInstSpec;
extern const char mini_ins_info[];
extern const gint8 mini_ins_sreg_counts [];
#ifndef DISABLE_JIT
#define mono_inst_get_num_src_registers(ins) (mini_ins_sreg_counts [(ins)->opcode - OP_START - 1])
#else
#define mono_inst_get_num_src_registers(ins) 0
#endif
#define mono_inst_get_src_registers(ins, regs) (((regs) [0] = (ins)->sreg1), ((regs) [1] = (ins)->sreg2), ((regs) [2] = (ins)->sreg3), mono_inst_get_num_src_registers ((ins)))
#define MONO_BB_FOR_EACH_INS(bb, ins) for ((ins) = (bb)->code; (ins); (ins) = (ins)->next)
#define MONO_BB_FOR_EACH_INS_SAFE(bb, n, ins) for ((ins) = (bb)->code, n = (ins) ? (ins)->next : NULL; (ins); (ins) = (n), (n) = (ins) ? (ins)->next : NULL)
#define MONO_BB_FOR_EACH_INS_REVERSE(bb, ins) for ((ins) = (bb)->last_ins; (ins); (ins) = (ins)->prev)
#define MONO_BB_FOR_EACH_INS_REVERSE_SAFE(bb, p, ins) for ((ins) = (bb)->last_ins, p = (ins) ? (ins)->prev : NULL; (ins); (ins) = (p), (p) = (ins) ? (ins)->prev : NULL)
#define mono_bb_first_ins(bb) (bb)->code
/*
* Iterate through all used registers in the instruction.
* Relies on the existing order of the MONO_INST enum: MONO_INST_{DEST,SRC1,SRC2,SRC3,LEN}
* INS is the instruction, IDX is the register index, REG is the pointer to a register.
*/
#define MONO_INS_FOR_EACH_REG(ins, idx, reg) for ((idx) = INS_INFO ((ins)->opcode)[MONO_INST_DEST] != ' ' ? MONO_INST_DEST : \
(mono_inst_get_num_src_registers (ins) ? MONO_INST_SRC1 : MONO_INST_LEN); \
(reg) = (idx) == MONO_INST_DEST ? &(ins)->dreg : \
((idx) == MONO_INST_SRC1 ? &(ins)->sreg1 : \
((idx) == MONO_INST_SRC2 ? &(ins)->sreg2 : \
((idx) == MONO_INST_SRC3 ? &(ins)->sreg3 : NULL))), \
idx < MONO_INST_LEN; \
(idx) = (idx) > mono_inst_get_num_src_registers (ins) + (INS_INFO ((ins)->opcode)[MONO_INST_DEST] != ' ') ? MONO_INST_LEN : (idx) + 1)
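/*
 * Illustrative usage sketch (not part of the original file): visiting every
 * register slot an instruction uses, destination first, then sources:
 *
 *   int idx;
 *   gint32 *reg;
 *   MONO_INS_FOR_EACH_REG (ins, idx, reg) {
 *       printf ("%s reg: %d\n", idx == MONO_INST_DEST ? "dest" : "src", *reg);
 *   }
 */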
struct MonoSpillInfo {
int offset;
};
/*
* Information about a call site for the GC map creation code
*/
typedef struct {
/* The next offset after the call instruction */
int pc_offset;
/* The basic block containing the call site */
MonoBasicBlock *bb;
/*
* The set of variables live at the call site.
* Has length cfg->num_varinfo in bits.
*/
guint8 *liveness;
/*
* List of OP_GC_PARAM_SLOT_LIVENESS_DEF instructions defining the param slots
* used by this call.
*/
GSList *param_slots;
} GCCallSite;
/*
* The IR-level extended basic block.
*
* A basic block can have multiple exits just fine, as long as the point of
* 'departure' is the last instruction in the basic block. Extended basic
* blocks, on the other hand, may have instructions that leave the block
* midstream. The important thing is that they cannot be _entered_
* midstream, i.e., execution of a basic block (or extended bb) always starts
* at the beginning of the block, never in the middle.
*/
struct MonoBasicBlock {
MonoInst *last_ins;
/* the next basic block in the order it appears in IL */
MonoBasicBlock *next_bb;
/*
* Before instruction selection it is the first tree in the
* forest and the first item in the list of trees. After
* instruction selection it is the first instruction and the
* first item in the list of instructions.
*/
MonoInst *code;
/* unique block number identification */
gint32 block_num;
gint32 dfn;
/* Basic blocks: incoming and outgoing counts and pointers */
/* Each bb should only appear once in each array */
gint16 out_count, in_count;
MonoBasicBlock **in_bb;
MonoBasicBlock **out_bb;
/* Points to the start of the CIL code that initiated this BB */
unsigned char* cil_code;
/* Length of the CIL block */
gint32 cil_length;
/* The offset of the generated code, used for fixups */
int native_offset;
/* The length of the generated code, doesn't include alignment padding */
int native_length;
/* The real native offset, which includes alignment padding too */
int real_native_offset;
int max_offset;
int max_length;
/* Visited and reachable flags */
guint32 flags;
/*
* SSA and loop based flags
*/
MonoBitSet *dominators;
MonoBitSet *dfrontier;
MonoBasicBlock *idom;
GSList *dominated;
/* fast dominator algorithm */
MonoBasicBlock *df_parent, *ancestor, *child, *label;
int size, sdom, idomn;
/* loop nesting and recognition */
GList *loop_blocks;
gint8 nesting;
gint8 loop_body_start;
/*
* Whether the bblock is rarely executed, so it should be emitted after
* the function epilog.
*/
guint out_of_line : 1;
/* Caches the result of uselessness calculation during optimize_branches */
guint not_useless : 1;
/* Whether the decompose_array_access_opts () pass needs to process this bblock */
guint needs_decompose : 1;
/* Whether this bblock is extended, i.e. it has branches inside it */
guint extended : 1;
/* Whether this bblock contains an OP_JUMP_TABLE instruction */
guint has_jump_table : 1;
/* Whether this bblock contains an OP_CALL_HANDLER instruction */
guint has_call_handler : 1;
/* Whether this bblock starts a try block */
guint try_start : 1;
#ifdef ENABLE_LLVM
/* The offset of the CIL instruction in this bblock which ends a try block */
intptr_t try_end;
#endif
/*
* If this is set, extend the try range started by this bblock by an arch-specific
* number of bytes to encompass the end of the previous bblock (e.g. a Monitor.Enter
* call).
*/
guint extend_try_block : 1;
/* use for liveness analysis */
MonoBitSet *gen_set;
MonoBitSet *kill_set;
MonoBitSet *live_in_set;
MonoBitSet *live_out_set;
/* fields to deal with non-empty stack slots at bb boundary */
guint16 out_scount, in_scount;
MonoInst **out_stack;
MonoInst **in_stack;
/* We use this to prevent merging of bblocks covered by different clauses */
guint real_offset;
GSList *seq_points;
// The MonoInst of the last sequence point for the current basic block.
MonoInst *last_seq_point;
// This will hold a list of last sequence points of incoming basic blocks
MonoInst **pred_seq_points;
guint num_pred_seq_points;
GSList *spill_slot_defs;
/* List of call sites in this bblock sorted by pc_offset */
GSList *gc_callsites;
/*
* If this is not null, the basic block is a try hole for all the clauses
* in the list previous to this element (including the element).
*/
GList *clause_holes;
/*
* The region encodes whether the basic block is inside
* a finally, catch, filter or none of these.
*
* If the value is -1, then it is neither finally, catch nor filter
*
* Otherwise the format is:
*
* Bits: | 0-3 | 4-7 | 8-31
* | | |
* | clause-flags | MONO_REGION | clause-index
*
*/
guint region;
/* The current symbolic register number, used in local register allocation. */
guint32 max_vreg;
};
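/*
 * Illustrative sketch of decoding the 'region' field above (the actual
 * accessor macros live elsewhere in the JIT; the field widths follow the
 * comment inside the struct):
 *
 *   guint clause_flags = region & 0xf;         // bits 0-3
 *   guint region_kind = (region >> 4) & 0xf;   // bits 4-7, MONO_REGION_*
 *   guint clause_index = region >> 8;          // bits 8-31
 */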
/* BBlock flags */
enum {
BB_VISITED = 1 << 0,
BB_REACHABLE = 1 << 1,
BB_EXCEPTION_DEAD_OBJ = 1 << 2,
BB_EXCEPTION_UNSAFE = 1 << 3,
BB_EXCEPTION_HANDLER = 1 << 4,
/* for Native Client, mark the blocks that can be jumped to indirectly */
BB_INDIRECT_JUMP_TARGET = 1 << 5,
/* Contains code with some side effects */
BB_HAS_SIDE_EFFECTS = 1 << 6,
};
typedef struct MonoMemcpyArgs {
int size, align;
} MonoMemcpyArgs;
typedef enum {
LLVMArgNone,
/* Scalar argument passed by value */
LLVMArgNormal,
/* Only in ainfo->pair_storage */
LLVMArgInIReg,
/* Only in ainfo->pair_storage */
LLVMArgInFPReg,
/* Valuetype passed in 1-2 consecutive registers */
LLVMArgVtypeInReg,
LLVMArgVtypeByVal,
LLVMArgVtypeRetAddr, /* Only on cinfo->ret */
LLVMArgGSharedVt,
/* Fixed size argument passed to/returned from gsharedvt method by ref */
LLVMArgGsharedvtFixed,
/* Fixed size vtype argument passed to/returned from gsharedvt method by ref */
LLVMArgGsharedvtFixedVtype,
/* Variable sized argument passed to/returned from gsharedvt method by ref */
LLVMArgGsharedvtVariable,
/* Vtype passed/returned as one int array argument */
LLVMArgAsIArgs,
/* Vtype passed as a set of fp arguments */
LLVMArgAsFpArgs,
/*
* Only for returns, a structure which
* consists of floats/doubles.
*/
LLVMArgFpStruct,
LLVMArgVtypeByRef,
/* Vtype returned as an int */
LLVMArgVtypeAsScalar,
/* Address to local vtype passed as argument (using register or stack). */
LLVMArgVtypeAddr,
/*
* On WASM, a one-element vtype is passed/returned as a scalar with the same
* type as the element.
* esize is the size of the value.
*/
LLVMArgWasmVtypeAsScalar
} LLVMArgStorage;
typedef struct {
LLVMArgStorage storage;
/*
* Only if storage == LLVMArgVtypeInReg/LLVMArgAsFpArgs.
* This contains how the parts of the vtype are passed.
*/
LLVMArgStorage pair_storage [8];
/*
* Only if storage == LLVMArgAsIArgs/LLVMArgAsFpArgs/LLVMArgFpStruct.
* If storage == LLVMArgAsFpArgs, this is the number of arguments
* used to pass the value.
* If storage == LLVMArgFpStruct, this is the number of fields
* in the structure.
*/
int nslots;
/* Only if storage == LLVMArgAsIArgs/LLVMArgAsFpArgs/LLVMArgFpStruct (4/8) */
int esize;
/* Parameter index in the LLVM signature */
int pindex;
MonoType *type;
/* Only if storage == LLVMArgAsFpArgs. Dummy fp args to insert before this arg */
int ndummy_fpargs;
} LLVMArgInfo;
typedef struct {
LLVMArgInfo ret;
/* Whether there is an rgctx argument */
gboolean rgctx_arg;
/* Whether there is an IMT argument */
gboolean imt_arg;
/* Whether there is a dummy extra argument */
gboolean dummy_arg;
/*
* The position of the vret arg in the argument list.
* Only if ret->storage == LLVMArgVtypeRetAddr.
* Should be 0 or 1.
*/
int vret_arg_index;
/* The indexes of various special arguments in the LLVM signature */
int vret_arg_pindex, this_arg_pindex, rgctx_arg_pindex, imt_arg_pindex, dummy_arg_pindex;
/* Inline array of argument info */
/* args [0] is for the this argument if it exists */
LLVMArgInfo args [1];
} LLVMCallInfo;
#define MONO_MAX_SRC_REGS 3
struct MonoInst {
guint16 opcode;
guint8 type; /* stack type */
guint8 flags;
/* used by the register allocator */
gint32 dreg, sreg1, sreg2, sreg3;
MonoInst *next, *prev;
union {
union {
MonoInst *src;
MonoMethodVar *var;
target_mgreg_t const_val;
#if (SIZEOF_REGISTER > TARGET_SIZEOF_VOID_P) && (G_BYTE_ORDER == G_BIG_ENDIAN)
struct {
gpointer p[SIZEOF_REGISTER/TARGET_SIZEOF_VOID_P];
} pdata;
#else
gpointer p;
#endif
MonoMethod *method;
MonoMethodSignature *signature;
MonoBasicBlock **many_blocks;
MonoBasicBlock *target_block;
MonoInst **args;
MonoType *vtype;
MonoClass *klass;
int *phi_args;
MonoCallInst *call_inst;
GList *exception_clauses;
const char *exc_name;
} op [2];
gint64 i8const;
double r8const;
} data;
const unsigned char* cil_code; /* for debugging and bblock splitting */
/* used mostly by the backend to store additional info it may need */
union {
gint32 reg3;
gint32 arg_info;
gint32 size;
MonoMemcpyArgs *memcpy_args; /* in OP_MEMSET and OP_MEMCPY */
gpointer data;
gint shift_amount;
gboolean is_pinvoke; /* for variables in the unmanaged marshal format */
gboolean record_cast_details; /* For CEE_CASTCLASS */
MonoInst *spill_var; /* for OP_MOVE_I4_TO_F/F_TO_I4 and OP_FCONV_TO_R8_X */
guint16 source_opcode; /* OP_XCONV_R8_TO_I4 needs to know which op was used to do proper widening */
int pc_offset; /* OP_GC_LIVERANGE_START/END */
/*
* memory_barrier: MONO_MEMORY_BARRIER_{ACQ,REL,SEQ}
* atomic_load_*: MONO_MEMORY_BARRIER_{ACQ,SEQ}
* atomic_store_*: MONO_MEMORY_BARRIER_{REL,SEQ}
*/
int memory_barrier_kind;
} backend;
MonoClass *klass;
};
struct MonoCallInst {
MonoInst inst;
MonoMethodSignature *signature;
MonoMethod *method;
MonoInst **args;
MonoInst *out_args;
MonoInst *vret_var;
gconstpointer fptr;
MonoJitICallId jit_icall_id;
guint stack_usage;
guint stack_align_amount;
regmask_t used_iregs;
regmask_t used_fregs;
GSList *out_ireg_args;
GSList *out_freg_args;
GSList *outarg_vts;
CallInfo *call_info;
#ifdef ENABLE_LLVM
LLVMCallInfo *cinfo;
int rgctx_arg_reg, imt_arg_reg;
#endif
#ifdef TARGET_ARM
/* See the comment in mini-arm.c!mono_arch_emit_call for RegTypeFP. */
GSList *float_args;
#endif
// Bitfields are at the end to minimize padding for alignment,
// unless placing them earlier would increase locality.
guint is_virtual : 1;
// FIXME tailcall field is written after read; prefer MONO_IS_TAILCALL_OPCODE.
guint tailcall : 1;
/* If this is TRUE, 'fptr' points to a MonoJumpInfo instead of an address. */
guint fptr_is_patch : 1;
/*
* If this is true, then the call returns a vtype in a register using the same
* calling convention as OP_CALL.
*/
guint vret_in_reg : 1;
/* Whether vret_in_reg returns fp values */
guint vret_in_reg_fp : 1;
/* Whether there is an IMT argument and it is dynamic */
guint dynamic_imt_arg : 1;
/* Whether there is an RGCTX argument */
guint32 rgctx_reg : 1;
/* Whether the call will need an unbox trampoline */
guint need_unbox_trampoline : 1;
};
struct MonoCallArgParm {
MonoInst ins;
gint32 size;
gint32 offset;
gint32 offPrm;
};
/*
* flags for MonoInst
* Note: some of the values overlap, because they can't appear
* in the same MonoInst.
*/
enum {
MONO_INST_HAS_METHOD = 1,
MONO_INST_INIT = 1, /* in localloc */
MONO_INST_SINGLE_STEP_LOC = 1, /* in SEQ_POINT */
MONO_INST_IS_DEAD = 2,
MONO_INST_TAILCALL = 4,
MONO_INST_VOLATILE = 4,
MONO_INST_NOTYPECHECK = 4,
MONO_INST_NONEMPTY_STACK = 4, /* in SEQ_POINT */
MONO_INST_UNALIGNED = 8,
MONO_INST_NESTED_CALL = 8, /* in SEQ_POINT */
MONO_INST_CFOLD_TAKEN = 8, /* On branches */
MONO_INST_CFOLD_NOT_TAKEN = 16, /* On branches */
MONO_INST_DEFINITION_HAS_SIDE_EFFECTS = 8,
/* the address of the variable has been taken */
MONO_INST_INDIRECT = 16,
MONO_INST_NORANGECHECK = 16,
/* On loads, the source address can be null */
MONO_INST_FAULT = 32,
/*
* On variables, identifies LMF variables. These variables have a dummy type (int), but
* require stack space for a MonoLMF struct.
*/
MONO_INST_LMF = 32,
/* On loads, the source address points to a constant value */
MONO_INST_INVARIANT_LOAD = 64,
/* On stores, the destination is the stack */
MONO_INST_STACK_STORE = 64,
/* On variables, the variable needs GC tracking */
MONO_INST_GC_TRACK = 128,
/*
* Set on instructions during code emission which make calls, e.g. OP_CALL, OP_THROW.
* backend.pc_offset will be set to the pc offset at the end of the native call instructions.
*/
MONO_INST_GC_CALLSITE = 128,
/* On comparisons, mark the branch following the condition as likely to be taken */
MONO_INST_LIKELY = 128,
MONO_INST_NONULLCHECK = 128,
};
#define inst_c0 data.op[0].const_val
#define inst_c1 data.op[1].const_val
#define inst_i0 data.op[0].src
#define inst_i1 data.op[1].src
#if (SIZEOF_REGISTER > TARGET_SIZEOF_VOID_P) && (G_BYTE_ORDER == G_BIG_ENDIAN)
#define inst_p0 data.op[0].pdata.p[SIZEOF_REGISTER/TARGET_SIZEOF_VOID_P - 1]
#define inst_p1 data.op[1].pdata.p[SIZEOF_REGISTER/TARGET_SIZEOF_VOID_P - 1]
#else
#define inst_p0 data.op[0].p
#define inst_p1 data.op[1].p
#endif
#define inst_l data.i8const
#define inst_r data.r8const
#define inst_left data.op[0].src
#define inst_right data.op[1].src
#define inst_newa_len data.op[0].src
#define inst_newa_class data.op[1].klass
/* In _OVF opcodes */
#define inst_exc_name data.op[0].exc_name
#define inst_var data.op[0].var
#define inst_vtype data.op[1].vtype
/* in branch instructions */
#define inst_many_bb data.op[1].many_blocks
#define inst_target_bb data.op[0].target_block
#define inst_true_bb data.op[1].many_blocks[0]
#define inst_false_bb data.op[1].many_blocks[1]
#define inst_basereg sreg1
#define inst_indexreg sreg2
#define inst_destbasereg dreg
#define inst_offset data.op[0].const_val
#define inst_imm data.op[1].const_val
#define inst_call data.op[1].call_inst
#define inst_phi_args data.op[1].phi_args
#define inst_eh_blocks data.op[1].exception_clauses
/* Return the lower 32 bits of the 64 bit immediate in INS */
static inline guint32
ins_get_l_low (MonoInst *ins)
{
return (guint32)(ins->data.i8const & 0xffffffff);
}
/* Return the higher 32 bits of the 64 bit immediate in INS */
static inline guint32
ins_get_l_high (MonoInst *ins)
{
return (guint32)((ins->data.i8const >> 32) & 0xffffffff);
}
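/*
 * Worked example (illustrative only): for ins->inst_l == 0x11223344AABBCCDDLL,
 * ins_get_l_low () returns 0xAABBCCDD and ins_get_l_high () returns 0x11223344.
 */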
static inline void
mono_inst_set_src_registers (MonoInst *ins, int *regs)
{
ins->sreg1 = regs [0];
ins->sreg2 = regs [1];
ins->sreg3 = regs [2];
}
typedef union {
struct {
guint16 tid; /* tree number */
guint16 bid; /* block number */
} pos;
guint32 abs_pos;
} MonoPosition;
typedef struct {
MonoPosition first_use, last_use;
} MonoLiveRange;
typedef struct MonoLiveRange2 MonoLiveRange2;
struct MonoLiveRange2 {
int from, to;
MonoLiveRange2 *next;
};
typedef struct {
/* List of live ranges sorted by 'from' */
MonoLiveRange2 *range;
MonoLiveRange2 *last_range;
} MonoLiveInterval;
/*
* Additional information about a variable
*/
struct MonoMethodVar {
guint idx; /* inside cfg->varinfo, cfg->vars */
MonoLiveRange range; /* generated by liveness analysis */
MonoLiveInterval *interval; /* generated by liveness analysis */
int reg; /* != -1 if allocated into a register */
int spill_costs;
MonoBitSet *def_in; /* used by SSA */
MonoInst *def; /* used by SSA */
MonoBasicBlock *def_bb; /* used by SSA */
GList *uses; /* used by SSA */
char cpstate; /* used by SSA conditional constant propagation */
/* The native offsets corresponding to the live range of the variable */
gint32 live_range_start, live_range_end;
/*
* cfg->varinfo [idx]->dreg could be replaced for OP_REGVAR; this contains the
* original vreg.
*/
gint32 vreg;
};
/* Generic sharing */
/*
* Flags for which contexts were used in inflating a generic.
*/
enum {
MONO_GENERIC_CONTEXT_USED_CLASS = 1,
MONO_GENERIC_CONTEXT_USED_METHOD = 2
};
enum {
/* Cannot be 0 since this is stored in rgctx slots, and 0 means an uninitialized rgctx slot */
MONO_GSHAREDVT_BOX_TYPE_VTYPE = 1,
MONO_GSHAREDVT_BOX_TYPE_REF = 2,
MONO_GSHAREDVT_BOX_TYPE_NULLABLE = 3
};
typedef enum {
MONO_RGCTX_INFO_STATIC_DATA = 0,
MONO_RGCTX_INFO_KLASS = 1,
MONO_RGCTX_INFO_ELEMENT_KLASS = 2,
MONO_RGCTX_INFO_VTABLE = 3,
MONO_RGCTX_INFO_TYPE = 4,
MONO_RGCTX_INFO_REFLECTION_TYPE = 5,
MONO_RGCTX_INFO_METHOD = 6,
MONO_RGCTX_INFO_GENERIC_METHOD_CODE = 7,
MONO_RGCTX_INFO_GSHAREDVT_OUT_WRAPPER = 8,
MONO_RGCTX_INFO_CLASS_FIELD = 9,
MONO_RGCTX_INFO_METHOD_RGCTX = 10,
MONO_RGCTX_INFO_METHOD_CONTEXT = 11,
MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK = 12,
MONO_RGCTX_INFO_METHOD_DELEGATE_CODE = 13,
MONO_RGCTX_INFO_CAST_CACHE = 14,
MONO_RGCTX_INFO_ARRAY_ELEMENT_SIZE = 15,
MONO_RGCTX_INFO_VALUE_SIZE = 16,
/* +1 to avoid zero values in rgctx slots */
MONO_RGCTX_INFO_FIELD_OFFSET = 17,
/* Either the code for a gsharedvt method, or the address for a gsharedvt-out trampoline for the method */
/* In llvmonly mode, this is a function descriptor */
MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE = 18,
/* Same for virtual calls */
/* In llvmonly mode, this is a function descriptor */
MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE_VIRT = 19,
/* Same for calli, associated with a signature */
MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI = 20,
MONO_RGCTX_INFO_SIG_GSHAREDVT_IN_TRAMPOLINE_CALLI = 21,
/* One of MONO_GSHAREDVT_BOX_TYPE */
MONO_RGCTX_INFO_CLASS_BOX_TYPE = 22,
/* Resolves to a MonoGSharedVtMethodRuntimeInfo */
MONO_RGCTX_INFO_METHOD_GSHAREDVT_INFO = 23,
MONO_RGCTX_INFO_LOCAL_OFFSET = 24,
MONO_RGCTX_INFO_MEMCPY = 25,
MONO_RGCTX_INFO_BZERO = 26,
/* The address of Nullable<T>.Box () */
/* In llvmonly mode, this is a function descriptor */
MONO_RGCTX_INFO_NULLABLE_CLASS_BOX = 27,
MONO_RGCTX_INFO_NULLABLE_CLASS_UNBOX = 28,
/* MONO_PATCH_INFO_VCALL_METHOD */
/* In llvmonly mode, this is a function descriptor */
MONO_RGCTX_INFO_VIRT_METHOD_CODE = 29,
/*
* MONO_PATCH_INFO_VCALL_METHOD
* Same as MONO_RGCTX_INFO_CLASS_BOX_TYPE, but for the class
* which implements the method.
*/
MONO_RGCTX_INFO_VIRT_METHOD_BOX_TYPE = 30,
/* Resolve to 2 (TRUE) or 1 (FALSE) */
MONO_RGCTX_INFO_CLASS_IS_REF_OR_CONTAINS_REFS = 31,
/* The MonoDelegateTrampInfo instance */
MONO_RGCTX_INFO_DELEGATE_TRAMP_INFO = 32,
/* Same as MONO_PATCH_INFO_METHOD_FTNDESC */
MONO_RGCTX_INFO_METHOD_FTNDESC = 33,
/* mono_type_size () for a class */
MONO_RGCTX_INFO_CLASS_SIZEOF = 34,
/* The InterpMethod for a method */
MONO_RGCTX_INFO_INTERP_METHOD = 35,
/* The llvmonly interp entry for a method */
MONO_RGCTX_INFO_LLVMONLY_INTERP_ENTRY = 36
} MonoRgctxInfoType;
/* How an rgctx is passed to a method */
typedef enum {
MONO_RGCTX_ACCESS_NONE = 0,
/* Loaded from this->vtable->rgctx */
MONO_RGCTX_ACCESS_THIS = 1,
/* Loaded from an additional mrgctx argument */
MONO_RGCTX_ACCESS_MRGCTX = 2,
/* Loaded from an additional vtable argument */
MONO_RGCTX_ACCESS_VTABLE = 3
} MonoRgctxAccess;
typedef struct _MonoRuntimeGenericContextInfoTemplate {
MonoRgctxInfoType info_type;
gpointer data;
struct _MonoRuntimeGenericContextInfoTemplate *next;
} MonoRuntimeGenericContextInfoTemplate;
typedef struct {
MonoClass *next_subclass;
MonoRuntimeGenericContextInfoTemplate *infos;
GSList *method_templates;
} MonoRuntimeGenericContextTemplate;
typedef struct {
MonoVTable *class_vtable; /* must be the first element */
MonoGenericInst *method_inst;
gpointer infos [MONO_ZERO_LEN_ARRAY];
} MonoMethodRuntimeGenericContext;
/* MONO_ABI_SIZEOF () would include the 'infos' field as well */
#define MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT (TARGET_SIZEOF_VOID_P * 2)
#define MONO_RGCTX_SLOT_MAKE_RGCTX(i) (i)
#define MONO_RGCTX_SLOT_MAKE_MRGCTX(i) ((i) | 0x80000000)
#define MONO_RGCTX_SLOT_INDEX(s) ((s) & 0x7fffffff)
#define MONO_RGCTX_SLOT_IS_MRGCTX(s) (((s) & 0x80000000) ? TRUE : FALSE)
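/*
 * Illustrative sketch (not from the original file): slot encodings round-trip
 * through these macros, with the top bit distinguishing mrgctx slots:
 *
 *   guint32 slot = MONO_RGCTX_SLOT_MAKE_MRGCTX (5);
 *   g_assert (MONO_RGCTX_SLOT_IS_MRGCTX (slot));
 *   g_assert (MONO_RGCTX_SLOT_INDEX (slot) == 5);
 */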
#define MONO_GSHAREDVT_DEL_INVOKE_VT_OFFSET -2
typedef struct {
MonoMethod *method;
MonoRuntimeGenericContextInfoTemplate *entries;
int num_entries, count_entries;
} MonoGSharedVtMethodInfo;
/* This is used by gsharedvt methods to allocate locals and compute local offsets */
typedef struct {
int locals_size;
/*
* The results of resolving the entries in MonoGSharedVtMethodInfo->entries.
* We use this instead of rgctx slots since these can be loaded using a load instead
* of a call to an rgctx fetch trampoline.
*/
gpointer entries [MONO_ZERO_LEN_ARRAY];
} MonoGSharedVtMethodRuntimeInfo;
typedef struct
{
MonoClass *klass;
MonoMethod *invoke;
MonoMethod *method;
MonoMethodSignature *invoke_sig;
MonoMethodSignature *sig;
gpointer method_ptr;
gpointer invoke_impl;
gpointer impl_this;
gpointer impl_nothis;
gboolean need_rgctx_tramp;
} MonoDelegateTrampInfo;
/*
* A function descriptor, which is a function address + argument pair.
* In llvm-only mode, these are used instead of trampolines to pass
* extra arguments to runtime functions/methods.
*/
typedef struct
{
gpointer addr;
gpointer arg;
MonoMethod *method;
/* Tagged InterpMethod* */
gpointer interp_method;
} MonoFtnDesc;
typedef enum {
#define PATCH_INFO(a,b) MONO_PATCH_INFO_ ## a,
#include "patch-info.h"
#undef PATCH_INFO
MONO_PATCH_INFO_NUM
} MonoJumpInfoType;
typedef struct MonoJumpInfoRgctxEntry MonoJumpInfoRgctxEntry;
typedef struct MonoJumpInfo MonoJumpInfo;
typedef struct MonoJumpInfoGSharedVtCall MonoJumpInfoGSharedVtCall;
// Subset of MonoJumpInfo.
typedef struct MonoJumpInfoTarget {
MonoJumpInfoType type;
gconstpointer target;
} MonoJumpInfoTarget;
// This ordering is mimicked in MONO_JIT_ICALLS.
typedef enum {
MONO_TRAMPOLINE_JIT = 0,
MONO_TRAMPOLINE_JUMP = 1,
MONO_TRAMPOLINE_RGCTX_LAZY_FETCH = 2,
MONO_TRAMPOLINE_AOT = 3,
MONO_TRAMPOLINE_AOT_PLT = 4,
MONO_TRAMPOLINE_DELEGATE = 5,
MONO_TRAMPOLINE_VCALL = 6,
MONO_TRAMPOLINE_NUM = 7,
} MonoTrampolineType;
// Assuming MONO_TRAMPOLINE_JIT / MONO_JIT_ICALL_generic_trampoline_jit are first.
#if __cplusplus
g_static_assert (MONO_TRAMPOLINE_JIT == 0);
#endif
#define mono_trampoline_type_to_jit_icall_id(a) ((a) + MONO_JIT_ICALL_generic_trampoline_jit)
#define mono_jit_icall_id_to_trampoline_type(a) ((MonoTrampolineType)((a) - MONO_JIT_ICALL_generic_trampoline_jit))
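/*
 * Illustrative sketch: the two conversion macros above are inverses, which
 * only holds because the trampoline types and the corresponding JIT icall
 * ids are declared in the same order (see the static assert above):
 *
 *   MonoJitICallId id = (MonoJitICallId)mono_trampoline_type_to_jit_icall_id (MONO_TRAMPOLINE_AOT);
 *   g_assert (mono_jit_icall_id_to_trampoline_type (id) == MONO_TRAMPOLINE_AOT);
 */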
/* These trampolines return normally to their caller */
#define MONO_TRAMPOLINE_TYPE_MUST_RETURN(t) \
((t) == MONO_TRAMPOLINE_RGCTX_LAZY_FETCH)
/* These trampolines receive an argument directly in a register */
#define MONO_TRAMPOLINE_TYPE_HAS_ARG(t) \
(FALSE)
/* optimization flags */
#define OPTFLAG(id,shift,name,descr) MONO_OPT_ ## id = 1 << shift,
enum {
#include "optflags-def.h"
MONO_OPT_LAST
};
/*
* This structure represents a JIT backend.
*/
typedef struct {
guint have_card_table_wb : 1;
guint have_op_generic_class_init : 1;
guint emulate_mul_div : 1;
guint emulate_div : 1;
guint emulate_long_shift_opts : 1;
guint have_objc_get_selector : 1;
guint have_generalized_imt_trampoline : 1;
gboolean have_op_tailcall_membase : 1;
gboolean have_op_tailcall_reg : 1;
gboolean have_volatile_non_param_register : 1;
guint gshared_supported : 1;
guint use_fpstack : 1;
guint ilp32 : 1;
guint need_got_var : 1;
guint need_div_check : 1;
guint no_unaligned_access : 1;
guint disable_div_with_mul : 1;
guint explicit_null_checks : 1;
guint optimized_div : 1;
int monitor_enter_adjustment;
int dyn_call_param_area;
} MonoBackend;
/* Flags for mini_method_compile () */
typedef enum {
/* Whether to run cctors during JITting */
JIT_FLAG_RUN_CCTORS = (1 << 0),
/* Whether this is an AOT compilation */
JIT_FLAG_AOT = (1 << 1),
/* Whether this is a full AOT compilation */
JIT_FLAG_FULL_AOT = (1 << 2),
/* Whether to compile with LLVM */
JIT_FLAG_LLVM = (1 << 3),
/* Whether to disable direct calls to icall functions */
JIT_FLAG_NO_DIRECT_ICALLS = (1 << 4),
/* Emit explicit null checks */
JIT_FLAG_EXPLICIT_NULL_CHECKS = (1 << 5),
/* Whether to compile in llvm-only mode */
JIT_FLAG_LLVM_ONLY = (1 << 6),
/* Whether calls to pinvoke functions are made directly */
JIT_FLAG_DIRECT_PINVOKE = (1 << 7),
/* Whether this is a compile-all run whose result should be discarded */
JIT_FLAG_DISCARD_RESULTS = (1 << 8),
/* Whether to generate code which can work with the interpreter */
JIT_FLAG_INTERP = (1 << 9),
/* Allow AOT to use all current CPU instructions */
JIT_FLAG_USE_CURRENT_CPU = (1 << 10),
/* Generate code to self-init the method for AOT */
JIT_FLAG_SELF_INIT = (1 << 11),
/* Assume code memory is exec only */
JIT_FLAG_CODE_EXEC_ONLY = (1 << 12),
} JitFlags;
/* Bit-fields in the MonoBasicBlock.region */
#define MONO_REGION_TRY 0
#define MONO_REGION_FINALLY 16
#define MONO_REGION_CATCH 32
#define MONO_REGION_FAULT 64
#define MONO_REGION_FILTER 128
#define MONO_BBLOCK_IS_IN_REGION(bblock, regtype) (((bblock)->region & (0xf << 4)) == (regtype))
#define MONO_REGION_FLAGS(region) ((region) & 0x7)
#define MONO_REGION_CLAUSE_INDEX(region) (((region) >> 8) - 1)
#define get_vreg_to_inst(cfg, vreg) ((vreg) < (cfg)->vreg_to_inst_len ? (cfg)->vreg_to_inst [(vreg)] : NULL)
#define vreg_is_volatile(cfg, vreg) (G_UNLIKELY (get_vreg_to_inst ((cfg), (vreg)) && (get_vreg_to_inst ((cfg), (vreg))->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT))))
#define vreg_is_ref(cfg, vreg) ((vreg) < (cfg)->vreg_is_ref_len ? (cfg)->vreg_is_ref [(vreg)] : 0)
#define vreg_is_mp(cfg, vreg) ((vreg) < (cfg)->vreg_is_mp_len ? (cfg)->vreg_is_mp [(vreg)] : 0)
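/*
 * Illustrative sketch: an optimization pass can use the helpers above to
 * check whether a vreg has an associated global variable and whether it is
 * safe to rewrite, e.g.:
 *
 *   MonoInst *var = get_vreg_to_inst (cfg, vreg);
 *   if (var && !vreg_is_volatile (cfg, vreg))
 *       ;  // vreg is global but not volatile/indirect, so it can be rewritten
 */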
/*
* Control Flow Graph and compilation unit information
*/
typedef struct {
MonoMethod *method;
MonoMethodHeader *header;
MonoMemPool *mempool;
MonoInst **varinfo;
MonoMethodVar *vars;
MonoInst *ret;
MonoBasicBlock *bb_entry;
MonoBasicBlock *bb_exit;
MonoBasicBlock *bb_init;
MonoBasicBlock **bblocks;
MonoBasicBlock **cil_offset_to_bb;
MonoMemPool *state_pool; /* used by instruction selection */
MonoBasicBlock *cbb; /* used by instruction selection */
MonoInst *prev_ins; /* in decompose */
MonoJumpInfo *patch_info;
MonoJitInfo *jit_info;
MonoJitDynamicMethodInfo *dynamic_info;
guint num_bblocks, max_block_num;
guint locals_start;
guint num_varinfo; /* used items in varinfo */
guint varinfo_count; /* total storage in varinfo */
gint stack_offset;
gint max_ireg;
gint cil_offset_to_bb_len;
MonoRegState *rs;
MonoSpillInfo *spill_info [16]; /* machine register spills */
gint spill_count;
gint spill_info_len [16];
/* unsigned char *cil_code; */
MonoInst *got_var; /* Global Offset Table variable */
MonoInst **locals;
/* Variable holding the mrgctx/vtable address for gshared methods */
MonoInst *rgctx_var;
MonoInst **args;
MonoType **arg_types;
MonoMethod *current_method; /* The method currently processed by method_to_ir () */
MonoMethod *method_to_register; /* The method to register in JIT info tables */
MonoGenericContext *generic_context;
MonoInst *this_arg;
MonoBackend *backend;
/*
* This variable represents the hidden argument holding the vtype
* return address. If the method returns something other than a vtype, or
* the vtype is returned in registers, this is NULL.
*/
MonoInst *vret_addr;
/*
* This is used to initialize the cil_code field of MonoInst's.
*/
const unsigned char *ip;
struct MonoAliasingInformation *aliasing_info;
/* A hashtable of region ID -> SP var mappings */
/* An SP var is a place to store the stack pointer (used by handlers) */
/*
* FIXME We can potentially get rid of this, since it was mainly used
* for hijacking the return address for handlers.
*/
GHashTable *spvars;
/*
* A hashtable of region ID -> EX var mappings
* An EX var stores the exception object passed to catch/filter blocks
* For finally blocks, it is set to TRUE if we should throw an abort
* once the execution of the finally block is over.
*/
GHashTable *exvars;
GList *ldstr_list; /* used by AOT */
guint real_offset;
GHashTable *cbb_hash;
/* The current virtual register number */
guint32 next_vreg;
MonoRgctxAccess rgctx_access;
MonoGenericSharingContext gsctx;
MonoGenericContext *gsctx_context;
MonoGSharedVtMethodInfo *gsharedvt_info;
gpointer jit_mm;
MonoMemoryManager *mem_manager;
/* Points to the gsharedvt locals area at runtime */
MonoInst *gsharedvt_locals_var;
/* The localloc instruction used to initialize gsharedvt_locals_var */
MonoInst *gsharedvt_locals_var_ins;
/* Points to a MonoGSharedVtMethodRuntimeInfo at runtime */
MonoInst *gsharedvt_info_var;
/* For native-to-managed wrappers, CEE_MONO_JIT_(AT|DE)TACH opcodes */
MonoInst *orig_domain_var;
MonoInst *lmf_var;
MonoInst *lmf_addr_var;
MonoInst *il_state_var;
MonoInst *stack_inbalance_var;
unsigned char *cil_start;
unsigned char *native_code;
guint code_size;
guint code_len;
guint prolog_end;
guint epilog_begin;
guint epilog_end;
regmask_t used_int_regs;
guint32 opt;
guint32 flags;
guint32 comp_done;
guint32 verbose_level;
guint32 stack_usage;
guint32 param_area;
guint32 frame_reg;
gint32 sig_cookie;
guint disable_aot : 1;
guint disable_ssa : 1;
guint disable_llvm : 1;
guint enable_extended_bblocks : 1;
guint run_cctors : 1;
guint need_lmf_area : 1;
guint compile_aot : 1;
guint full_aot : 1;
guint compile_llvm : 1;
guint got_var_allocated : 1;
guint ret_var_is_local : 1;
guint ret_var_set : 1;
guint unverifiable : 1;
guint skip_visibility : 1;
guint disable_llvm_implicit_null_checks : 1;
guint disable_reuse_registers : 1;
guint disable_reuse_stack_slots : 1;
guint disable_reuse_ref_stack_slots : 1;
guint disable_ref_noref_stack_slot_share : 1;
guint disable_initlocals_opt : 1;
guint disable_initlocals_opt_refs : 1;
guint disable_omit_fp : 1;
guint disable_vreg_to_lvreg : 1;
guint disable_deadce_vars : 1;
guint disable_out_of_line_bblocks : 1;
guint disable_direct_icalls : 1;
guint disable_gc_safe_points : 1;
guint direct_pinvoke : 1;
guint create_lmf_var : 1;
/*
* When this is set, the code to push/pop the LMF from the LMF stack is generated as IR
* instead of being generated in emit_prolog ()/emit_epilog ().
*/
guint lmf_ir : 1;
guint gen_write_barriers : 1;
guint init_ref_vars : 1;
guint extend_live_ranges : 1;
guint compute_precise_live_ranges : 1;
guint has_got_slots : 1;
guint uses_rgctx_reg : 1;
guint uses_vtable_reg : 1;
guint keep_cil_nops : 1;
guint gen_seq_points : 1;
/* Generate seq points for use by the debugger */
guint gen_sdb_seq_points : 1;
guint explicit_null_checks : 1;
guint compute_gc_maps : 1;
guint soft_breakpoints : 1;
guint arch_eh_jit_info : 1;
guint has_calls : 1;
guint has_emulated_ops : 1;
guint has_indirection : 1;
guint has_atomic_add_i4 : 1;
guint has_atomic_exchange_i4 : 1;
guint has_atomic_cas_i4 : 1;
guint check_pinvoke_callconv : 1;
guint has_unwind_info_for_epilog : 1;
guint disable_inline : 1;
/* Disable inlining into caller */
guint no_inline : 1;
guint gshared : 1;
guint gsharedvt : 1;
guint llvm_only : 1;
guint interp : 1;
guint use_current_cpu : 1;
guint self_init : 1;
guint code_exec_only : 1;
guint interp_entry_only : 1;
guint after_method_to_ir : 1;
guint disable_inline_rgctx_fetch : 1;
guint deopt : 1;
guint8 uses_simd_intrinsics;
int r4_stack_type;
gpointer debug_info;
guint32 lmf_offset;
guint16 *intvars;
MonoProfilerCoverageInfo *coverage_info;
GHashTable *token_info_hash;
MonoCompileArch arch;
guint32 inline_depth;
/* Size of memory reserved for thunks */
int thunk_area;
/* Thunks */
guint8 *thunks;
/* Offset between the start of code and the thunks area */
int thunks_offset;
MonoExceptionType exception_type; /* MONO_EXCEPTION_* */
guint32 exception_data;
char* exception_message;
gpointer exception_ptr;
guint8 * encoded_unwind_ops;
guint32 encoded_unwind_ops_len;
GSList* unwind_ops;
GList* dont_inline;
/* Fields used by the local reg allocator */
void* reginfo;
int reginfo_len;
/* Maps vregs to their associated MonoInst's */
/* vregs with an associated MonoInst are 'global' while others are 'local' */
MonoInst **vreg_to_inst;
/* Size of above array */
guint32 vreg_to_inst_len;
/* Marks vregs which hold a GC ref */
/* FIXME: Use a bitmap */
gboolean *vreg_is_ref;
/* Size of above array */
guint32 vreg_is_ref_len;
/* Marks vregs which hold a managed pointer */
/* FIXME: Use a bitmap */
gboolean *vreg_is_mp;
/* Size of above array */
guint32 vreg_is_mp_len;
/*
* The original method to compile, differs from 'method' when doing generic
* sharing.
*/
MonoMethod *orig_method;
/* Patches which describe absolute addresses embedded into the native code */
GHashTable *abs_patches;
/* Used to implement move_i4_to_f on archs that can't do a raw
copy between an ireg and a freg. This is an int32 var. */
MonoInst *iconv_raw_var;
/* Used to implement fconv_to_r8_x. This is a double (8 bytes) var. */
MonoInst *fconv_to_r8_x_var;
/* Used to implement simd constructors. This is a vector (16 bytes) var. */
MonoInst *simd_ctor_var;
/* Used to implement dyn_call */
MonoInst *dyn_call_var;
MonoInst *last_seq_point;
/*
* List of sequence points represented as IL offset+native offset pairs.
* Allocated using glib.
* IL offset can be -1 or 0xffffff to refer to the sequence points
* inside the prolog and epilog used to implement method entry/exit events.
*/
GPtrArray *seq_points;
/* The encoded sequence point info */
struct MonoSeqPointInfo *seq_point_info;
/* Method headers which need to be freed after compilation */
GSList *headers_to_free;
/* Used by AOT */
guint32 got_offset, ex_info_offset, method_info_offset, method_index;
guint32 aot_method_flags;
/* For llvm */
guint32 got_access_count;
gpointer llvmonly_init_cond;
gpointer llvm_dummy_info_var, llvm_info_var;
/* Symbol used to refer to this method in generated assembly */
char *asm_symbol;
char *asm_debug_symbol;
char *llvm_method_name;
int castclass_cache_index;
MonoJitExceptionInfo *llvm_ex_info;
guint32 llvm_ex_info_len;
int llvm_this_reg, llvm_this_offset;
GSList *try_block_holes;
/* DWARF location list for 'this' */
GSList *this_loclist;
/* DWARF location list for 'rgctx_var' */
GSList *rgctx_loclist;
int *gsharedvt_vreg_to_idx;
GSList *signatures;
GSList *interp_in_signatures;
/* GC Maps */
/* The offsets of the locals area relative to the frame pointer */
gint locals_min_stack_offset, locals_max_stack_offset;
/* The current CFA rule */
int cur_cfa_reg, cur_cfa_offset;
/* The final CFA rule at the end of the prolog */
int cfa_reg, cfa_offset;
/* Points to a MonoCompileGC */
gpointer gc_info;
/*
* The encoded GC map along with its size. This contains binary data so it can be saved in an AOT
* image etc, but it requires a 4 byte alignment.
*/
guint8 *gc_map;
guint32 gc_map_size;
/* Error handling */
MonoError* error;
MonoErrorInternal error_value;
/* pointer to the context data structure used for graph dumping */
MonoGraphDumper *gdump_ctx;
gboolean *clause_is_dead;
/* Stats */
int stat_allocate_var;
int stat_locals_stack_size;
int stat_basic_blocks;
int stat_cil_code_size;
int stat_n_regvars;
int stat_inlineable_methods;
int stat_inlined_methods;
int stat_code_reallocs;
MonoProfilerCallInstrumentationFlags prof_flags;
gboolean prof_coverage;
/* For deduplication */
gboolean skip;
} MonoCompile;
#define MONO_CFG_PROFILE(cfg, flag) \
G_UNLIKELY ((cfg)->prof_flags & MONO_PROFILER_CALL_INSTRUMENTATION_ ## flag)
#define MONO_CFG_PROFILE_CALL_CONTEXT(cfg) \
(MONO_CFG_PROFILE (cfg, ENTER_CONTEXT) || MONO_CFG_PROFILE (cfg, LEAVE_CONTEXT))
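/*
 * Illustrative sketch: instrumentation can be emitted conditionally on the
 * profiler flags recorded for this method, e.g. (assuming ENTER is one of
 * the MonoProfilerCallInstrumentationFlags values):
 *
 *   if (MONO_CFG_PROFILE (cfg, ENTER))
 *       ;  // emit the method-enter callback for this cfg
 */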
typedef enum {
MONO_CFG_HAS_ALLOCA = 1 << 0,
MONO_CFG_HAS_CALLS = 1 << 1,
MONO_CFG_HAS_LDELEMA = 1 << 2,
MONO_CFG_HAS_VARARGS = 1 << 3,
MONO_CFG_HAS_TAILCALL = 1 << 4,
MONO_CFG_HAS_FPOUT = 1 << 5, /* there are fp values passed in int registers */
MONO_CFG_HAS_SPILLUP = 1 << 6, /* spill var slots are allocated from bottom to top */
MONO_CFG_HAS_CHECK_THIS = 1 << 7,
MONO_CFG_NEEDS_DECOMPOSE = 1 << 8,
MONO_CFG_HAS_TYPE_CHECK = 1 << 9
} MonoCompileFlags;
typedef enum {
MONO_CFG_USES_SIMD_INTRINSICS = 1 << 0,
MONO_CFG_USES_SIMD_INTRINSICS_SIMPLIFY_INDIRECTION = 1 << 1
} MonoSimdIntrinsicsFlags;
typedef struct {
gint32 methods_compiled;
gint32 methods_aot;
gint32 methods_aot_llvm;
gint32 methods_lookups;
gint32 allocate_var;
gint32 cil_code_size;
gint32 native_code_size;
gint32 code_reallocs;
gint32 max_code_size_ratio;
gint32 biggest_method_size;
gint32 allocated_code_size;
gint32 allocated_seq_points_size;
gint32 inlineable_methods;
gint32 inlined_methods;
gint32 basic_blocks;
gint32 max_basic_blocks;
gint32 locals_stack_size;
gint32 regvars;
gint32 generic_virtual_invocations;
gint32 alias_found;
gint32 alias_removed;
gint32 loads_eliminated;
gint32 stores_eliminated;
gint32 optimized_divisions;
gint32 methods_with_llvm;
gint32 methods_without_llvm;
gint32 methods_with_interp;
char *max_ratio_method;
char *biggest_method;
gint64 jit_method_to_ir;
gint64 jit_liveness_handle_exception_clauses;
gint64 jit_handle_out_of_line_bblock;
gint64 jit_decompose_long_opts;
gint64 jit_decompose_typechecks;
gint64 jit_local_cprop;
gint64 jit_local_emulate_ops;
gint64 jit_optimize_branches;
gint64 jit_handle_global_vregs;
gint64 jit_local_deadce;
gint64 jit_local_alias_analysis;
gint64 jit_if_conversion;
gint64 jit_bb_ordering;
gint64 jit_compile_dominator_info;
gint64 jit_compute_natural_loops;
gint64 jit_insert_safepoints;
gint64 jit_ssa_compute;
gint64 jit_ssa_cprop;
gint64 jit_ssa_deadce;
gint64 jit_perform_abc_removal;
gint64 jit_ssa_remove;
gint64 jit_local_cprop2;
gint64 jit_handle_global_vregs2;
gint64 jit_local_deadce2;
gint64 jit_optimize_branches2;
gint64 jit_decompose_vtype_opts;
gint64 jit_decompose_array_access_opts;
gint64 jit_liveness_handle_exception_clauses2;
gint64 jit_analyze_liveness;
gint64 jit_linear_scan;
gint64 jit_arch_allocate_vars;
gint64 jit_spill_global_vars;
gint64 jit_local_cprop3;
gint64 jit_local_deadce3;
gint64 jit_codegen;
gint64 jit_create_jit_info;
gint64 jit_gc_create_gc_map;
gint64 jit_save_seq_point_info;
gint64 jit_time;
gboolean enabled;
} MonoJitStats;
extern MonoJitStats mono_jit_stats;
static inline void
get_jit_stats (gint64 *methods_compiled, gint64 *cil_code_size_bytes, gint64 *native_code_size_bytes, gint64 *jit_time)
{
*methods_compiled = mono_jit_stats.methods_compiled;
*cil_code_size_bytes = mono_jit_stats.cil_code_size;
*native_code_size_bytes = mono_jit_stats.native_code_size;
*jit_time = mono_jit_stats.jit_time;
}
guint32
mono_get_exception_count (void);
static inline void
get_exception_stats (guint32 *exception_count)
{
*exception_count = mono_get_exception_count ();
}
/* opcodes: value assigned after all the CIL opcodes */
#ifdef MINI_OP
#undef MINI_OP
#endif
#ifdef MINI_OP3
#undef MINI_OP3
#endif
#define MINI_OP(a,b,dest,src1,src2) a,
#define MINI_OP3(a,b,dest,src1,src2,src3) a,
enum {
OP_START = MONO_CEE_LAST - 1,
#include "mini-ops.h"
OP_LAST
};
#undef MINI_OP
#undef MINI_OP3
#if TARGET_SIZEOF_VOID_P == 8
#define OP_PCONST OP_I8CONST
#define OP_DUMMY_PCONST OP_DUMMY_I8CONST
#define OP_PADD OP_LADD
#define OP_PADD_IMM OP_LADD_IMM
#define OP_PSUB_IMM OP_LSUB_IMM
#define OP_PAND_IMM OP_LAND_IMM
#define OP_PXOR_IMM OP_LXOR_IMM
#define OP_PSUB OP_LSUB
#define OP_PMUL OP_LMUL
#define OP_PMUL_IMM OP_LMUL_IMM
#define OP_POR_IMM OP_LOR_IMM
#define OP_PNEG OP_LNEG
#define OP_PCONV_TO_I1 OP_LCONV_TO_I1
#define OP_PCONV_TO_U1 OP_LCONV_TO_U1
#define OP_PCONV_TO_I2 OP_LCONV_TO_I2
#define OP_PCONV_TO_U2 OP_LCONV_TO_U2
#define OP_PCONV_TO_OVF_I1_UN OP_LCONV_TO_OVF_I1_UN
#define OP_PCONV_TO_OVF_I1 OP_LCONV_TO_OVF_I1
#define OP_PBEQ OP_LBEQ
#define OP_PCEQ OP_LCEQ
#define OP_PCLT OP_LCLT
#define OP_PCGT OP_LCGT
#define OP_PCLT_UN OP_LCLT_UN
#define OP_PCGT_UN OP_LCGT_UN
#define OP_PBNE_UN OP_LBNE_UN
#define OP_PBGE_UN OP_LBGE_UN
#define OP_PBLT_UN OP_LBLT_UN
#define OP_PBGE OP_LBGE
#define OP_STOREP_MEMBASE_REG OP_STOREI8_MEMBASE_REG
#define OP_STOREP_MEMBASE_IMM OP_STOREI8_MEMBASE_IMM
#else
#define OP_PCONST OP_ICONST
#define OP_DUMMY_PCONST OP_DUMMY_ICONST
#define OP_PADD OP_IADD
#define OP_PADD_IMM OP_IADD_IMM
#define OP_PSUB_IMM OP_ISUB_IMM
#define OP_PAND_IMM OP_IAND_IMM
#define OP_PXOR_IMM OP_IXOR_IMM
#define OP_PSUB OP_ISUB
#define OP_PMUL OP_IMUL
#define OP_PMUL_IMM OP_IMUL_IMM
#define OP_POR_IMM OP_IOR_IMM
#define OP_PNEG OP_INEG
#define OP_PCONV_TO_I1 OP_ICONV_TO_I1
#define OP_PCONV_TO_U1 OP_ICONV_TO_U1
#define OP_PCONV_TO_I2 OP_ICONV_TO_I2
#define OP_PCONV_TO_U2 OP_ICONV_TO_U2
#define OP_PCONV_TO_OVF_I1_UN OP_ICONV_TO_OVF_I1_UN
#define OP_PCONV_TO_OVF_I1 OP_ICONV_TO_OVF_I1
#define OP_PBEQ OP_IBEQ
#define OP_PCEQ OP_ICEQ
#define OP_PCLT OP_ICLT
#define OP_PCGT OP_ICGT
#define OP_PCLT_UN OP_ICLT_UN
#define OP_PCGT_UN OP_ICGT_UN
#define OP_PBNE_UN OP_IBNE_UN
#define OP_PBGE_UN OP_IBGE_UN
#define OP_PBLT_UN OP_IBLT_UN
#define OP_PBGE OP_IBGE
#define OP_STOREP_MEMBASE_REG OP_STOREI4_MEMBASE_REG
#define OP_STOREP_MEMBASE_IMM OP_STOREI4_MEMBASE_IMM
#endif
/* Opcodes to load/store regsize quantities */
#if defined (MONO_ARCH_ILP32)
#define OP_LOADR_MEMBASE OP_LOADI8_MEMBASE
#define OP_STORER_MEMBASE_REG OP_STOREI8_MEMBASE_REG
#else
#define OP_LOADR_MEMBASE OP_LOAD_MEMBASE
#define OP_STORER_MEMBASE_REG OP_STORE_MEMBASE_REG
#endif
typedef enum {
STACK_INV,
STACK_I4,
STACK_I8,
STACK_PTR,
STACK_R8,
STACK_MP,
STACK_OBJ,
STACK_VTYPE,
STACK_R4,
STACK_MAX
} MonoStackType;
typedef struct {
union {
double r8;
gint32 i4;
gint64 i8;
gpointer p;
MonoClass *klass;
} data;
int type;
} StackSlot;
extern const MonoInstSpec MONO_ARCH_CPU_SPEC [];
#define MONO_ARCH_CPU_SPEC_IDX_COMBINE(a) a ## _idx
#define MONO_ARCH_CPU_SPEC_IDX(a) MONO_ARCH_CPU_SPEC_IDX_COMBINE(a)
extern const guint16 MONO_ARCH_CPU_SPEC_IDX(MONO_ARCH_CPU_SPEC) [];
#define ins_get_spec(op) ((const char*)&MONO_ARCH_CPU_SPEC [MONO_ARCH_CPU_SPEC_IDX(MONO_ARCH_CPU_SPEC)[(op) - OP_LOAD]])
#ifndef DISABLE_JIT
static inline int
ins_get_size (int opcode)
{
return ((guint8 *)ins_get_spec (opcode))[MONO_INST_LEN];
}
guint8*
mini_realloc_code_slow (MonoCompile *cfg, int size);
static inline guint8*
realloc_code (MonoCompile *cfg, int size)
{
const int EXTRA_CODE_SPACE = 16;
const int code_len = cfg->code_len;
if (G_UNLIKELY ((guint)(code_len + size) > (cfg->code_size - EXTRA_CODE_SPACE)))
return mini_realloc_code_slow (cfg, size);
return cfg->native_code + code_len;
}
static inline void
set_code_len (MonoCompile *cfg, int len)
{
g_assert ((guint)len <= cfg->code_size);
cfg->code_len = len;
}
static inline void
set_code_cursor (MonoCompile *cfg, void* void_code)
{
guint8* code = (guint8*)void_code;
g_assert (code <= (cfg->native_code + cfg->code_size));
set_code_len (cfg, code - cfg->native_code);
}
#endif
enum {
MONO_COMP_DOM = 1,
MONO_COMP_IDOM = 2,
MONO_COMP_DFRONTIER = 4,
MONO_COMP_DOM_REV = 8,
MONO_COMP_LIVENESS = 16,
MONO_COMP_SSA = 32,
MONO_COMP_SSA_DEF_USE = 64,
MONO_COMP_REACHABILITY = 128,
MONO_COMP_LOOPS = 256
};
typedef enum {
MONO_GRAPH_CFG = 1,
MONO_GRAPH_DTREE = 2,
MONO_GRAPH_CFG_CODE = 4,
MONO_GRAPH_CFG_SSA = 8,
MONO_GRAPH_CFG_OPTCODE = 16
} MonoGraphOptions;
typedef struct {
guint16 size;
guint16 offset;
guint8 pad;
} MonoJitArgumentInfo;
enum {
BRANCH_NOT_TAKEN,
BRANCH_TAKEN,
BRANCH_UNDEF
};
typedef enum {
CMP_EQ,
CMP_NE,
CMP_LE,
CMP_GE,
CMP_LT,
CMP_GT,
CMP_LE_UN,
CMP_GE_UN,
CMP_LT_UN,
CMP_GT_UN,
CMP_ORD,
CMP_UNORD
} CompRelation;
typedef enum {
CMP_TYPE_L,
CMP_TYPE_I,
CMP_TYPE_F
} CompType;
/* Implicit exceptions */
enum {
MONO_EXC_INDEX_OUT_OF_RANGE,
MONO_EXC_OVERFLOW,
MONO_EXC_ARITHMETIC,
MONO_EXC_DIVIDE_BY_ZERO,
MONO_EXC_INVALID_CAST,
MONO_EXC_NULL_REF,
MONO_EXC_ARRAY_TYPE_MISMATCH,
MONO_EXC_ARGUMENT,
MONO_EXC_ARGUMENT_OUT_OF_RANGE,
MONO_EXC_ARGUMENT_OUT_OF_MEMORY,
MONO_EXC_INTRINS_NUM
};
/*
* Information about a trampoline function.
*/
struct MonoTrampInfo
{
/*
* The native code of the trampoline. Not owned by this structure.
*/
guint8 *code;
guint32 code_size;
/*
* The name of the trampoline which can be used in AOT/xdebug. Owned by this
* structure.
*/
char *name;
/*
* Patches required by the trampoline when aot-ing. Owned by this structure.
*/
MonoJumpInfo *ji;
/*
* Unwind information. Owned by this structure.
*/
GSList *unwind_ops;
MonoJitICallInfo *jit_icall_info;
/*
* The method the trampoline is associated with, if any.
*/
MonoMethod *method;
/*
* Encoded unwind info loaded from AOT images
*/
guint8 *uw_info;
guint32 uw_info_len;
/* Whether uw_info is owned by this structure */
gboolean owns_uw_info;
};
typedef void (*MonoInstFunc) (MonoInst *tree, gpointer data);
enum {
FILTER_IL_SEQ_POINT = 1 << 0,
FILTER_NOP = 1 << 1,
};
static inline gboolean
mono_inst_filter (MonoInst *ins, int filter)
{
if (!ins || !filter)
return FALSE;
if ((filter & FILTER_IL_SEQ_POINT) && ins->opcode == OP_IL_SEQ_POINT)
return TRUE;
if ((filter & FILTER_NOP) && ins->opcode == OP_NOP)
return TRUE;
return FALSE;
}
static inline MonoInst*
mono_inst_next (MonoInst *ins, int filter)
{
do {
ins = ins->next;
} while (mono_inst_filter (ins, filter));
return ins;
}
static inline MonoInst*
mono_inst_prev (MonoInst *ins, int filter)
{
do {
ins = ins->prev;
} while (mono_inst_filter (ins, filter));
return ins;
}
static inline MonoInst*
mono_bb_first_inst (MonoBasicBlock *bb, int filter)
{
MonoInst *ins = bb->code;
if (mono_inst_filter (ins, filter))
ins = mono_inst_next (ins, filter);
return ins;
}
static inline MonoInst*
mono_bb_last_inst (MonoBasicBlock *bb, int filter)
{
MonoInst *ins = bb->last_ins;
if (mono_inst_filter (ins, filter))
ins = mono_inst_prev (ins, filter);
return ins;
}
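/*
 * Illustrative sketch: walking the "real" instructions of a bblock while
 * skipping IL sequence points and nops via the filter flags above:
 *
 *   int filter = FILTER_IL_SEQ_POINT | FILTER_NOP;
 *   MonoInst *ins;
 *   for (ins = mono_bb_first_inst (bb, filter); ins; ins = mono_inst_next (ins, filter))
 *       ;  // process ins
 */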
/* profiler support */
void mini_add_profiler_argument (const char *desc);
void mini_profiler_emit_enter (MonoCompile *cfg);
void mini_profiler_emit_leave (MonoCompile *cfg, MonoInst *ret);
void mini_profiler_emit_tail_call (MonoCompile *cfg, MonoMethod *target);
void mini_profiler_emit_call_finally (MonoCompile *cfg, MonoMethodHeader *header, unsigned char *ip, guint32 index, MonoExceptionClause *clause);
void mini_profiler_context_enable (void);
gpointer mini_profiler_context_get_this (MonoProfilerCallContext *ctx);
gpointer mini_profiler_context_get_argument (MonoProfilerCallContext *ctx, guint32 pos);
gpointer mini_profiler_context_get_local (MonoProfilerCallContext *ctx, guint32 pos);
gpointer mini_profiler_context_get_result (MonoProfilerCallContext *ctx);
void mini_profiler_context_free_buffer (gpointer buffer);
/* graph dumping */
void mono_cfg_dump_create_context (MonoCompile *cfg);
void mono_cfg_dump_begin_group (MonoCompile *cfg);
void mono_cfg_dump_close_group (MonoCompile *cfg);
void mono_cfg_dump_ir (MonoCompile *cfg, const char *phase_name);
/* helper methods */
MonoInst* mono_find_spvar_for_region (MonoCompile *cfg, int region);
MonoInst* mono_find_exvar_for_offset (MonoCompile *cfg, int offset);
int mono_get_block_region_notry (MonoCompile *cfg, int region);
void mono_bblock_add_inst (MonoBasicBlock *bb, MonoInst *inst);
void mono_bblock_insert_after_ins (MonoBasicBlock *bb, MonoInst *ins, MonoInst *ins_to_insert);
void mono_bblock_insert_before_ins (MonoBasicBlock *bb, MonoInst *ins, MonoInst *ins_to_insert);
void mono_verify_bblock (MonoBasicBlock *bb);
void mono_verify_cfg (MonoCompile *cfg);
void mono_constant_fold (MonoCompile *cfg);
MonoInst* mono_constant_fold_ins (MonoCompile *cfg, MonoInst *ins, MonoInst *arg1, MonoInst *arg2, gboolean overwrite);
int mono_eval_cond_branch (MonoInst *branch);
int mono_is_power_of_two (guint32 val);
void mono_cprop_local (MonoCompile *cfg, MonoBasicBlock *bb, MonoInst **acp, int acp_size);
MonoInst* mono_compile_create_var (MonoCompile *cfg, MonoType *type, int opcode);
MonoInst* mono_compile_create_var_for_vreg (MonoCompile *cfg, MonoType *type, int opcode, int vreg);
void mono_compile_make_var_load (MonoCompile *cfg, MonoInst *dest, gssize var_index);
MonoInst* mini_get_int_to_float_spill_area (MonoCompile *cfg);
MonoType* mono_type_from_stack_type (MonoInst *ins);
guint32 mono_alloc_ireg (MonoCompile *cfg);
guint32 mono_alloc_lreg (MonoCompile *cfg);
guint32 mono_alloc_freg (MonoCompile *cfg);
guint32 mono_alloc_preg (MonoCompile *cfg);
guint32 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type);
guint32 mono_alloc_ireg_ref (MonoCompile *cfg);
guint32 mono_alloc_ireg_mp (MonoCompile *cfg);
guint32 mono_alloc_ireg_copy (MonoCompile *cfg, guint32 vreg);
void mono_mark_vreg_as_ref (MonoCompile *cfg, int vreg);
void mono_mark_vreg_as_mp (MonoCompile *cfg, int vreg);
void mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to);
void mono_unlink_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to);
gboolean mono_bblocks_linked (MonoBasicBlock *bb1, MonoBasicBlock *bb2);
void mono_remove_bblock (MonoCompile *cfg, MonoBasicBlock *bb);
void mono_nullify_basic_block (MonoBasicBlock *bb);
void mono_merge_basic_blocks (MonoCompile *cfg, MonoBasicBlock *bb, MonoBasicBlock *bbn);
void mono_optimize_branches (MonoCompile *cfg);
void mono_blockset_print (MonoCompile *cfg, MonoBitSet *set, const char *name, guint idom);
void mono_print_ins_index (int i, MonoInst *ins);
GString *mono_print_ins_index_strbuf (int i, MonoInst *ins);
void mono_print_ins (MonoInst *ins);
void mono_print_bb (MonoBasicBlock *bb, const char *msg);
void mono_print_code (MonoCompile *cfg, const char *msg);
const char* mono_inst_name (int op);
int mono_op_to_op_imm (int opcode);
int mono_op_imm_to_op (int opcode);
int mono_load_membase_to_load_mem (int opcode);
gboolean mono_op_no_side_effects (int opcode);
gboolean mono_ins_no_side_effects (MonoInst *ins);
guint mono_type_to_load_membase (MonoCompile *cfg, MonoType *type);
guint mono_type_to_store_membase (MonoCompile *cfg, MonoType *type);
guint32 mono_type_to_stloc_coerce (MonoType *type);
guint mini_type_to_stind (MonoCompile* cfg, MonoType *type);
MonoStackType mini_type_to_stack_type (MonoCompile *cfg, MonoType *t);
MonoJitInfo* mini_lookup_method (MonoMethod *method, MonoMethod *shared);
guint32 mono_reverse_branch_op (guint32 opcode);
void mono_disassemble_code (MonoCompile *cfg, guint8 *code, int size, char *id);
MonoJumpInfoTarget mono_call_to_patch (MonoCallInst *call);
void mono_call_add_patch_info (MonoCompile *cfg, MonoCallInst *call, int ip);
void mono_add_patch_info (MonoCompile *cfg, int ip, MonoJumpInfoType type, gconstpointer target);
void mono_add_patch_info_rel (MonoCompile *cfg, int ip, MonoJumpInfoType type, gconstpointer target, int relocation);
void mono_remove_patch_info (MonoCompile *cfg, int ip);
gpointer mono_jit_compile_method_inner (MonoMethod *method, int opt, MonoError *error);
GList *mono_varlist_insert_sorted (MonoCompile *cfg, GList *list, MonoMethodVar *mv, int sort_type);
GList *mono_varlist_sort (MonoCompile *cfg, GList *list, int sort_type);
void mono_analyze_liveness (MonoCompile *cfg);
void mono_analyze_liveness_gc (MonoCompile *cfg);
void mono_linear_scan (MonoCompile *cfg, GList *vars, GList *regs, regmask_t *used_mask);
void mono_global_regalloc (MonoCompile *cfg);
void mono_create_jump_table (MonoCompile *cfg, MonoInst *label, MonoBasicBlock **bbs, int num_blocks);
MonoCompile *mini_method_compile (MonoMethod *method, guint32 opts, JitFlags flags, int parts, int aot_method_index);
void mono_destroy_compile (MonoCompile *cfg);
void mono_empty_compile (MonoCompile *cfg);
MonoJitICallInfo *mono_find_jit_opcode_emulation (int opcode);
gboolean mini_assembly_can_skip_verification (MonoMethod *method);
MonoInst *mono_get_got_var (MonoCompile *cfg);
void mono_add_seq_point (MonoCompile *cfg, MonoBasicBlock *bb, MonoInst *ins, int native_offset);
void mono_add_var_location (MonoCompile *cfg, MonoInst *var, gboolean is_reg, int reg, int offset, int from, int to);
MonoInst* mono_emit_jit_icall_id (MonoCompile *cfg, MonoJitICallId jit_icall_id, MonoInst **args);
#define mono_emit_jit_icall(cfg, name, args) (mono_emit_jit_icall_id ((cfg), MONO_JIT_ICALL_ ## name, (args)))
MonoInst* mono_emit_jit_icall_by_info (MonoCompile *cfg, int il_offset, MonoJitICallInfo *info, MonoInst **args);
MonoInst* mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this_ins);
gboolean mini_should_insert_breakpoint (MonoMethod *method);
int mono_target_pagesize (void);
gboolean mini_class_is_system_array (MonoClass *klass);
void mono_linterval_add_range (MonoCompile *cfg, MonoLiveInterval *interval, int from, int to);
void mono_linterval_print (MonoLiveInterval *interval);
void mono_linterval_print_nl (MonoLiveInterval *interval);
gboolean mono_linterval_covers (MonoLiveInterval *interval, int pos);
gint32 mono_linterval_get_intersect_pos (MonoLiveInterval *i1, MonoLiveInterval *i2);
void mono_linterval_split (MonoCompile *cfg, MonoLiveInterval *interval, MonoLiveInterval **i1, MonoLiveInterval **i2, int pos);
void mono_liveness_handle_exception_clauses (MonoCompile *cfg);
gpointer mono_realloc_native_code (MonoCompile *cfg);
void mono_register_opcode_emulation (int opcode, const char* name, MonoMethodSignature *sig, gpointer func, gboolean no_throw);
void mono_draw_graph (MonoCompile *cfg, MonoGraphOptions draw_options);
void mono_add_ins_to_end (MonoBasicBlock *bb, MonoInst *inst);
void mono_replace_ins (MonoCompile *cfg, MonoBasicBlock *bb, MonoInst *ins, MonoInst **prev, MonoBasicBlock *first_bb, MonoBasicBlock *last_bb);
void mini_register_opcode_emulation (int opcode, MonoJitICallInfo *jit_icall_info, const char *name, MonoMethodSignature *sig, gpointer func, const char *symbol, gboolean no_throw);
#ifdef __cplusplus
template <typename T>
inline void
mini_register_opcode_emulation (int opcode, MonoJitICallInfo *jit_icall_info, const char *name, MonoMethodSignature *sig, T func, const char *symbol, gboolean no_throw)
{
mini_register_opcode_emulation (opcode, jit_icall_info, name, sig, (gpointer)func, symbol, no_throw);
}
#endif // __cplusplus
void mono_trampolines_init (void);
guint8 * mono_get_trampoline_code (MonoTrampolineType tramp_type);
gpointer mono_create_specific_trampoline (MonoMemoryManager *mem_manager, gpointer arg1, MonoTrampolineType tramp_type, guint32 *code_len);
gpointer mono_create_jump_trampoline (MonoMethod *method,
gboolean add_sync_wrapper,
MonoError *error);
gpointer mono_create_jit_trampoline (MonoMethod *method, MonoError *error);
gpointer mono_create_jit_trampoline_from_token (MonoImage *image, guint32 token);
gpointer mono_create_delegate_trampoline (MonoClass *klass);
MonoDelegateTrampInfo* mono_create_delegate_trampoline_info (MonoClass *klass, MonoMethod *method);
gpointer mono_create_delegate_virtual_trampoline (MonoClass *klass, MonoMethod *method);
gpointer mono_create_rgctx_lazy_fetch_trampoline (guint32 offset);
gpointer mono_create_static_rgctx_trampoline (MonoMethod *m, gpointer addr);
gpointer mono_create_ftnptr_arg_trampoline (gpointer arg, gpointer addr);
guint32 mono_find_rgctx_lazy_fetch_trampoline_by_addr (gconstpointer addr);
gpointer mono_magic_trampoline (host_mgreg_t *regs, guint8 *code, gpointer arg, guint8* tramp);
gpointer mono_delegate_trampoline (host_mgreg_t *regs, guint8 *code, gpointer *tramp_data, guint8* tramp);
gpointer mono_aot_trampoline (host_mgreg_t *regs, guint8 *code, guint8 *token_info,
guint8* tramp);
gpointer mono_aot_plt_trampoline (host_mgreg_t *regs, guint8 *code, guint8 *token_info,
guint8* tramp);
gconstpointer mono_get_trampoline_func (MonoTrampolineType tramp_type);
gpointer mini_get_vtable_trampoline (MonoVTable *vt, int slot_index);
const char* mono_get_generic_trampoline_simple_name (MonoTrampolineType tramp_type);
const char* mono_get_generic_trampoline_name (MonoTrampolineType tramp_type);
char* mono_get_rgctx_fetch_trampoline_name (int slot);
gpointer mini_get_single_step_trampoline (void);
gpointer mini_get_breakpoint_trampoline (void);
gpointer mini_add_method_trampoline (MonoMethod *m, gpointer compiled_method, gboolean add_static_rgctx_tramp, gboolean add_unbox_tramp);
gboolean mini_jit_info_is_gsharedvt (MonoJitInfo *ji);
gpointer* mini_resolve_imt_method (MonoVTable *vt, gpointer *vtable_slot, MonoMethod *imt_method, MonoMethod **impl_method, gpointer *out_aot_addr,
gboolean *out_need_rgctx_tramp, MonoMethod **variant_iface,
MonoError *error);
void* mono_global_codeman_reserve (int size);
#define mono_global_codeman_reserve(size) (g_cast (mono_global_codeman_reserve ((size))))
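/*
 * Illustrative note: the macro above wraps the call in g_cast so the void*
 * result converts implicitly at C++ call sites as well, e.g.:
 *
 *   guint8 *code = mono_global_codeman_reserve (256);
 */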
void mono_global_codeman_foreach (MonoCodeManagerFunc func, void *user_data);
const char *mono_regname_full (int reg, int bank);
gint32* mono_allocate_stack_slots (MonoCompile *cfg, gboolean backward, guint32 *stack_size, guint32 *stack_align);
void mono_local_regalloc (MonoCompile *cfg, MonoBasicBlock *bb);
MonoInst *mono_branch_optimize_exception_target (MonoCompile *cfg, MonoBasicBlock *bb, const char * exname);
void mono_remove_critical_edges (MonoCompile *cfg);
gboolean mono_is_regsize_var (MonoType *t);
MonoJumpInfo * mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target);
int mini_class_check_context_used (MonoCompile *cfg, MonoClass *klass);
int mini_method_check_context_used (MonoCompile *cfg, MonoMethod *method);
void mini_type_from_op (MonoCompile *cfg, MonoInst *ins, MonoInst *src1, MonoInst *src2);
void mini_set_inline_failure (MonoCompile *cfg, const char *msg);
void mini_test_tailcall (MonoCompile *cfg, gboolean tailcall);
gboolean mini_should_check_stack_pointer (MonoCompile *cfg);
MonoInst* mini_emit_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used);
void mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align);
void mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align);
void mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native);
void mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass);
void mini_emit_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype);
int mini_emit_sext_index_reg (MonoCompile *cfg, MonoInst *index);
MonoInst* mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index, gboolean bcheck, gboolean bounded);
MonoInst* mini_emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type);
MonoInst* mini_emit_get_rgctx_method (MonoCompile *cfg, int context_used,
MonoMethod *cmethod, MonoRgctxInfoType rgctx_type);
void mini_emit_tailcall_parameters (MonoCompile *cfg, MonoMethodSignature *sig);
MonoCallInst * mini_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
MonoInst **args, gboolean calli, gboolean virtual_, gboolean tailcall,
gboolean rgctx, gboolean unbox_trampoline, MonoMethod *target);
MonoInst* mini_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *imt_arg, MonoInst *rgctx_arg);
MonoInst* mini_emit_calli_full (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr,
MonoInst *imt_arg, MonoInst *rgctx_arg, gboolean tailcall);
MonoInst* mini_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig, gboolean tailcall,
MonoInst **args, MonoInst *this_ins, MonoInst *imt_arg, MonoInst *rgctx_arg);
MonoInst* mini_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
MonoMethodSignature *sig, MonoInst **args);
MonoInst* mini_emit_extra_arg_calli (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **orig_args, int arg_reg, MonoInst *call_target);
MonoInst* mini_emit_llvmonly_calli (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, MonoInst *addr);
MonoInst* mini_emit_llvmonly_virtual_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, int context_used, MonoInst **sp);
MonoInst* mini_emit_memory_barrier (MonoCompile *cfg, int kind);
MonoInst* mini_emit_storing_write_barrier (MonoCompile *cfg, MonoInst *ptr, MonoInst *value);
void mini_emit_write_barrier (MonoCompile *cfg, MonoInst *ptr, MonoInst *value);
MonoInst* mini_emit_memory_load (MonoCompile *cfg, MonoType *type, MonoInst *src, int offset, int ins_flag);
void mini_emit_memory_store (MonoCompile *cfg, MonoType *type, MonoInst *dest, MonoInst *value, int ins_flag);
void mini_emit_memory_copy_bytes (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoInst *size, int ins_flag);
void mini_emit_memory_init_bytes (MonoCompile *cfg, MonoInst *dest, MonoInst *value, MonoInst *size, int ins_flag);
void mini_emit_memory_copy (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native, int ins_flag);
MonoInst* mini_emit_array_store (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, gboolean safety_checks);
MonoInst* mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args, gboolean *ins_type_initialized);
MonoInst* mini_emit_inst_for_ctor (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args);
MonoInst* mini_emit_inst_for_field_load (MonoCompile *cfg, MonoClassField *field);
MonoInst* mini_handle_enum_has_flag (MonoCompile *cfg, MonoClass *klass, MonoInst *enum_this, int enum_val_reg, MonoInst *enum_flag);
MonoInst* mini_handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst *val, int context_used);
MonoMethod* mini_get_memcpy_method (void);
MonoMethod* mini_get_memset_method (void);
MonoRgctxAccess mini_get_rgctx_access_for_method (MonoMethod *method);
CompRelation mono_opcode_to_cond (int opcode);
CompType mono_opcode_to_type (int opcode, int cmp_opcode);
CompRelation mono_negate_cond (CompRelation cond);
void mono_decompose_op_imm (MonoCompile *cfg, MonoBasicBlock *bb, MonoInst *ins);
void mono_peephole_ins (MonoBasicBlock *bb, MonoInst *ins);
MonoUnwindOp *mono_create_unwind_op (int when,
int tag, int reg,
int val);
void mono_emit_unwind_op (MonoCompile *cfg, int when,
int tag, int reg,
int val);
MonoTrampInfo* mono_tramp_info_create (const char *name, guint8 *code, guint32 code_size, MonoJumpInfo *ji, GSList *unwind_ops);
void mono_tramp_info_free (MonoTrampInfo *info);
void mono_aot_tramp_info_register (MonoTrampInfo *info, MonoMemoryManager *mem_manager);
void mono_tramp_info_register (MonoTrampInfo *info, MonoMemoryManager *mem_manager);
int mini_exception_id_by_name (const char *name);
gboolean mini_type_is_hfa (MonoType *t, int *out_nfields, int *out_esize);
int mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
MonoInst *return_var, MonoInst **inline_args,
guint inline_offset, gboolean is_virtual_call);
// The following methods could just be renamed/moved from method-to-ir.c
int mini_inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp, guchar *ip,
guint real_offset, gboolean inline_always);
MonoInst* mini_emit_get_rgctx_klass (MonoCompile *cfg, int context_used, MonoClass *klass, MonoRgctxInfoType rgctx_type);
MonoInst* mini_emit_runtime_constant (MonoCompile *cfg, MonoJumpInfoType patch_type, gpointer data);
void mini_save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg, gboolean null_check);
void mini_reset_cast_details (MonoCompile *cfg);
void mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass);
gboolean mini_class_has_reference_variant_generic_argument (MonoCompile *cfg, MonoClass *klass, int context_used);
MonoInst *mono_decompose_opcode (MonoCompile *cfg, MonoInst *ins);
void mono_decompose_long_opts (MonoCompile *cfg);
void mono_decompose_vtype_opts (MonoCompile *cfg);
void mono_decompose_array_access_opts (MonoCompile *cfg);
void mono_decompose_soft_float (MonoCompile *cfg);
void mono_local_emulate_ops (MonoCompile *cfg);
void mono_handle_global_vregs (MonoCompile *cfg);
void mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts);
void mono_allocate_gsharedvt_vars (MonoCompile *cfg);
void mono_if_conversion (MonoCompile *cfg);
/* Delegates */
char* mono_get_delegate_virtual_invoke_impl_name (gboolean load_imt_reg, int offset);
gpointer mono_get_delegate_virtual_invoke_impl (MonoMethodSignature *sig, MonoMethod *method);
void mono_codegen (MonoCompile *cfg);
void mono_call_inst_add_outarg_reg (MonoCompile *cfg, MonoCallInst *call, int vreg, int hreg, int bank);
void mono_call_inst_add_outarg_vt (MonoCompile *cfg, MonoCallInst *call, MonoInst *outarg_vt);
/* methods that must be provided by the arch-specific port */
void mono_arch_init (void);
void mono_arch_finish_init (void);
void mono_arch_cleanup (void);
void mono_arch_cpu_init (void);
guint32 mono_arch_cpu_optimizations (guint32 *exclude_mask);
const char *mono_arch_regname (int reg);
const char *mono_arch_fregname (int reg);
void mono_arch_exceptions_init (void);
guchar* mono_arch_create_generic_trampoline (MonoTrampolineType tramp_type, MonoTrampInfo **info, gboolean aot);
gpointer mono_arch_create_rgctx_lazy_fetch_trampoline (guint32 slot, MonoTrampInfo **info, gboolean aot);
gpointer mono_arch_create_general_rgctx_lazy_fetch_trampoline (MonoTrampInfo **info, gboolean aot);
guint8* mono_arch_create_sdb_trampoline (gboolean single_step, MonoTrampInfo **info, gboolean aot);
guint8 *mono_arch_create_llvm_native_thunk (guint8* addr);
gpointer mono_arch_get_get_tls_tramp (void);
GList *mono_arch_get_allocatable_int_vars (MonoCompile *cfg);
GList *mono_arch_get_global_int_regs (MonoCompile *cfg);
guint32 mono_arch_regalloc_cost (MonoCompile *cfg, MonoMethodVar *vmv);
void mono_arch_patch_code_new (MonoCompile *cfg, guint8 *code, MonoJumpInfo *ji, gpointer target);
void mono_arch_flush_icache (guint8 *code, gint size);
guint8 *mono_arch_emit_prolog (MonoCompile *cfg);
void mono_arch_emit_epilog (MonoCompile *cfg);
void mono_arch_emit_exceptions (MonoCompile *cfg);
void mono_arch_lowering_pass (MonoCompile *cfg, MonoBasicBlock *bb);
void mono_arch_peephole_pass_1 (MonoCompile *cfg, MonoBasicBlock *bb);
void mono_arch_peephole_pass_2 (MonoCompile *cfg, MonoBasicBlock *bb);
void mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb);
void mono_arch_fill_argument_info (MonoCompile *cfg);
void mono_arch_allocate_vars (MonoCompile *m);
int mono_arch_get_argument_info (MonoMethodSignature *csig, int param_count, MonoJitArgumentInfo *arg_info);
void mono_arch_emit_call (MonoCompile *cfg, MonoCallInst *call);
void mono_arch_emit_outarg_vt (MonoCompile *cfg, MonoInst *ins, MonoInst *src);
void mono_arch_emit_setret (MonoCompile *cfg, MonoMethod *method, MonoInst *val);
MonoDynCallInfo *mono_arch_dyn_call_prepare (MonoMethodSignature *sig);
void mono_arch_dyn_call_free (MonoDynCallInfo *info);
int mono_arch_dyn_call_get_buf_size (MonoDynCallInfo *info);
void mono_arch_start_dyn_call (MonoDynCallInfo *info, gpointer **args, guint8 *ret, guint8 *buf);
void mono_arch_finish_dyn_call (MonoDynCallInfo *info, guint8 *buf);
MonoInst *mono_arch_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args);
void mono_arch_decompose_opts (MonoCompile *cfg, MonoInst *ins);
void mono_arch_decompose_long_opts (MonoCompile *cfg, MonoInst *ins);
GSList* mono_arch_get_delegate_invoke_impls (void);
LLVMCallInfo* mono_arch_get_llvm_call_info (MonoCompile *cfg, MonoMethodSignature *sig);
guint8* mono_arch_emit_load_got_addr (guint8 *start, guint8 *code, MonoCompile *cfg, MonoJumpInfo **ji);
guint8* mono_arch_emit_load_aotconst (guint8 *start, guint8 *code, MonoJumpInfo **ji, MonoJumpInfoType tramp_type, gconstpointer target);
GSList* mono_arch_get_cie_program (void);
void mono_arch_set_target (char *mtriple);
gboolean mono_arch_gsharedvt_sig_supported (MonoMethodSignature *sig);
gpointer mono_arch_get_gsharedvt_trampoline (MonoTrampInfo **info, gboolean aot);
gpointer mono_arch_get_gsharedvt_call_info (MonoMemoryManager *mem_manager, gpointer addr, MonoMethodSignature *normal_sig, MonoMethodSignature *gsharedvt_sig, gboolean gsharedvt_in, gint32 vcall_offset, gboolean calli);
gboolean mono_arch_opcode_needs_emulation (MonoCompile *cfg, int opcode);
gboolean mono_arch_tailcall_supported (MonoCompile *cfg, MonoMethodSignature *caller_sig, MonoMethodSignature *callee_sig, gboolean virtual_);
int mono_arch_translate_tls_offset (int offset);
gboolean mono_arch_opcode_supported (int opcode);
MONO_COMPONENT_API void mono_arch_setup_resume_sighandler_ctx (MonoContext *ctx, gpointer func);
gboolean mono_arch_have_fast_tls (void);
#ifdef MONO_ARCH_HAS_REGISTER_ICALL
void mono_arch_register_icall (void);
#endif
#ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
gboolean mono_arch_is_soft_float (void);
#else
static inline MONO_ALWAYS_INLINE gboolean
mono_arch_is_soft_float (void)
{
return FALSE;
}
#endif
/* Soft Debug support */
#ifdef MONO_ARCH_SOFT_DEBUG_SUPPORTED
MONO_COMPONENT_API void mono_arch_set_breakpoint (MonoJitInfo *ji, guint8 *ip);
MONO_COMPONENT_API void mono_arch_clear_breakpoint (MonoJitInfo *ji, guint8 *ip);
MONO_COMPONENT_API void mono_arch_start_single_stepping (void);
MONO_COMPONENT_API void mono_arch_stop_single_stepping (void);
gboolean mono_arch_is_single_step_event (void *info, void *sigctx);
gboolean mono_arch_is_breakpoint_event (void *info, void *sigctx);
MONO_COMPONENT_API void mono_arch_skip_breakpoint (MonoContext *ctx, MonoJitInfo *ji);
MONO_COMPONENT_API void mono_arch_skip_single_step (MonoContext *ctx);
SeqPointInfo *mono_arch_get_seq_point_info (guint8 *code);
#endif
gboolean
mono_arch_unwind_frame (MonoJitTlsData *jit_tls,
MonoJitInfo *ji, MonoContext *ctx,
MonoContext *new_ctx, MonoLMF **lmf,
host_mgreg_t **save_locations,
StackFrameInfo *frame_info);
gpointer mono_arch_get_throw_exception_by_name (void);
gpointer mono_arch_get_call_filter (MonoTrampInfo **info, gboolean aot);
gpointer mono_arch_get_restore_context (MonoTrampInfo **info, gboolean aot);
gpointer mono_arch_get_throw_exception (MonoTrampInfo **info, gboolean aot);
gpointer mono_arch_get_rethrow_exception (MonoTrampInfo **info, gboolean aot);
gpointer mono_arch_get_rethrow_preserve_exception (MonoTrampInfo **info, gboolean aot);
gpointer mono_arch_get_throw_corlib_exception (MonoTrampInfo **info, gboolean aot);
gpointer mono_arch_get_throw_pending_exception (MonoTrampInfo **info, gboolean aot);
gboolean mono_arch_handle_exception (void *sigctx, gpointer obj);
void mono_arch_handle_altstack_exception (void *sigctx, MONO_SIG_HANDLER_INFO_TYPE *siginfo, gpointer fault_addr, gboolean stack_ovf);
gboolean mono_handle_soft_stack_ovf (MonoJitTlsData *jit_tls, MonoJitInfo *ji, void *ctx, MONO_SIG_HANDLER_INFO_TYPE *siginfo, guint8* fault_addr);
void mono_handle_hard_stack_ovf (MonoJitTlsData *jit_tls, MonoJitInfo *ji, MonoContext *mctx, guint8* fault_addr);
void mono_arch_undo_ip_adjustment (MonoContext *ctx);
void mono_arch_do_ip_adjustment (MonoContext *ctx);
gpointer mono_arch_ip_from_context (void *sigctx);
MONO_COMPONENT_API host_mgreg_t mono_arch_context_get_int_reg (MonoContext *ctx, int reg);
MONO_COMPONENT_API host_mgreg_t *mono_arch_context_get_int_reg_address (MonoContext *ctx, int reg);
MONO_COMPONENT_API void mono_arch_context_set_int_reg (MonoContext *ctx, int reg, host_mgreg_t val);
void mono_arch_flush_register_windows (void);
gboolean mono_arch_is_inst_imm (int opcode, int imm_opcode, gint64 imm);
gboolean mono_arch_is_int_overflow (void *sigctx, void *info);
void mono_arch_invalidate_method (MonoJitInfo *ji, void *func, gpointer func_arg);
guint32 mono_arch_get_patch_offset (guint8 *code);
gpointer *mono_arch_get_delegate_method_ptr_addr (guint8* code, host_mgreg_t *regs);
void mono_arch_create_vars (MonoCompile *cfg);
void mono_arch_save_unwind_info (MonoCompile *cfg);
void mono_arch_register_lowlevel_calls (void);
gpointer mono_arch_get_unbox_trampoline (MonoMethod *m, gpointer addr);
gpointer mono_arch_get_static_rgctx_trampoline (MonoMemoryManager *mem_manager, gpointer arg, gpointer addr);
gpointer mono_arch_get_ftnptr_arg_trampoline (MonoMemoryManager *mem_manager, gpointer arg, gpointer addr);
gpointer mono_arch_get_gsharedvt_arg_trampoline (gpointer arg, gpointer addr);
void mono_arch_patch_callsite (guint8 *method_start, guint8 *code, guint8 *addr);
void mono_arch_patch_plt_entry (guint8 *code, gpointer *got, host_mgreg_t *regs, guint8 *addr);
int mono_arch_get_this_arg_reg (guint8 *code);
gpointer mono_arch_get_this_arg_from_call (host_mgreg_t *regs, guint8 *code);
gpointer mono_arch_get_delegate_invoke_impl (MonoMethodSignature *sig, gboolean has_target);
gpointer mono_arch_get_delegate_virtual_invoke_impl (MonoMethodSignature *sig, MonoMethod *method, int offset, gboolean load_imt_reg);
gpointer mono_arch_create_specific_trampoline (gpointer arg1, MonoTrampolineType tramp_type, MonoMemoryManager *mem_manager, guint32 *code_len);
MonoMethod* mono_arch_find_imt_method (host_mgreg_t *regs, guint8 *code);
MonoVTable* mono_arch_find_static_call_vtable (host_mgreg_t *regs, guint8 *code);
gpointer mono_arch_build_imt_trampoline (MonoVTable *vtable, MonoIMTCheckItem **imt_entries, int count, gpointer fail_tramp);
void mono_arch_notify_pending_exc (MonoThreadInfo *info);
guint8* mono_arch_get_call_target (guint8 *code);
guint32 mono_arch_get_plt_info_offset (guint8 *plt_entry, host_mgreg_t *regs, guint8 *code);
GSList *mono_arch_get_trampolines (gboolean aot);
gpointer mono_arch_get_interp_to_native_trampoline (MonoTrampInfo **info);
gpointer mono_arch_get_native_to_interp_trampoline (MonoTrampInfo **info);
#ifdef MONO_ARCH_HAVE_INTERP_PINVOKE_TRAMP
// Moves data (arguments and return vt address) from the InterpFrame to the CallContext so a pinvoke call can be made.
void mono_arch_set_native_call_context_args (CallContext *ccontext, gpointer frame, MonoMethodSignature *sig);
// Moves the return value from the InterpFrame to the ccontext, or to the retp (if native code passed the retvt address)
void mono_arch_set_native_call_context_ret (CallContext *ccontext, gpointer frame, MonoMethodSignature *sig, gpointer retp);
// When entering interp from native, this moves the arguments from the ccontext to the InterpFrame. If we have a return
// vt address, we return it. This ret vt address needs to be passed to mono_arch_set_native_call_context_ret.
gpointer mono_arch_get_native_call_context_args (CallContext *ccontext, gpointer frame, MonoMethodSignature *sig);
// After the pinvoke call is done, this moves return value from the ccontext to the InterpFrame.
void mono_arch_get_native_call_context_ret (CallContext *ccontext, gpointer frame, MonoMethodSignature *sig);
#endif
/* New interruption machinery */
void
mono_setup_async_callback (MonoContext *ctx, void (*async_cb)(void *fun), gpointer user_data);
void
mono_arch_setup_async_callback (MonoContext *ctx, void (*async_cb)(void *fun), gpointer user_data);
gboolean
mono_thread_state_init_from_handle (MonoThreadUnwindState *tctx, MonoThreadInfo *info, /*optional*/ void *sigctx);
/* Exception handling */
typedef gboolean (*MonoJitStackWalk) (StackFrameInfo *frame, MonoContext *ctx, gpointer data);
void mono_exceptions_init (void);
gboolean mono_handle_exception (MonoContext *ctx, gpointer obj);
void mono_handle_native_crash (const char *signal, MonoContext *mctx, MONO_SIG_HANDLER_INFO_TYPE *siginfo);
MONO_API void mono_print_thread_dump (void *sigctx);
MONO_API void mono_print_thread_dump_from_ctx (MonoContext *ctx);
MONO_COMPONENT_API void mono_walk_stack_with_ctx (MonoJitStackWalk func, MonoContext *start_ctx, MonoUnwindOptions unwind_options, void *user_data);
MONO_COMPONENT_API void mono_walk_stack_with_state (MonoJitStackWalk func, MonoThreadUnwindState *state, MonoUnwindOptions unwind_options, void *user_data);
void mono_walk_stack (MonoJitStackWalk func, MonoUnwindOptions options, void *user_data);
gboolean mono_thread_state_init_from_sigctx (MonoThreadUnwindState *ctx, void *sigctx);
void mono_thread_state_init (MonoThreadUnwindState *ctx);
MONO_COMPONENT_API gboolean mono_thread_state_init_from_current (MonoThreadUnwindState *ctx);
MONO_COMPONENT_API gboolean mono_thread_state_init_from_monoctx (MonoThreadUnwindState *ctx, MonoContext *mctx);
void mono_setup_altstack (MonoJitTlsData *tls);
void mono_free_altstack (MonoJitTlsData *tls);
gpointer mono_altstack_restore_prot (host_mgreg_t *regs, guint8 *code, gpointer *tramp_data, guint8* tramp);
MONO_COMPONENT_API MonoJitInfo* mini_jit_info_table_find (gpointer addr);
MonoJitInfo* mini_jit_info_table_find_ext (gpointer addr, gboolean allow_trampolines);
G_EXTERN_C void mono_resume_unwind (MonoContext *ctx);
MonoJitInfo * mono_find_jit_info (MonoJitTlsData *jit_tls, MonoJitInfo *res, MonoJitInfo *prev_ji, MonoContext *ctx, MonoContext *new_ctx, char **trace, MonoLMF **lmf, int *native_offset, gboolean *managed);
typedef gboolean (*MonoExceptionFrameWalk) (MonoMethod *method, gpointer ip, size_t native_offset, gboolean managed, gpointer user_data);
MONO_API gboolean mono_exception_walk_trace (MonoException *ex, MonoExceptionFrameWalk func, gpointer user_data);
MONO_COMPONENT_API void mono_restore_context (MonoContext *ctx);
guint8* mono_jinfo_get_unwind_info (MonoJitInfo *ji, guint32 *unwind_info_len);
int mono_jinfo_get_epilog_size (MonoJitInfo *ji);
gboolean
mono_find_jit_info_ext (MonoJitTlsData *jit_tls,
MonoJitInfo *prev_ji, MonoContext *ctx,
MonoContext *new_ctx, char **trace, MonoLMF **lmf,
host_mgreg_t **save_locations,
StackFrameInfo *frame);
gpointer mono_get_throw_exception (void);
gpointer mono_get_rethrow_exception (void);
gpointer mono_get_rethrow_preserve_exception (void);
gpointer mono_get_call_filter (void);
gpointer mono_get_restore_context (void);
gpointer mono_get_throw_corlib_exception (void);
gpointer mono_get_throw_exception_addr (void);
gpointer mono_get_rethrow_preserve_exception_addr (void);
ICALL_EXPORT
MonoArray *ves_icall_get_trace (MonoException *exc, gint32 skip, MonoBoolean need_file_info);
ICALL_EXPORT
MonoBoolean ves_icall_get_frame_info (gint32 skip, MonoBoolean need_file_info,
MonoReflectionMethod **method,
gint32 *iloffset, gint32 *native_offset,
MonoString **file, gint32 *line, gint32 *column);
void mono_set_cast_details (MonoClass *from, MonoClass *to);
void mono_decompose_typechecks (MonoCompile *cfg);
/* Dominator/SSA methods */
void mono_compile_dominator_info (MonoCompile *cfg, int dom_flags);
void mono_compute_natural_loops (MonoCompile *cfg);
MonoBitSet* mono_compile_iterated_dfrontier (MonoCompile *cfg, MonoBitSet *set);
void mono_ssa_compute (MonoCompile *cfg);
void mono_ssa_remove (MonoCompile *cfg);
void mono_ssa_remove_gsharedvt (MonoCompile *cfg);
void mono_ssa_cprop (MonoCompile *cfg);
void mono_ssa_deadce (MonoCompile *cfg);
void mono_ssa_strength_reduction (MonoCompile *cfg);
void mono_free_loop_info (MonoCompile *cfg);
void mono_ssa_loop_invariant_code_motion (MonoCompile *cfg);
void mono_ssa_compute2 (MonoCompile *cfg);
void mono_ssa_remove2 (MonoCompile *cfg);
void mono_ssa_cprop2 (MonoCompile *cfg);
void mono_ssa_deadce2 (MonoCompile *cfg);
/* debugging support */
void mono_debug_init_method (MonoCompile *cfg, MonoBasicBlock *start_block,
guint32 breakpoint_id);
void mono_debug_open_method (MonoCompile *cfg);
void mono_debug_close_method (MonoCompile *cfg);
void mono_debug_free_method (MonoCompile *cfg);
void mono_debug_open_block (MonoCompile *cfg, MonoBasicBlock *bb, guint32 address);
void mono_debug_record_line_number (MonoCompile *cfg, MonoInst *ins, guint32 address);
void mono_debug_serialize_debug_info (MonoCompile *cfg, guint8 **out_buf, guint32 *buf_len);
void mono_debug_add_aot_method (MonoMethod *method, guint8 *code_start,
guint8 *debug_info, guint32 debug_info_len);
MONO_API void mono_debug_print_vars (gpointer ip, gboolean only_arguments);
MONO_API void mono_debugger_run_finally (MonoContext *start_ctx);
MONO_API gboolean mono_breakpoint_clean_code (guint8 *method_start, guint8 *code, int offset, guint8 *buf, int size);
/* Tracing */
MonoCallSpec *mono_trace_set_options (const char *options);
gboolean mono_trace_eval (MonoMethod *method);
gboolean
mono_tailcall_print_enabled (void);
void
mono_tailcall_print (const char *format, ...);
gboolean
mono_is_supported_tailcall_helper (gboolean value, const char *svalue);
#define IS_SUPPORTED_TAILCALL(x) (mono_is_supported_tailcall_helper((x), #x))
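/*
 * Illustrative sketch (not part of the original header): IS_SUPPORTED_TAILCALL
 * stringizes its condition so mono_is_supported_tailcall_helper () can report
 * exactly which check failed when tailcall tracing is enabled. The condition
 * below is hypothetical:
 *
 *   gboolean ok = IS_SUPPORTED_TAILCALL (caller_sig->param_count <= 4);
 *   // expands to mono_is_supported_tailcall_helper ((cond), "caller_sig->param_count <= 4")
 */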
extern void
mono_perform_abc_removal (MonoCompile *cfg);
extern void
mono_local_cprop (MonoCompile *cfg);
extern void
mono_local_deadce (MonoCompile *cfg);
void
mono_local_alias_analysis (MonoCompile *cfg);
/* Generic sharing */
void
mono_set_generic_sharing_supported (gboolean supported);
void
mono_set_generic_sharing_vt_supported (gboolean supported);
void
mono_set_partial_sharing_supported (gboolean supported);
gboolean
mono_class_generic_sharing_enabled (MonoClass *klass);
gpointer
mono_class_fill_runtime_generic_context (MonoVTable *class_vtable, guint32 slot, MonoError *error);
gpointer
mono_method_fill_runtime_generic_context (MonoMethodRuntimeGenericContext *mrgctx, guint32 slot, MonoError *error);
const char*
mono_rgctx_info_type_to_str (MonoRgctxInfoType type);
MonoJumpInfoType
mini_rgctx_info_type_to_patch_info_type (MonoRgctxInfoType info_type);
gboolean
mono_method_needs_static_rgctx_invoke (MonoMethod *method, gboolean allow_type_vars);
int
mono_class_rgctx_get_array_size (int n, gboolean mrgctx);
MonoGenericContext
mono_method_construct_object_context (MonoMethod *method);
MONO_COMPONENT_API MonoMethod*
mono_method_get_declaring_generic_method (MonoMethod *method);
int
mono_generic_context_check_used (MonoGenericContext *context);
int
mono_class_check_context_used (MonoClass *klass);
gboolean
mono_generic_context_is_sharable (MonoGenericContext *context, gboolean allow_type_vars);
gboolean
mono_generic_context_is_sharable_full (MonoGenericContext *context, gboolean allow_type_vars, gboolean allow_partial);
gboolean
mono_method_is_generic_impl (MonoMethod *method);
gboolean
mono_method_is_generic_sharable (MonoMethod *method, gboolean allow_type_vars);
gboolean
mono_method_is_generic_sharable_full (MonoMethod *method, gboolean allow_type_vars, gboolean allow_partial, gboolean allow_gsharedvt);
gboolean
mini_class_is_generic_sharable (MonoClass *klass);
gboolean
mini_generic_inst_is_sharable (MonoGenericInst *inst, gboolean allow_type_vars, gboolean allow_partial);
MonoMethod*
mono_class_get_method_generic (MonoClass *klass, MonoMethod *method, MonoError *error);
gboolean
mono_is_partially_sharable_inst (MonoGenericInst *inst);
gboolean
mini_is_gsharedvt_gparam (MonoType *t);
gboolean
mini_is_gsharedvt_inst (MonoGenericInst *inst);
MonoGenericContext* mini_method_get_context (MonoMethod *method);
int mono_method_check_context_used (MonoMethod *method);
gboolean mono_generic_context_equal_deep (MonoGenericContext *context1, MonoGenericContext *context2);
gpointer mono_helper_get_rgctx_other_ptr (MonoClass *caller_class, MonoVTable *vtable,
guint32 token, guint32 token_source, guint32 rgctx_type,
gint32 rgctx_index);
void mono_generic_sharing_init (void);
MonoClass* mini_class_get_container_class (MonoClass *klass);
MonoGenericContext* mini_class_get_context (MonoClass *klass);
typedef enum {
SHARE_MODE_NONE = 0x0,
SHARE_MODE_GSHAREDVT = 0x1,
} GetSharedMethodFlags;
MonoType* mini_get_underlying_type (MonoType *type);
MonoType* mini_type_get_underlying_type (MonoType *type);
MonoClass* mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context);
MonoMethod* mini_get_shared_method_to_register (MonoMethod *method);
MonoMethod* mini_get_shared_method_full (MonoMethod *method, GetSharedMethodFlags flags, MonoError *error);
MonoType* mini_get_shared_gparam (MonoType *t, MonoType *constraint);
int mini_get_rgctx_entry_slot (MonoJumpInfoRgctxEntry *entry);
int mini_type_stack_size (MonoType *t, int *align);
int mini_type_stack_size_full (MonoType *t, guint32 *align, gboolean pinvoke);
void mini_type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst);
guint mono_type_to_regmove (MonoCompile *cfg, MonoType *type);
void mono_cfg_add_try_hole (MonoCompile *cfg, MonoExceptionClause *clause, guint8 *start, MonoBasicBlock *bb);
void mono_cfg_set_exception (MonoCompile *cfg, MonoExceptionType type);
void mono_cfg_set_exception_invalid_program (MonoCompile *cfg, char *msg);
#define MONO_TIME_TRACK(a, phase) \
{ \
gint64 start = mono_time_track_start (); \
(phase) ; \
mono_time_track_end (&(a), start); \
}
gint64 mono_time_track_start (void);
void mono_time_track_end (gint64 *time, gint64 start);
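/*
 * Illustrative usage of MONO_TIME_TRACK (the accumulator name is hypothetical):
 *
 *   gint64 ssa_time = 0;
 *   MONO_TIME_TRACK (ssa_time, mono_ssa_compute (cfg));
 *   // ssa_time now includes the ticks spent inside mono_ssa_compute ()
 */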
void mono_update_jit_stats (MonoCompile *cfg);
gboolean mini_type_is_reference (MonoType *type);
gboolean mini_type_is_vtype (MonoType *t);
gboolean mini_type_var_is_vt (MonoType *type);
gboolean mini_is_gsharedvt_type (MonoType *t);
gboolean mini_is_gsharedvt_klass (MonoClass *klass);
gboolean mini_is_gsharedvt_signature (MonoMethodSignature *sig);
gboolean mini_is_gsharedvt_variable_type (MonoType *t);
gboolean mini_is_gsharedvt_variable_klass (MonoClass *klass);
gboolean mini_is_gsharedvt_sharable_method (MonoMethod *method);
gboolean mini_is_gsharedvt_variable_signature (MonoMethodSignature *sig);
gboolean mini_is_gsharedvt_sharable_inst (MonoGenericInst *inst);
gboolean mini_method_is_default_method (MonoMethod *m);
gboolean mini_method_needs_mrgctx (MonoMethod *m);
gpointer mini_method_get_rgctx (MonoMethod *m);
void mini_init_gsctx (MonoMemPool *mp, MonoGenericContext *context, MonoGenericSharingContext *gsctx);
gpointer mini_get_gsharedvt_wrapper (gboolean gsharedvt_in, gpointer addr, MonoMethodSignature *normal_sig, MonoMethodSignature *gsharedvt_sig,
gint32 vcall_offset, gboolean calli);
MonoMethod* mini_get_gsharedvt_in_sig_wrapper (MonoMethodSignature *sig);
MonoMethod* mini_get_gsharedvt_out_sig_wrapper (MonoMethodSignature *sig);
MonoMethodSignature* mini_get_gsharedvt_out_sig_wrapper_signature (gboolean has_this, gboolean has_ret, int param_count);
gboolean mini_gsharedvt_runtime_invoke_supported (MonoMethodSignature *sig);
G_EXTERN_C void mono_interp_entry_from_trampoline (gpointer ccontext, gpointer imethod);
G_EXTERN_C void mono_interp_to_native_trampoline (gpointer addr, gpointer ccontext);
MonoMethod* mini_get_interp_in_wrapper (MonoMethodSignature *sig);
MonoMethod* mini_get_interp_lmf_wrapper (const char *name, gpointer target);
char* mono_get_method_from_ip (void *ip);
/* SIMD support */
typedef enum {
/* Used for lazy initialization */
MONO_CPU_INITED = 1 << 0,
#if defined(TARGET_X86) || defined(TARGET_AMD64)
MONO_CPU_X86_SSE = 1 << 1,
MONO_CPU_X86_SSE2 = 1 << 2,
MONO_CPU_X86_PCLMUL = 1 << 3,
MONO_CPU_X86_AES = 1 << 4,
MONO_CPU_X86_SSE3 = 1 << 5,
MONO_CPU_X86_SSSE3 = 1 << 6,
MONO_CPU_X86_SSE41 = 1 << 7,
MONO_CPU_X86_SSE42 = 1 << 8,
MONO_CPU_X86_POPCNT = 1 << 9,
MONO_CPU_X86_AVX = 1 << 10,
MONO_CPU_X86_AVX2 = 1 << 11,
MONO_CPU_X86_FMA = 1 << 12,
MONO_CPU_X86_LZCNT = 1 << 13,
MONO_CPU_X86_BMI1 = 1 << 14,
MONO_CPU_X86_BMI2 = 1 << 15,
//
// Dependencies (based on System.Runtime.Intrinsics.X86 class hierarchy):
//
// sse
// sse2
// pclmul
// aes
// sse3
// ssse3 (doesn't include 'pclmul' and 'aes')
// sse4.1
// sse4.2
// popcnt
// avx (doesn't include 'popcnt')
// avx2
// fma
// lzcnt
// bmi1
// bmi2
MONO_CPU_X86_SSE_COMBINED = MONO_CPU_X86_SSE,
MONO_CPU_X86_SSE2_COMBINED = MONO_CPU_X86_SSE_COMBINED | MONO_CPU_X86_SSE2,
MONO_CPU_X86_PCLMUL_COMBINED = MONO_CPU_X86_SSE2_COMBINED | MONO_CPU_X86_PCLMUL,
MONO_CPU_X86_AES_COMBINED = MONO_CPU_X86_SSE2_COMBINED | MONO_CPU_X86_AES,
MONO_CPU_X86_SSE3_COMBINED = MONO_CPU_X86_SSE2_COMBINED | MONO_CPU_X86_SSE3,
MONO_CPU_X86_SSSE3_COMBINED = MONO_CPU_X86_SSE3_COMBINED | MONO_CPU_X86_SSSE3,
MONO_CPU_X86_SSE41_COMBINED = MONO_CPU_X86_SSSE3_COMBINED | MONO_CPU_X86_SSE41,
MONO_CPU_X86_SSE42_COMBINED = MONO_CPU_X86_SSE41_COMBINED | MONO_CPU_X86_SSE42,
MONO_CPU_X86_POPCNT_COMBINED = MONO_CPU_X86_SSE42_COMBINED | MONO_CPU_X86_POPCNT,
MONO_CPU_X86_AVX_COMBINED = MONO_CPU_X86_SSE42_COMBINED | MONO_CPU_X86_AVX,
MONO_CPU_X86_AVX2_COMBINED = MONO_CPU_X86_AVX_COMBINED | MONO_CPU_X86_AVX2,
MONO_CPU_X86_FMA_COMBINED = MONO_CPU_X86_AVX_COMBINED | MONO_CPU_X86_FMA,
MONO_CPU_X86_FULL_SSEAVX_COMBINED = MONO_CPU_X86_FMA_COMBINED | MONO_CPU_X86_AVX2 | MONO_CPU_X86_PCLMUL
| MONO_CPU_X86_AES | MONO_CPU_X86_POPCNT | MONO_CPU_X86_FMA,
#endif
#ifdef TARGET_WASM
MONO_CPU_WASM_SIMD = 1 << 1,
#endif
#ifdef TARGET_ARM64
MONO_CPU_ARM64_BASE = 1 << 1,
MONO_CPU_ARM64_CRC = 1 << 2,
MONO_CPU_ARM64_CRYPTO = 1 << 3,
MONO_CPU_ARM64_NEON = 1 << 4,
MONO_CPU_ARM64_RDM = 1 << 5,
MONO_CPU_ARM64_DP = 1 << 6,
#endif
} MonoCPUFeatures;
G_ENUM_FUNCTIONS (MonoCPUFeatures)
MonoCPUFeatures mini_get_cpu_features (MonoCompile* cfg);
enum {
SIMD_COMP_EQ,
SIMD_COMP_LT,
SIMD_COMP_LE,
SIMD_COMP_UNORD,
SIMD_COMP_NEQ,
SIMD_COMP_NLT,
SIMD_COMP_NLE,
SIMD_COMP_ORD
};
enum {
SIMD_PREFETCH_MODE_NTA,
SIMD_PREFETCH_MODE_0,
SIMD_PREFETCH_MODE_1,
SIMD_PREFETCH_MODE_2,
};
const char *mono_arch_xregname (int reg);
MonoCPUFeatures mono_arch_get_cpu_features (void);
#ifdef MONO_ARCH_SIMD_INTRINSICS
void mono_simd_simplify_indirection (MonoCompile *cfg);
void mono_simd_decompose_intrinsic (MonoCompile *cfg, MonoBasicBlock *bb, MonoInst *ins);
MonoInst* mono_emit_simd_intrinsics (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args);
MonoInst* mono_emit_simd_field_load (MonoCompile *cfg, MonoClassField *field, MonoInst *addr);
void mono_simd_intrinsics_init (void);
#endif
gboolean mono_class_is_magic_int (MonoClass *klass);
gboolean mono_class_is_magic_float (MonoClass *klass);
MonoInst* mono_emit_native_types_intrinsics (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args);
gsize mini_magic_type_size (MonoCompile *cfg, MonoType *type);
gboolean mini_magic_is_int_type (MonoType *t);
gboolean mini_magic_is_float_type (MonoType *t);
MonoType* mini_native_type_replace_type (MonoType *type);
MonoMethod*
mini_method_to_shared (MonoMethod *method); // null if not shared
static inline gboolean
mini_safepoints_enabled (void)
{
#if defined (TARGET_WASM)
return FALSE;
#else
return TRUE;
#endif
}
gpointer
mono_arch_load_function (MonoJitICallId jit_icall_id);
MONO_COMPONENT_API MonoGenericContext
mono_get_generic_context_from_stack_frame (MonoJitInfo *ji, gpointer generic_info);
MONO_COMPONENT_API gpointer
mono_get_generic_info_from_stack_frame (MonoJitInfo *ji, MonoContext *ctx);
MonoMemoryManager* mini_get_default_mem_manager (void);
MONO_COMPONENT_API int
mono_wasm_get_debug_level (void);
#endif /* __MONO_MINI_H__ */
| /**
* \file
* Copyright 2002-2003 Ximian Inc
* Copyright 2003-2011 Novell Inc
* Copyright 2011 Xamarin Inc
* Licensed under the MIT license. See LICENSE file in the project root for full license information.
*/
#ifndef __MONO_MINI_H__
#define __MONO_MINI_H__
#include "config.h"
#include <glib.h>
#include <signal.h>
#ifdef HAVE_SYS_TYPES_H
#include <sys/types.h>
#endif
#include <mono/utils/mono-forward-internal.h>
#include <mono/metadata/loader.h>
#include <mono/metadata/mempool.h>
#include <mono/utils/monobitset.h>
#include <mono/metadata/class.h>
#include <mono/metadata/object.h>
#include <mono/metadata/opcodes.h>
#include <mono/metadata/tabledefs.h>
#include <mono/metadata/domain-internals.h>
#include "mono/metadata/class-internals.h"
#include "mono/metadata/class-init.h"
#include "mono/metadata/object-internals.h"
#include <mono/metadata/profiler-private.h>
#include <mono/metadata/debug-helpers.h>
#include <mono/metadata/abi-details.h>
#include <mono/metadata/jit-info.h>
#include <mono/utils/mono-compiler.h>
#include <mono/utils/mono-machine.h>
#include <mono/utils/mono-stack-unwinding.h>
#include <mono/utils/mono-threads.h>
#include <mono/utils/mono-threads-coop.h>
#include <mono/utils/mono-tls.h>
#include <mono/utils/atomic.h>
#include <mono/utils/mono-jemalloc.h>
#include <mono/utils/mono-conc-hashtable.h>
#include <mono/utils/mono-signal-handler.h>
#include <mono/utils/ftnptr.h>
#include <mono/metadata/icalls.h>
// Forward declare so that mini-*.h can have pointers to them.
// CallInfo is presently architecture specific.
typedef struct MonoInst MonoInst;
typedef struct CallInfo CallInfo;
typedef struct SeqPointInfo SeqPointInfo;
#include "mini-arch.h"
#include "regalloc.h"
#include "mini-unwind.h"
#include <mono/jit/jit.h>
#include "cfgdump.h"
#include "tiered.h"
#include "mono/metadata/tabledefs.h"
#include "mono/metadata/marshal.h"
#include "mono/metadata/exception.h"
#include "mono/metadata/callspec.h"
#include "mono/metadata/icall-signatures.h"
/*
* The mini code should not have any compile time dependencies on the GC being used, so the same object file from mini/
* can be linked into both mono and mono-sgen.
*/
#if !defined(MONO_DLL_EXPORT) || !defined(_MSC_VER)
#if defined(HAVE_BOEHM_GC) || defined(HAVE_SGEN_GC)
#error "The code in mini/ should not depend on these defines."
#endif
#endif
#ifndef __GNUC__
/*#define __alignof__(a) sizeof(a)*/
#define __alignof__(type) G_STRUCT_OFFSET(struct { char c; type x; }, x)
#endif
#if DISABLE_LOGGING
#define MINI_DEBUG(level,limit,code)
#else
#define MINI_DEBUG(level,limit,code) do {if (G_UNLIKELY ((level) >= (limit))) code} while (0)
#endif
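/*
 * Illustrative usage: the block runs only when the verbosity level reaches
 * the limit, and compiles away entirely when DISABLE_LOGGING is set.
 *
 *   MINI_DEBUG (cfg->verbose_level, 2, { g_print ("running deadce\n"); });
 */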
#if !defined(DISABLE_TASKLETS) && defined(MONO_ARCH_SUPPORT_TASKLETS)
#if defined(__GNUC__)
#define MONO_SUPPORT_TASKLETS 1
#elif defined(HOST_WIN32)
#define MONO_SUPPORT_TASKLETS 1
// Replace some gnu intrinsics needed for tasklets with MSVC equivalents.
#define __builtin_extract_return_addr(x) x
#define __builtin_return_address(x) _ReturnAddress()
#define __builtin_frame_address(x) _AddressOfReturnAddress()
#endif
#endif
#if ENABLE_LLVM
#define COMPILE_LLVM(cfg) ((cfg)->compile_llvm)
#define LLVM_ENABLED TRUE
#else
#define COMPILE_LLVM(cfg) (0)
#define LLVM_ENABLED FALSE
#endif
#ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
#define COMPILE_SOFT_FLOAT(cfg) (!COMPILE_LLVM ((cfg)) && mono_arch_is_soft_float ())
#else
#define COMPILE_SOFT_FLOAT(cfg) (0)
#endif
#define NOT_IMPLEMENTED do { g_assert_not_reached (); } while (0)
/* for 32 bit systems */
#if G_BYTE_ORDER == G_LITTLE_ENDIAN
#define MINI_LS_WORD_IDX 0
#define MINI_MS_WORD_IDX 1
#else
#define MINI_LS_WORD_IDX 1
#define MINI_MS_WORD_IDX 0
#endif
#define MINI_LS_WORD_OFFSET (MINI_LS_WORD_IDX * 4)
#define MINI_MS_WORD_OFFSET (MINI_MS_WORD_IDX * 4)
#define MONO_LVREG_LS(lvreg) ((lvreg) + 1)
#define MONO_LVREG_MS(lvreg) ((lvreg) + 2)
#ifndef DISABLE_AOT
#define MONO_USE_AOT_COMPILER
#endif
//TODO: This is x86/amd64 specific.
#define mono_simd_shuffle_mask(a,b,c,d) ((a) | ((b) << 2) | ((c) << 4) | ((d) << 6))
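/*
 * Illustrative example: build the x86 SHUFPS/PSHUFD-style immediate that
 * reverses a 4-element vector.
 *
 *   int imm = mono_simd_shuffle_mask (3, 2, 1, 0);  // == 0x1b
 */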
/* Remap printf to g_print (we use a mix of these in the mini code) */
#ifdef HOST_ANDROID
#define printf g_print
#endif
#define MONO_TYPE_IS_PRIMITIVE(t) ((!m_type_is_byref ((t)) && ((((t)->type >= MONO_TYPE_BOOLEAN && (t)->type <= MONO_TYPE_R8) || ((t)->type >= MONO_TYPE_I && (t)->type <= MONO_TYPE_U)))))
#define MONO_TYPE_IS_VECTOR_PRIMITIVE(t) ((!m_type_is_byref ((t)) && ((((t)->type >= MONO_TYPE_I1 && (t)->type <= MONO_TYPE_R8) || ((t)->type >= MONO_TYPE_I && (t)->type <= MONO_TYPE_U)))))
// XXX This ignores whether t is byref.
#define MONO_TYPE_IS_PRIMITIVE_SCALAR(t) ((((((t)->type >= MONO_TYPE_BOOLEAN && (t)->type <= MONO_TYPE_U8) || ((t)->type >= MONO_TYPE_I && (t)->type <= MONO_TYPE_U)))))
typedef struct
{
MonoClass *klass;
MonoMethod *method;
} MonoClassMethodPair;
typedef struct
{
MonoClass *klass;
MonoMethod *method;
gboolean is_virtual;
} MonoDelegateClassMethodPair;
typedef struct {
MonoJitInfo *ji;
MonoCodeManager *code_mp;
} MonoJitDynamicMethodInfo;
/* An extension of MonoGenericParamFull used in generic sharing */
typedef struct {
MonoGenericParamFull param;
MonoGenericParam *parent;
} MonoGSharedGenericParam;
/* Contains a list of ips which needs to be patched when a method is compiled */
typedef struct {
GSList *list;
} MonoJumpList;
/* Arch-specific */
typedef struct {
int dummy;
} MonoDynCallInfo;
typedef struct {
guint32 index;
MonoExceptionClause *clause;
} MonoLeaveClause;
/*
* Information about a stack frame.
* FIXME This typedef exists only to avoid tons of code rewriting
*/
typedef MonoStackFrameInfo StackFrameInfo;
#if 0
#define mono_bitset_foreach_bit(set,b,n) \
for (b = 0; b < n; b++)\
if (mono_bitset_test_fast(set,b))
#else
#define mono_bitset_foreach_bit(set,b,n) \
for (b = mono_bitset_find_start (set); b < n && b >= 0; b = mono_bitset_find_first (set, b))
#endif
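/*
 * Illustrative usage (the liveness set and bound are just examples):
 *
 *   int b;
 *   mono_bitset_foreach_bit (bb->live_in_set, b, cfg->num_varinfo) {
 *       // b is the index of a set bit
 *   }
 */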
/*
* Pull the list of opcodes
*/
#define OPDEF(a,b,c,d,e,f,g,h,i,j) \
a = i,
enum {
#include "mono/cil/opcode.def"
CEE_LASTOP
};
#undef OPDEF
#define MONO_VARINFO(cfg,varnum) (&(cfg)->vars [varnum])
#define MONO_INST_NULLIFY_SREGS(dest) do { \
(dest)->sreg1 = (dest)->sreg2 = (dest)->sreg3 = -1; \
} while (0)
#define MONO_INST_NEW(cfg,dest,op) do { \
(dest) = (MonoInst *)mono_mempool_alloc0 ((cfg)->mempool, sizeof (MonoInst)); \
(dest)->opcode = (op); \
(dest)->dreg = -1; \
MONO_INST_NULLIFY_SREGS ((dest)); \
(dest)->cil_code = (cfg)->ip; \
} while (0)
#define MONO_INST_NEW_CALL(cfg,dest,op) do { \
(dest) = (MonoCallInst *)mono_mempool_alloc0 ((cfg)->mempool, sizeof (MonoCallInst)); \
(dest)->inst.opcode = (op); \
(dest)->inst.dreg = -1; \
MONO_INST_NULLIFY_SREGS (&(dest)->inst); \
(dest)->inst.cil_code = (cfg)->ip; \
} while (0)
#define MONO_ADD_INS(b,inst) do { \
if ((b)->last_ins) { \
(b)->last_ins->next = (inst); \
(inst)->prev = (b)->last_ins; \
(b)->last_ins = (inst); \
} else { \
(b)->code = (b)->last_ins = (inst); \
} \
} while (0)
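/*
 * Illustrative sketch of the usual create-and-emit pattern (the vregs are
 * hypothetical):
 *
 *   MonoInst *ins;
 *   MONO_INST_NEW (cfg, ins, OP_MOVE);
 *   ins->dreg = dreg;
 *   ins->sreg1 = sreg;
 *   MONO_ADD_INS (cfg->cbb, ins);
 */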
#define NULLIFY_INS(ins) do { \
(ins)->opcode = OP_NOP; \
(ins)->dreg = -1; \
MONO_INST_NULLIFY_SREGS ((ins)); \
} while (0)
/* Remove INS from BB */
#define MONO_REMOVE_INS(bb,ins) do { \
if ((ins)->prev) \
(ins)->prev->next = (ins)->next; \
if ((ins)->next) \
(ins)->next->prev = (ins)->prev; \
if ((bb)->code == (ins)) \
(bb)->code = (ins)->next; \
if ((bb)->last_ins == (ins)) \
(bb)->last_ins = (ins)->prev; \
} while (0)
/* Remove INS from BB and nullify it */
#define MONO_DELETE_INS(bb,ins) do { \
MONO_REMOVE_INS ((bb), (ins)); \
NULLIFY_INS ((ins)); \
} while (0)
/*
* this is used to determine when some branch optimizations are possible: we exclude FP compares
* because they have weird semantics with NaNs.
*/
#define MONO_IS_COND_BRANCH_OP(ins) (((ins)->opcode >= OP_LBEQ && (ins)->opcode <= OP_LBLT_UN) || ((ins)->opcode >= OP_FBEQ && (ins)->opcode <= OP_FBLT_UN) || ((ins)->opcode >= OP_IBEQ && (ins)->opcode <= OP_IBLT_UN))
#define MONO_IS_COND_BRANCH_NOFP(ins) (MONO_IS_COND_BRANCH_OP(ins) && !(((ins)->opcode >= OP_FBEQ) && ((ins)->opcode <= OP_FBLT_UN)))
#define MONO_IS_BRANCH_OP(ins) (MONO_IS_COND_BRANCH_OP(ins) || ((ins)->opcode == OP_BR) || ((ins)->opcode == OP_BR_REG) || ((ins)->opcode == OP_SWITCH))
#define MONO_IS_COND_EXC(ins) ((((ins)->opcode >= OP_COND_EXC_EQ) && ((ins)->opcode <= OP_COND_EXC_LT_UN)) || (((ins)->opcode >= OP_COND_EXC_IEQ) && ((ins)->opcode <= OP_COND_EXC_ILT_UN)))
#define MONO_IS_SETCC(ins) ((((ins)->opcode >= OP_CEQ) && ((ins)->opcode <= OP_CLT_UN)) || (((ins)->opcode >= OP_ICEQ) && ((ins)->opcode <= OP_ICLE_UN)) || (((ins)->opcode >= OP_LCEQ) && ((ins)->opcode <= OP_LCLT_UN)) || (((ins)->opcode >= OP_FCEQ) && ((ins)->opcode <= OP_FCLT_UN)))
#define MONO_HAS_CUSTOM_EMULATION(ins) (((ins)->opcode >= OP_FBEQ && (ins)->opcode <= OP_FBLT_UN) || ((ins)->opcode >= OP_FCEQ && (ins)->opcode <= OP_FCLT_UN))
#define MONO_IS_LOAD_MEMBASE(ins) (((ins)->opcode >= OP_LOAD_MEMBASE && (ins)->opcode <= OP_LOADV_MEMBASE) || ((ins)->opcode >= OP_ATOMIC_LOAD_I1 && (ins)->opcode <= OP_ATOMIC_LOAD_R8))
#define MONO_IS_STORE_MEMBASE(ins) (((ins)->opcode >= OP_STORE_MEMBASE_REG && (ins)->opcode <= OP_STOREV_MEMBASE) || ((ins)->opcode >= OP_ATOMIC_STORE_I1 && (ins)->opcode <= OP_ATOMIC_STORE_R8))
#define MONO_IS_STORE_MEMINDEX(ins) (((ins)->opcode >= OP_STORE_MEMINDEX) && ((ins)->opcode <= OP_STORER8_MEMINDEX))
// This is internal because it is easily confused with any enum or integer.
#define MONO_IS_TAILCALL_OPCODE_INTERNAL(opcode) ((opcode) == OP_TAILCALL || (opcode) == OP_TAILCALL_MEMBASE || (opcode) == OP_TAILCALL_REG)
#define MONO_IS_TAILCALL_OPCODE(call) (MONO_IS_TAILCALL_OPCODE_INTERNAL (call->inst.opcode))
// OP_DYN_CALL is not a MonoCallInst
#define MONO_IS_CALL(ins) (((ins)->opcode >= OP_VOIDCALL && (ins)->opcode <= OP_VCALL2_MEMBASE) || \
MONO_IS_TAILCALL_OPCODE_INTERNAL ((ins)->opcode))
#define MONO_IS_JUMP_TABLE(ins) (((ins)->opcode == OP_JUMP_TABLE) ? TRUE : ((((ins)->opcode == OP_AOTCONST) && (ins->inst_i1 == (gpointer)MONO_PATCH_INFO_SWITCH)) ? TRUE : ((ins)->opcode == OP_SWITCH) ? TRUE : ((((ins)->opcode == OP_GOT_ENTRY) && ((ins)->inst_right->inst_i1 == (gpointer)MONO_PATCH_INFO_SWITCH)) ? TRUE : FALSE)))
#define MONO_JUMP_TABLE_FROM_INS(ins) (((ins)->opcode == OP_JUMP_TABLE) ? (ins)->inst_p0 : (((ins)->opcode == OP_AOTCONST) && (ins->inst_i1 == (gpointer)MONO_PATCH_INFO_SWITCH) ? (ins)->inst_p0 : (((ins)->opcode == OP_SWITCH) ? (ins)->inst_p0 : ((((ins)->opcode == OP_GOT_ENTRY) && ((ins)->inst_right->inst_i1 == (gpointer)MONO_PATCH_INFO_SWITCH)) ? (ins)->inst_right->inst_p0 : NULL))))
#define MONO_INS_HAS_NO_SIDE_EFFECT(ins) (mono_ins_no_side_effects ((ins)))
#define MONO_INS_IS_PCONST_NULL(ins) ((ins)->opcode == OP_PCONST && (ins)->inst_p0 == 0)
#define MONO_METHOD_IS_FINAL(m) (((m)->flags & METHOD_ATTRIBUTE_FINAL) || ((m)->klass && (mono_class_get_flags ((m)->klass) & TYPE_ATTRIBUTE_SEALED)))
/* Determine whether 'ins' represents a load of the 'this' argument */
#define MONO_CHECK_THIS(ins) (mono_method_signature_internal (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
#ifdef MONO_ARCH_SIMD_INTRINSICS
#define MONO_IS_PHI(ins) (((ins)->opcode == OP_PHI) || ((ins)->opcode == OP_FPHI) || ((ins)->opcode == OP_VPHI) || ((ins)->opcode == OP_XPHI))
#define MONO_IS_MOVE(ins) (((ins)->opcode == OP_MOVE) || ((ins)->opcode == OP_FMOVE) || ((ins)->opcode == OP_VMOVE) || ((ins)->opcode == OP_XMOVE) || ((ins)->opcode == OP_RMOVE))
#define MONO_IS_NON_FP_MOVE(ins) (((ins)->opcode == OP_MOVE) || ((ins)->opcode == OP_VMOVE) || ((ins)->opcode == OP_XMOVE))
#define MONO_IS_REAL_MOVE(ins) (((ins)->opcode == OP_MOVE) || ((ins)->opcode == OP_FMOVE) || ((ins)->opcode == OP_XMOVE) || ((ins)->opcode == OP_RMOVE))
#define MONO_IS_ZERO(ins) (((ins)->opcode == OP_VZERO) || ((ins)->opcode == OP_XZERO))
#ifdef TARGET_ARM64
/*
* SIMD is only supported on arm64 when using the LLVM backend. When not using
* the LLVM backend, treat SIMD datatypes as regular value types.
*/
#define MONO_CLASS_IS_SIMD(cfg, klass) (((cfg)->opt & MONO_OPT_SIMD) && COMPILE_LLVM (cfg) && m_class_is_simd_type (klass))
#else
#define MONO_CLASS_IS_SIMD(cfg, klass) (((cfg)->opt & MONO_OPT_SIMD) && m_class_is_simd_type (klass) && (COMPILE_LLVM (cfg) || mono_type_size (m_class_get_byval_arg (klass), NULL) == 16))
#endif
#else
#define MONO_IS_PHI(ins) (((ins)->opcode == OP_PHI) || ((ins)->opcode == OP_FPHI) || ((ins)->opcode == OP_VPHI))
#define MONO_IS_MOVE(ins) (((ins)->opcode == OP_MOVE) || ((ins)->opcode == OP_FMOVE) || ((ins)->opcode == OP_VMOVE) || ((ins)->opcode == OP_RMOVE))
#define MONO_IS_NON_FP_MOVE(ins) (((ins)->opcode == OP_MOVE) || ((ins)->opcode == OP_VMOVE))
/* A real MOVE is one that isn't decomposed, such as a VMOVE or LMOVE. */
#define MONO_IS_REAL_MOVE(ins) (((ins)->opcode == OP_MOVE) || ((ins)->opcode == OP_FMOVE) || ((ins)->opcode == OP_RMOVE))
#define MONO_IS_ZERO(ins) ((ins)->opcode == OP_VZERO)
#define MONO_CLASS_IS_SIMD(cfg, klass) (0)
#endif
#if defined(TARGET_X86) || defined(TARGET_AMD64)
#define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
(dest)->dreg = alloc_ireg_mp ((cfg)); \
(dest)->sreg1 = (sr1); \
(dest)->sreg2 = (sr2); \
(dest)->inst_imm = (imm); \
(dest)->backend.shift_amount = (shift); \
MONO_ADD_INS ((cfg)->cbb, (dest)); \
} while (0)
#endif
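/*
 * Illustrative sketch: EMIT_NEW_X86_LEA computes dreg = sreg1 + (sreg2 << shift) + imm
 * in a single LEA, e.g. addressing a 4-byte array element (the registers are
 * hypothetical):
 *
 *   MonoInst *lea;
 *   EMIT_NEW_X86_LEA (cfg, lea, base_reg, index_reg, 2, MONO_STRUCT_OFFSET (MonoArray, vector));
 */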
typedef struct MonoInstList MonoInstList;
typedef struct MonoCallInst MonoCallInst;
typedef struct MonoCallArgParm MonoCallArgParm;
typedef struct MonoMethodVar MonoMethodVar;
typedef struct MonoBasicBlock MonoBasicBlock;
typedef struct MonoSpillInfo MonoSpillInfo;
extern MonoCallSpec *mono_jit_trace_calls;
extern MonoMethodDesc *mono_inject_async_exc_method;
extern int mono_inject_async_exc_pos;
extern MonoMethodDesc *mono_break_at_bb_method;
extern int mono_break_at_bb_bb_num;
extern gboolean mono_do_x86_stack_align;
extern int mini_verbose;
extern int valgrind_register;
#define INS_INFO(opcode) (&mini_ins_info [((opcode) - OP_START - 1) * 4])
/* instruction description for use in regalloc/scheduling */
enum {
MONO_INST_DEST = 0,
MONO_INST_SRC1 = 1, /* we depend on the SRCs to be consecutive */
MONO_INST_SRC2 = 2,
MONO_INST_SRC3 = 3,
MONO_INST_LEN = 4,
MONO_INST_CLOB = 5,
/* Unused, commented out to reduce the size of the mdesc tables
MONO_INST_FLAGS,
MONO_INST_COST,
MONO_INST_DELAY,
MONO_INST_RES,
*/
MONO_INST_MAX = 6
};
typedef union MonoInstSpec { // instruction specification
struct {
char dest;
char src1;
char src2;
char src3;
unsigned char len;
char clob;
// char flags;
// char cost;
// char delay;
// char res;
};
struct {
char xdest;
char src [3];
unsigned char xlen;
char xclob;
};
char bytes[MONO_INST_MAX];
} MonoInstSpec;
extern const char mini_ins_info[];
extern const gint8 mini_ins_sreg_counts [];
#ifndef DISABLE_JIT
#define mono_inst_get_num_src_registers(ins) (mini_ins_sreg_counts [(ins)->opcode - OP_START - 1])
#else
#define mono_inst_get_num_src_registers(ins) 0
#endif
#define mono_inst_get_src_registers(ins, regs) (((regs) [0] = (ins)->sreg1), ((regs) [1] = (ins)->sreg2), ((regs) [2] = (ins)->sreg3), mono_inst_get_num_src_registers ((ins)))
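/*
 * Illustrative usage: collect the source vregs of an instruction
 * (MONO_MAX_SRC_REGS, defined below, is 3):
 *
 *   gint32 regs [3];
 *   int n = mono_inst_get_src_registers (ins, regs);
 *   // regs [0..n-1] hold sreg1..sreg3, in order
 */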
#define MONO_BB_FOR_EACH_INS(bb, ins) for ((ins) = (bb)->code; (ins); (ins) = (ins)->next)
#define MONO_BB_FOR_EACH_INS_SAFE(bb, n, ins) for ((ins) = (bb)->code, n = (ins) ? (ins)->next : NULL; (ins); (ins) = (n), (n) = (ins) ? (ins)->next : NULL)
#define MONO_BB_FOR_EACH_INS_REVERSE(bb, ins) for ((ins) = (bb)->last_ins; (ins); (ins) = (ins)->prev)
#define MONO_BB_FOR_EACH_INS_REVERSE_SAFE(bb, p, ins) for ((ins) = (bb)->last_ins, p = (ins) ? (ins)->prev : NULL; (ins); (ins) = (p), (p) = (ins) ? (ins)->prev : NULL)
#define mono_bb_first_ins(bb) (bb)->code
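/*
 * Illustrative usage: the _SAFE variants allow the current instruction to be
 * unlinked while iterating, e.g.
 *
 *   MonoInst *ins, *next;
 *   MONO_BB_FOR_EACH_INS_SAFE (bb, next, ins) {
 *       if (ins->opcode == OP_NOP)
 *           MONO_REMOVE_INS (bb, ins);
 *   }
 */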
/*
* Iterate through all used registers in the instruction.
 * Relies on the existing order of the MONO_INST enum: MONO_INST_{DEST,SRC1,SRC2,SRC3,LEN}.
* INS is the instruction, IDX is the register index, REG is the pointer to a register.
*/
#define MONO_INS_FOR_EACH_REG(ins, idx, reg) for ((idx) = INS_INFO ((ins)->opcode)[MONO_INST_DEST] != ' ' ? MONO_INST_DEST : \
(mono_inst_get_num_src_registers (ins) ? MONO_INST_SRC1 : MONO_INST_LEN); \
(reg) = (idx) == MONO_INST_DEST ? &(ins)->dreg : \
((idx) == MONO_INST_SRC1 ? &(ins)->sreg1 : \
((idx) == MONO_INST_SRC2 ? &(ins)->sreg2 : \
((idx) == MONO_INST_SRC3 ? &(ins)->sreg3 : NULL))), \
idx < MONO_INST_LEN; \
(idx) = (idx) > mono_inst_get_num_src_registers (ins) + (INS_INFO ((ins)->opcode)[MONO_INST_DEST] != ' ') ? MONO_INST_LEN : (idx) + 1)
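/*
 * Illustrative usage: renaming a vreg everywhere it occurs in an instruction
 * (old_vreg/new_vreg are hypothetical):
 *
 *   int idx;
 *   gint32 *reg;
 *   MONO_INS_FOR_EACH_REG (ins, idx, reg) {
 *       if (*reg == old_vreg)
 *           *reg = new_vreg;
 *   }
 */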
struct MonoSpillInfo {
int offset;
};
/*
* Information about a call site for the GC map creation code
*/
typedef struct {
/* The next offset after the call instruction */
int pc_offset;
/* The basic block containing the call site */
MonoBasicBlock *bb;
/*
* The set of variables live at the call site.
* Has length cfg->num_varinfo in bits.
*/
guint8 *liveness;
/*
* List of OP_GC_PARAM_SLOT_LIVENESS_DEF instructions defining the param slots
* used by this call.
*/
GSList *param_slots;
} GCCallSite;
/*
* The IR-level extended basic block.
*
* A basic block can have multiple exits just fine, as long as the point of
* 'departure' is the last instruction in the basic block. Extended basic
* blocks, on the other hand, may have instructions that leave the block
* midstream. The important thing is that they cannot be _entered_
 * midstream, i.e., execution of a basic block (or extended bb) always starts
* at the beginning of the block, never in the middle.
*/
struct MonoBasicBlock {
MonoInst *last_ins;
/* the next basic block in the order it appears in IL */
MonoBasicBlock *next_bb;
/*
* Before instruction selection it is the first tree in the
* forest and the first item in the list of trees. After
* instruction selection it is the first instruction and the
* first item in the list of instructions.
*/
MonoInst *code;
/* unique block number identification */
gint32 block_num;
gint32 dfn;
/* Basic blocks: incoming and outgoing counts and pointers */
/* Each bb should only appear once in each array */
gint16 out_count, in_count;
MonoBasicBlock **in_bb;
MonoBasicBlock **out_bb;
/* Points to the start of the CIL code that initiated this BB */
unsigned char* cil_code;
/* Length of the CIL block */
gint32 cil_length;
/* The offset of the generated code, used for fixups */
int native_offset;
/* The length of the generated code, doesn't include alignment padding */
int native_length;
/* The real native offset, which includes alignment padding too */
int real_native_offset;
int max_offset;
int max_length;
/* Visited and reachable flags */
guint32 flags;
/*
* SSA and loop based flags
*/
MonoBitSet *dominators;
MonoBitSet *dfrontier;
MonoBasicBlock *idom;
GSList *dominated;
/* fast dominator algorithm */
MonoBasicBlock *df_parent, *ancestor, *child, *label;
int size, sdom, idomn;
/* loop nesting and recognition */
GList *loop_blocks;
gint8 nesting;
gint8 loop_body_start;
/*
	 * Whether the bblock is rarely executed, so it should be emitted after
	 * the function epilog.
*/
guint out_of_line : 1;
/* Caches the result of uselessness calculation during optimize_branches */
guint not_useless : 1;
	/* Whether the decompose_array_access_opts () pass needs to process this bblock */
	guint needs_decompose : 1;
	/* Whether this bblock is extended, i.e. it has branches inside it */
	guint extended : 1;
	/* Whether this bblock contains an OP_JUMP_TABLE instruction */
	guint has_jump_table : 1;
	/* Whether this bblock contains an OP_CALL_HANDLER instruction */
	guint has_call_handler : 1;
	/* Whether this bblock starts a try block */
	guint try_start : 1;
#ifdef ENABLE_LLVM
/* The offset of the CIL instruction in this bblock which ends a try block */
intptr_t try_end;
#endif
/*
* If this is set, extend the try range started by this bblock by an arch specific
* number of bytes to encompass the end of the previous bblock (e.g. a Monitor.Enter
* call).
*/
guint extend_try_block : 1;
/* use for liveness analysis */
MonoBitSet *gen_set;
MonoBitSet *kill_set;
MonoBitSet *live_in_set;
MonoBitSet *live_out_set;
/* fields to deal with non-empty stack slots at bb boundary */
guint16 out_scount, in_scount;
MonoInst **out_stack;
MonoInst **in_stack;
	/* We use this to prevent merging of bblocks covered by different clauses. */
guint real_offset;
GSList *seq_points;
// The MonoInst of the last sequence point for the current basic block.
MonoInst *last_seq_point;
// This will hold a list of last sequence points of incoming basic blocks
MonoInst **pred_seq_points;
guint num_pred_seq_points;
GSList *spill_slot_defs;
/* List of call sites in this bblock sorted by pc_offset */
GSList *gc_callsites;
/*
* If this is not null, the basic block is a try hole for all the clauses
* in the list previous to this element (including the element).
*/
GList *clause_holes;
/*
* The region encodes whether the basic block is inside
* a finally, catch, filter or none of these.
*
* If the value is -1, then it is neither finally, catch nor filter
*
* Otherwise the format is:
*
* Bits: | 0-3 | 4-7 | 8-31
* | | |
* | clause-flags | MONO_REGION | clause-index
*
*/
guint region;
/* The current symbolic register number, used in local register allocation. */
guint32 max_vreg;
};
/* BBlock flags */
enum {
BB_VISITED = 1 << 0,
BB_REACHABLE = 1 << 1,
BB_EXCEPTION_DEAD_OBJ = 1 << 2,
BB_EXCEPTION_UNSAFE = 1 << 3,
BB_EXCEPTION_HANDLER = 1 << 4,
/* for Native Client, mark the blocks that can be jumped to indirectly */
	BB_INDIRECT_JUMP_TARGET = 1 << 5,
/* Contains code with some side effects */
BB_HAS_SIDE_EFFECTS = 1 << 6,
};
typedef struct MonoMemcpyArgs {
int size, align;
} MonoMemcpyArgs;
typedef enum {
LLVMArgNone,
/* Scalar argument passed by value */
LLVMArgNormal,
/* Only in ainfo->pair_storage */
LLVMArgInIReg,
/* Only in ainfo->pair_storage */
LLVMArgInFPReg,
/* Valuetype passed in 1-2 consecutive register */
LLVMArgVtypeInReg,
LLVMArgVtypeByVal,
	LLVMArgVtypeRetAddr, /* Only on cinfo->ret */
LLVMArgGSharedVt,
/* Fixed size argument passed to/returned from gsharedvt method by ref */
LLVMArgGsharedvtFixed,
/* Fixed size vtype argument passed to/returned from gsharedvt method by ref */
LLVMArgGsharedvtFixedVtype,
/* Variable sized argument passed to/returned from gsharedvt method by ref */
LLVMArgGsharedvtVariable,
/* Vtype passed/returned as one int array argument */
LLVMArgAsIArgs,
/* Vtype passed as a set of fp arguments */
LLVMArgAsFpArgs,
/*
* Only for returns, a structure which
* consists of floats/doubles.
*/
LLVMArgFpStruct,
LLVMArgVtypeByRef,
/* Vtype returned as an int */
LLVMArgVtypeAsScalar,
/* Address to local vtype passed as argument (using register or stack). */
LLVMArgVtypeAddr,
/*
* On WASM, a one element vtype is passed/returned as a scalar with the same
* type as the element.
* esize is the size of the value.
*/
LLVMArgWasmVtypeAsScalar
} LLVMArgStorage;
typedef struct {
LLVMArgStorage storage;
/*
	 * Only if storage == LLVMArgVtypeInReg/LLVMArgAsFpArgs.
* This contains how the parts of the vtype are passed.
*/
LLVMArgStorage pair_storage [8];
/*
* Only if storage == LLVMArgAsIArgs/LLVMArgAsFpArgs/LLVMArgFpStruct.
* If storage == LLVMArgAsFpArgs, this is the number of arguments
* used to pass the value.
* If storage == LLVMArgFpStruct, this is the number of fields
* in the structure.
*/
int nslots;
/* Only if storage == LLVMArgAsIArgs/LLVMArgAsFpArgs/LLVMArgFpStruct (4/8) */
int esize;
/* Parameter index in the LLVM signature */
int pindex;
MonoType *type;
/* Only if storage == LLVMArgAsFpArgs. Dummy fp args to insert before this arg */
int ndummy_fpargs;
} LLVMArgInfo;
typedef struct {
LLVMArgInfo ret;
	/* Whether there is an rgctx argument */
	gboolean rgctx_arg;
	/* Whether there is an IMT argument */
	gboolean imt_arg;
	/* Whether there is a dummy extra argument */
	gboolean dummy_arg;
/*
* The position of the vret arg in the argument list.
	 * Only if ret->storage == LLVMArgVtypeRetAddr.
* Should be 0 or 1.
*/
int vret_arg_index;
/* The indexes of various special arguments in the LLVM signature */
int vret_arg_pindex, this_arg_pindex, rgctx_arg_pindex, imt_arg_pindex, dummy_arg_pindex;
/* Inline array of argument info */
/* args [0] is for the this argument if it exists */
LLVMArgInfo args [1];
} LLVMCallInfo;
#define MONO_MAX_SRC_REGS 3
struct MonoInst {
guint16 opcode;
guint8 type; /* stack type */
guint8 flags;
/* used by the register allocator */
gint32 dreg, sreg1, sreg2, sreg3;
MonoInst *next, *prev;
union {
union {
MonoInst *src;
MonoMethodVar *var;
target_mgreg_t const_val;
#if (SIZEOF_REGISTER > TARGET_SIZEOF_VOID_P) && (G_BYTE_ORDER == G_BIG_ENDIAN)
struct {
gpointer p[SIZEOF_REGISTER/TARGET_SIZEOF_VOID_P];
} pdata;
#else
gpointer p;
#endif
MonoMethod *method;
MonoMethodSignature *signature;
MonoBasicBlock **many_blocks;
MonoBasicBlock *target_block;
MonoInst **args;
MonoType *vtype;
MonoClass *klass;
int *phi_args;
MonoCallInst *call_inst;
GList *exception_clauses;
const char *exc_name;
} op [2];
gint64 i8const;
double r8const;
} data;
const unsigned char* cil_code; /* for debugging and bblock splitting */
/* used mostly by the backend to store additional info it may need */
union {
gint32 reg3;
gint32 arg_info;
gint32 size;
MonoMemcpyArgs *memcpy_args; /* in OP_MEMSET and OP_MEMCPY */
gpointer data;
gint shift_amount;
gboolean is_pinvoke; /* for variables in the unmanaged marshal format */
gboolean record_cast_details; /* For CEE_CASTCLASS */
MonoInst *spill_var; /* for OP_MOVE_I4_TO_F/F_TO_I4 and OP_FCONV_TO_R8_X */
guint16 source_opcode; /*OP_XCONV_R8_TO_I4 needs to know which op was used to do proper widening*/
int pc_offset; /* OP_GC_LIVERANGE_START/END */
/*
* memory_barrier: MONO_MEMORY_BARRIER_{ACQ,REL,SEQ}
* atomic_load_*: MONO_MEMORY_BARRIER_{ACQ,SEQ}
* atomic_store_*: MONO_MEMORY_BARRIER_{REL,SEQ}
*/
int memory_barrier_kind;
} backend;
MonoClass *klass;
};
struct MonoCallInst {
MonoInst inst;
MonoMethodSignature *signature;
MonoMethod *method;
MonoInst **args;
MonoInst *out_args;
MonoInst *vret_var;
gconstpointer fptr;
MonoJitICallId jit_icall_id;
guint stack_usage;
guint stack_align_amount;
regmask_t used_iregs;
regmask_t used_fregs;
GSList *out_ireg_args;
GSList *out_freg_args;
GSList *outarg_vts;
CallInfo *call_info;
#ifdef ENABLE_LLVM
LLVMCallInfo *cinfo;
int rgctx_arg_reg, imt_arg_reg;
#endif
#ifdef TARGET_ARM
/* See the comment in mini-arm.c!mono_arch_emit_call for RegTypeFP. */
GSList *float_args;
#endif
// Bitfields are at the end to minimize padding for alignment,
// unless there is a placement to increase locality.
guint is_virtual : 1;
// FIXME tailcall field is written after read; prefer MONO_IS_TAILCALL_OPCODE.
guint tailcall : 1;
/* If this is TRUE, 'fptr' points to a MonoJumpInfo instead of an address. */
guint fptr_is_patch : 1;
/*
* If this is true, then the call returns a vtype in a register using the same
* calling convention as OP_CALL.
*/
guint vret_in_reg : 1;
	/* Whether vret_in_reg returns fp values */
	guint vret_in_reg_fp : 1;
	/* Whether there is an IMT argument and it is dynamic */
	guint dynamic_imt_arg : 1;
	/* Whether there is an RGCTX argument */
	guint32 rgctx_reg : 1;
	/* Whether the call will need an unbox trampoline */
	guint need_unbox_trampoline : 1;
};
struct MonoCallArgParm {
MonoInst ins;
gint32 size;
gint32 offset;
gint32 offPrm;
};
/*
* flags for MonoInst
* Note: some of the values overlap, because they can't appear
* in the same MonoInst.
*/
enum {
MONO_INST_HAS_METHOD = 1,
MONO_INST_INIT = 1, /* in localloc */
MONO_INST_SINGLE_STEP_LOC = 1, /* in SEQ_POINT */
MONO_INST_IS_DEAD = 2,
MONO_INST_TAILCALL = 4,
MONO_INST_VOLATILE = 4,
MONO_INST_NOTYPECHECK = 4,
MONO_INST_NONEMPTY_STACK = 4, /* in SEQ_POINT */
MONO_INST_UNALIGNED = 8,
MONO_INST_NESTED_CALL = 8, /* in SEQ_POINT */
MONO_INST_CFOLD_TAKEN = 8, /* On branches */
MONO_INST_CFOLD_NOT_TAKEN = 16, /* On branches */
MONO_INST_DEFINITION_HAS_SIDE_EFFECTS = 8,
/* the address of the variable has been taken */
MONO_INST_INDIRECT = 16,
MONO_INST_NORANGECHECK = 16,
/* On loads, the source address can be null */
MONO_INST_FAULT = 32,
/*
* On variables, identifies LMF variables. These variables have a dummy type (int), but
* require stack space for a MonoLMF struct.
*/
MONO_INST_LMF = 32,
/* On loads, the source address points to a constant value */
MONO_INST_INVARIANT_LOAD = 64,
/* On stores, the destination is the stack */
MONO_INST_STACK_STORE = 64,
/* On variables, the variable needs GC tracking */
MONO_INST_GC_TRACK = 128,
/*
* Set on instructions during code emission which make calls, i.e. OP_CALL, OP_THROW.
* backend.pc_offset will be set to the pc offset at the end of the native call instructions.
*/
MONO_INST_GC_CALLSITE = 128,
/* On comparisons, mark the branch following the condition as likely to be taken */
MONO_INST_LIKELY = 128,
MONO_INST_NONULLCHECK = 128,
};
#define inst_c0 data.op[0].const_val
#define inst_c1 data.op[1].const_val
#define inst_i0 data.op[0].src
#define inst_i1 data.op[1].src
#if (SIZEOF_REGISTER > TARGET_SIZEOF_VOID_P) && (G_BYTE_ORDER == G_BIG_ENDIAN)
#define inst_p0 data.op[0].pdata.p[SIZEOF_REGISTER/TARGET_SIZEOF_VOID_P - 1]
#define inst_p1 data.op[1].pdata.p[SIZEOF_REGISTER/TARGET_SIZEOF_VOID_P - 1]
#else
#define inst_p0 data.op[0].p
#define inst_p1 data.op[1].p
#endif
#define inst_l data.i8const
#define inst_r data.r8const
#define inst_left data.op[0].src
#define inst_right data.op[1].src
#define inst_newa_len data.op[0].src
#define inst_newa_class data.op[1].klass
/* In _OVF opcodes */
#define inst_exc_name data.op[0].exc_name
#define inst_var data.op[0].var
#define inst_vtype data.op[1].vtype
/* in branch instructions */
#define inst_many_bb data.op[1].many_blocks
#define inst_target_bb data.op[0].target_block
#define inst_true_bb data.op[1].many_blocks[0]
#define inst_false_bb data.op[1].many_blocks[1]
#define inst_basereg sreg1
#define inst_indexreg sreg2
#define inst_destbasereg dreg
#define inst_offset data.op[0].const_val
#define inst_imm data.op[1].const_val
#define inst_call data.op[1].call_inst
#define inst_phi_args data.op[1].phi_args
#define inst_eh_blocks data.op[1].exception_clauses
/* Return the lower 32 bits of the 64 bit immediate in INS */
static inline guint32
ins_get_l_low (MonoInst *ins)
{
return (guint32)(ins->data.i8const & 0xffffffff);
}
/* Return the higher 32 bits of the 64 bit immediate in INS */
static inline guint32
ins_get_l_high (MonoInst *ins)
{
return (guint32)((ins->data.i8const >> 32) & 0xffffffff);
}
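/*
 * Illustrative example: for ins->inst_l == 0x123456789abcdef0,
 * ins_get_l_low (ins) yields 0x9abcdef0 and ins_get_l_high (ins) yields
 * 0x12345678, matching the two-word handling of 64 bit immediates on
 * 32 bit targets.
 */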
static inline void
mono_inst_set_src_registers (MonoInst *ins, int *regs)
{
ins->sreg1 = regs [0];
ins->sreg2 = regs [1];
ins->sreg3 = regs [2];
}
typedef union {
struct {
guint16 tid; /* tree number */
guint16 bid; /* block number */
	} pos;
guint32 abs_pos;
} MonoPosition;
typedef struct {
MonoPosition first_use, last_use;
} MonoLiveRange;
typedef struct MonoLiveRange2 MonoLiveRange2;
struct MonoLiveRange2 {
int from, to;
MonoLiveRange2 *next;
};
typedef struct {
/* List of live ranges sorted by 'from' */
MonoLiveRange2 *range;
MonoLiveRange2 *last_range;
} MonoLiveInterval;
/*
* Additional information about a variable
*/
struct MonoMethodVar {
guint idx; /* inside cfg->varinfo, cfg->vars */
MonoLiveRange range; /* generated by liveness analysis */
MonoLiveInterval *interval; /* generated by liveness analysis */
int reg; /* != -1 if allocated into a register */
int spill_costs;
MonoBitSet *def_in; /* used by SSA */
MonoInst *def; /* used by SSA */
MonoBasicBlock *def_bb; /* used by SSA */
GList *uses; /* used by SSA */
char cpstate; /* used by SSA conditional constant propagation */
/* The native offsets corresponding to the live range of the variable */
gint32 live_range_start, live_range_end;
/*
* cfg->varinfo [idx]->dreg could be replaced for OP_REGVAR, this contains the
* original vreg.
*/
gint32 vreg;
};
/* Generic sharing */
/*
* Flags for which contexts were used in inflating a generic.
*/
enum {
MONO_GENERIC_CONTEXT_USED_CLASS = 1,
MONO_GENERIC_CONTEXT_USED_METHOD = 2
};
enum {
	/* Cannot be 0 since this is stored in rgctx slots, and 0 means an uninitialized rgctx slot */
MONO_GSHAREDVT_BOX_TYPE_VTYPE = 1,
MONO_GSHAREDVT_BOX_TYPE_REF = 2,
MONO_GSHAREDVT_BOX_TYPE_NULLABLE = 3
};
typedef enum {
MONO_RGCTX_INFO_STATIC_DATA = 0,
MONO_RGCTX_INFO_KLASS = 1,
MONO_RGCTX_INFO_ELEMENT_KLASS = 2,
MONO_RGCTX_INFO_VTABLE = 3,
MONO_RGCTX_INFO_TYPE = 4,
MONO_RGCTX_INFO_REFLECTION_TYPE = 5,
MONO_RGCTX_INFO_METHOD = 6,
MONO_RGCTX_INFO_GENERIC_METHOD_CODE = 7,
MONO_RGCTX_INFO_GSHAREDVT_OUT_WRAPPER = 8,
MONO_RGCTX_INFO_CLASS_FIELD = 9,
MONO_RGCTX_INFO_METHOD_RGCTX = 10,
MONO_RGCTX_INFO_METHOD_CONTEXT = 11,
MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK = 12,
MONO_RGCTX_INFO_METHOD_DELEGATE_CODE = 13,
MONO_RGCTX_INFO_CAST_CACHE = 14,
MONO_RGCTX_INFO_ARRAY_ELEMENT_SIZE = 15,
MONO_RGCTX_INFO_VALUE_SIZE = 16,
/* +1 to avoid zero values in rgctx slots */
MONO_RGCTX_INFO_FIELD_OFFSET = 17,
/* Either the code for a gsharedvt method, or the address for a gsharedvt-out trampoline for the method */
/* In llvmonly mode, this is a function descriptor */
MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE = 18,
/* Same for virtual calls */
/* In llvmonly mode, this is a function descriptor */
MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE_VIRT = 19,
/* Same for calli, associated with a signature */
MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI = 20,
MONO_RGCTX_INFO_SIG_GSHAREDVT_IN_TRAMPOLINE_CALLI = 21,
/* One of MONO_GSHAREDVT_BOX_TYPE */
MONO_RGCTX_INFO_CLASS_BOX_TYPE = 22,
/* Resolves to a MonoGSharedVtMethodRuntimeInfo */
MONO_RGCTX_INFO_METHOD_GSHAREDVT_INFO = 23,
MONO_RGCTX_INFO_LOCAL_OFFSET = 24,
MONO_RGCTX_INFO_MEMCPY = 25,
MONO_RGCTX_INFO_BZERO = 26,
/* The address of Nullable<T>.Box () */
/* In llvmonly mode, this is a function descriptor */
MONO_RGCTX_INFO_NULLABLE_CLASS_BOX = 27,
MONO_RGCTX_INFO_NULLABLE_CLASS_UNBOX = 28,
/* MONO_PATCH_INFO_VCALL_METHOD */
/* In llvmonly mode, this is a function descriptor */
MONO_RGCTX_INFO_VIRT_METHOD_CODE = 29,
/*
* MONO_PATCH_INFO_VCALL_METHOD
* Same as MONO_RGCTX_INFO_CLASS_BOX_TYPE, but for the class
* which implements the method.
*/
MONO_RGCTX_INFO_VIRT_METHOD_BOX_TYPE = 30,
/* Resolve to 2 (TRUE) or 1 (FALSE) */
MONO_RGCTX_INFO_CLASS_IS_REF_OR_CONTAINS_REFS = 31,
/* The MonoDelegateTrampInfo instance */
MONO_RGCTX_INFO_DELEGATE_TRAMP_INFO = 32,
/* Same as MONO_PATCH_INFO_METHOD_FTNDESC */
MONO_RGCTX_INFO_METHOD_FTNDESC = 33,
/* mono_type_size () for a class */
MONO_RGCTX_INFO_CLASS_SIZEOF = 34,
/* The InterpMethod for a method */
MONO_RGCTX_INFO_INTERP_METHOD = 35,
/* The llvmonly interp entry for a method */
MONO_RGCTX_INFO_LLVMONLY_INTERP_ENTRY = 36
} MonoRgctxInfoType;
/* How an rgctx is passed to a method */
typedef enum {
MONO_RGCTX_ACCESS_NONE = 0,
/* Loaded from this->vtable->rgctx */
MONO_RGCTX_ACCESS_THIS = 1,
/* Loaded from an additional mrgctx argument */
MONO_RGCTX_ACCESS_MRGCTX = 2,
/* Loaded from an additional vtable argument */
MONO_RGCTX_ACCESS_VTABLE = 3
} MonoRgctxAccess;
typedef struct _MonoRuntimeGenericContextInfoTemplate {
MonoRgctxInfoType info_type;
gpointer data;
struct _MonoRuntimeGenericContextInfoTemplate *next;
} MonoRuntimeGenericContextInfoTemplate;
typedef struct {
MonoClass *next_subclass;
MonoRuntimeGenericContextInfoTemplate *infos;
GSList *method_templates;
} MonoRuntimeGenericContextTemplate;
typedef struct {
MonoVTable *class_vtable; /* must be the first element */
MonoGenericInst *method_inst;
gpointer infos [MONO_ZERO_LEN_ARRAY];
} MonoMethodRuntimeGenericContext;
/* MONO_ABI_SIZEOF () would include the 'infos' field as well */
#define MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT (TARGET_SIZEOF_VOID_P * 2)
#define MONO_RGCTX_SLOT_MAKE_RGCTX(i) (i)
#define MONO_RGCTX_SLOT_MAKE_MRGCTX(i) ((i) | 0x80000000)
#define MONO_RGCTX_SLOT_INDEX(s) ((s) & 0x7fffffff)
#define MONO_RGCTX_SLOT_IS_MRGCTX(s) (((s) & 0x80000000) ? TRUE : FALSE)
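/*
 * Illustrative sketch: the slot encoding round-trips, e.g.
 *
 *   guint32 s = MONO_RGCTX_SLOT_MAKE_MRGCTX (3);
 *   // MONO_RGCTX_SLOT_IS_MRGCTX (s) == TRUE, MONO_RGCTX_SLOT_INDEX (s) == 3
 */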
#define MONO_GSHAREDVT_DEL_INVOKE_VT_OFFSET -2
typedef struct {
MonoMethod *method;
MonoRuntimeGenericContextInfoTemplate *entries;
int num_entries, count_entries;
} MonoGSharedVtMethodInfo;
/* This is used by gsharedvt methods to allocate locals and compute local offsets */
typedef struct {
int locals_size;
/*
	 * The results of resolving the entries in MonoGSharedVtMethodInfo->entries.
* We use this instead of rgctx slots since these can be loaded using a load instead
* of a call to an rgctx fetch trampoline.
*/
gpointer entries [MONO_ZERO_LEN_ARRAY];
} MonoGSharedVtMethodRuntimeInfo;
typedef struct
{
MonoClass *klass;
MonoMethod *invoke;
MonoMethod *method;
MonoMethodSignature *invoke_sig;
MonoMethodSignature *sig;
gpointer method_ptr;
gpointer invoke_impl;
gpointer impl_this;
gpointer impl_nothis;
gboolean need_rgctx_tramp;
} MonoDelegateTrampInfo;
/*
* A function descriptor, which is a function address + argument pair.
* In llvm-only mode, these are used instead of trampolines to pass
* extra arguments to runtime functions/methods.
*/
typedef struct
{
gpointer addr;
gpointer arg;
MonoMethod *method;
/* Tagged InterpMethod* */
gpointer interp_method;
} MonoFtnDesc;
typedef enum {
#define PATCH_INFO(a,b) MONO_PATCH_INFO_ ## a,
#include "patch-info.h"
#undef PATCH_INFO
MONO_PATCH_INFO_NUM
} MonoJumpInfoType;
typedef struct MonoJumpInfoRgctxEntry MonoJumpInfoRgctxEntry;
typedef struct MonoJumpInfo MonoJumpInfo;
typedef struct MonoJumpInfoGSharedVtCall MonoJumpInfoGSharedVtCall;
// Subset of MonoJumpInfo.
typedef struct MonoJumpInfoTarget {
MonoJumpInfoType type;
gconstpointer target;
} MonoJumpInfoTarget;
// This ordering is mimicked in MONO_JIT_ICALLS.
typedef enum {
MONO_TRAMPOLINE_JIT = 0,
MONO_TRAMPOLINE_JUMP = 1,
MONO_TRAMPOLINE_RGCTX_LAZY_FETCH = 2,
MONO_TRAMPOLINE_AOT = 3,
MONO_TRAMPOLINE_AOT_PLT = 4,
MONO_TRAMPOLINE_DELEGATE = 5,
MONO_TRAMPOLINE_VCALL = 6,
MONO_TRAMPOLINE_NUM = 7,
} MonoTrampolineType;
// Assuming MONO_TRAMPOLINE_JIT / MONO_JIT_ICALL_generic_trampoline_jit are first.
#if __cplusplus
g_static_assert (MONO_TRAMPOLINE_JIT == 0);
#endif
#define mono_trampoline_type_to_jit_icall_id(a) ((a) + MONO_JIT_ICALL_generic_trampoline_jit)
#define mono_jit_icall_id_to_trampoline_type(a) ((MonoTrampolineType)((a) - MONO_JIT_ICALL_generic_trampoline_jit))
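/*
 * Illustrative sketch: the two conversions are inverses as long as the
 * orderings stay in sync, e.g.
 *
 *   mono_jit_icall_id_to_trampoline_type (mono_trampoline_type_to_jit_icall_id (MONO_TRAMPOLINE_AOT))
 *   // == MONO_TRAMPOLINE_AOT
 */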
/* These trampolines return normally to their caller */
#define MONO_TRAMPOLINE_TYPE_MUST_RETURN(t) \
((t) == MONO_TRAMPOLINE_RGCTX_LAZY_FETCH)
/* These trampolines receive an argument directly in a register */
#define MONO_TRAMPOLINE_TYPE_HAS_ARG(t) \
(FALSE)
/* optimization flags */
#define OPTFLAG(id,shift,name,descr) MONO_OPT_ ## id = 1 << shift,
enum {
#include "optflags-def.h"
MONO_OPT_LAST
};
/*
* This structure represents a JIT backend.
*/
typedef struct {
guint have_card_table_wb : 1;
guint have_op_generic_class_init : 1;
guint emulate_mul_div : 1;
guint emulate_div : 1;
guint emulate_long_shift_opts : 1;
guint have_objc_get_selector : 1;
guint have_generalized_imt_trampoline : 1;
gboolean have_op_tailcall_membase : 1;
gboolean have_op_tailcall_reg : 1;
gboolean have_volatile_non_param_register : 1;
guint gshared_supported : 1;
guint use_fpstack : 1;
guint ilp32 : 1;
guint need_got_var : 1;
guint need_div_check : 1;
guint no_unaligned_access : 1;
guint disable_div_with_mul : 1;
guint explicit_null_checks : 1;
guint optimized_div : 1;
guint force_float32 : 1;
int monitor_enter_adjustment;
int dyn_call_param_area;
} MonoBackend;
/* Flags for mini_method_compile () */
typedef enum {
	/* Whether to run cctors during JITting */
	JIT_FLAG_RUN_CCTORS = (1 << 0),
	/* Whether this is an AOT compilation */
	JIT_FLAG_AOT = (1 << 1),
	/* Whether this is a full AOT compilation */
	JIT_FLAG_FULL_AOT = (1 << 2),
	/* Whether to compile with LLVM */
	JIT_FLAG_LLVM = (1 << 3),
	/* Whether to disable direct calls to icall functions */
	JIT_FLAG_NO_DIRECT_ICALLS = (1 << 4),
	/* Emit explicit null checks */
	JIT_FLAG_EXPLICIT_NULL_CHECKS = (1 << 5),
	/* Whether to compile in llvm-only mode */
	JIT_FLAG_LLVM_ONLY = (1 << 6),
	/* Whether calls to pinvoke functions are made directly */
	JIT_FLAG_DIRECT_PINVOKE = (1 << 7),
	/* Whether this is a compile-all run and the result should be discarded */
	JIT_FLAG_DISCARD_RESULTS = (1 << 8),
	/* Whether to generate code which can work with the interpreter */
	JIT_FLAG_INTERP = (1 << 9),
/* Allow AOT to use all current CPU instructions */
JIT_FLAG_USE_CURRENT_CPU = (1 << 10),
/* Generate code to self-init the method for AOT */
JIT_FLAG_SELF_INIT = (1 << 11),
/* Assume code memory is exec only */
JIT_FLAG_CODE_EXEC_ONLY = (1 << 12),
} JitFlags;
/* Bit-fields in the MonoBasicBlock.region */
#define MONO_REGION_TRY 0
#define MONO_REGION_FINALLY 16
#define MONO_REGION_CATCH 32
#define MONO_REGION_FAULT 64
#define MONO_REGION_FILTER 128
#define MONO_BBLOCK_IS_IN_REGION(bblock, regtype) (((bblock)->region & (0xf << 4)) == (regtype))
#define MONO_REGION_FLAGS(region) ((region) & 0x7)
#define MONO_REGION_CLAUSE_INDEX(region) (((region) >> 8) - 1)
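/*
 * Illustrative example: a bblock inside the second catch clause (clause
 * index 1) carries region == (MONO_REGION_CATCH | (2 << 8)), so
 * MONO_BBLOCK_IS_IN_REGION (bb, MONO_REGION_CATCH) is TRUE and
 * MONO_REGION_CLAUSE_INDEX (bb->region) == 1.
 */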
#define get_vreg_to_inst(cfg, vreg) ((vreg) < (cfg)->vreg_to_inst_len ? (cfg)->vreg_to_inst [(vreg)] : NULL)
#define vreg_is_volatile(cfg, vreg) (G_UNLIKELY (get_vreg_to_inst ((cfg), (vreg)) && (get_vreg_to_inst ((cfg), (vreg))->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT))))
#define vreg_is_ref(cfg, vreg) ((vreg) < (cfg)->vreg_is_ref_len ? (cfg)->vreg_is_ref [(vreg)] : 0)
#define vreg_is_mp(cfg, vreg) ((vreg) < (cfg)->vreg_is_mp_len ? (cfg)->vreg_is_mp [(vreg)] : 0)
/*
* Control Flow Graph and compilation unit information
*/
typedef struct {
MonoMethod *method;
MonoMethodHeader *header;
MonoMemPool *mempool;
MonoInst **varinfo;
MonoMethodVar *vars;
MonoInst *ret;
MonoBasicBlock *bb_entry;
MonoBasicBlock *bb_exit;
MonoBasicBlock *bb_init;
MonoBasicBlock **bblocks;
MonoBasicBlock **cil_offset_to_bb;
MonoMemPool *state_pool; /* used by instruction selection */
MonoBasicBlock *cbb; /* used by instruction selection */
MonoInst *prev_ins; /* in decompose */
MonoJumpInfo *patch_info;
MonoJitInfo *jit_info;
MonoJitDynamicMethodInfo *dynamic_info;
guint num_bblocks, max_block_num;
guint locals_start;
guint num_varinfo; /* used items in varinfo */
guint varinfo_count; /* total storage in varinfo */
gint stack_offset;
gint max_ireg;
gint cil_offset_to_bb_len;
MonoRegState *rs;
MonoSpillInfo *spill_info [16]; /* machine register spills */
gint spill_count;
gint spill_info_len [16];
/* unsigned char *cil_code; */
MonoInst *got_var; /* Global Offset Table variable */
MonoInst **locals;
/* Variable holding the mrgctx/vtable address for gshared methods */
MonoInst *rgctx_var;
MonoInst **args;
MonoType **arg_types;
MonoMethod *current_method; /* The method currently processed by method_to_ir () */
MonoMethod *method_to_register; /* The method to register in JIT info tables */
MonoGenericContext *generic_context;
MonoInst *this_arg;
MonoBackend *backend;
/*
* This variable represents the hidden argument holding the vtype
* return address. If the method returns something other than a vtype, or
	 * the vtype is returned in registers, this is NULL.
*/
MonoInst *vret_addr;
/*
* This is used to initialize the cil_code field of MonoInst's.
*/
const unsigned char *ip;
struct MonoAliasingInformation *aliasing_info;
	/* A hashtable of region ID -> SP var mappings */
	/* An SP var is a place to store the stack pointer (used by handlers) */
	/*
	 * FIXME We can potentially get rid of this, since it was mainly used
	 * for hijacking the return address for handlers.
	 */
GHashTable *spvars;
/*
* A hashtable of region ID -> EX var mappings
* An EX var stores the exception object passed to catch/filter blocks
* For finally blocks, it is set to TRUE if we should throw an abort
* once the execution of the finally block is over.
*/
GHashTable *exvars;
GList *ldstr_list; /* used by AOT */
guint real_offset;
GHashTable *cbb_hash;
/* The current virtual register number */
guint32 next_vreg;
MonoRgctxAccess rgctx_access;
MonoGenericSharingContext gsctx;
MonoGenericContext *gsctx_context;
MonoGSharedVtMethodInfo *gsharedvt_info;
gpointer jit_mm;
MonoMemoryManager *mem_manager;
/* Points to the gsharedvt locals area at runtime */
MonoInst *gsharedvt_locals_var;
/* The localloc instruction used to initialize gsharedvt_locals_var */
MonoInst *gsharedvt_locals_var_ins;
/* Points to a MonoGSharedVtMethodRuntimeInfo at runtime */
MonoInst *gsharedvt_info_var;
/* For native-to-managed wrappers, CEE_MONO_JIT_(AT|DE)TACH opcodes */
MonoInst *orig_domain_var;
MonoInst *lmf_var;
MonoInst *lmf_addr_var;
MonoInst *il_state_var;
MonoInst *stack_inbalance_var;
unsigned char *cil_start;
unsigned char *native_code;
guint code_size;
guint code_len;
guint prolog_end;
guint epilog_begin;
guint epilog_end;
regmask_t used_int_regs;
guint32 opt;
guint32 flags;
guint32 comp_done;
guint32 verbose_level;
guint32 stack_usage;
guint32 param_area;
guint32 frame_reg;
gint32 sig_cookie;
guint disable_aot : 1;
guint disable_ssa : 1;
guint disable_llvm : 1;
guint enable_extended_bblocks : 1;
guint run_cctors : 1;
guint need_lmf_area : 1;
guint compile_aot : 1;
guint full_aot : 1;
guint compile_llvm : 1;
guint got_var_allocated : 1;
guint ret_var_is_local : 1;
guint ret_var_set : 1;
guint unverifiable : 1;
guint skip_visibility : 1;
guint disable_llvm_implicit_null_checks : 1;
guint disable_reuse_registers : 1;
guint disable_reuse_stack_slots : 1;
guint disable_reuse_ref_stack_slots : 1;
guint disable_ref_noref_stack_slot_share : 1;
guint disable_initlocals_opt : 1;
guint disable_initlocals_opt_refs : 1;
guint disable_omit_fp : 1;
guint disable_vreg_to_lvreg : 1;
guint disable_deadce_vars : 1;
guint disable_out_of_line_bblocks : 1;
guint disable_direct_icalls : 1;
guint disable_gc_safe_points : 1;
guint direct_pinvoke : 1;
guint create_lmf_var : 1;
/*
* When this is set, the code to push/pop the LMF from the LMF stack is generated as IR
* instead of being generated in emit_prolog ()/emit_epilog ().
*/
guint lmf_ir : 1;
guint gen_write_barriers : 1;
guint init_ref_vars : 1;
guint extend_live_ranges : 1;
guint compute_precise_live_ranges : 1;
guint has_got_slots : 1;
guint uses_rgctx_reg : 1;
guint uses_vtable_reg : 1;
guint keep_cil_nops : 1;
guint gen_seq_points : 1;
/* Generate seq points for use by the debugger */
guint gen_sdb_seq_points : 1;
guint explicit_null_checks : 1;
guint compute_gc_maps : 1;
guint soft_breakpoints : 1;
guint arch_eh_jit_info : 1;
guint has_calls : 1;
guint has_emulated_ops : 1;
guint has_indirection : 1;
guint has_atomic_add_i4 : 1;
guint has_atomic_exchange_i4 : 1;
guint has_atomic_cas_i4 : 1;
guint check_pinvoke_callconv : 1;
guint has_unwind_info_for_epilog : 1;
guint disable_inline : 1;
/* Disable inlining into caller */
guint no_inline : 1;
guint gshared : 1;
guint gsharedvt : 1;
guint r4fp : 1;
guint llvm_only : 1;
guint interp : 1;
guint use_current_cpu : 1;
guint self_init : 1;
guint code_exec_only : 1;
guint interp_entry_only : 1;
guint after_method_to_ir : 1;
guint disable_inline_rgctx_fetch : 1;
guint deopt : 1;
guint8 uses_simd_intrinsics;
int r4_stack_type;
gpointer debug_info;
guint32 lmf_offset;
guint16 *intvars;
MonoProfilerCoverageInfo *coverage_info;
GHashTable *token_info_hash;
MonoCompileArch arch;
guint32 inline_depth;
/* Size of memory reserved for thunks */
int thunk_area;
/* Thunks */
guint8 *thunks;
/* Offset between the start of code and the thunks area */
int thunks_offset;
MonoExceptionType exception_type; /* MONO_EXCEPTION_* */
guint32 exception_data;
char* exception_message;
gpointer exception_ptr;
guint8 * encoded_unwind_ops;
guint32 encoded_unwind_ops_len;
GSList* unwind_ops;
GList* dont_inline;
/* Fields used by the local reg allocator */
void* reginfo;
int reginfo_len;
/* Maps vregs to their associated MonoInst's */
/* vregs with an associated MonoInst are 'global' while others are 'local' */
MonoInst **vreg_to_inst;
/* Size of above array */
guint32 vreg_to_inst_len;
/* Marks vregs which hold a GC ref */
/* FIXME: Use a bitmap */
gboolean *vreg_is_ref;
/* Size of above array */
guint32 vreg_is_ref_len;
/* Marks vregs which hold a managed pointer */
/* FIXME: Use a bitmap */
gboolean *vreg_is_mp;
/* Size of above array */
guint32 vreg_is_mp_len;
/*
* The original method to compile, differs from 'method' when doing generic
* sharing.
*/
MonoMethod *orig_method;
/* Patches which describe absolute addresses embedded into the native code */
GHashTable *abs_patches;
/* Used to implement move_i4_to_f on archs that can't do raw
	   copy between an ireg and a freg. This is an int32 var. */
MonoInst *iconv_raw_var;
/* Used to implement fconv_to_r8_x. This is a double (8 bytes) var.*/
MonoInst *fconv_to_r8_x_var;
	/* Used to implement SIMD constructors. This is a vector (16 bytes) var. */
MonoInst *simd_ctor_var;
/* Used to implement dyn_call */
MonoInst *dyn_call_var;
MonoInst *last_seq_point;
/*
* List of sequence points represented as IL offset+native offset pairs.
* Allocated using glib.
* IL offset can be -1 or 0xffffff to refer to the sequence points
* inside the prolog and epilog used to implement method entry/exit events.
*/
GPtrArray *seq_points;
/* The encoded sequence point info */
struct MonoSeqPointInfo *seq_point_info;
/* Method headers which need to be freed after compilation */
GSList *headers_to_free;
/* Used by AOT */
guint32 got_offset, ex_info_offset, method_info_offset, method_index;
guint32 aot_method_flags;
/* For llvm */
guint32 got_access_count;
gpointer llvmonly_init_cond;
gpointer llvm_dummy_info_var, llvm_info_var;
/* Symbol used to refer to this method in generated assembly */
char *asm_symbol;
char *asm_debug_symbol;
char *llvm_method_name;
int castclass_cache_index;
MonoJitExceptionInfo *llvm_ex_info;
guint32 llvm_ex_info_len;
int llvm_this_reg, llvm_this_offset;
GSList *try_block_holes;
/* DWARF location list for 'this' */
GSList *this_loclist;
/* DWARF location list for 'rgctx_var' */
GSList *rgctx_loclist;
int *gsharedvt_vreg_to_idx;
GSList *signatures;
GSList *interp_in_signatures;
/* GC Maps */
/* The offsets of the locals area relative to the frame pointer */
gint locals_min_stack_offset, locals_max_stack_offset;
/* The current CFA rule */
int cur_cfa_reg, cur_cfa_offset;
/* The final CFA rule at the end of the prolog */
int cfa_reg, cfa_offset;
/* Points to a MonoCompileGC */
gpointer gc_info;
/*
* The encoded GC map along with its size. This contains binary data so it can be saved in an AOT
	 * image etc., but it requires 4-byte alignment.
*/
guint8 *gc_map;
guint32 gc_map_size;
/* Error handling */
MonoError* error;
MonoErrorInternal error_value;
/* pointer to context datastructure used for graph dumping */
MonoGraphDumper *gdump_ctx;
gboolean *clause_is_dead;
/* Stats */
int stat_allocate_var;
int stat_locals_stack_size;
int stat_basic_blocks;
int stat_cil_code_size;
int stat_n_regvars;
int stat_inlineable_methods;
int stat_inlined_methods;
int stat_code_reallocs;
MonoProfilerCallInstrumentationFlags prof_flags;
gboolean prof_coverage;
/* For deduplication */
gboolean skip;
} MonoCompile;
#define MONO_CFG_PROFILE(cfg, flag) \
G_UNLIKELY ((cfg)->prof_flags & MONO_PROFILER_CALL_INSTRUMENTATION_ ## flag)
#define MONO_CFG_PROFILE_CALL_CONTEXT(cfg) \
(MONO_CFG_PROFILE (cfg, ENTER_CONTEXT) || MONO_CFG_PROFILE (cfg, LEAVE_CONTEXT))
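/*
 * Illustrative sketch (not a prescribed API): IR generation typically guards
 * instrumentation on these macros. ENTER/LEAVE are assumed here to be members
 * of MonoProfilerCallInstrumentationFlags; the emit helpers are declared
 * further down in this header.
 *
 *   if (MONO_CFG_PROFILE (cfg, ENTER))
 *           mini_profiler_emit_enter (cfg);
 *   ...
 *   if (MONO_CFG_PROFILE (cfg, LEAVE))
 *           mini_profiler_emit_leave (cfg, ret);
 */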
typedef enum {
MONO_CFG_HAS_ALLOCA = 1 << 0,
MONO_CFG_HAS_CALLS = 1 << 1,
MONO_CFG_HAS_LDELEMA = 1 << 2,
MONO_CFG_HAS_VARARGS = 1 << 3,
MONO_CFG_HAS_TAILCALL = 1 << 4,
MONO_CFG_HAS_FPOUT = 1 << 5, /* there are fp values passed in int registers */
MONO_CFG_HAS_SPILLUP = 1 << 6, /* spill var slots are allocated from bottom to top */
MONO_CFG_HAS_CHECK_THIS = 1 << 7,
MONO_CFG_NEEDS_DECOMPOSE = 1 << 8,
MONO_CFG_HAS_TYPE_CHECK = 1 << 9
} MonoCompileFlags;
typedef enum {
MONO_CFG_USES_SIMD_INTRINSICS = 1 << 0,
MONO_CFG_USES_SIMD_INTRINSICS_SIMPLIFY_INDIRECTION = 1 << 1
} MonoSimdIntrinsicsFlags;
typedef struct {
gint32 methods_compiled;
gint32 methods_aot;
gint32 methods_aot_llvm;
gint32 methods_lookups;
gint32 allocate_var;
gint32 cil_code_size;
gint32 native_code_size;
gint32 code_reallocs;
gint32 max_code_size_ratio;
gint32 biggest_method_size;
gint32 allocated_code_size;
gint32 allocated_seq_points_size;
gint32 inlineable_methods;
gint32 inlined_methods;
gint32 basic_blocks;
gint32 max_basic_blocks;
gint32 locals_stack_size;
gint32 regvars;
gint32 generic_virtual_invocations;
gint32 alias_found;
gint32 alias_removed;
gint32 loads_eliminated;
gint32 stores_eliminated;
gint32 optimized_divisions;
gint32 methods_with_llvm;
gint32 methods_without_llvm;
gint32 methods_with_interp;
char *max_ratio_method;
char *biggest_method;
gint64 jit_method_to_ir;
gint64 jit_liveness_handle_exception_clauses;
gint64 jit_handle_out_of_line_bblock;
gint64 jit_decompose_long_opts;
gint64 jit_decompose_typechecks;
gint64 jit_local_cprop;
gint64 jit_local_emulate_ops;
gint64 jit_optimize_branches;
gint64 jit_handle_global_vregs;
gint64 jit_local_deadce;
gint64 jit_local_alias_analysis;
gint64 jit_if_conversion;
gint64 jit_bb_ordering;
gint64 jit_compile_dominator_info;
gint64 jit_compute_natural_loops;
gint64 jit_insert_safepoints;
gint64 jit_ssa_compute;
gint64 jit_ssa_cprop;
gint64 jit_ssa_deadce;
gint64 jit_perform_abc_removal;
gint64 jit_ssa_remove;
gint64 jit_local_cprop2;
gint64 jit_handle_global_vregs2;
gint64 jit_local_deadce2;
gint64 jit_optimize_branches2;
gint64 jit_decompose_vtype_opts;
gint64 jit_decompose_array_access_opts;
gint64 jit_liveness_handle_exception_clauses2;
gint64 jit_analyze_liveness;
gint64 jit_linear_scan;
gint64 jit_arch_allocate_vars;
gint64 jit_spill_global_vars;
gint64 jit_local_cprop3;
gint64 jit_local_deadce3;
gint64 jit_codegen;
gint64 jit_create_jit_info;
gint64 jit_gc_create_gc_map;
gint64 jit_save_seq_point_info;
gint64 jit_time;
gboolean enabled;
} MonoJitStats;
extern MonoJitStats mono_jit_stats;
static inline void
get_jit_stats (gint64 *methods_compiled, gint64 *cil_code_size_bytes, gint64 *native_code_size_bytes, gint64 *jit_time)
{
*methods_compiled = mono_jit_stats.methods_compiled;
*cil_code_size_bytes = mono_jit_stats.cil_code_size;
*native_code_size_bytes = mono_jit_stats.native_code_size;
*jit_time = mono_jit_stats.jit_time;
}
guint32
mono_get_exception_count (void);
static inline void
get_exception_stats (guint32 *exception_count)
{
*exception_count = mono_get_exception_count ();
}
/* opcodes: value assigned after all the CIL opcodes */
#ifdef MINI_OP
#undef MINI_OP
#endif
#ifdef MINI_OP3
#undef MINI_OP3
#endif
#define MINI_OP(a,b,dest,src1,src2) a,
#define MINI_OP3(a,b,dest,src1,src2,src3) a,
enum {
OP_START = MONO_CEE_LAST - 1,
#include "mini-ops.h"
OP_LAST
};
#undef MINI_OP
#undef MINI_OP3
#if TARGET_SIZEOF_VOID_P == 8
#define OP_PCONST OP_I8CONST
#define OP_DUMMY_PCONST OP_DUMMY_I8CONST
#define OP_PADD OP_LADD
#define OP_PADD_IMM OP_LADD_IMM
#define OP_PSUB_IMM OP_LSUB_IMM
#define OP_PAND_IMM OP_LAND_IMM
#define OP_PXOR_IMM OP_LXOR_IMM
#define OP_PSUB OP_LSUB
#define OP_PMUL OP_LMUL
#define OP_PMUL_IMM OP_LMUL_IMM
#define OP_POR_IMM OP_LOR_IMM
#define OP_PNEG OP_LNEG
#define OP_PCONV_TO_I1 OP_LCONV_TO_I1
#define OP_PCONV_TO_U1 OP_LCONV_TO_U1
#define OP_PCONV_TO_I2 OP_LCONV_TO_I2
#define OP_PCONV_TO_U2 OP_LCONV_TO_U2
#define OP_PCONV_TO_OVF_I1_UN OP_LCONV_TO_OVF_I1_UN
#define OP_PCONV_TO_OVF_I1 OP_LCONV_TO_OVF_I1
#define OP_PBEQ OP_LBEQ
#define OP_PCEQ OP_LCEQ
#define OP_PCLT OP_LCLT
#define OP_PCGT OP_LCGT
#define OP_PCLT_UN OP_LCLT_UN
#define OP_PCGT_UN OP_LCGT_UN
#define OP_PBNE_UN OP_LBNE_UN
#define OP_PBGE_UN OP_LBGE_UN
#define OP_PBLT_UN OP_LBLT_UN
#define OP_PBGE OP_LBGE
#define OP_STOREP_MEMBASE_REG OP_STOREI8_MEMBASE_REG
#define OP_STOREP_MEMBASE_IMM OP_STOREI8_MEMBASE_IMM
#else
#define OP_PCONST OP_ICONST
#define OP_DUMMY_PCONST OP_DUMMY_ICONST
#define OP_PADD OP_IADD
#define OP_PADD_IMM OP_IADD_IMM
#define OP_PSUB_IMM OP_ISUB_IMM
#define OP_PAND_IMM OP_IAND_IMM
#define OP_PXOR_IMM OP_IXOR_IMM
#define OP_PSUB OP_ISUB
#define OP_PMUL OP_IMUL
#define OP_PMUL_IMM OP_IMUL_IMM
#define OP_POR_IMM OP_IOR_IMM
#define OP_PNEG OP_INEG
#define OP_PCONV_TO_I1 OP_ICONV_TO_I1
#define OP_PCONV_TO_U1 OP_ICONV_TO_U1
#define OP_PCONV_TO_I2 OP_ICONV_TO_I2
#define OP_PCONV_TO_U2 OP_ICONV_TO_U2
#define OP_PCONV_TO_OVF_I1_UN OP_ICONV_TO_OVF_I1_UN
#define OP_PCONV_TO_OVF_I1 OP_ICONV_TO_OVF_I1
#define OP_PBEQ OP_IBEQ
#define OP_PCEQ OP_ICEQ
#define OP_PCLT OP_ICLT
#define OP_PCGT OP_ICGT
#define OP_PCLT_UN OP_ICLT_UN
#define OP_PCGT_UN OP_ICGT_UN
#define OP_PBNE_UN OP_IBNE_UN
#define OP_PBGE_UN OP_IBGE_UN
#define OP_PBLT_UN OP_IBLT_UN
#define OP_PBGE OP_IBGE
#define OP_STOREP_MEMBASE_REG OP_STOREI4_MEMBASE_REG
#define OP_STOREP_MEMBASE_IMM OP_STOREI4_MEMBASE_IMM
#endif
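/*
 * Illustrative: the OP_P* aliases keep IR construction word-size agnostic.
 * For example, pointer arithmetic emitted through a generic emit helper
 * (EMIT_NEW_BIALU_IMM is assumed here, from ir-emit.h):
 *
 *   EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, src_reg, offset);
 *
 * expands to a 64-bit add on 64-bit targets and a 32-bit add otherwise.
 */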
/* Opcodes to load/store regsize quantities */
#if defined (MONO_ARCH_ILP32)
#define OP_LOADR_MEMBASE OP_LOADI8_MEMBASE
#define OP_STORER_MEMBASE_REG OP_STOREI8_MEMBASE_REG
#else
#define OP_LOADR_MEMBASE OP_LOAD_MEMBASE
#define OP_STORER_MEMBASE_REG OP_STORE_MEMBASE_REG
#endif
typedef enum {
STACK_INV,
STACK_I4,
STACK_I8,
STACK_PTR,
STACK_R8,
STACK_MP,
STACK_OBJ,
STACK_VTYPE,
STACK_R4,
STACK_MAX
} MonoStackType;
typedef struct {
union {
double r8;
gint32 i4;
gint64 i8;
gpointer p;
MonoClass *klass;
} data;
int type;
} StackSlot;
extern const MonoInstSpec MONO_ARCH_CPU_SPEC [];
#define MONO_ARCH_CPU_SPEC_IDX_COMBINE(a) a ## _idx
#define MONO_ARCH_CPU_SPEC_IDX(a) MONO_ARCH_CPU_SPEC_IDX_COMBINE(a)
extern const guint16 MONO_ARCH_CPU_SPEC_IDX(MONO_ARCH_CPU_SPEC) [];
#define ins_get_spec(op) ((const char*)&MONO_ARCH_CPU_SPEC [MONO_ARCH_CPU_SPEC_IDX(MONO_ARCH_CPU_SPEC)[(op) - OP_LOAD]])
#ifndef DISABLE_JIT
static inline int
ins_get_size (int opcode)
{
return ((guint8 *)ins_get_spec (opcode))[MONO_INST_LEN];
}
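/*
 * Illustrative sketch: ins_get_spec () returns the per-opcode spec string;
 * indexing it with the MONO_INST_* constants (assumed to be defined elsewhere
 * in this header) yields the register classes and the maximum native length
 * used by the local register allocator and code emission:
 *
 *   const char *spec = ins_get_spec (ins->opcode);
 *   char dest_bank = spec [MONO_INST_DEST];
 *   int max_native_len = ins_get_size (ins->opcode);
 */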
guint8*
mini_realloc_code_slow (MonoCompile *cfg, int size);
static inline guint8*
realloc_code (MonoCompile *cfg, int size)
{
const int EXTRA_CODE_SPACE = 16;
const int code_len = cfg->code_len;
if (G_UNLIKELY ((guint)(code_len + size) > (cfg->code_size - EXTRA_CODE_SPACE)))
return mini_realloc_code_slow (cfg, size);
return cfg->native_code + code_len;
}
static inline void
set_code_len (MonoCompile *cfg, int len)
{
g_assert ((guint)len <= cfg->code_size);
cfg->code_len = len;
}
static inline void
set_code_cursor (MonoCompile *cfg, void* void_code)
{
guint8* code = (guint8*)void_code;
g_assert (code <= (cfg->native_code + cfg->code_size));
set_code_len (cfg, code - cfg->native_code);
}
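/*
 * Illustrative usage in an arch backend (a sketch, not a prescribed pattern):
 * reserve worst-case space before emitting, then record the new cursor so
 * cfg->code_len stays consistent with what was actually emitted:
 *
 *   guint8 *code = realloc_code (cfg, max_len);
 *   // ... emit at most max_len bytes through 'code' ...
 *   set_code_cursor (cfg, code);
 */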
#endif
enum {
MONO_COMP_DOM = 1,
MONO_COMP_IDOM = 2,
MONO_COMP_DFRONTIER = 4,
MONO_COMP_DOM_REV = 8,
MONO_COMP_LIVENESS = 16,
MONO_COMP_SSA = 32,
MONO_COMP_SSA_DEF_USE = 64,
MONO_COMP_REACHABILITY = 128,
MONO_COMP_LOOPS = 256
};
typedef enum {
MONO_GRAPH_CFG = 1,
MONO_GRAPH_DTREE = 2,
MONO_GRAPH_CFG_CODE = 4,
MONO_GRAPH_CFG_SSA = 8,
MONO_GRAPH_CFG_OPTCODE = 16
} MonoGraphOptions;
typedef struct {
guint16 size;
guint16 offset;
guint8 pad;
} MonoJitArgumentInfo;
enum {
BRANCH_NOT_TAKEN,
BRANCH_TAKEN,
BRANCH_UNDEF
};
typedef enum {
CMP_EQ,
CMP_NE,
CMP_LE,
CMP_GE,
CMP_LT,
CMP_GT,
CMP_LE_UN,
CMP_GE_UN,
CMP_LT_UN,
CMP_GT_UN,
CMP_ORD,
CMP_UNORD
} CompRelation;
typedef enum {
CMP_TYPE_L,
CMP_TYPE_I,
CMP_TYPE_F
} CompType;
/* Implicit exceptions */
enum {
MONO_EXC_INDEX_OUT_OF_RANGE,
MONO_EXC_OVERFLOW,
MONO_EXC_ARITHMETIC,
MONO_EXC_DIVIDE_BY_ZERO,
MONO_EXC_INVALID_CAST,
MONO_EXC_NULL_REF,
MONO_EXC_ARRAY_TYPE_MISMATCH,
MONO_EXC_ARGUMENT,
MONO_EXC_ARGUMENT_OUT_OF_RANGE,
MONO_EXC_ARGUMENT_OUT_OF_MEMORY,
MONO_EXC_INTRINS_NUM
};
/*
* Information about a trampoline function.
*/
struct MonoTrampInfo
{
/*
* The native code of the trampoline. Not owned by this structure.
*/
guint8 *code;
guint32 code_size;
/*
* The name of the trampoline which can be used in AOT/xdebug. Owned by this
* structure.
*/
char *name;
/*
* Patches required by the trampoline when aot-ing. Owned by this structure.
*/
MonoJumpInfo *ji;
/*
* Unwind information. Owned by this structure.
*/
GSList *unwind_ops;
MonoJitICallInfo *jit_icall_info;
/*
* The method the trampoline is associated with, if any.
*/
MonoMethod *method;
/*
* Encoded unwind info loaded from AOT images
*/
guint8 *uw_info;
guint32 uw_info_len;
	/* Whether uw_info is owned by this structure */
gboolean owns_uw_info;
};
typedef void (*MonoInstFunc) (MonoInst *tree, gpointer data);
enum {
FILTER_IL_SEQ_POINT = 1 << 0,
FILTER_NOP = 1 << 1,
};
static inline gboolean
mono_inst_filter (MonoInst *ins, int filter)
{
if (!ins || !filter)
return FALSE;
if ((filter & FILTER_IL_SEQ_POINT) && ins->opcode == OP_IL_SEQ_POINT)
return TRUE;
if ((filter & FILTER_NOP) && ins->opcode == OP_NOP)
return TRUE;
return FALSE;
}
static inline MonoInst*
mono_inst_next (MonoInst *ins, int filter)
{
do {
ins = ins->next;
} while (mono_inst_filter (ins, filter));
return ins;
}
static inline MonoInst*
mono_inst_prev (MonoInst *ins, int filter)
{
do {
ins = ins->prev;
} while (mono_inst_filter (ins, filter));
return ins;
}
static inline MonoInst*
mono_bb_first_inst (MonoBasicBlock *bb, int filter)
{
MonoInst *ins = bb->code;
if (mono_inst_filter (ins, filter))
ins = mono_inst_next (ins, filter);
return ins;
}
static inline MonoInst*
mono_bb_last_inst (MonoBasicBlock *bb, int filter)
{
MonoInst *ins = bb->last_ins;
if (mono_inst_filter (ins, filter))
ins = mono_inst_prev (ins, filter);
return ins;
}
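/*
 * Illustrative sketch: walking only the "real" instructions of a bblock,
 * skipping IL sequence points and nops:
 *
 *   int filter = FILTER_IL_SEQ_POINT | FILTER_NOP;
 *   for (MonoInst *ins = mono_bb_first_inst (bb, filter); ins; ins = mono_inst_next (ins, filter)) {
 *           // 'ins' is never OP_IL_SEQ_POINT or OP_NOP here
 *   }
 *
 * The loop terminates because mono_inst_filter () returns FALSE for NULL.
 */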
/* profiler support */
void mini_add_profiler_argument (const char *desc);
void mini_profiler_emit_enter (MonoCompile *cfg);
void mini_profiler_emit_leave (MonoCompile *cfg, MonoInst *ret);
void mini_profiler_emit_tail_call (MonoCompile *cfg, MonoMethod *target);
void mini_profiler_emit_call_finally (MonoCompile *cfg, MonoMethodHeader *header, unsigned char *ip, guint32 index, MonoExceptionClause *clause);
void mini_profiler_context_enable (void);
gpointer mini_profiler_context_get_this (MonoProfilerCallContext *ctx);
gpointer mini_profiler_context_get_argument (MonoProfilerCallContext *ctx, guint32 pos);
gpointer mini_profiler_context_get_local (MonoProfilerCallContext *ctx, guint32 pos);
gpointer mini_profiler_context_get_result (MonoProfilerCallContext *ctx);
void mini_profiler_context_free_buffer (gpointer buffer);
/* graph dumping */
void mono_cfg_dump_create_context (MonoCompile *cfg);
void mono_cfg_dump_begin_group (MonoCompile *cfg);
void mono_cfg_dump_close_group (MonoCompile *cfg);
void mono_cfg_dump_ir (MonoCompile *cfg, const char *phase_name);
/* helper methods */
MonoInst* mono_find_spvar_for_region (MonoCompile *cfg, int region);
MonoInst* mono_find_exvar_for_offset (MonoCompile *cfg, int offset);
int mono_get_block_region_notry (MonoCompile *cfg, int region);
void mono_bblock_add_inst (MonoBasicBlock *bb, MonoInst *inst);
void mono_bblock_insert_after_ins (MonoBasicBlock *bb, MonoInst *ins, MonoInst *ins_to_insert);
void mono_bblock_insert_before_ins (MonoBasicBlock *bb, MonoInst *ins, MonoInst *ins_to_insert);
void mono_verify_bblock (MonoBasicBlock *bb);
void mono_verify_cfg (MonoCompile *cfg);
void mono_constant_fold (MonoCompile *cfg);
MonoInst* mono_constant_fold_ins (MonoCompile *cfg, MonoInst *ins, MonoInst *arg1, MonoInst *arg2, gboolean overwrite);
int mono_eval_cond_branch (MonoInst *branch);
int mono_is_power_of_two (guint32 val);
void mono_cprop_local (MonoCompile *cfg, MonoBasicBlock *bb, MonoInst **acp, int acp_size);
MonoInst* mono_compile_create_var (MonoCompile *cfg, MonoType *type, int opcode);
MonoInst* mono_compile_create_var_for_vreg (MonoCompile *cfg, MonoType *type, int opcode, int vreg);
void mono_compile_make_var_load (MonoCompile *cfg, MonoInst *dest, gssize var_index);
MonoInst* mini_get_int_to_float_spill_area (MonoCompile *cfg);
MonoType* mono_type_from_stack_type (MonoInst *ins);
guint32 mono_alloc_ireg (MonoCompile *cfg);
guint32 mono_alloc_lreg (MonoCompile *cfg);
guint32 mono_alloc_freg (MonoCompile *cfg);
guint32 mono_alloc_preg (MonoCompile *cfg);
guint32 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type);
guint32 mono_alloc_ireg_ref (MonoCompile *cfg);
guint32 mono_alloc_ireg_mp (MonoCompile *cfg);
guint32 mono_alloc_ireg_copy (MonoCompile *cfg, guint32 vreg);
void mono_mark_vreg_as_ref (MonoCompile *cfg, int vreg);
void mono_mark_vreg_as_mp (MonoCompile *cfg, int vreg);
void mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to);
void mono_unlink_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to);
gboolean mono_bblocks_linked (MonoBasicBlock *bb1, MonoBasicBlock *bb2);
void mono_remove_bblock (MonoCompile *cfg, MonoBasicBlock *bb);
void mono_nullify_basic_block (MonoBasicBlock *bb);
void mono_merge_basic_blocks (MonoCompile *cfg, MonoBasicBlock *bb, MonoBasicBlock *bbn);
void mono_optimize_branches (MonoCompile *cfg);
void mono_blockset_print (MonoCompile *cfg, MonoBitSet *set, const char *name, guint idom);
void mono_print_ins_index (int i, MonoInst *ins);
GString *mono_print_ins_index_strbuf (int i, MonoInst *ins);
void mono_print_ins (MonoInst *ins);
void mono_print_bb (MonoBasicBlock *bb, const char *msg);
void mono_print_code (MonoCompile *cfg, const char *msg);
const char* mono_inst_name (int op);
int mono_op_to_op_imm (int opcode);
int mono_op_imm_to_op (int opcode);
int mono_load_membase_to_load_mem (int opcode);
gboolean mono_op_no_side_effects (int opcode);
gboolean mono_ins_no_side_effects (MonoInst *ins);
guint mono_type_to_load_membase (MonoCompile *cfg, MonoType *type);
guint mono_type_to_store_membase (MonoCompile *cfg, MonoType *type);
guint32 mono_type_to_stloc_coerce (MonoType *type);
guint mini_type_to_stind (MonoCompile* cfg, MonoType *type);
MonoStackType mini_type_to_stack_type (MonoCompile *cfg, MonoType *t);
MonoJitInfo* mini_lookup_method (MonoMethod *method, MonoMethod *shared);
guint32 mono_reverse_branch_op (guint32 opcode);
void mono_disassemble_code (MonoCompile *cfg, guint8 *code, int size, char *id);
MonoJumpInfoTarget mono_call_to_patch (MonoCallInst *call);
void mono_call_add_patch_info (MonoCompile *cfg, MonoCallInst *call, int ip);
void mono_add_patch_info (MonoCompile *cfg, int ip, MonoJumpInfoType type, gconstpointer target);
void mono_add_patch_info_rel (MonoCompile *cfg, int ip, MonoJumpInfoType type, gconstpointer target, int relocation);
void mono_remove_patch_info (MonoCompile *cfg, int ip);
gpointer mono_jit_compile_method_inner (MonoMethod *method, int opt, MonoError *error);
GList *mono_varlist_insert_sorted (MonoCompile *cfg, GList *list, MonoMethodVar *mv, int sort_type);
GList *mono_varlist_sort (MonoCompile *cfg, GList *list, int sort_type);
void mono_analyze_liveness (MonoCompile *cfg);
void mono_analyze_liveness_gc (MonoCompile *cfg);
void mono_linear_scan (MonoCompile *cfg, GList *vars, GList *regs, regmask_t *used_mask);
void mono_global_regalloc (MonoCompile *cfg);
void mono_create_jump_table (MonoCompile *cfg, MonoInst *label, MonoBasicBlock **bbs, int num_blocks);
MonoCompile *mini_method_compile (MonoMethod *method, guint32 opts, JitFlags flags, int parts, int aot_method_index);
void mono_destroy_compile (MonoCompile *cfg);
void mono_empty_compile (MonoCompile *cfg);
MonoJitICallInfo *mono_find_jit_opcode_emulation (int opcode);
gboolean mini_assembly_can_skip_verification (MonoMethod *method);
MonoInst *mono_get_got_var (MonoCompile *cfg);
void mono_add_seq_point (MonoCompile *cfg, MonoBasicBlock *bb, MonoInst *ins, int native_offset);
void mono_add_var_location (MonoCompile *cfg, MonoInst *var, gboolean is_reg, int reg, int offset, int from, int to);
MonoInst* mono_emit_jit_icall_id (MonoCompile *cfg, MonoJitICallId jit_icall_id, MonoInst **args);
#define mono_emit_jit_icall(cfg, name, args) (mono_emit_jit_icall_id ((cfg), MONO_JIT_ICALL_ ## name, (args)))
MonoInst* mono_emit_jit_icall_by_info (MonoCompile *cfg, int il_offset, MonoJitICallInfo *info, MonoInst **args);
MonoInst* mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this_ins);
gboolean mini_should_insert_breakpoint (MonoMethod *method);
int mono_target_pagesize (void);
gboolean mini_class_is_system_array (MonoClass *klass);
void mono_linterval_add_range (MonoCompile *cfg, MonoLiveInterval *interval, int from, int to);
void mono_linterval_print (MonoLiveInterval *interval);
void mono_linterval_print_nl (MonoLiveInterval *interval);
gboolean mono_linterval_covers (MonoLiveInterval *interval, int pos);
gint32 mono_linterval_get_intersect_pos (MonoLiveInterval *i1, MonoLiveInterval *i2);
void mono_linterval_split (MonoCompile *cfg, MonoLiveInterval *interval, MonoLiveInterval **i1, MonoLiveInterval **i2, int pos);
void mono_liveness_handle_exception_clauses (MonoCompile *cfg);
gpointer mono_realloc_native_code (MonoCompile *cfg);
void mono_register_opcode_emulation (int opcode, const char* name, MonoMethodSignature *sig, gpointer func, gboolean no_throw);
void mono_draw_graph (MonoCompile *cfg, MonoGraphOptions draw_options);
void mono_add_ins_to_end (MonoBasicBlock *bb, MonoInst *inst);
void mono_replace_ins (MonoCompile *cfg, MonoBasicBlock *bb, MonoInst *ins, MonoInst **prev, MonoBasicBlock *first_bb, MonoBasicBlock *last_bb);
void mini_register_opcode_emulation (int opcode, MonoJitICallInfo *jit_icall_info, const char *name, MonoMethodSignature *sig, gpointer func, const char *symbol, gboolean no_throw);
#ifdef __cplusplus
template <typename T>
inline void
mini_register_opcode_emulation (int opcode, MonoJitICallInfo *jit_icall_info, const char *name, MonoMethodSignature *sig, T func, const char *symbol, gboolean no_throw)
{
mini_register_opcode_emulation (opcode, jit_icall_info, name, sig, (gpointer)func, symbol, no_throw);
}
#endif // __cplusplus
void mono_trampolines_init (void);
guint8 * mono_get_trampoline_code (MonoTrampolineType tramp_type);
gpointer mono_create_specific_trampoline (MonoMemoryManager *mem_manager, gpointer arg1, MonoTrampolineType tramp_type, guint32 *code_len);
gpointer mono_create_jump_trampoline (MonoMethod *method,
gboolean add_sync_wrapper,
MonoError *error);
gpointer mono_create_jit_trampoline (MonoMethod *method, MonoError *error);
gpointer mono_create_jit_trampoline_from_token (MonoImage *image, guint32 token);
gpointer mono_create_delegate_trampoline (MonoClass *klass);
MonoDelegateTrampInfo* mono_create_delegate_trampoline_info (MonoClass *klass, MonoMethod *method);
gpointer mono_create_delegate_virtual_trampoline (MonoClass *klass, MonoMethod *method);
gpointer mono_create_rgctx_lazy_fetch_trampoline (guint32 offset);
gpointer mono_create_static_rgctx_trampoline (MonoMethod *m, gpointer addr);
gpointer mono_create_ftnptr_arg_trampoline (gpointer arg, gpointer addr);
guint32 mono_find_rgctx_lazy_fetch_trampoline_by_addr (gconstpointer addr);
gpointer mono_magic_trampoline (host_mgreg_t *regs, guint8 *code, gpointer arg, guint8* tramp);
gpointer mono_delegate_trampoline (host_mgreg_t *regs, guint8 *code, gpointer *tramp_data, guint8* tramp);
gpointer mono_aot_trampoline (host_mgreg_t *regs, guint8 *code, guint8 *token_info,
guint8* tramp);
gpointer mono_aot_plt_trampoline (host_mgreg_t *regs, guint8 *code, guint8 *token_info,
guint8* tramp);
gconstpointer mono_get_trampoline_func (MonoTrampolineType tramp_type);
gpointer mini_get_vtable_trampoline (MonoVTable *vt, int slot_index);
const char* mono_get_generic_trampoline_simple_name (MonoTrampolineType tramp_type);
const char* mono_get_generic_trampoline_name (MonoTrampolineType tramp_type);
char* mono_get_rgctx_fetch_trampoline_name (int slot);
gpointer mini_get_single_step_trampoline (void);
gpointer mini_get_breakpoint_trampoline (void);
gpointer mini_add_method_trampoline (MonoMethod *m, gpointer compiled_method, gboolean add_static_rgctx_tramp, gboolean add_unbox_tramp);
gboolean mini_jit_info_is_gsharedvt (MonoJitInfo *ji);
gpointer* mini_resolve_imt_method (MonoVTable *vt, gpointer *vtable_slot, MonoMethod *imt_method, MonoMethod **impl_method, gpointer *out_aot_addr,
gboolean *out_need_rgctx_tramp, MonoMethod **variant_iface,
MonoError *error);
void* mono_global_codeman_reserve (int size);
#define mono_global_codeman_reserve(size) (g_cast (mono_global_codeman_reserve ((size))))
void mono_global_codeman_foreach (MonoCodeManagerFunc func, void *user_data);
const char *mono_regname_full (int reg, int bank);
gint32* mono_allocate_stack_slots (MonoCompile *cfg, gboolean backward, guint32 *stack_size, guint32 *stack_align);
void mono_local_regalloc (MonoCompile *cfg, MonoBasicBlock *bb);
MonoInst *mono_branch_optimize_exception_target (MonoCompile *cfg, MonoBasicBlock *bb, const char * exname);
void mono_remove_critical_edges (MonoCompile *cfg);
gboolean mono_is_regsize_var (MonoType *t);
MonoJumpInfo * mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target);
int mini_class_check_context_used (MonoCompile *cfg, MonoClass *klass);
int mini_method_check_context_used (MonoCompile *cfg, MonoMethod *method);
void mini_type_from_op (MonoCompile *cfg, MonoInst *ins, MonoInst *src1, MonoInst *src2);
void mini_set_inline_failure (MonoCompile *cfg, const char *msg);
void mini_test_tailcall (MonoCompile *cfg, gboolean tailcall);
gboolean mini_should_check_stack_pointer (MonoCompile *cfg);
MonoInst* mini_emit_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used);
void mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align);
void mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align);
void mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native);
void mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass);
void mini_emit_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype);
int mini_emit_sext_index_reg (MonoCompile *cfg, MonoInst *index);
MonoInst* mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index, gboolean bcheck, gboolean bounded);
MonoInst* mini_emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type);
MonoInst* mini_emit_get_rgctx_method (MonoCompile *cfg, int context_used,
MonoMethod *cmethod, MonoRgctxInfoType rgctx_type);
void mini_emit_tailcall_parameters (MonoCompile *cfg, MonoMethodSignature *sig);
MonoCallInst * mini_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
MonoInst **args, gboolean calli, gboolean virtual_, gboolean tailcall,
gboolean rgctx, gboolean unbox_trampoline, MonoMethod *target);
MonoInst* mini_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *imt_arg, MonoInst *rgctx_arg);
MonoInst* mini_emit_calli_full (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr,
MonoInst *imt_arg, MonoInst *rgctx_arg, gboolean tailcall);
MonoInst* mini_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig, gboolean tailcall,
MonoInst **args, MonoInst *this_ins, MonoInst *imt_arg, MonoInst *rgctx_arg);
MonoInst* mini_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
MonoMethodSignature *sig, MonoInst **args);
MonoInst* mini_emit_extra_arg_calli (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **orig_args, int arg_reg, MonoInst *call_target);
MonoInst* mini_emit_llvmonly_calli (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, MonoInst *addr);
MonoInst* mini_emit_llvmonly_virtual_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, int context_used, MonoInst **sp);
MonoInst* mini_emit_memory_barrier (MonoCompile *cfg, int kind);
MonoInst* mini_emit_storing_write_barrier (MonoCompile *cfg, MonoInst *ptr, MonoInst *value);
void mini_emit_write_barrier (MonoCompile *cfg, MonoInst *ptr, MonoInst *value);
MonoInst* mini_emit_memory_load (MonoCompile *cfg, MonoType *type, MonoInst *src, int offset, int ins_flag);
void mini_emit_memory_store (MonoCompile *cfg, MonoType *type, MonoInst *dest, MonoInst *value, int ins_flag);
void mini_emit_memory_copy_bytes (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoInst *size, int ins_flag);
void mini_emit_memory_init_bytes (MonoCompile *cfg, MonoInst *dest, MonoInst *value, MonoInst *size, int ins_flag);
void mini_emit_memory_copy (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native, int ins_flag);
MonoInst* mini_emit_array_store (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, gboolean safety_checks);
MonoInst* mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args, gboolean *ins_type_initialized);
MonoInst* mini_emit_inst_for_ctor (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args);
MonoInst* mini_emit_inst_for_field_load (MonoCompile *cfg, MonoClassField *field);
MonoInst* mini_handle_enum_has_flag (MonoCompile *cfg, MonoClass *klass, MonoInst *enum_this, int enum_val_reg, MonoInst *enum_flag);
MonoInst* mini_handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst *val, int context_used);
MonoMethod* mini_get_memcpy_method (void);
MonoMethod* mini_get_memset_method (void);
MonoRgctxAccess mini_get_rgctx_access_for_method (MonoMethod *method);
CompRelation mono_opcode_to_cond (int opcode);
CompType mono_opcode_to_type (int opcode, int cmp_opcode);
CompRelation mono_negate_cond (CompRelation cond);
void mono_decompose_op_imm (MonoCompile *cfg, MonoBasicBlock *bb, MonoInst *ins);
void mono_peephole_ins (MonoBasicBlock *bb, MonoInst *ins);
MonoUnwindOp *mono_create_unwind_op (int when,
int tag, int reg,
int val);
void mono_emit_unwind_op (MonoCompile *cfg, int when,
int tag, int reg,
int val);
MonoTrampInfo* mono_tramp_info_create (const char *name, guint8 *code, guint32 code_size, MonoJumpInfo *ji, GSList *unwind_ops);
void mono_tramp_info_free (MonoTrampInfo *info);
void mono_aot_tramp_info_register (MonoTrampInfo *info, MonoMemoryManager *mem_manager);
void mono_tramp_info_register (MonoTrampInfo *info, MonoMemoryManager *mem_manager);
int mini_exception_id_by_name (const char *name);
gboolean mini_type_is_hfa (MonoType *t, int *out_nfields, int *out_esize);
int mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
MonoInst *return_var, MonoInst **inline_args,
guint inline_offset, gboolean is_virtual_call);
//the following methods could just be renamed/moved from method-to-ir.c
int mini_inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp, guchar *ip,
guint real_offset, gboolean inline_always);
MonoInst* mini_emit_get_rgctx_klass (MonoCompile *cfg, int context_used, MonoClass *klass, MonoRgctxInfoType rgctx_type);
MonoInst* mini_emit_runtime_constant (MonoCompile *cfg, MonoJumpInfoType patch_type, gpointer data);
void mini_save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg, gboolean null_check);
void mini_reset_cast_details (MonoCompile *cfg);
void mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass);
gboolean mini_class_has_reference_variant_generic_argument (MonoCompile *cfg, MonoClass *klass, int context_used);
MonoInst *mono_decompose_opcode (MonoCompile *cfg, MonoInst *ins);
void mono_decompose_long_opts (MonoCompile *cfg);
void mono_decompose_vtype_opts (MonoCompile *cfg);
void mono_decompose_array_access_opts (MonoCompile *cfg);
void mono_decompose_soft_float (MonoCompile *cfg);
void mono_local_emulate_ops (MonoCompile *cfg);
void mono_handle_global_vregs (MonoCompile *cfg);
void mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts);
void mono_allocate_gsharedvt_vars (MonoCompile *cfg);
void mono_if_conversion (MonoCompile *cfg);
/* Delegates */
char* mono_get_delegate_virtual_invoke_impl_name (gboolean load_imt_reg, int offset);
gpointer mono_get_delegate_virtual_invoke_impl (MonoMethodSignature *sig, MonoMethod *method);
void mono_codegen (MonoCompile *cfg);
void mono_call_inst_add_outarg_reg (MonoCompile *cfg, MonoCallInst *call, int vreg, int hreg, int bank);
void mono_call_inst_add_outarg_vt (MonoCompile *cfg, MonoCallInst *call, MonoInst *outarg_vt);
/* methods that must be provided by the arch-specific port */
void mono_arch_init (void);
void mono_arch_finish_init (void);
void mono_arch_cleanup (void);
void mono_arch_cpu_init (void);
guint32 mono_arch_cpu_optimizations (guint32 *exclude_mask);
const char *mono_arch_regname (int reg);
const char *mono_arch_fregname (int reg);
void mono_arch_exceptions_init (void);
guchar* mono_arch_create_generic_trampoline (MonoTrampolineType tramp_type, MonoTrampInfo **info, gboolean aot);
gpointer mono_arch_create_rgctx_lazy_fetch_trampoline (guint32 slot, MonoTrampInfo **info, gboolean aot);
gpointer mono_arch_create_general_rgctx_lazy_fetch_trampoline (MonoTrampInfo **info, gboolean aot);
guint8* mono_arch_create_sdb_trampoline (gboolean single_step, MonoTrampInfo **info, gboolean aot);
guint8 *mono_arch_create_llvm_native_thunk (guint8* addr);
gpointer mono_arch_get_get_tls_tramp (void);
GList *mono_arch_get_allocatable_int_vars (MonoCompile *cfg);
GList *mono_arch_get_global_int_regs (MonoCompile *cfg);
guint32 mono_arch_regalloc_cost (MonoCompile *cfg, MonoMethodVar *vmv);
void mono_arch_patch_code_new (MonoCompile *cfg, guint8 *code, MonoJumpInfo *ji, gpointer target);
void mono_arch_flush_icache (guint8 *code, gint size);
guint8 *mono_arch_emit_prolog (MonoCompile *cfg);
void mono_arch_emit_epilog (MonoCompile *cfg);
void mono_arch_emit_exceptions (MonoCompile *cfg);
void mono_arch_lowering_pass (MonoCompile *cfg, MonoBasicBlock *bb);
void mono_arch_peephole_pass_1 (MonoCompile *cfg, MonoBasicBlock *bb);
void mono_arch_peephole_pass_2 (MonoCompile *cfg, MonoBasicBlock *bb);
void mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb);
void mono_arch_fill_argument_info (MonoCompile *cfg);
void mono_arch_allocate_vars (MonoCompile *m);
int mono_arch_get_argument_info (MonoMethodSignature *csig, int param_count, MonoJitArgumentInfo *arg_info);
void mono_arch_emit_call (MonoCompile *cfg, MonoCallInst *call);
void mono_arch_emit_outarg_vt (MonoCompile *cfg, MonoInst *ins, MonoInst *src);
void mono_arch_emit_setret (MonoCompile *cfg, MonoMethod *method, MonoInst *val);
MonoDynCallInfo *mono_arch_dyn_call_prepare (MonoMethodSignature *sig);
void mono_arch_dyn_call_free (MonoDynCallInfo *info);
int mono_arch_dyn_call_get_buf_size (MonoDynCallInfo *info);
void mono_arch_start_dyn_call (MonoDynCallInfo *info, gpointer **args, guint8 *ret, guint8 *buf);
void mono_arch_finish_dyn_call (MonoDynCallInfo *info, guint8 *buf);
MonoInst *mono_arch_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args);
void mono_arch_decompose_opts (MonoCompile *cfg, MonoInst *ins);
void mono_arch_decompose_long_opts (MonoCompile *cfg, MonoInst *ins);
GSList* mono_arch_get_delegate_invoke_impls (void);
LLVMCallInfo* mono_arch_get_llvm_call_info (MonoCompile *cfg, MonoMethodSignature *sig);
guint8* mono_arch_emit_load_got_addr (guint8 *start, guint8 *code, MonoCompile *cfg, MonoJumpInfo **ji);
guint8* mono_arch_emit_load_aotconst (guint8 *start, guint8 *code, MonoJumpInfo **ji, MonoJumpInfoType tramp_type, gconstpointer target);
GSList* mono_arch_get_cie_program (void);
void mono_arch_set_target (char *mtriple);
gboolean mono_arch_gsharedvt_sig_supported (MonoMethodSignature *sig);
gpointer mono_arch_get_gsharedvt_trampoline (MonoTrampInfo **info, gboolean aot);
gpointer mono_arch_get_gsharedvt_call_info (MonoMemoryManager *mem_manager, gpointer addr, MonoMethodSignature *normal_sig, MonoMethodSignature *gsharedvt_sig, gboolean gsharedvt_in, gint32 vcall_offset, gboolean calli);
gboolean mono_arch_opcode_needs_emulation (MonoCompile *cfg, int opcode);
gboolean mono_arch_tailcall_supported (MonoCompile *cfg, MonoMethodSignature *caller_sig, MonoMethodSignature *callee_sig, gboolean virtual_);
int mono_arch_translate_tls_offset (int offset);
gboolean mono_arch_opcode_supported (int opcode);
MONO_COMPONENT_API void mono_arch_setup_resume_sighandler_ctx (MonoContext *ctx, gpointer func);
gboolean mono_arch_have_fast_tls (void);
#ifdef MONO_ARCH_HAS_REGISTER_ICALL
void mono_arch_register_icall (void);
#endif
#ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
gboolean mono_arch_is_soft_float (void);
#else
static inline MONO_ALWAYS_INLINE gboolean
mono_arch_is_soft_float (void)
{
return FALSE;
}
#endif
/* Soft Debug support */
#ifdef MONO_ARCH_SOFT_DEBUG_SUPPORTED
MONO_COMPONENT_API void mono_arch_set_breakpoint (MonoJitInfo *ji, guint8 *ip);
MONO_COMPONENT_API void mono_arch_clear_breakpoint (MonoJitInfo *ji, guint8 *ip);
MONO_COMPONENT_API void mono_arch_start_single_stepping (void);
MONO_COMPONENT_API void mono_arch_stop_single_stepping (void);
gboolean mono_arch_is_single_step_event (void *info, void *sigctx);
gboolean mono_arch_is_breakpoint_event (void *info, void *sigctx);
MONO_COMPONENT_API void mono_arch_skip_breakpoint (MonoContext *ctx, MonoJitInfo *ji);
MONO_COMPONENT_API void mono_arch_skip_single_step (MonoContext *ctx);
SeqPointInfo *mono_arch_get_seq_point_info (guint8 *code);
#endif
gboolean
mono_arch_unwind_frame (MonoJitTlsData *jit_tls,
MonoJitInfo *ji, MonoContext *ctx,
MonoContext *new_ctx, MonoLMF **lmf,
host_mgreg_t **save_locations,
StackFrameInfo *frame_info);
gpointer mono_arch_get_throw_exception_by_name (void);
gpointer mono_arch_get_call_filter (MonoTrampInfo **info, gboolean aot);
gpointer mono_arch_get_restore_context (MonoTrampInfo **info, gboolean aot);
gpointer mono_arch_get_throw_exception (MonoTrampInfo **info, gboolean aot);
gpointer mono_arch_get_rethrow_exception (MonoTrampInfo **info, gboolean aot);
gpointer mono_arch_get_rethrow_preserve_exception (MonoTrampInfo **info, gboolean aot);
gpointer mono_arch_get_throw_corlib_exception (MonoTrampInfo **info, gboolean aot);
gpointer mono_arch_get_throw_pending_exception (MonoTrampInfo **info, gboolean aot);
gboolean mono_arch_handle_exception (void *sigctx, gpointer obj);
void mono_arch_handle_altstack_exception (void *sigctx, MONO_SIG_HANDLER_INFO_TYPE *siginfo, gpointer fault_addr, gboolean stack_ovf);
gboolean mono_handle_soft_stack_ovf (MonoJitTlsData *jit_tls, MonoJitInfo *ji, void *ctx, MONO_SIG_HANDLER_INFO_TYPE *siginfo, guint8* fault_addr);
void mono_handle_hard_stack_ovf (MonoJitTlsData *jit_tls, MonoJitInfo *ji, MonoContext *mctx, guint8* fault_addr);
void mono_arch_undo_ip_adjustment (MonoContext *ctx);
void mono_arch_do_ip_adjustment (MonoContext *ctx);
gpointer mono_arch_ip_from_context (void *sigctx);
MONO_COMPONENT_API host_mgreg_t mono_arch_context_get_int_reg (MonoContext *ctx, int reg);
MONO_COMPONENT_API host_mgreg_t *mono_arch_context_get_int_reg_address (MonoContext *ctx, int reg);
MONO_COMPONENT_API void mono_arch_context_set_int_reg (MonoContext *ctx, int reg, host_mgreg_t val);
void mono_arch_flush_register_windows (void);
gboolean mono_arch_is_inst_imm (int opcode, int imm_opcode, gint64 imm);
gboolean mono_arch_is_int_overflow (void *sigctx, void *info);
void mono_arch_invalidate_method (MonoJitInfo *ji, void *func, gpointer func_arg);
guint32 mono_arch_get_patch_offset (guint8 *code);
gpointer *mono_arch_get_delegate_method_ptr_addr (guint8* code, host_mgreg_t *regs);
void mono_arch_create_vars (MonoCompile *cfg);
void mono_arch_save_unwind_info (MonoCompile *cfg);
void mono_arch_register_lowlevel_calls (void);
gpointer mono_arch_get_unbox_trampoline (MonoMethod *m, gpointer addr);
gpointer mono_arch_get_static_rgctx_trampoline (MonoMemoryManager *mem_manager, gpointer arg, gpointer addr);
gpointer mono_arch_get_ftnptr_arg_trampoline (MonoMemoryManager *mem_manager, gpointer arg, gpointer addr);
gpointer mono_arch_get_gsharedvt_arg_trampoline (gpointer arg, gpointer addr);
void mono_arch_patch_callsite (guint8 *method_start, guint8 *code, guint8 *addr);
void mono_arch_patch_plt_entry (guint8 *code, gpointer *got, host_mgreg_t *regs, guint8 *addr);
int mono_arch_get_this_arg_reg (guint8 *code);
gpointer mono_arch_get_this_arg_from_call (host_mgreg_t *regs, guint8 *code);
gpointer mono_arch_get_delegate_invoke_impl (MonoMethodSignature *sig, gboolean has_target);
gpointer mono_arch_get_delegate_virtual_invoke_impl (MonoMethodSignature *sig, MonoMethod *method, int offset, gboolean load_imt_reg);
gpointer mono_arch_create_specific_trampoline (gpointer arg1, MonoTrampolineType tramp_type, MonoMemoryManager *mem_manager, guint32 *code_len);
MonoMethod* mono_arch_find_imt_method (host_mgreg_t *regs, guint8 *code);
MonoVTable* mono_arch_find_static_call_vtable (host_mgreg_t *regs, guint8 *code);
gpointer mono_arch_build_imt_trampoline (MonoVTable *vtable, MonoIMTCheckItem **imt_entries, int count, gpointer fail_tramp);
void mono_arch_notify_pending_exc (MonoThreadInfo *info);
guint8* mono_arch_get_call_target (guint8 *code);
guint32 mono_arch_get_plt_info_offset (guint8 *plt_entry, host_mgreg_t *regs, guint8 *code);
GSList *mono_arch_get_trampolines (gboolean aot);
gpointer mono_arch_get_interp_to_native_trampoline (MonoTrampInfo **info);
gpointer mono_arch_get_native_to_interp_trampoline (MonoTrampInfo **info);
#ifdef MONO_ARCH_HAVE_INTERP_PINVOKE_TRAMP
// Moves data (arguments and return vt address) from the InterpFrame to the CallContext so a pinvoke call can be made.
void mono_arch_set_native_call_context_args (CallContext *ccontext, gpointer frame, MonoMethodSignature *sig);
// Moves the return value from the InterpFrame to the ccontext, or to the retp (if native code passed the retvt address)
void mono_arch_set_native_call_context_ret (CallContext *ccontext, gpointer frame, MonoMethodSignature *sig, gpointer retp);
// When entering interp from native, this moves the arguments from the ccontext to the InterpFrame. If we have a return
// vt address, we return it. This ret vt address needs to be passed to mono_arch_set_native_call_context_ret.
gpointer mono_arch_get_native_call_context_args (CallContext *ccontext, gpointer frame, MonoMethodSignature *sig);
// After the pinvoke call is done, this moves return value from the ccontext to the InterpFrame.
void mono_arch_get_native_call_context_ret (CallContext *ccontext, gpointer frame, MonoMethodSignature *sig);
#endif
/* New interruption machinery */
void
mono_setup_async_callback (MonoContext *ctx, void (*async_cb)(void *fun), gpointer user_data);
void
mono_arch_setup_async_callback (MonoContext *ctx, void (*async_cb)(void *fun), gpointer user_data);
gboolean
mono_thread_state_init_from_handle (MonoThreadUnwindState *tctx, MonoThreadInfo *info, /*optional*/ void *sigctx);
/* Exception handling */
typedef gboolean (*MonoJitStackWalk) (StackFrameInfo *frame, MonoContext *ctx, gpointer data);
void mono_exceptions_init (void);
gboolean mono_handle_exception (MonoContext *ctx, gpointer obj);
void mono_handle_native_crash (const char *signal, MonoContext *mctx, MONO_SIG_HANDLER_INFO_TYPE *siginfo);
MONO_API void mono_print_thread_dump (void *sigctx);
MONO_API void mono_print_thread_dump_from_ctx (MonoContext *ctx);
MONO_COMPONENT_API void mono_walk_stack_with_ctx (MonoJitStackWalk func, MonoContext *start_ctx, MonoUnwindOptions unwind_options, void *user_data);
MONO_COMPONENT_API void mono_walk_stack_with_state (MonoJitStackWalk func, MonoThreadUnwindState *state, MonoUnwindOptions unwind_options, void *user_data);
void mono_walk_stack (MonoJitStackWalk func, MonoUnwindOptions options, void *user_data);
gboolean mono_thread_state_init_from_sigctx (MonoThreadUnwindState *ctx, void *sigctx);
void mono_thread_state_init (MonoThreadUnwindState *ctx);
MONO_COMPONENT_API gboolean mono_thread_state_init_from_current (MonoThreadUnwindState *ctx);
MONO_COMPONENT_API gboolean mono_thread_state_init_from_monoctx (MonoThreadUnwindState *ctx, MonoContext *mctx);
void mono_setup_altstack (MonoJitTlsData *tls);
void mono_free_altstack (MonoJitTlsData *tls);
gpointer mono_altstack_restore_prot (host_mgreg_t *regs, guint8 *code, gpointer *tramp_data, guint8* tramp);
MONO_COMPONENT_API MonoJitInfo* mini_jit_info_table_find (gpointer addr);
MonoJitInfo* mini_jit_info_table_find_ext (gpointer addr, gboolean allow_trampolines);
G_EXTERN_C void mono_resume_unwind (MonoContext *ctx);
MonoJitInfo * mono_find_jit_info (MonoJitTlsData *jit_tls, MonoJitInfo *res, MonoJitInfo *prev_ji, MonoContext *ctx, MonoContext *new_ctx, char **trace, MonoLMF **lmf, int *native_offset, gboolean *managed);
typedef gboolean (*MonoExceptionFrameWalk) (MonoMethod *method, gpointer ip, size_t native_offset, gboolean managed, gpointer user_data);
MONO_API gboolean mono_exception_walk_trace (MonoException *ex, MonoExceptionFrameWalk func, gpointer user_data);
MONO_COMPONENT_API void mono_restore_context (MonoContext *ctx);
guint8* mono_jinfo_get_unwind_info (MonoJitInfo *ji, guint32 *unwind_info_len);
int mono_jinfo_get_epilog_size (MonoJitInfo *ji);
gboolean
mono_find_jit_info_ext (MonoJitTlsData *jit_tls,
MonoJitInfo *prev_ji, MonoContext *ctx,
MonoContext *new_ctx, char **trace, MonoLMF **lmf,
host_mgreg_t **save_locations,
StackFrameInfo *frame);
gpointer mono_get_throw_exception (void);
gpointer mono_get_rethrow_exception (void);
gpointer mono_get_rethrow_preserve_exception (void);
gpointer mono_get_call_filter (void);
gpointer mono_get_restore_context (void);
gpointer mono_get_throw_corlib_exception (void);
gpointer mono_get_throw_exception_addr (void);
gpointer mono_get_rethrow_preserve_exception_addr (void);
ICALL_EXPORT
MonoArray *ves_icall_get_trace (MonoException *exc, gint32 skip, MonoBoolean need_file_info);
ICALL_EXPORT
MonoBoolean ves_icall_get_frame_info (gint32 skip, MonoBoolean need_file_info,
MonoReflectionMethod **method,
gint32 *iloffset, gint32 *native_offset,
MonoString **file, gint32 *line, gint32 *column);
void mono_set_cast_details (MonoClass *from, MonoClass *to);
void mono_decompose_typechecks (MonoCompile *cfg);
/* Dominator/SSA methods */
void mono_compile_dominator_info (MonoCompile *cfg, int dom_flags);
void mono_compute_natural_loops (MonoCompile *cfg);
MonoBitSet* mono_compile_iterated_dfrontier (MonoCompile *cfg, MonoBitSet *set);
void mono_ssa_compute (MonoCompile *cfg);
void mono_ssa_remove (MonoCompile *cfg);
void mono_ssa_remove_gsharedvt (MonoCompile *cfg);
void mono_ssa_cprop (MonoCompile *cfg);
void mono_ssa_deadce (MonoCompile *cfg);
void mono_ssa_strength_reduction (MonoCompile *cfg);
void mono_free_loop_info (MonoCompile *cfg);
void mono_ssa_loop_invariant_code_motion (MonoCompile *cfg);
void mono_ssa_compute2 (MonoCompile *cfg);
void mono_ssa_remove2 (MonoCompile *cfg);
void mono_ssa_cprop2 (MonoCompile *cfg);
void mono_ssa_deadce2 (MonoCompile *cfg);
/* debugging support */
void mono_debug_init_method (MonoCompile *cfg, MonoBasicBlock *start_block,
guint32 breakpoint_id);
void mono_debug_open_method (MonoCompile *cfg);
void mono_debug_close_method (MonoCompile *cfg);
void mono_debug_free_method (MonoCompile *cfg);
void mono_debug_open_block (MonoCompile *cfg, MonoBasicBlock *bb, guint32 address);
void mono_debug_record_line_number (MonoCompile *cfg, MonoInst *ins, guint32 address);
void mono_debug_serialize_debug_info (MonoCompile *cfg, guint8 **out_buf, guint32 *buf_len);
void mono_debug_add_aot_method (MonoMethod *method, guint8 *code_start,
guint8 *debug_info, guint32 debug_info_len);
MONO_API void mono_debug_print_vars (gpointer ip, gboolean only_arguments);
MONO_API void mono_debugger_run_finally (MonoContext *start_ctx);
MONO_API gboolean mono_breakpoint_clean_code (guint8 *method_start, guint8 *code, int offset, guint8 *buf, int size);
/* Tracing */
MonoCallSpec *mono_trace_set_options (const char *options);
gboolean mono_trace_eval (MonoMethod *method);
gboolean
mono_tailcall_print_enabled (void);
void
mono_tailcall_print (const char *format, ...);
gboolean
mono_is_supported_tailcall_helper (gboolean value, const char *svalue);
#define IS_SUPPORTED_TAILCALL(x) (mono_is_supported_tailcall_helper((x), #x))
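/*
 * Illustrative sketch: mono_arch_tailcall_supported () implementations can
 * combine checks through this macro so the failing condition's source text
 * is available to tailcall tracing. The concrete conditions below are made
 * up for illustration; only param_count/call_convention are real fields.
 *
 *   gboolean res = TRUE;
 *   res &= IS_SUPPORTED_TAILCALL (caller_sig->call_convention == callee_sig->call_convention);
 *   res &= IS_SUPPORTED_TAILCALL (callee_sig->param_count <= caller_sig->param_count);
 *   return res;
 */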
extern void
mono_perform_abc_removal (MonoCompile *cfg);
extern void
mono_local_cprop (MonoCompile *cfg);
extern void
mono_local_deadce (MonoCompile *cfg);
void
mono_local_alias_analysis (MonoCompile *cfg);
/* Generic sharing */
void
mono_set_generic_sharing_supported (gboolean supported);
void
mono_set_generic_sharing_vt_supported (gboolean supported);
void
mono_set_partial_sharing_supported (gboolean supported);
gboolean
mono_class_generic_sharing_enabled (MonoClass *klass);
gpointer
mono_class_fill_runtime_generic_context (MonoVTable *class_vtable, guint32 slot, MonoError *error);
gpointer
mono_method_fill_runtime_generic_context (MonoMethodRuntimeGenericContext *mrgctx, guint32 slot, MonoError *error);
const char*
mono_rgctx_info_type_to_str (MonoRgctxInfoType type);
MonoJumpInfoType
mini_rgctx_info_type_to_patch_info_type (MonoRgctxInfoType info_type);
gboolean
mono_method_needs_static_rgctx_invoke (MonoMethod *method, gboolean allow_type_vars);
int
mono_class_rgctx_get_array_size (int n, gboolean mrgctx);
MonoGenericContext
mono_method_construct_object_context (MonoMethod *method);
MONO_COMPONENT_API MonoMethod*
mono_method_get_declaring_generic_method (MonoMethod *method);
int
mono_generic_context_check_used (MonoGenericContext *context);
int
mono_class_check_context_used (MonoClass *klass);
gboolean
mono_generic_context_is_sharable (MonoGenericContext *context, gboolean allow_type_vars);
gboolean
mono_generic_context_is_sharable_full (MonoGenericContext *context, gboolean allow_type_vars, gboolean allow_partial);
gboolean
mono_method_is_generic_impl (MonoMethod *method);
gboolean
mono_method_is_generic_sharable (MonoMethod *method, gboolean allow_type_vars);
gboolean
mono_method_is_generic_sharable_full (MonoMethod *method, gboolean allow_type_vars, gboolean allow_partial, gboolean allow_gsharedvt);
gboolean
mini_class_is_generic_sharable (MonoClass *klass);
gboolean
mini_generic_inst_is_sharable (MonoGenericInst *inst, gboolean allow_type_vars, gboolean allow_partial);
MonoMethod*
mono_class_get_method_generic (MonoClass *klass, MonoMethod *method, MonoError *error);
gboolean
mono_is_partially_sharable_inst (MonoGenericInst *inst);
gboolean
mini_is_gsharedvt_gparam (MonoType *t);
gboolean
mini_is_gsharedvt_inst (MonoGenericInst *inst);
MonoGenericContext* mini_method_get_context (MonoMethod *method);
int mono_method_check_context_used (MonoMethod *method);
gboolean mono_generic_context_equal_deep (MonoGenericContext *context1, MonoGenericContext *context2);
gpointer mono_helper_get_rgctx_other_ptr (MonoClass *caller_class, MonoVTable *vtable,
guint32 token, guint32 token_source, guint32 rgctx_type,
gint32 rgctx_index);
void mono_generic_sharing_init (void);
MonoClass* mini_class_get_container_class (MonoClass *klass);
MonoGenericContext* mini_class_get_context (MonoClass *klass);
typedef enum {
SHARE_MODE_NONE = 0x0,
SHARE_MODE_GSHAREDVT = 0x1,
} GetSharedMethodFlags;
MonoType* mini_get_underlying_type (MonoType *type);
MonoType* mini_type_get_underlying_type (MonoType *type);
MonoClass* mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context);
MonoMethod* mini_get_shared_method_to_register (MonoMethod *method);
MonoMethod* mini_get_shared_method_full (MonoMethod *method, GetSharedMethodFlags flags, MonoError *error);
MonoType* mini_get_shared_gparam (MonoType *t, MonoType *constraint);
int mini_get_rgctx_entry_slot (MonoJumpInfoRgctxEntry *entry);
int mini_type_stack_size (MonoType *t, int *align);
int mini_type_stack_size_full (MonoType *t, guint32 *align, gboolean pinvoke);
void mini_type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst);
guint mono_type_to_regmove (MonoCompile *cfg, MonoType *type);
void mono_cfg_add_try_hole (MonoCompile *cfg, MonoExceptionClause *clause, guint8 *start, MonoBasicBlock *bb);
void mono_cfg_set_exception (MonoCompile *cfg, MonoExceptionType type);
void mono_cfg_set_exception_invalid_program (MonoCompile *cfg, char *msg);
#define MONO_TIME_TRACK(a, phase) \
{ \
gint64 start = mono_time_track_start (); \
(phase) ; \
mono_time_track_end (&(a), start); \
}
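/*
 * Illustrative usage: timing a JIT pass into the matching MonoJitStats
 * field declared above:
 *
 *   MONO_TIME_TRACK (mono_jit_stats.jit_local_cprop, mono_local_cprop (cfg));
 */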
gint64 mono_time_track_start (void);
void mono_time_track_end (gint64 *time, gint64 start);
void mono_update_jit_stats (MonoCompile *cfg);
gboolean mini_type_is_reference (MonoType *type);
gboolean mini_type_is_vtype (MonoType *t);
gboolean mini_type_var_is_vt (MonoType *type);
gboolean mini_is_gsharedvt_type (MonoType *t);
gboolean mini_is_gsharedvt_klass (MonoClass *klass);
gboolean mini_is_gsharedvt_signature (MonoMethodSignature *sig);
gboolean mini_is_gsharedvt_variable_type (MonoType *t);
gboolean mini_is_gsharedvt_variable_klass (MonoClass *klass);
gboolean mini_is_gsharedvt_sharable_method (MonoMethod *method);
gboolean mini_is_gsharedvt_variable_signature (MonoMethodSignature *sig);
gboolean mini_is_gsharedvt_sharable_inst (MonoGenericInst *inst);
gboolean mini_method_is_default_method (MonoMethod *m);
gboolean mini_method_needs_mrgctx (MonoMethod *m);
gpointer mini_method_get_rgctx (MonoMethod *m);
void mini_init_gsctx (MonoMemPool *mp, MonoGenericContext *context, MonoGenericSharingContext *gsctx);
gpointer mini_get_gsharedvt_wrapper (gboolean gsharedvt_in, gpointer addr, MonoMethodSignature *normal_sig, MonoMethodSignature *gsharedvt_sig,
gint32 vcall_offset, gboolean calli);
MonoMethod* mini_get_gsharedvt_in_sig_wrapper (MonoMethodSignature *sig);
MonoMethod* mini_get_gsharedvt_out_sig_wrapper (MonoMethodSignature *sig);
MonoMethodSignature* mini_get_gsharedvt_out_sig_wrapper_signature (gboolean has_this, gboolean has_ret, int param_count);
gboolean mini_gsharedvt_runtime_invoke_supported (MonoMethodSignature *sig);
G_EXTERN_C void mono_interp_entry_from_trampoline (gpointer ccontext, gpointer imethod);
G_EXTERN_C void mono_interp_to_native_trampoline (gpointer addr, gpointer ccontext);
MonoMethod* mini_get_interp_in_wrapper (MonoMethodSignature *sig);
MonoMethod* mini_get_interp_lmf_wrapper (const char *name, gpointer target);
char* mono_get_method_from_ip (void *ip);
/* SIMD support */
typedef enum {
/* Used for lazy initialization */
MONO_CPU_INITED = 1 << 0,
#if defined(TARGET_X86) || defined(TARGET_AMD64)
MONO_CPU_X86_SSE = 1 << 1,
MONO_CPU_X86_SSE2 = 1 << 2,
MONO_CPU_X86_PCLMUL = 1 << 3,
MONO_CPU_X86_AES = 1 << 4,
MONO_CPU_X86_SSE3 = 1 << 5,
MONO_CPU_X86_SSSE3 = 1 << 6,
MONO_CPU_X86_SSE41 = 1 << 7,
MONO_CPU_X86_SSE42 = 1 << 8,
MONO_CPU_X86_POPCNT = 1 << 9,
MONO_CPU_X86_AVX = 1 << 10,
MONO_CPU_X86_AVX2 = 1 << 11,
MONO_CPU_X86_FMA = 1 << 12,
MONO_CPU_X86_LZCNT = 1 << 13,
MONO_CPU_X86_BMI1 = 1 << 14,
MONO_CPU_X86_BMI2 = 1 << 15,
//
// Dependencies (based on System.Runtime.Intrinsics.X86 class hierarchy):
//
// sse
// sse2
// pclmul
// aes
// sse3
// ssse3 (doesn't include 'pclmul' and 'aes')
// sse4.1
// sse4.2
// popcnt
// avx (doesn't include 'popcnt')
// avx2
// fma
// lzcnt
// bmi1
// bmi2
MONO_CPU_X86_SSE_COMBINED = MONO_CPU_X86_SSE,
MONO_CPU_X86_SSE2_COMBINED = MONO_CPU_X86_SSE_COMBINED | MONO_CPU_X86_SSE2,
MONO_CPU_X86_PCLMUL_COMBINED = MONO_CPU_X86_SSE2_COMBINED | MONO_CPU_X86_PCLMUL,
MONO_CPU_X86_AES_COMBINED = MONO_CPU_X86_SSE2_COMBINED | MONO_CPU_X86_AES,
MONO_CPU_X86_SSE3_COMBINED = MONO_CPU_X86_SSE2_COMBINED | MONO_CPU_X86_SSE3,
MONO_CPU_X86_SSSE3_COMBINED = MONO_CPU_X86_SSE3_COMBINED | MONO_CPU_X86_SSSE3,
MONO_CPU_X86_SSE41_COMBINED = MONO_CPU_X86_SSSE3_COMBINED | MONO_CPU_X86_SSE41,
MONO_CPU_X86_SSE42_COMBINED = MONO_CPU_X86_SSE41_COMBINED | MONO_CPU_X86_SSE42,
MONO_CPU_X86_POPCNT_COMBINED = MONO_CPU_X86_SSE42_COMBINED | MONO_CPU_X86_POPCNT,
MONO_CPU_X86_AVX_COMBINED = MONO_CPU_X86_SSE42_COMBINED | MONO_CPU_X86_AVX,
MONO_CPU_X86_AVX2_COMBINED = MONO_CPU_X86_AVX_COMBINED | MONO_CPU_X86_AVX2,
MONO_CPU_X86_FMA_COMBINED = MONO_CPU_X86_AVX_COMBINED | MONO_CPU_X86_FMA,
MONO_CPU_X86_FULL_SSEAVX_COMBINED = MONO_CPU_X86_FMA_COMBINED | MONO_CPU_X86_AVX2 | MONO_CPU_X86_PCLMUL
| MONO_CPU_X86_AES | MONO_CPU_X86_POPCNT | MONO_CPU_X86_FMA,
#endif
#ifdef TARGET_WASM
MONO_CPU_WASM_SIMD = 1 << 1,
#endif
#ifdef TARGET_ARM64
MONO_CPU_ARM64_BASE = 1 << 1,
MONO_CPU_ARM64_CRC = 1 << 2,
MONO_CPU_ARM64_CRYPTO = 1 << 3,
MONO_CPU_ARM64_NEON = 1 << 4,
MONO_CPU_ARM64_RDM = 1 << 5,
MONO_CPU_ARM64_DP = 1 << 6,
#endif
} MonoCPUFeatures;
G_ENUM_FUNCTIONS (MonoCPUFeatures)
MonoCPUFeatures mini_get_cpu_features (MonoCompile* cfg);
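/*
 * Illustrative sketch (not part of the real header): the *_COMBINED values
 * fold in every prerequisite from the dependency chart above, so a backend
 * can gate an intrinsic on a single mask instead of testing each ISA bit.
 */
#if 0 /* example only */
static gboolean
example_can_emit_sse41 (MonoCompile *cfg)
{
	/* True only if SSE, SSE2, SSE3, SSSE3 and SSE4.1 are all available */
	return (mini_get_cpu_features (cfg) & MONO_CPU_X86_SSE41_COMBINED) == MONO_CPU_X86_SSE41_COMBINED;
}
#endif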
enum {
SIMD_COMP_EQ,
SIMD_COMP_LT,
SIMD_COMP_LE,
SIMD_COMP_UNORD,
SIMD_COMP_NEQ,
SIMD_COMP_NLT,
SIMD_COMP_NLE,
SIMD_COMP_ORD
};
enum {
SIMD_PREFETCH_MODE_NTA,
SIMD_PREFETCH_MODE_0,
SIMD_PREFETCH_MODE_1,
SIMD_PREFETCH_MODE_2,
};
const char *mono_arch_xregname (int reg);
MonoCPUFeatures mono_arch_get_cpu_features (void);
#ifdef MONO_ARCH_SIMD_INTRINSICS
void mono_simd_simplify_indirection (MonoCompile *cfg);
void mono_simd_decompose_intrinsic (MonoCompile *cfg, MonoBasicBlock *bb, MonoInst *ins);
MonoInst* mono_emit_simd_intrinsics (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args);
MonoInst* mono_emit_simd_field_load (MonoCompile *cfg, MonoClassField *field, MonoInst *addr);
void mono_simd_intrinsics_init (void);
#endif
gboolean mono_class_is_magic_int (MonoClass *klass);
gboolean mono_class_is_magic_float (MonoClass *klass);
MonoInst* mono_emit_native_types_intrinsics (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args);
gsize mini_magic_type_size (MonoCompile *cfg, MonoType *type);
gboolean mini_magic_is_int_type (MonoType *t);
gboolean mini_magic_is_float_type (MonoType *t);
MonoType* mini_native_type_replace_type (MonoType *type);
MonoMethod*
mini_method_to_shared (MonoMethod *method); // null if not shared
static inline gboolean
mini_safepoints_enabled (void)
{
#if defined (TARGET_WASM)
return FALSE;
#else
return TRUE;
#endif
}
gpointer
mono_arch_load_function (MonoJitICallId jit_icall_id);
MONO_COMPONENT_API MonoGenericContext
mono_get_generic_context_from_stack_frame (MonoJitInfo *ji, gpointer generic_info);
MONO_COMPONENT_API gpointer
mono_get_generic_info_from_stack_frame (MonoJitInfo *ji, MonoContext *ctx);
MonoMemoryManager* mini_get_default_mem_manager (void);
MONO_COMPONENT_API int
mono_wasm_get_debug_level (void);
#endif /* __MONO_MINI_H__ */
| 1 |
dotnet/runtime | 66,006 | Revert "[mono][jit] Remove support for -O=-float32, i.e. treating r4 values a…" | Reverts this as x86 still uses it.
| vargaz | 2022-03-01T15:15:53Z | 2022-03-01T20:09:47Z | af71404d9a3b38e63408af47cc847ac1a1fcf4e1 | 2dd232a53c38ac874b15fe504df275b660988294 | Revert "[mono][jit] Remove support for -O=-float32, i.e. treating r4 values a…". Reverts this as x86 still uses it.
| ./src/coreclr/pal/inc/rt/weakreference.h | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//
//
// ===========================================================================
// File: weakreference.h
//
// ===========================================================================
// simplified weakreference.h for PAL
#include "rpc.h"
#include "rpcndr.h"
#include "unknwn.h"
#ifndef __IInspectable_INTERFACE_DEFINED__
#define __IInspectable_INTERFACE_DEFINED__
typedef struct HSTRING__{
int unused;
} HSTRING__;
typedef HSTRING__* HSTRING;
typedef /* [v1_enum] */
enum TrustLevel
{
BaseTrust = 0,
PartialTrust = ( BaseTrust + 1 ) ,
FullTrust = ( PartialTrust + 1 )
} TrustLevel;
// AF86E2E0-B12D-4c6a-9C5A-D7AA65101E90
const IID IID_IInspectable = { 0xaf86e2e0, 0xb12d, 0x4c6a, { 0x9c, 0x5a, 0xd7, 0xaa, 0x65, 0x10, 0x1e, 0x90} };
MIDL_INTERFACE("AF86E2E0-B12D-4c6a-9C5A-D7AA65101E90")
IInspectable : public IUnknown
{
public:
virtual HRESULT STDMETHODCALLTYPE GetIids(
/* [out] */ ULONG * iidCount,
/* [size_is][size_is][out] */ IID * *iids) = 0;
virtual HRESULT STDMETHODCALLTYPE GetRuntimeClassName(
/* [out] */ HSTRING * className) = 0;
virtual HRESULT STDMETHODCALLTYPE GetTrustLevel(
/* [out] */ TrustLevel * trustLevel) = 0;
};
#endif // __IInspectable_INTERFACE_DEFINED__
#ifndef __IWeakReference_INTERFACE_DEFINED__
#define __IWeakReference_INTERFACE_DEFINED__
// 00000037-0000-0000-C000-000000000046
const IID IID_IWeakReference = { 0x00000037, 0x0000, 0x0000, { 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x46} };
MIDL_INTERFACE("00000037-0000-0000-C000-000000000046")
IWeakReference : public IUnknown
{
public:
virtual HRESULT STDMETHODCALLTYPE Resolve(
/* [in] */ REFIID riid,
/* [iid_is][out] */ IInspectable **objectReference) = 0;
};
#endif // __IWeakReference_INTERFACE_DEFINED__
#ifndef __IWeakReferenceSource_INTERFACE_DEFINED__
#define __IWeakReferenceSource_INTERFACE_DEFINED__
// 00000038-0000-0000-C000-000000000046
const IID IID_IWeakReferenceSource = { 0x00000038, 0x0000, 0x0000, { 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x46} };
MIDL_INTERFACE("00000038-0000-0000-C000-000000000046")
IWeakReferenceSource : public IUnknown
{
public:
virtual HRESULT STDMETHODCALLTYPE GetWeakReference(
/* [retval][out] */ IWeakReference * *weakReference) = 0;
};
#endif // __IWeakReferenceSource_INTERFACE_DEFINED__
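// Typical use of the two interfaces above (illustrative sketch; error handling
// elided, and "pSource" is a hypothetical IUnknown* the caller already owns):
//
//   IWeakReferenceSource *pWrs = NULL;
//   IWeakReference *pWeak = NULL;
//   if (SUCCEEDED(pSource->QueryInterface(IID_IWeakReferenceSource, (void **)&pWrs)))
//   {
//       pWrs->GetWeakReference(&pWeak); // does not keep the target alive
//       pWrs->Release();
//   }
//   // ... later, check whether the target is still alive:
//   IInspectable *pTarget = NULL;
//   if (pWeak != NULL && SUCCEEDED(pWeak->Resolve(IID_IInspectable, &pTarget)) && pTarget != NULL)
//   {
//       // the target is still alive; use pTarget, then pTarget->Release()
//   }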
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//
//
// ===========================================================================
// File: weakreference.h
//
// ===========================================================================
// simplified weakreference.h for PAL
#include "rpc.h"
#include "rpcndr.h"
#include "unknwn.h"
#ifndef __IInspectable_INTERFACE_DEFINED__
#define __IInspectable_INTERFACE_DEFINED__
typedef struct HSTRING__{
int unused;
} HSTRING__;
typedef HSTRING__* HSTRING;
typedef /* [v1_enum] */
enum TrustLevel
{
BaseTrust = 0,
PartialTrust = ( BaseTrust + 1 ) ,
FullTrust = ( PartialTrust + 1 )
} TrustLevel;
// AF86E2E0-B12D-4c6a-9C5A-D7AA65101E90
const IID IID_IInspectable = { 0xaf86e2e0, 0xb12d, 0x4c6a, { 0x9c, 0x5a, 0xd7, 0xaa, 0x65, 0x10, 0x1e, 0x90} };
MIDL_INTERFACE("AF86E2E0-B12D-4c6a-9C5A-D7AA65101E90")
IInspectable : public IUnknown
{
public:
virtual HRESULT STDMETHODCALLTYPE GetIids(
/* [out] */ ULONG * iidCount,
/* [size_is][size_is][out] */ IID * *iids) = 0;
virtual HRESULT STDMETHODCALLTYPE GetRuntimeClassName(
/* [out] */ HSTRING * className) = 0;
virtual HRESULT STDMETHODCALLTYPE GetTrustLevel(
/* [out] */ TrustLevel * trustLevel) = 0;
};
#endif // __IInspectable_INTERFACE_DEFINED__
#ifndef __IWeakReference_INTERFACE_DEFINED__
#define __IWeakReference_INTERFACE_DEFINED__
// 00000037-0000-0000-C000-000000000046
const IID IID_IWeakReference = { 0x00000037, 0x0000, 0x0000, { 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x46} };
MIDL_INTERFACE("00000037-0000-0000-C000-000000000046")
IWeakReference : public IUnknown
{
public:
virtual HRESULT STDMETHODCALLTYPE Resolve(
/* [in] */ REFIID riid,
/* [iid_is][out] */ IInspectable **objectReference) = 0;
};
#endif // __IWeakReference_INTERFACE_DEFINED__
#ifndef __IWeakReferenceSource_INTERFACE_DEFINED__
#define __IWeakReferenceSource_INTERFACE_DEFINED__
// 00000038-0000-0000-C000-000000000046
const IID IID_IWeakReferenceSource = { 0x00000038, 0x0000, 0x0000, { 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x46} };
MIDL_INTERFACE("00000038-0000-0000-C000-000000000046")
IWeakReferenceSource : public IUnknown
{
public:
virtual HRESULT STDMETHODCALLTYPE GetWeakReference(
/* [retval][out] */ IWeakReference * *weakReference) = 0;
};
#endif // __IWeakReferenceSource_INTERFACE_DEFINED__
| -1 |
dotnet/runtime | 66,006 | Revert "[mono][jit] Remove support for -O=-float32, i.e. treating r4 values a…" | Reverts this as x86 still uses it.
| vargaz | 2022-03-01T15:15:53Z | 2022-03-01T20:09:47Z | af71404d9a3b38e63408af47cc847ac1a1fcf4e1 | 2dd232a53c38ac874b15fe504df275b660988294 | Revert "[mono][jit] Remove support for -O=-float32, i.e. treating r4 values a…". Reverts this as x86 still uses it.
| ./src/mono/mono/mini/tramp-mips.c | /**
* \file
* JIT trampoline code for MIPS
*
* Authors:
* Mark Mason ([email protected])
*
* Based on tramp-ppc.c by:
* Dietmar Maurer ([email protected])
* Paolo Molaro ([email protected])
* Carlos Valiente <[email protected]>
*
* (C) 2006 Broadcom
* (C) 2001 Ximian, Inc.
*/
#include <config.h>
#include <glib.h>
#include <mono/metadata/abi-details.h>
#include <mono/metadata/marshal.h>
#include <mono/metadata/tabledefs.h>
#include <mono/arch/mips/mips-codegen.h>
#include "mini.h"
#include "mini-mips.h"
#include "mini-runtime.h"
#include "mono/utils/mono-tls-inline.h"
/*
* get_unbox_trampoline:
* @m: method pointer
* @addr: pointer to native code for @m
*
* when value type methods are called through the vtable we need to unbox the
* this argument. This method returns a pointer to a trampoline which does
* unboxing before calling the method
*/
gpointer
mono_arch_get_unbox_trampoline (MonoMethod *m, gpointer addr)
{
guint8 *code, *start;
MonoMemoryManager *mem_manager = m_method_get_mem_manager (m);
start = code = mono_mem_manager_code_reserve (mem_manager, 20);
mips_load (code, mips_t9, addr);
/* The this pointer is kept in a0 */
mips_addiu (code, mips_a0, mips_a0, MONO_ABI_SIZEOF (MonoObject));
mips_jr (code, mips_t9);
mips_nop (code);
mono_arch_flush_icache (start, code - start);
MONO_PROFILER_RAISE (jit_code_buffer, (start, code - start, MONO_PROFILER_CODE_BUFFER_UNBOX_TRAMPOLINE, m));
g_assert ((code - start) <= 20);
/*g_print ("unbox trampoline at %d for %s:%s\n", this_pos, m->klass->name, m->name);
g_print ("unbox code is at %p for method at %p\n", start, addr);*/
mono_tramp_info_register (mono_tramp_info_create (NULL, start, code - start, NULL, NULL), NULL);
return start;
}
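/*
 * Behaviourally, the four instructions emitted above are equivalent to this
 * C sketch (illustrative only; the real stub is the MIPS sequence itself):
 *
 *   ret stub (MonoObject *this_obj, ...) {
 *       return ((fn)addr) ((char *)this_obj + MONO_ABI_SIZEOF (MonoObject), ...);
 *   }
 *
 * i.e. the stub skips the object header so the callee receives a pointer to
 * the raw value-type payload as its 'this'.
 */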
void
mono_arch_patch_callsite (guint8 *method_start, guint8 *orig_code, guint8 *addr)
{
guint32 *code = (guint32*)orig_code;
/* Locate the address of the method-specific trampoline.
The call using the vtable slot that took the processing flow to
'arch_create_jit_trampoline' looks something like one of these:
jal XXXXYYYY
nop
lui t9, XXXX
addiu t9, YYYY
jalr t9
nop
On entry, 'code' points just after one of the above sequences.
*/
/* The jal case */
if ((code[-2] >> 26) == 0x03) {
//g_print ("direct patching\n");
mips_patch ((code-2), (gsize)addr);
return;
}
/* Look for the jalr */
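/* jalr is a SPECIAL-opcode instruction: top 6 opcode bits are 0, the function
 * field (low 6 bits) is 0x09 and the rt field is 0. The 0xfc1f003f mask keeps
 * exactly those three fields and ignores rs, rd and the hint bits, so any
 * "jalr rd, rs" encoding matches. */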
if ((code[-2] & 0xfc1f003f) == 0x00000009) {
/* The lui / addiu / jalr case */
if ((code [-4] >> 26) == 0x0f && (code [-3] >> 26) == 0x09
&& (code [-2] >> 26) == 0) {
mips_patch ((code-4), (gsize)addr);
return;
}
}
g_print("error: bad patch at 0x%08x\n", code);
g_assert_not_reached ();
}
void
mono_arch_patch_plt_entry (guint8 *code, gpointer *got, host_mgreg_t *regs, guint8 *addr)
{
g_assert_not_reached ();
}
/* Stack size for trampoline function
* MIPS_MINIMAL_STACK_SIZE + 16 (args + alignment to mips_magic_trampoline)
* + MonoLMF + 14 fp regs + 13 gregs + alignment
* #define STACK (MIPS_MINIMAL_STACK_SIZE + 4 * sizeof (gulong) + sizeof (MonoLMF) + 14 * sizeof (double) + 13 * (sizeof (gulong)))
* STACK would be 444 for 32 bit darwin
*/
#define STACK (int)(ALIGN_TO(4*IREG_SIZE + 8 + sizeof(MonoLMF) + 32, 8))
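/* i.e. four register-sized outgoing-argument slots + 8 bytes of padding, the
 * MonoLMF itself and 32 bytes of scratch space, rounded up to an 8-byte
 * boundary. */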
/*
* Stack frame description when the generic trampoline is called.
* caller frame
* --------------------
* MonoLMF
* -------------------
* Saved FP registers 0-13
* -------------------
* Saved general registers 0-12
* -------------------
* param area for 3 args to mips_magic_trampoline
* -------------------
* linkage area
* -------------------
*/
guchar*
mono_arch_create_generic_trampoline (MonoTrampolineType tramp_type, MonoTrampInfo **info, gboolean aot)
{
const char *tramp_name;
guint8 *buf, *tramp, *code = NULL;
int i, lmf;
GSList *unwind_ops = NULL;
MonoJumpInfo *ji = NULL;
int max_code_len = 768;
/* AOT not supported on MIPS yet */
g_assert (!aot);
/* Now we'll create in 'buf' the MIPS trampoline code. This
is the trampoline code common to all methods */
code = buf = mono_global_codeman_reserve (max_code_len);
/* Allocate the stack frame, and save the return address */
mips_addiu (code, mips_sp, mips_sp, -STACK);
mips_sw (code, mips_ra, mips_sp, STACK + MIPS_RET_ADDR_OFFSET);
/* we build the MonoLMF structure on the stack - see mini-mips.h */
/* offset of MonoLMF from sp */
lmf = STACK - sizeof (MonoLMF) - 8;
for (i = 0; i < MONO_MAX_IREGS; i++)
MIPS_SW (code, i, mips_sp, lmf + G_STRUCT_OFFSET (MonoLMF, iregs[i]));
for (i = 0; i < MONO_MAX_FREGS; i++)
MIPS_SWC1 (code, i, mips_sp, lmf + G_STRUCT_OFFSET (MonoLMF, fregs[i]));
/* Set the magic number */
mips_load_const (code, mips_at, MIPS_LMF_MAGIC2);
mips_sw (code, mips_at, mips_sp, lmf + G_STRUCT_OFFSET(MonoLMF, magic));
/* Save caller sp */
mips_addiu (code, mips_at, mips_sp, STACK);
MIPS_SW (code, mips_at, mips_sp, lmf + G_STRUCT_OFFSET (MonoLMF, iregs[mips_sp]));
/* save method info (it was in t8) */
mips_sw (code, mips_t8, mips_sp, lmf + G_STRUCT_OFFSET(MonoLMF, method));
/* save the IP (caller ip) */
if (tramp_type == MONO_TRAMPOLINE_JUMP) {
mips_sw (code, mips_zero, mips_sp, lmf + G_STRUCT_OFFSET(MonoLMF, eip));
} else {
mips_sw (code, mips_ra, mips_sp, lmf + G_STRUCT_OFFSET(MonoLMF, eip));
}
/* jump to mono_get_lmf_addr here */
mips_load (code, mips_t9, mono_get_lmf_addr);
mips_jalr (code, mips_t9, mips_ra);
mips_nop (code);
/* v0 now points at the (MonoLMF **) for the current thread */
/* new_lmf->lmf_addr = lmf_addr -- useful when unwinding */
mips_sw (code, mips_v0, mips_sp, lmf + G_STRUCT_OFFSET(MonoLMF, lmf_addr));
/* new_lmf->previous_lmf = *lmf_addr */
mips_lw (code, mips_at, mips_v0, 0);
mips_sw (code, mips_at, mips_sp, lmf + G_STRUCT_OFFSET(MonoLMF, previous_lmf));
/* *(lmf_addr) = new_lmf */
mips_addiu (code, mips_at, mips_sp, lmf);
mips_sw (code, mips_at, mips_v0, 0);
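/* In C terms (illustrative), the three stores above perform:
 *   new_lmf->lmf_addr = lmf_addr;
 *   new_lmf->previous_lmf = *lmf_addr;
 *   *lmf_addr = new_lmf;
 * i.e. they push the new LMF onto the thread's LMF list. */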
/*
* Now we're ready to call mips_magic_trampoline ().
*/
/* Arg 1: pointer to registers so that the magic trampoline can
* access what we saved above
*/
mips_addiu (code, mips_a0, mips_sp, lmf + G_STRUCT_OFFSET (MonoLMF, iregs[0]));
/* Arg 2: code (next address to the instruction that called us) */
if (tramp_type == MONO_TRAMPOLINE_JUMP) {
mips_move (code, mips_a1, mips_zero);
} else {
mips_lw (code, mips_a1, mips_sp, STACK + MIPS_RET_ADDR_OFFSET);
}
/* Arg 3: MonoMethod *method. */
mips_lw (code, mips_a2, mips_sp, lmf + G_STRUCT_OFFSET (MonoLMF, method));
/* Arg 4: Trampoline */
mips_move (code, mips_a3, mips_zero);
/* Now go to the trampoline */
tramp = (guint8*)mono_get_trampoline_func (tramp_type);
mips_load (code, mips_t9, (guint32)tramp);
mips_jalr (code, mips_t9, mips_ra);
mips_nop (code);
/* Code address is now in v0, move it to at */
mips_move (code, mips_at, mips_v0);
/*
* Now unwind the MonoLMF
*/
/* t0 = current_lmf->previous_lmf */
mips_lw (code, mips_t0, mips_sp, lmf + G_STRUCT_OFFSET(MonoLMF, previous_lmf));
/* t1 = lmf_addr */
mips_lw (code, mips_t1, mips_sp, lmf + G_STRUCT_OFFSET(MonoLMF, lmf_addr));
/* (*lmf_addr) = previous_lmf */
mips_sw (code, mips_t0, mips_t1, 0);
/* Restore the callee-saved & argument registers */
for (i = 0; i < MONO_MAX_IREGS; i++) {
if ((MONO_ARCH_CALLEE_SAVED_REGS | MONO_ARCH_CALLEE_REGS | MIPS_ARG_REGS) & (1 << i))
MIPS_LW (code, i, mips_sp, lmf + G_STRUCT_OFFSET (MonoLMF, iregs[i]));
}
for (i = 0; i < MONO_MAX_FREGS; i++)
MIPS_LWC1 (code, i, mips_sp, lmf + G_STRUCT_OFFSET (MonoLMF, fregs[i]));
/* Non-standard function epilogue. Instead of doing a proper
* return, we just jump to the compiled code.
*/
/* Restore ra & stack pointer, and jump to the code */
if (tramp_type == MONO_TRAMPOLINE_RGCTX_LAZY_FETCH)
mips_move (code, mips_v0, mips_at);
mips_lw (code, mips_ra, mips_sp, STACK + MIPS_RET_ADDR_OFFSET);
mips_addiu (code, mips_sp, mips_sp, STACK);
if (MONO_TRAMPOLINE_TYPE_MUST_RETURN (tramp_type))
mips_jr (code, mips_ra);
else
mips_jr (code, mips_at);
mips_nop (code);
/* Flush instruction cache, since we've generated code */
mono_arch_flush_icache (buf, code - buf);
MONO_PROFILER_RAISE (jit_code_buffer, (buf, code - buf, MONO_PROFILER_CODE_BUFFER_HELPER, NULL));
/* Sanity check */
g_assert ((code - buf) <= max_code_len);
g_assert (info);
tramp_name = mono_get_generic_trampoline_name (tramp_type);
*info = mono_tramp_info_create (tramp_name, buf, code - buf, ji, unwind_ops);
return buf;
}
gpointer
mono_arch_create_specific_trampoline (gpointer arg1, MonoTrampolineType tramp_type, MonoMemoryManager *mem_manager, guint32 *code_len)
{
guint8 *code, *buf, *tramp;
tramp = mono_get_trampoline_code (tramp_type);
code = buf = mono_mem_manager_code_reserve (mem_manager, 32);
/* Prepare the jump to the generic trampoline code
* mono_arch_create_trampoline_code() knows we're putting this in t8
*/
mips_load (code, mips_t8, arg1);
/* Now jump to the generic trampoline code */
mips_load (code, mips_at, tramp);
mips_jr (code, mips_at);
mips_nop (code);
/* Flush instruction cache, since we've generated code */
mono_arch_flush_icache (buf, code - buf);
MONO_PROFILER_RAISE (jit_code_buffer, (buf, code - buf, MONO_PROFILER_CODE_BUFFER_SPECIFIC_TRAMPOLINE, mono_get_generic_trampoline_simple_name (tramp_type)));
g_assert ((code - buf) <= 32);
if (code_len)
*code_len = code - buf;
return buf;
}
gpointer
mono_arch_get_static_rgctx_trampoline (MonoMemoryManager *mem_manager, gpointer arg, gpointer addr)
{
guint8 *code, *start;
int buf_len;
buf_len = 24;
start = code = mono_mem_manager_code_reserve (mem_manager, buf_len);
mips_load (code, MONO_ARCH_RGCTX_REG, arg);
mips_load (code, mips_at, addr);
mips_jr (code, mips_at);
mips_nop (code);
g_assert ((code - start) <= buf_len);
mono_arch_flush_icache (start, code - start);
MONO_PROFILER_RAISE (jit_code_buffer, (start, code - start, MONO_PROFILER_CODE_BUFFER_GENERICS_TRAMPOLINE, NULL));
mono_tramp_info_register (mono_tramp_info_create (NULL, start, code - start, NULL, NULL), NULL);
return start;
}
gpointer
mono_arch_create_rgctx_lazy_fetch_trampoline (guint32 slot, MonoTrampInfo **info, gboolean aot)
{
guint8 *tramp;
guint8 *code, *buf;
int tramp_size;
guint32 code_len;
guint8 **rgctx_null_jumps;
int depth, index;
int i, njumps;
gboolean mrgctx;
MonoJumpInfo *ji = NULL;
GSList *unwind_ops = NULL;
mrgctx = MONO_RGCTX_SLOT_IS_MRGCTX (slot);
index = MONO_RGCTX_SLOT_INDEX (slot);
if (mrgctx)
index += MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT / sizeof (target_mgreg_t);
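/* rgctx slots live in a chain of growable arrays whose first element points
 * at the next array (see the offset-0 load emitted below), so a slot index
 * past size - 1 has to be resolved one level deeper. Compute which array
 * (depth) and which slot (index) this fetch targets. */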
for (depth = 0; ; ++depth) {
int size = mono_class_rgctx_get_array_size (depth, mrgctx);
if (index < size - 1)
break;
index -= size - 1;
}
tramp_size = 64 + 16 * depth;
code = buf = mono_global_codeman_reserve (tramp_size);
mono_add_unwind_op_def_cfa (unwind_ops, code, buf, mips_sp, 0);
rgctx_null_jumps = g_malloc (sizeof (guint8*) * (depth + 2));
njumps = 0;
/* The vtable/mrgctx is in a0 */
g_assert (MONO_ARCH_VTABLE_REG == mips_a0);
if (mrgctx) {
/* get mrgctx ptr */
mips_move (code, mips_a1, mips_a0);
} else {
/* load rgctx ptr from vtable */
g_assert (mips_is_imm16 (MONO_STRUCT_OFFSET (MonoVTable, runtime_generic_context)));
mips_lw (code, mips_a1, mips_a0, MONO_STRUCT_OFFSET (MonoVTable, runtime_generic_context));
/* is the rgctx ptr null? */
/* if yes, jump to actual trampoline */
rgctx_null_jumps [njumps ++] = code;
mips_beq (code, mips_a1, mips_zero, 0);
mips_nop (code);
}
for (i = 0; i < depth; ++i) {
/* load ptr to next array */
if (mrgctx && i == 0) {
g_assert (mips_is_imm16 (MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT));
mips_lw (code, mips_a1, mips_a1, MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT);
} else {
mips_lw (code, mips_a1, mips_a1, 0);
}
/* is the ptr null? */
/* if yes, jump to actual trampoline */
rgctx_null_jumps [njumps ++] = code;
mips_beq (code, mips_a1, mips_zero, 0);
mips_nop (code);
}
/* fetch slot */
g_assert (mips_is_imm16 (sizeof (target_mgreg_t) * (index + 1)));
mips_lw (code, mips_a1, mips_a1, sizeof (target_mgreg_t) * (index + 1));
/* is the slot null? */
/* if yes, jump to actual trampoline */
rgctx_null_jumps [njumps ++] = code;
mips_beq (code, mips_a1, mips_zero, 0);
mips_nop (code);
/* otherwise return, result is in R1 */
mips_move (code, mips_v0, mips_a1);
mips_jr (code, mips_ra);
mips_nop (code);
g_assert (njumps <= depth + 2);
for (i = 0; i < njumps; ++i)
mips_patch ((guint32*)rgctx_null_jumps [i], (guint32)code);
g_free (rgctx_null_jumps);
/* Slowpath */
/* The vtable/mrgctx is still in a0 */
if (aot) {
ji = mono_patch_info_list_prepend (ji, code - buf, MONO_PATCH_INFO_SPECIFIC_TRAMPOLINE_LAZY_FETCH_ADDR, GUINT_TO_POINTER (slot));
mips_load (code, mips_at, 0);
mips_jr (code, mips_at);
mips_nop (code);
} else {
MonoMemoryManager *mem_manager = mini_get_default_mem_manager ();
tramp = (guint8*)mono_arch_create_specific_trampoline (GUINT_TO_POINTER (slot), MONO_TRAMPOLINE_RGCTX_LAZY_FETCH, mem_manager, &code_len);
mips_load (code, mips_at, tramp);
mips_jr (code, mips_at);
mips_nop (code);
}
mono_arch_flush_icache (buf, code - buf);
MONO_PROFILER_RAISE (jit_code_buffer, (buf, code - buf, MONO_PROFILER_CODE_BUFFER_GENERICS_TRAMPOLINE, NULL));
g_assert (code - buf <= tramp_size);
if (info) {
char *name = mono_get_rgctx_fetch_trampoline_name (slot);
*info = mono_tramp_info_create (name, buf, code - buf, ji, unwind_ops);
g_free (name);
}
return buf;
}
| /**
* \file
* JIT trampoline code for MIPS
*
* Authors:
* Mark Mason ([email protected])
*
* Based on tramp-ppc.c by:
* Dietmar Maurer ([email protected])
* Paolo Molaro ([email protected])
* Carlos Valiente <[email protected]>
*
* (C) 2006 Broadcom
* (C) 2001 Ximian, Inc.
*/
#include <config.h>
#include <glib.h>
#include <mono/metadata/abi-details.h>
#include <mono/metadata/marshal.h>
#include <mono/metadata/tabledefs.h>
#include <mono/arch/mips/mips-codegen.h>
#include "mini.h"
#include "mini-mips.h"
#include "mini-runtime.h"
#include "mono/utils/mono-tls-inline.h"
/*
* get_unbox_trampoline:
* @m: method pointer
* @addr: pointer to native code for @m
*
* when value type methods are called through the vtable we need to unbox the
* this argument. This method returns a pointer to a trampoline which does
* unboxing before calling the method
*/
gpointer
mono_arch_get_unbox_trampoline (MonoMethod *m, gpointer addr)
{
guint8 *code, *start;
MonoMemoryManager *mem_manager = m_method_get_mem_manager (m);
start = code = mono_mem_manager_code_reserve (mem_manager, 20);
mips_load (code, mips_t9, addr);
/* The this pointer is kept in a0 */
mips_addiu (code, mips_a0, mips_a0, MONO_ABI_SIZEOF (MonoObject));
mips_jr (code, mips_t9);
mips_nop (code);
mono_arch_flush_icache (start, code - start);
MONO_PROFILER_RAISE (jit_code_buffer, (start, code - start, MONO_PROFILER_CODE_BUFFER_UNBOX_TRAMPOLINE, m));
g_assert ((code - start) <= 20);
/*g_print ("unbox trampoline at %d for %s:%s\n", this_pos, m->klass->name, m->name);
g_print ("unbox code is at %p for method at %p\n", start, addr);*/
mono_tramp_info_register (mono_tramp_info_create (NULL, start, code - start, NULL, NULL), NULL);
return start;
}
void
mono_arch_patch_callsite (guint8 *method_start, guint8 *orig_code, guint8 *addr)
{
guint32 *code = (guint32*)orig_code;
/* Locate the address of the method-specific trampoline.
The call using the vtable slot that took the processing flow to
'arch_create_jit_trampoline' looks something like one of these:
jal XXXXYYYY
nop
lui t9, XXXX
addiu t9, YYYY
jalr t9
nop
On entry, 'code' points just after one of the above sequences.
*/
/* The jal case */
if ((code[-2] >> 26) == 0x03) {
//g_print ("direct patching\n");
mips_patch ((code-2), (gsize)addr);
return;
}
/* Look for the jalr */
if ((code[-2] & 0xfc1f003f) == 0x00000009) {
/* The lui / addiu / jalr case */
if ((code [-4] >> 26) == 0x0f && (code [-3] >> 26) == 0x09
&& (code [-2] >> 26) == 0) {
mips_patch ((code-4), (gsize)addr);
return;
}
}
g_print("error: bad patch at 0x%08x\n", code);
g_assert_not_reached ();
}
void
mono_arch_patch_plt_entry (guint8 *code, gpointer *got, host_mgreg_t *regs, guint8 *addr)
{
g_assert_not_reached ();
}
/* Stack size for trampoline function
* MIPS_MINIMAL_STACK_SIZE + 16 (args + alignment to mips_magic_trampoline)
* + MonoLMF + 14 fp regs + 13 gregs + alignment
* #define STACK (MIPS_MINIMAL_STACK_SIZE + 4 * sizeof (gulong) + sizeof (MonoLMF) + 14 * sizeof (double) + 13 * (sizeof (gulong)))
* STACK would be 444 for 32 bit darwin
*/
#define STACK (int)(ALIGN_TO(4*IREG_SIZE + 8 + sizeof(MonoLMF) + 32, 8))
/*
* Stack frame description when the generic trampoline is called.
* caller frame
* --------------------
* MonoLMF
* -------------------
* Saved FP registers 0-13
* -------------------
* Saved general registers 0-12
* -------------------
* param area for 3 args to mips_magic_trampoline
* -------------------
* linkage area
* -------------------
*/
guchar*
mono_arch_create_generic_trampoline (MonoTrampolineType tramp_type, MonoTrampInfo **info, gboolean aot)
{
const char *tramp_name;
guint8 *buf, *tramp, *code = NULL;
int i, lmf;
GSList *unwind_ops = NULL;
MonoJumpInfo *ji = NULL;
int max_code_len = 768;
/* AOT not supported on MIPS yet */
g_assert (!aot);
/* Now we'll create in 'buf' the MIPS trampoline code. This
is the trampoline code common to all methods */
code = buf = mono_global_codeman_reserve (max_code_len);
/* Allocate the stack frame, and save the return address */
mips_addiu (code, mips_sp, mips_sp, -STACK);
mips_sw (code, mips_ra, mips_sp, STACK + MIPS_RET_ADDR_OFFSET);
/* we build the MonoLMF structure on the stack - see mini-mips.h */
/* offset of MonoLMF from sp */
lmf = STACK - sizeof (MonoLMF) - 8;
for (i = 0; i < MONO_MAX_IREGS; i++)
MIPS_SW (code, i, mips_sp, lmf + G_STRUCT_OFFSET (MonoLMF, iregs[i]));
for (i = 0; i < MONO_MAX_FREGS; i++)
MIPS_SWC1 (code, i, mips_sp, lmf + G_STRUCT_OFFSET (MonoLMF, fregs[i]));
/* Set the magic number */
mips_load_const (code, mips_at, MIPS_LMF_MAGIC2);
mips_sw (code, mips_at, mips_sp, lmf + G_STRUCT_OFFSET(MonoLMF, magic));
/* Save caller sp */
mips_addiu (code, mips_at, mips_sp, STACK);
MIPS_SW (code, mips_at, mips_sp, lmf + G_STRUCT_OFFSET (MonoLMF, iregs[mips_sp]));
/* save method info (it was in t8) */
mips_sw (code, mips_t8, mips_sp, lmf + G_STRUCT_OFFSET(MonoLMF, method));
/* save the IP (caller ip) */
if (tramp_type == MONO_TRAMPOLINE_JUMP) {
mips_sw (code, mips_zero, mips_sp, lmf + G_STRUCT_OFFSET(MonoLMF, eip));
} else {
mips_sw (code, mips_ra, mips_sp, lmf + G_STRUCT_OFFSET(MonoLMF, eip));
}
/* jump to mono_get_lmf_addr here */
mips_load (code, mips_t9, mono_get_lmf_addr);
mips_jalr (code, mips_t9, mips_ra);
mips_nop (code);
/* v0 now points at the (MonoLMF **) for the current thread */
/* new_lmf->lmf_addr = lmf_addr -- useful when unwinding */
mips_sw (code, mips_v0, mips_sp, lmf + G_STRUCT_OFFSET(MonoLMF, lmf_addr));
/* new_lmf->previous_lmf = *lmf_addr */
mips_lw (code, mips_at, mips_v0, 0);
mips_sw (code, mips_at, mips_sp, lmf + G_STRUCT_OFFSET(MonoLMF, previous_lmf));
/* *(lmf_addr) = new_lmf */
mips_addiu (code, mips_at, mips_sp, lmf);
mips_sw (code, mips_at, mips_v0, 0);
/*
* Now we're ready to call mips_magic_trampoline ().
*/
/* Arg 1: pointer to registers so that the magic trampoline can
* access what we saved above
*/
mips_addiu (code, mips_a0, mips_sp, lmf + G_STRUCT_OFFSET (MonoLMF, iregs[0]));
/* Arg 2: code (next address to the instruction that called us) */
if (tramp_type == MONO_TRAMPOLINE_JUMP) {
mips_move (code, mips_a1, mips_zero);
} else {
mips_lw (code, mips_a1, mips_sp, STACK + MIPS_RET_ADDR_OFFSET);
}
/* Arg 3: MonoMethod *method. */
mips_lw (code, mips_a2, mips_sp, lmf + G_STRUCT_OFFSET (MonoLMF, method));
/* Arg 4: Trampoline */
mips_move (code, mips_a3, mips_zero);
/* Now go to the trampoline */
tramp = (guint8*)mono_get_trampoline_func (tramp_type);
mips_load (code, mips_t9, (guint32)tramp);
mips_jalr (code, mips_t9, mips_ra);
mips_nop (code);
/* Code address is now in v0, move it to at */
mips_move (code, mips_at, mips_v0);
/*
* Now unwind the MonoLMF
*/
/* t0 = current_lmf->previous_lmf */
mips_lw (code, mips_t0, mips_sp, lmf + G_STRUCT_OFFSET(MonoLMF, previous_lmf));
/* t1 = lmf_addr */
mips_lw (code, mips_t1, mips_sp, lmf + G_STRUCT_OFFSET(MonoLMF, lmf_addr));
/* (*lmf_addr) = previous_lmf */
mips_sw (code, mips_t0, mips_t1, 0);
/* Restore the callee-saved & argument registers */
for (i = 0; i < MONO_MAX_IREGS; i++) {
if ((MONO_ARCH_CALLEE_SAVED_REGS | MONO_ARCH_CALLEE_REGS | MIPS_ARG_REGS) & (1 << i))
MIPS_LW (code, i, mips_sp, lmf + G_STRUCT_OFFSET (MonoLMF, iregs[i]));
}
for (i = 0; i < MONO_MAX_FREGS; i++)
MIPS_LWC1 (code, i, mips_sp, lmf + G_STRUCT_OFFSET (MonoLMF, fregs[i]));
/* Non-standard function epilogue. Instead of doing a proper
* return, we just jump to the compiled code.
*/
/* Restore ra & stack pointer, and jump to the code */
if (tramp_type == MONO_TRAMPOLINE_RGCTX_LAZY_FETCH)
mips_move (code, mips_v0, mips_at);
mips_lw (code, mips_ra, mips_sp, STACK + MIPS_RET_ADDR_OFFSET);
mips_addiu (code, mips_sp, mips_sp, STACK);
if (MONO_TRAMPOLINE_TYPE_MUST_RETURN (tramp_type))
mips_jr (code, mips_ra);
else
mips_jr (code, mips_at);
mips_nop (code);
/* Flush instruction cache, since we've generated code */
mono_arch_flush_icache (buf, code - buf);
MONO_PROFILER_RAISE (jit_code_buffer, (buf, code - buf, MONO_PROFILER_CODE_BUFFER_HELPER, NULL));
/* Sanity check */
g_assert ((code - buf) <= max_code_len);
g_assert (info);
tramp_name = mono_get_generic_trampoline_name (tramp_type);
*info = mono_tramp_info_create (tramp_name, buf, code - buf, ji, unwind_ops);
return buf;
}
gpointer
mono_arch_create_specific_trampoline (gpointer arg1, MonoTrampolineType tramp_type, MonoMemoryManager *mem_manager, guint32 *code_len)
{
guint8 *code, *buf, *tramp;
tramp = mono_get_trampoline_code (tramp_type);
code = buf = mono_mem_manager_code_reserve (mem_manager, 32);
/* Prepare the jump to the generic trampoline code
* mono_arch_create_trampoline_code() knows we're putting this in t8
*/
mips_load (code, mips_t8, arg1);
/* Now jump to the generic trampoline code */
mips_load (code, mips_at, tramp);
mips_jr (code, mips_at);
mips_nop (code);
/* Flush instruction cache, since we've generated code */
mono_arch_flush_icache (buf, code - buf);
MONO_PROFILER_RAISE (jit_code_buffer, (buf, code - buf, MONO_PROFILER_CODE_BUFFER_SPECIFIC_TRAMPOLINE, mono_get_generic_trampoline_simple_name (tramp_type)));
g_assert ((code - buf) <= 32);
if (code_len)
*code_len = code - buf;
return buf;
}
gpointer
mono_arch_get_static_rgctx_trampoline (MonoMemoryManager *mem_manager, gpointer arg, gpointer addr)
{
guint8 *code, *start;
int buf_len;
buf_len = 24;
start = code = mono_mem_manager_code_reserve (mem_manager, buf_len);
mips_load (code, MONO_ARCH_RGCTX_REG, arg);
mips_load (code, mips_at, addr);
mips_jr (code, mips_at);
mips_nop (code);
g_assert ((code - start) <= buf_len);
mono_arch_flush_icache (start, code - start);
MONO_PROFILER_RAISE (jit_code_buffer, (start, code - start, MONO_PROFILER_CODE_BUFFER_GENERICS_TRAMPOLINE, NULL));
mono_tramp_info_register (mono_tramp_info_create (NULL, start, code - start, NULL, NULL), NULL);
return start;
}
gpointer
mono_arch_create_rgctx_lazy_fetch_trampoline (guint32 slot, MonoTrampInfo **info, gboolean aot)
{
guint8 *tramp;
guint8 *code, *buf;
int tramp_size;
guint32 code_len;
guint8 **rgctx_null_jumps;
int depth, index;
int i, njumps;
gboolean mrgctx;
MonoJumpInfo *ji = NULL;
GSList *unwind_ops = NULL;
mrgctx = MONO_RGCTX_SLOT_IS_MRGCTX (slot);
index = MONO_RGCTX_SLOT_INDEX (slot);
if (mrgctx)
index += MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT / sizeof (target_mgreg_t);
for (depth = 0; ; ++depth) {
int size = mono_class_rgctx_get_array_size (depth, mrgctx);
if (index < size - 1)
break;
index -= size - 1;
}
tramp_size = 64 + 16 * depth;
code = buf = mono_global_codeman_reserve (tramp_size);
mono_add_unwind_op_def_cfa (unwind_ops, code, buf, mips_sp, 0);
rgctx_null_jumps = g_malloc (sizeof (guint8*) * (depth + 2));
njumps = 0;
/* The vtable/mrgctx is in a0 */
g_assert (MONO_ARCH_VTABLE_REG == mips_a0);
if (mrgctx) {
/* get mrgctx ptr */
mips_move (code, mips_a1, mips_a0);
} else {
/* load rgctx ptr from vtable */
g_assert (mips_is_imm16 (MONO_STRUCT_OFFSET (MonoVTable, runtime_generic_context)));
mips_lw (code, mips_a1, mips_a0, MONO_STRUCT_OFFSET (MonoVTable, runtime_generic_context));
/* is the rgctx ptr null? */
/* if yes, jump to actual trampoline */
rgctx_null_jumps [njumps ++] = code;
mips_beq (code, mips_a1, mips_zero, 0);
mips_nop (code);
}
for (i = 0; i < depth; ++i) {
/* load ptr to next array */
if (mrgctx && i == 0) {
g_assert (mips_is_imm16 (MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT));
mips_lw (code, mips_a1, mips_a1, MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT);
} else {
mips_lw (code, mips_a1, mips_a1, 0);
}
/* is the ptr null? */
/* if yes, jump to actual trampoline */
rgctx_null_jumps [njumps ++] = code;
mips_beq (code, mips_a1, mips_zero, 0);
mips_nop (code);
}
/* fetch slot */
g_assert (mips_is_imm16 (sizeof (target_mgreg_t) * (index + 1)));
mips_lw (code, mips_a1, mips_a1, sizeof (target_mgreg_t) * (index + 1));
/* is the slot null? */
/* if yes, jump to actual trampoline */
rgctx_null_jumps [njumps ++] = code;
mips_beq (code, mips_a1, mips_zero, 0);
mips_nop (code);
/* otherwise return, result is in R1 */
mips_move (code, mips_v0, mips_a1);
mips_jr (code, mips_ra);
mips_nop (code);
g_assert (njumps <= depth + 2);
for (i = 0; i < njumps; ++i)
mips_patch ((guint32*)rgctx_null_jumps [i], (guint32)code);
g_free (rgctx_null_jumps);
/* Slowpath */
/* The vtable/mrgctx is still in a0 */
if (aot) {
ji = mono_patch_info_list_prepend (ji, code - buf, MONO_PATCH_INFO_SPECIFIC_TRAMPOLINE_LAZY_FETCH_ADDR, GUINT_TO_POINTER (slot));
mips_load (code, mips_at, 0);
mips_jr (code, mips_at);
mips_nop (code);
} else {
MonoMemoryManager *mem_manager = mini_get_default_mem_manager ();
tramp = (guint8*)mono_arch_create_specific_trampoline (GUINT_TO_POINTER (slot), MONO_TRAMPOLINE_RGCTX_LAZY_FETCH, mem_manager, &code_len);
mips_load (code, mips_at, tramp);
mips_jr (code, mips_at);
mips_nop (code);
}
mono_arch_flush_icache (buf, code - buf);
MONO_PROFILER_RAISE (jit_code_buffer, (buf, code - buf, MONO_PROFILER_CODE_BUFFER_GENERICS_TRAMPOLINE, NULL));
g_assert (code - buf <= tramp_size);
if (info) {
char *name = mono_get_rgctx_fetch_trampoline_name (slot);
*info = mono_tramp_info_create (name, buf, code - buf, ji, unwind_ops);
g_free (name);
}
return buf;
}
| -1 |
dotnet/runtime | 66,006 | Revert "[mono][jit] Remove support for -O=-float32, i.e. treating r4 values a…" | Reverts this as x86 still uses it.
| vargaz | 2022-03-01T15:15:53Z | 2022-03-01T20:09:47Z | af71404d9a3b38e63408af47cc847ac1a1fcf4e1 | 2dd232a53c38ac874b15fe504df275b660988294 | Revert "[mono][jit] Remove support for -O=-float32, i.e. treating r4 values a…". Reverts this as x86 still uses it.
| ./src/coreclr/vm/encee.h | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//
// EnCee.h
//
//
// Defines the core VM data structures and methods that support EditAndContinue
//
// ======================================================================================
#ifndef EnC_H
#define EnC_H
#include "ceeload.h"
#include "field.h"
#include "class.h"
#ifdef EnC_SUPPORTED
class FieldDesc;
struct EnCAddedField;
struct EnCAddedStaticField;
class EnCFieldDesc;
class EnCEEClassData;
typedef DPTR(EnCAddedField) PTR_EnCAddedField;
typedef DPTR(EnCAddedStaticField) PTR_EnCAddedStaticField;
typedef DPTR(EnCFieldDesc) PTR_EnCFieldDesc;
typedef DPTR(EnCEEClassData) PTR_EnCEEClassData;
//---------------------------------------------------------------------------------------
//
// EnCFieldDesc - A field descriptor for fields added by EnC
//
// Notes: We need to track some additional data for added fields, since they can't
// simply be glued onto existing object instances like any other field.
//
// For each field added, there is a single instance of this object tied to the type where
// the field was added.
//
class EnCFieldDesc : public FieldDesc
{
public:
// Initialize just the bare minimum necessary now.
// We'll do a proper FieldDesc initialization later when Fixup is called.
void Init( mdFieldDef token, BOOL fIsStatic);
// Compute the address of this field for a specific object
void *GetAddress( void *o);
// Returns true if Fixup still needs to be called
BOOL NeedsFixup()
{
LIMITED_METHOD_DAC_CONTRACT;
return m_bNeedsFixup;
}
// Used to properly configure the FieldDesc after it has been added
// This may do things like load classes (which can trigger a GC), and so can only be
// done after the process has resumed execution.
VOID Fixup(mdFieldDef token)
{
WRAPPER_NO_CONTRACT;
EEClass::FixupFieldDescForEnC(GetEnclosingMethodTable(), this, token);
m_bNeedsFixup = FALSE;
}
// Gets a pointer to the field's contents (assuming this is a static field) if it's
// available or NULL otherwise
EnCAddedStaticField *GetStaticFieldData();
// Gets a pointer to the field's contents (assuming this is a static field) if it's
// available or allocates space for it and returns the address to the allocated field
// Returns a valid address or throws OOM
EnCAddedStaticField * GetOrAllocateStaticFieldData();
private:
// True if Fixup() has been called on this instance
BOOL m_bNeedsFixup;
// For static fields, pointer to where the field value is held
PTR_EnCAddedStaticField m_pStaticFieldData;
};
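// The two-phase pattern in practice (illustrative sketch): Init() runs while
// the debugger holds the process stopped, and the expensive part is deferred:
//
//   if (pFD->NeedsFixup())
//       pFD->Fixup(token); // may load types / trigger a GC, so only after resume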
// EnCAddedFieldElement
// A node in the linked list representing fields added to a class with EnC
typedef DPTR(struct EnCAddedFieldElement) PTR_EnCAddedFieldElement;
struct EnCAddedFieldElement
{
// Pointer to the next element in the list
PTR_EnCAddedFieldElement m_next;
// Details about this field
EnCFieldDesc m_fieldDesc;
// Initialize this entry.
// Basically just sets a couple fields to default values.
// We'll have to go back later and call Fixup on the fieldDesc.
void Init(mdFieldDef token, BOOL fIsStatic)
{
WRAPPER_NO_CONTRACT;
m_next = NULL;
m_fieldDesc.Init(token, fIsStatic);
}
};
//---------------------------------------------------------------------------------------
//
// EnCEEClassData - EnC specific information about this class
//
class EnCEEClassData
{
public:
#ifndef DACCESS_COMPILE
// Initialize all the members
// pClass - the EEClass we're tracking EnC data for
void Init(MethodTable * pMT)
{
LIMITED_METHOD_CONTRACT;
m_pMT = pMT;
m_dwNumAddedInstanceFields = 0;
m_dwNumAddedStaticFields = 0;
m_pAddedInstanceFields = NULL;
m_pAddedStaticFields = NULL;
}
#endif
// Adds the provided new field to the appropriate linked list and updates the appropriate count
void AddField(EnCAddedFieldElement *pAddedField);
// Get the number of instance fields that have been added to this class.
// Since we can only add private fields, these fields can't be seen from any other class but this one.
int GetAddedInstanceFields()
{
SUPPORTS_DAC;
return m_dwNumAddedInstanceFields;
}
// Get the number of static fields that have been added to this class.
int GetAddedStaticFields()
{
SUPPORTS_DAC;
return m_dwNumAddedStaticFields;
}
// Get the methodtable that this EnC data refers to
MethodTable * GetMethodTable()
{
LIMITED_METHOD_DAC_CONTRACT;
return m_pMT;
}
#ifdef DACCESS_COMPILE
void EnumMemoryRegions(CLRDataEnumMemoryFlags flags);
#endif
private:
friend class EEClass;
friend class EncApproxFieldDescIterator;
// The class that this EnC data refers to
PTR_MethodTable m_pMT;
// The number of instance fields that have been added to this class
int m_dwNumAddedInstanceFields;
// The number of static fields that have been added to this class
int m_dwNumAddedStaticFields;
// Linked list of EnCFieldDescs for all the added instance fields
PTR_EnCAddedFieldElement m_pAddedInstanceFields;
// Linked list of EnCFieldDescs for all the added static fields
PTR_EnCAddedFieldElement m_pAddedStaticFields;
};
//---------------------------------------------------------------------------------------
//
// EditAndContinueModule - specialization of the Module class which adds EnC support
//
// Assumptions:
//
// Notes:
//
class EditAndContinueModule : public Module
{
VPTR_VTABLE_CLASS(EditAndContinueModule, Module)
// keep track of the number of changes - this is used to apply a version number
// to an updated function. The version number for a function is the overall edit count,
// ie the number of times ApplyChanges has been called, not the number of times that
// function itself has been edited.
int m_applyChangesCount;
// Holds a table of EnCEEClassData objects for classes in this module that have been modified
CUnorderedArray<EnCEEClassData*, 5> m_ClassList;
#ifndef DACCESS_COMPILE
// Return the minimum permissible address for new IL to be stored at
// This can't be less than the current load address because then we'd
// have negative RVAs.
BYTE *GetEnCBase() { return (BYTE *) GetPEAssembly()->GetManagedFileContents(); }
#endif // DACCESS_COMPILE
private:
// Constructor is invoked only by Module::Create
friend Module *Module::Create(Assembly *pAssembly, mdToken moduleRef, PEAssembly *pPEAssembly, AllocMemTracker *pamTracker);
EditAndContinueModule(Assembly *pAssembly, mdToken moduleRef, PEAssembly *pPEAssembly);
protected:
#ifndef DACCESS_COMPILE
// Initialize the module
virtual void Initialize(AllocMemTracker *pamTracker, LPCWSTR szName = NULL);
#endif
public:
#ifndef DACCESS_COMPILE
// Destruct the module when it's finished being unloaded
// Note that due to the loader's allocation mechanism, C++ constructors and destructors
// wouldn't be called.
virtual void Destruct();
#endif
virtual BOOL IsEditAndContinueCapable() const { return TRUE; }
// Apply an EnC edit
HRESULT ApplyEditAndContinue(DWORD cbMetadata,
BYTE *pMetadata,
DWORD cbIL,
BYTE *pIL);
// Called when a method has been modified (new IL)
HRESULT UpdateMethod(MethodDesc *pMethod);
// Called when a new method has been added to the module's metadata
HRESULT AddMethod(mdMethodDef token);
// Called when a new field has been added to the module's metadata
HRESULT AddField(mdFieldDef token);
// JIT the new version of a function for EnC
PCODE JitUpdatedFunction(MethodDesc *pMD, T_CONTEXT *pContext);
// Remap execution to the latest version of an edited method
HRESULT ResumeInUpdatedFunction(MethodDesc *pMD,
void *oldDebuggerFuncHandle,
SIZE_T newILOffset,
T_CONTEXT *pContext);
// Modify the thread context for EnC remap and resume execution
void FixContextAndResume(MethodDesc *pMD,
void *oldDebuggerFuncHandle,
T_CONTEXT *pContext,
EECodeInfo *pOldCodeInfo,
EECodeInfo *pNewCodeInfo);
// Get a pointer to the value of a field added by EnC or return NULL if it doesn't exist
PTR_CBYTE ResolveField(OBJECTREF thisPointer,
EnCFieldDesc *pFD);
// Get a pointer to the value of a field added by EnC. Allocates if it doesn't exist, so we'll
// return a valid address or throw OOM
PTR_CBYTE ResolveOrAllocateField(OBJECTREF thisPointer,
EnCFieldDesc * pFD);
// Get class-specific EnC data for a class in this module
// Note: For DAC build, getOnly must be TRUE
PTR_EnCEEClassData GetEnCEEClassData(MethodTable * pMT, BOOL getOnly = FALSE);
// Get the number of times edits have been applied to this module
int GetApplyChangesCount()
{
return m_applyChangesCount;
}
#ifdef DACCESS_COMPILE
virtual void EnumMemoryRegions(CLRDataEnumMemoryFlags flags,
bool enumThis);
#endif
};
// Information about an instance field value added by EnC
// When an instance field is added to an object, we will lazily create an EnCAddedField
// for EACH instance of that object, but there will be a single EnCFieldDesc.
//
// Note that if we were concerned about the overhead when there are lots of instances of
// an object, we could slim this down to just the m_FieldData field by storing a pointer
// to a growable array of these in the EnCSyncBlockInfo, instead of using a linked list, and
// have the EnCFieldDesc specify a field index number.
//
struct EnCAddedField
{
// This field data hangs off the SyncBlock in a linked list.
// This is the pointer to the next field in the list.
PTR_EnCAddedField m_pNext;
// Pointer to the fieldDesc describing which field this refers to
PTR_EnCFieldDesc m_pFieldDesc;
// A dependent handle whose primary object points to the object instance which has been modified,
// and whose secondary object points to an EnC helper object containing a reference to the field value.
OBJECTHANDLE m_FieldData;
// Allocates a new EnCAddedField and hooks it up to the object
static EnCAddedField *Allocate(OBJECTREF thisPointer, EnCFieldDesc *pFD);
};
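// Resolving an added instance field therefore means walking the per-object
// list hanging off the SyncBlock; a minimal sketch (roughly what
// EnCSyncBlockInfo::ResolveField has to do, names illustrative only):
//
//   EnCAddedField *pEntry = pList; // head from the object's EnCSyncBlockInfo
//   while (pEntry != NULL && pEntry->m_pFieldDesc != pFD)
//       pEntry = pEntry->m_pNext;
//   // if pEntry != NULL, the value is reached through pEntry->m_FieldData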
// Information about a static field value added by EnC
// We can't change the MethodTable, so these are hung off the FieldDesc
// Note that the actual size of this type is variable.
struct EnCAddedStaticField
{
// Pointer back to the fieldDesc describing which field this refers to
// This isn't strictly necessary since our callers always know it, but the overhead
// is minimal (per type, not per instance) and this is cleaner and permits an extra sanity check.
PTR_EnCFieldDesc m_pFieldDesc;
// For primitive types, this is the beginning of the actual value.
// For reference types and user-defined value types, it's the beginning of a pointer
// to the object.
// Note that this is intentionally the last field of this structure as it is variably-sized.
// NOTE: It looks like we did the same thing for instance fields in EnCAddedField but then simplified
// it by always storing just an OBJREF which may point to a boxed value type. I suggest we do the
// same here unless we can demonstrate that the extra indirection makes a noticable perf difference
// in scenarios which are important for EnC.
BYTE m_FieldData;
// Get a pointer to the contents of this field
PTR_CBYTE GetFieldData();
// Allocate a new instance appropriate for the specified field
static EnCAddedStaticField *Allocate(EnCFieldDesc *pFD);
};
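// Since m_FieldData is the variably-sized tail, Allocate must reserve the
// header and the field payload in one block; a minimal sketch (assuming a
// plain heap allocation, which may differ from the real allocator):
//
//   size_t cb = offsetof(EnCAddedStaticField, m_FieldData) + fieldSize;
//   EnCAddedStaticField *pEntry = (EnCAddedStaticField *)calloc(1, cb);
//   pEntry->m_pFieldDesc = pFD;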
// EnCSyncBlockInfo lives off an object's SyncBlock and contains a lazily-created linked
// list of the values of all the fields added to the object by EnC
//
// Note that much of the logic here would probably belong better in EnCAddedField since it is
// specific to the implementation there. Perhaps this should ideally just be a container
// that holds a bunch of EnCAddedFields and can iterate over them and map from EnCFieldDesc
// to them.
class EnCSyncBlockInfo
{
public:
// Initialize the list
EnCSyncBlockInfo() :
m_pList(PTR_NULL)
{
}
// Get a pointer to the data in a specific field on this object or return NULL if it
// doesn't exist
PTR_CBYTE ResolveField(OBJECTREF thisPointer,
EnCFieldDesc * pFieldDesc);
// Get a pointer to the data in a specific field on this object. We'll allocate if it doesn't already
// exist, so we'll only fail on OOM
PTR_CBYTE ResolveOrAllocateField(OBJECTREF thisPointer, EnCFieldDesc *pFD);
// Free the data used by this field value. Called after the object instance the
// fields belong to is collected.
void Cleanup();
private:
// Gets the address of an EnC field accounting for its type: valuetype, class or primitive
PTR_CBYTE GetEnCFieldAddrFromHelperFieldDesc(FieldDesc * pHelperFieldDesc,
OBJECTREF pHelper,
EnCFieldDesc * pFD);
// Pointer to the head of the list
PTR_EnCAddedField m_pList;
};
// The DPTR is actually defined in syncblk.h to make it visible to SyncBlock
// typedef DPTR(EnCSyncBlockInfo) PTR_EnCSyncBlockInfo;
#endif // !EnC_SUPPORTED
//---------------------------------------------------------------------------------------
//
// EncApproxFieldDescIterator - Iterates through all fields of a class including ones
// added by EnC
//
// Notes:
// This is just like ApproxFieldDescIterator, but it also includes EnC fields if
// EnC is supported.
// This does not include inherited fields.
// The order the fields returned here is unspecified.
//
// We don't bother maintaining an accurate total and remaining field count like
// ApproxFieldDescIterator because none of our clients need it. But it would
// be easy to add this using the data from m_encClassData.
//
class EncApproxFieldDescIterator
{
public:
#ifdef EnC_SUPPORTED
// Create and initialize the iterator
EncApproxFieldDescIterator(MethodTable *pMT, int iteratorType, BOOL fixupEnC);
// Get the next fieldDesc (either EnC or non-EnC)
PTR_FieldDesc Next();
int Count();
#else
// Non-EnC version - simple wrapper
EncApproxFieldDescIterator(MethodTable *pMT, int iteratorType, BOOL fixupEnC) :
m_nonEnCIter( pMT, iteratorType ) {}
PTR_FieldDesc Next() { WRAPPER_NO_CONTRACT; return m_nonEnCIter.Next(); }
int Count() { WRAPPER_NO_CONTRACT; return m_nonEnCIter.Count(); }
#endif // EnC_SUPPORTED
int GetIteratorType()
{
LIMITED_METHOD_CONTRACT;
SUPPORTS_DAC;
return m_nonEnCIter.GetIteratorType();
}
private:
// The iterator for the non-EnC fields.
// We delegate to this for all non-EnC-specific stuff
ApproxFieldDescIterator m_nonEnCIter;
#ifdef EnC_SUPPORTED
// Return the next available EnC FieldDesc or NULL when done
PTR_EnCFieldDesc NextEnC();
// True if our client wants us to fixup any EnC fieldDescs before handing them back
BOOL m_fixupEnC;
// A count of how many EnC fields have been returned so far
int m_encFieldsReturned;
// The current pointer into one of the EnC field lists when enumerating EnC fields
PTR_EnCAddedFieldElement m_pCurrListElem;
// EnC specific data for the class of interest.
// NULL if EnC is disabled or this class doesn't have any EnC data
PTR_EnCEEClassData m_encClassData;
#endif
};
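// Typical iteration pattern (illustrative; assumes the ALL_FIELDS constant
// from ApproxFieldDescIterator and requests fixup so EnC descs are usable):
//
//   EncApproxFieldDescIterator fieldIter(pMT, ApproxFieldDescIterator::ALL_FIELDS, TRUE);
//   PTR_FieldDesc pFD;
//   while ((pFD = fieldIter.Next()) != NULL)
//   {
//       // pFD may be a regular FieldDesc or an EnC-added EnCFieldDesc
//   }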
#endif // #ifndef EnC_H
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//
// EnCee.h
//
//
// Defines the core VM data structures and methods that support EditAndContinue
//
// ======================================================================================
#ifndef EnC_H
#define EnC_H
#include "ceeload.h"
#include "field.h"
#include "class.h"
#ifdef EnC_SUPPORTED
class FieldDesc;
struct EnCAddedField;
struct EnCAddedStaticField;
class EnCFieldDesc;
class EnCEEClassData;
typedef DPTR(EnCAddedField) PTR_EnCAddedField;
typedef DPTR(EnCAddedStaticField) PTR_EnCAddedStaticField;
typedef DPTR(EnCFieldDesc) PTR_EnCFieldDesc;
typedef DPTR(EnCEEClassData) PTR_EnCEEClassData;
//---------------------------------------------------------------------------------------
//
// EnCFieldDesc - A field descriptor for fields added by EnC
//
// Notes: We need to track some additional data for added fields, since they can't
// simply be glued onto existing object instances like any other field.
//
// For each field added, there is a single instance of this object tied to the type where
// the field was added.
//
class EnCFieldDesc : public FieldDesc
{
public:
// Initialize just the bare minimum necessary now.
// We'll do a proper FieldDesc initialization later when Fixup is called.
void Init( mdFieldDef token, BOOL fIsStatic);
// Compute the address of this field for a specific object
void *GetAddress( void *o);
// Returns true if Fixup still needs to be called
BOOL NeedsFixup()
{
LIMITED_METHOD_DAC_CONTRACT;
return m_bNeedsFixup;
}
// Used to properly configure the FieldDesc after it has been added
// This may do things like load classes (which can trigger a GC), and so can only be
// done after the process has resumed execution.
VOID Fixup(mdFieldDef token)
{
WRAPPER_NO_CONTRACT;
EEClass::FixupFieldDescForEnC(GetEnclosingMethodTable(), this, token);
m_bNeedsFixup = FALSE;
}
// Gets a pointer to the field's contents (assuming this is a static field) if it's
// available or NULL otherwise
EnCAddedStaticField *GetStaticFieldData();
// Gets a pointer to the field's contents (assuming this is a static field) if it's
// available or allocates space for it and returns the address to the allocated field
// Returns a valid address or throws OOM
EnCAddedStaticField * GetOrAllocateStaticFieldData();
private:
// True if Fixup() has been called on this instance
BOOL m_bNeedsFixup;
// For static fields, pointer to where the field value is held
PTR_EnCAddedStaticField m_pStaticFieldData;
};
// EnCAddedFieldElement
// A node in the linked list representing fields added to a class with EnC
typedef DPTR(struct EnCAddedFieldElement) PTR_EnCAddedFieldElement;
struct EnCAddedFieldElement
{
// Pointer to the next element in the list
PTR_EnCAddedFieldElement m_next;
// Details about this field
EnCFieldDesc m_fieldDesc;
// Initialize this entry.
// Basically just sets a couple fields to default values.
// We'll have to go back later and call Fixup on the fieldDesc.
void Init(mdFieldDef token, BOOL fIsStatic)
{
WRAPPER_NO_CONTRACT;
m_next = NULL;
m_fieldDesc.Init(token, fIsStatic);
}
};
//---------------------------------------------------------------------------------------
//
// EnCEEClassData - EnC specific information about this class
//
class EnCEEClassData
{
public:
#ifndef DACCESS_COMPILE
// Initialize all the members
// pClass - the EEClass we're tracking EnC data for
void Init(MethodTable * pMT)
{
LIMITED_METHOD_CONTRACT;
m_pMT = pMT;
m_dwNumAddedInstanceFields = 0;
m_dwNumAddedStaticFields = 0;
m_pAddedInstanceFields = NULL;
m_pAddedStaticFields = NULL;
}
#endif
// Adds the provided new field to the appropriate linked list and updates the appropriate count
void AddField(EnCAddedFieldElement *pAddedField);
// Get the number of instance fields that have been added to this class.
// Since we can only add private fields, these fields can't be seen from any other class but this one.
int GetAddedInstanceFields()
{
SUPPORTS_DAC;
return m_dwNumAddedInstanceFields;
}
// Get the number of static fields that have been added to this class.
int GetAddedStaticFields()
{
SUPPORTS_DAC;
return m_dwNumAddedStaticFields;
}
// Get the methodtable that this EnC data refers to
MethodTable * GetMethodTable()
{
LIMITED_METHOD_DAC_CONTRACT;
return m_pMT;
}
#ifdef DACCESS_COMPILE
void EnumMemoryRegions(CLRDataEnumMemoryFlags flags);
#endif
private:
friend class EEClass;
friend class EncApproxFieldDescIterator;
// The class that this EnC data refers to
PTR_MethodTable m_pMT;
// The number of instance fields that have been added to this class
int m_dwNumAddedInstanceFields;
// The number of static fields that have been added to this class
int m_dwNumAddedStaticFields;
// Linked list of EnCFieldDescs for all the added instance fields
PTR_EnCAddedFieldElement m_pAddedInstanceFields;
// Linked list of EnCFieldDescs for all the added static fields
PTR_EnCAddedFieldElement m_pAddedStaticFields;
};
//---------------------------------------------------------------------------------------
//
// EditAndContinueModule - specialization of the Module class which adds EnC support
//
// Assumptions:
//
// Notes:
//
class EditAndContinueModule : public Module
{
VPTR_VTABLE_CLASS(EditAndContinueModule, Module)
// keep track of the number of changes - this is used to apply a version number
// to an updated function. The version number for a function is the overall edit count,
// ie the number of times ApplyChanges has been called, not the number of times that
// function itself has been edited.
int m_applyChangesCount;
// Holds a table of EnCEEClassData objects for classes in this module that have been modified
CUnorderedArray<EnCEEClassData*, 5> m_ClassList;
#ifndef DACCESS_COMPILE
// Return the minimum permissible address for new IL to be stored at
// This can't be less than the current load address because then we'd
// have negative RVAs.
BYTE *GetEnCBase() { return (BYTE *) GetPEAssembly()->GetManagedFileContents(); }
#endif // DACCESS_COMPILE
private:
// Constructor is invoked only by Module::Create
friend Module *Module::Create(Assembly *pAssembly, mdToken moduleRef, PEAssembly *pPEAssembly, AllocMemTracker *pamTracker);
EditAndContinueModule(Assembly *pAssembly, mdToken moduleRef, PEAssembly *pPEAssembly);
protected:
#ifndef DACCESS_COMPILE
// Initialize the module
virtual void Initialize(AllocMemTracker *pamTracker, LPCWSTR szName = NULL);
#endif
public:
#ifndef DACCESS_COMPILE
// Destruct the module when it's finished being unloaded
    // Note that due to the loader's allocation mechanism, C++ constructors and destructors
// wouldn't be called.
virtual void Destruct();
#endif
virtual BOOL IsEditAndContinueCapable() const { return TRUE; }
// Apply an EnC edit
HRESULT ApplyEditAndContinue(DWORD cbMetadata,
BYTE *pMetadata,
DWORD cbIL,
BYTE *pIL);
// Called when a method has been modified (new IL)
HRESULT UpdateMethod(MethodDesc *pMethod);
// Called when a new method has been added to the module's metadata
HRESULT AddMethod(mdMethodDef token);
// Called when a new field has been added to the module's metadata
HRESULT AddField(mdFieldDef token);
// JIT the new version of a function for EnC
PCODE JitUpdatedFunction(MethodDesc *pMD, T_CONTEXT *pContext);
// Remap execution to the latest version of an edited method
HRESULT ResumeInUpdatedFunction(MethodDesc *pMD,
void *oldDebuggerFuncHandle,
SIZE_T newILOffset,
T_CONTEXT *pContext);
// Modify the thread context for EnC remap and resume execution
void FixContextAndResume(MethodDesc *pMD,
void *oldDebuggerFuncHandle,
T_CONTEXT *pContext,
EECodeInfo *pOldCodeInfo,
EECodeInfo *pNewCodeInfo);
// Get a pointer to the value of a field added by EnC or return NULL if it doesn't exist
PTR_CBYTE ResolveField(OBJECTREF thisPointer,
EnCFieldDesc *pFD);
// Get a pointer to the value of a field added by EnC. Allocates if it doesn't exist, so we'll
// return a valid address or throw OOM
PTR_CBYTE ResolveOrAllocateField(OBJECTREF thisPointer,
EnCFieldDesc * pFD);
// Get class-specific EnC data for a class in this module
// Note: For DAC build, getOnly must be TRUE
PTR_EnCEEClassData GetEnCEEClassData(MethodTable * pMT, BOOL getOnly = FALSE);
// Get the number of times edits have been applied to this module
int GetApplyChangesCount()
{
return m_applyChangesCount;
}
#ifdef DACCESS_COMPILE
virtual void EnumMemoryRegions(CLRDataEnumMemoryFlags flags,
bool enumThis);
#endif
};
// Information about an instance field value added by EnC
// When an instance field is added to an object, we will lazily create an EnCAddedField
// for EACH instance of that object, but there will be a single EnCFieldDesc.
//
// Note that if we were concerned about the overhead when there are lots of instances of
// an object, we could slim this down to just the m_FieldData field by storing a pointer
// to a growable array of these in the EnCSyncBlockInfo, instead of using a linked list, and
// have the EnCFieldDesc specify a field index number.
//
struct EnCAddedField
{
// This field data hangs off the SyncBlock in a linked list.
// This is the pointer to the next field in the list.
PTR_EnCAddedField m_pNext;
// Pointer to the fieldDesc describing which field this refers to
PTR_EnCFieldDesc m_pFieldDesc;
// A dependent handle whose primary object points to the object instance which has been modified,
// and whose secondary object points to an EnC helper object containing a reference to the field value.
OBJECTHANDLE m_FieldData;
    // Allocate a new EnCAddedField and hook it up to the object
static EnCAddedField *Allocate(OBJECTREF thisPointer, EnCFieldDesc *pFD);
};
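// Conceptual sketch (editor's addition, hedged): a lookup walks the per-object
// list and matches on the shared EnCFieldDesc; the real implementation lives
// in EnCSyncBlockInfo::ResolveField below.
//
//   for (EnCAddedField *p = pListHead; p != NULL; p = p->m_pNext)
//   {
//       if (p->m_pFieldDesc == pFD)
//           return p;   // m_FieldData's secondary object holds the value
//   }
//   return NULL;        // field never materialized for this instance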
// Information about a static field value added by EnC
// We can't change the MethodTable, so these are hung off the FieldDesc
// Note that the actual size of this type is variable.
struct EnCAddedStaticField
{
// Pointer back to the fieldDesc describing which field this refers to
// This isn't strictly necessary since our callers always know it, but the overhead
    // is minimal (per type, not per instance) and this is cleaner and permits an extra sanity check.
PTR_EnCFieldDesc m_pFieldDesc;
// For primitive types, this is the beginning of the actual value.
// For reference types and user-defined value types, it's the beginning of a pointer
// to the object.
// Note that this is intentionally the last field of this structure as it is variably-sized.
// NOTE: It looks like we did the same thing for instance fields in EnCAddedField but then simplified
// it by always storing just an OBJREF which may point to a boxed value type. I suggest we do the
    // same here unless we can demonstrate that the extra indirection makes a noticeable perf difference
// in scenarios which are important for EnC.
BYTE m_FieldData;
// Get a pointer to the contents of this field
PTR_CBYTE GetFieldData();
// Allocate a new instance appropriate for the specified field
static EnCAddedStaticField *Allocate(EnCFieldDesc *pFD);
};
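// Editor's note (hedged sketch): since m_FieldData is the variably-sized tail,
// Allocate() presumably reserves the header plus the field's storage, roughly
//
//   size_t cbAlloc = offsetof(EnCAddedStaticField, m_FieldData) + cbFieldStorage;
//
// where cbFieldStorage is the primitive's size, or pointer size for reference
// and value types per the comment above. The exact computation may differ.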
// EnCSyncBlockInfo lives off an object's SyncBlock and contains a lazily-created linked
// list of the values of all the fields added to the object by EnC
//
// Note that much of the logic here would probably belong better in EnCAddedField since it is
// specific to the implementation there. Perhaps this should ideally just be a container
// that holds a bunch of EnCAddedFields and can iterate over them and map from EnCFieldDesc
// to them.
class EnCSyncBlockInfo
{
public:
// Initialize the list
EnCSyncBlockInfo() :
m_pList(PTR_NULL)
{
}
// Get a pointer to the data in a specific field on this object or return NULL if it
// doesn't exist
PTR_CBYTE ResolveField(OBJECTREF thisPointer,
EnCFieldDesc * pFieldDesc);
// Get a pointer to the data in a specific field on this object. We'll allocate if it doesn't already
// exist, so we'll only fail on OOM
PTR_CBYTE ResolveOrAllocateField(OBJECTREF thisPointer, EnCFieldDesc *pFD);
// Free the data used by this field value. Called after the object instance the
// fields belong to is collected.
void Cleanup();
private:
// Gets the address of an EnC field accounting for its type: valuetype, class or primitive
PTR_CBYTE GetEnCFieldAddrFromHelperFieldDesc(FieldDesc * pHelperFieldDesc,
OBJECTREF pHelper,
EnCFieldDesc * pFD);
// Pointer to the head of the list
PTR_EnCAddedField m_pList;
};
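// Usage sketch (editor's addition, hedged): the debugger path goes through
// EditAndContinueModule::ResolveField, which finds the object's SyncBlock and
// delegates here, roughly:
//
//   PTR_CBYTE pData = pEnCInfo->ResolveField(objRef, pFD);
//   if (pData == NULL) { /* field was never written for this instance */ }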
// The DPTR is actually defined in syncblk.h to make it visible to SyncBlock
// typedef DPTR(EnCSyncBlockInfo) PTR_EnCSyncBlockInfo;
#endif // !EnC_SUPPORTED
//---------------------------------------------------------------------------------------
//
// EncApproxFieldDescIterator - Iterates through all fields of a class including ones
// added by EnC
//
// Notes:
// This is just like ApproxFieldDescIterator, but it also includes EnC fields if
// EnC is supported.
// This does not include inherited fields.
// The order the fields returned here is unspecified.
//
// We don't bother maintaining an accurate total and remaining field count like
// ApproxFieldDescIterator because none of our clients need it. But it would
// be easy to add this using the data from m_encClassData
//
class EncApproxFieldDescIterator
{
public:
#ifdef EnC_SUPPORTED
// Create and initialize the iterator
EncApproxFieldDescIterator(MethodTable *pMT, int iteratorType, BOOL fixupEnC);
// Get the next fieldDesc (either EnC or non-EnC)
PTR_FieldDesc Next();
int Count();
#else
// Non-EnC version - simple wrapper
EncApproxFieldDescIterator(MethodTable *pMT, int iteratorType, BOOL fixupEnC) :
m_nonEnCIter( pMT, iteratorType ) {}
PTR_FieldDesc Next() { WRAPPER_NO_CONTRACT; return m_nonEnCIter.Next(); }
int Count() { WRAPPER_NO_CONTRACT; return m_nonEnCIter.Count(); }
#endif // EnC_SUPPORTED
int GetIteratorType()
{
LIMITED_METHOD_CONTRACT;
SUPPORTS_DAC;
return m_nonEnCIter.GetIteratorType();
}
private:
// The iterator for the non-EnC fields.
    // We delegate to this for all non-EnC-specific stuff
ApproxFieldDescIterator m_nonEnCIter;
#ifdef EnC_SUPPORTED
// Return the next available EnC FieldDesc or NULL when done
PTR_EnCFieldDesc NextEnC();
// True if our client wants us to fixup any EnC fieldDescs before handing them back
BOOL m_fixupEnC;
// A count of how many EnC fields have been returned so far
int m_encFieldsReturned;
// The current pointer into one of the EnC field lists when enumerating EnC fields
PTR_EnCAddedFieldElement m_pCurrListElem;
// EnC specific data for the class of interest.
// NULL if EnC is disabled or this class doesn't have any EnC data
PTR_EnCEEClassData m_encClassData;
#endif
};
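// Example (editor's addition, hedged): enumerating all fields of a type,
// including EnC-added ones. The iterator-type constant is assumed to come from
// ApproxFieldDescIterator (e.g. ALL_FIELDS).
//
//   EncApproxFieldDescIterator fieldIter(pMT, ApproxFieldDescIterator::ALL_FIELDS, FALSE);
//   PTR_FieldDesc pFD;
//   while ((pFD = fieldIter.Next()) != NULL)
//   {
//       if (pFD->IsEnCNew()) { /* added by an edit; lives off the SyncBlock */ }
//   }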
#endif // #ifndef EnC_H
| -1 |
dotnet/runtime | 66,006 | Revert "[mono][jit] Remove support for -O=-float32, i.e. treating r4 values a…" | Reverts this as x86 still uses it.
| vargaz | 2022-03-01T15:15:53Z | 2022-03-01T20:09:47Z | af71404d9a3b38e63408af47cc847ac1a1fcf4e1 | 2dd232a53c38ac874b15fe504df275b660988294 | Revert "[mono][jit] Remove support for -O=-float32, i.e. treating r4 values a…". Reverts this as x86 still uses it.
| ./src/native/libs/System.Native/pal_runtimeinformation.c | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
#include "pal_config.h"
#include "pal_runtimeinformation.h"
#include "pal_types.h"
#include <stdio.h>
#include <string.h>
#include <sys/utsname.h>
#if defined(TARGET_ANDROID)
#include <sys/system_properties.h>
#endif
const char* SystemNative_GetUnixName()
{
return PAL_UNIX_NAME;
}
char* SystemNative_GetUnixRelease()
{
#if defined(TARGET_ANDROID)
// get the Android API level
char sdk_ver_str[PROP_VALUE_MAX];
if (__system_property_get("ro.build.version.sdk", sdk_ver_str))
{
return strdup(sdk_ver_str);
}
else
{
return NULL;
}
#else
struct utsname _utsname;
return uname(&_utsname) != -1 ?
strdup(_utsname.release) :
NULL;
#endif
}
int32_t SystemNative_GetUnixVersion(char* version, int* capacity)
{
struct utsname _utsname;
if (uname(&_utsname) != -1)
{
int r = snprintf(version, (size_t)(*capacity), "%s %s %s", _utsname.sysname, _utsname.release, _utsname.version);
if (r > *capacity)
{
*capacity = r + 1;
return -1;
}
}
return 0;
}
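/* Editor's sketch of the intended call protocol (hedged; not in the original
   file): on -1 the caller retries with the size reported back via *capacity.
   int capacity = 64;
   char* buf = (char*)malloc(capacity);
   if (SystemNative_GetUnixVersion(buf, &capacity) == -1)
   {
       buf = (char*)realloc(buf, capacity); // capacity now holds the required size
       SystemNative_GetUnixVersion(buf, &capacity);
   }
*/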
/* Returns an int representing the OS Architecture. -1 if same as process architecture. */
int32_t SystemNative_GetOSArchitecture()
{
return -1;
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
#include "pal_config.h"
#include "pal_runtimeinformation.h"
#include "pal_types.h"
#include <stdio.h>
#include <string.h>
#include <sys/utsname.h>
#if defined(TARGET_ANDROID)
#include <sys/system_properties.h>
#endif
const char* SystemNative_GetUnixName()
{
return PAL_UNIX_NAME;
}
char* SystemNative_GetUnixRelease()
{
#if defined(TARGET_ANDROID)
// get the Android API level
char sdk_ver_str[PROP_VALUE_MAX];
if (__system_property_get("ro.build.version.sdk", sdk_ver_str))
{
return strdup(sdk_ver_str);
}
else
{
return NULL;
}
#else
struct utsname _utsname;
return uname(&_utsname) != -1 ?
strdup(_utsname.release) :
NULL;
#endif
}
int32_t SystemNative_GetUnixVersion(char* version, int* capacity)
{
struct utsname _utsname;
if (uname(&_utsname) != -1)
{
int r = snprintf(version, (size_t)(*capacity), "%s %s %s", _utsname.sysname, _utsname.release, _utsname.version);
if (r > *capacity)
{
*capacity = r + 1;
return -1;
}
}
return 0;
}
/* Returns an int representing the OS Architecture. -1 if same as process architecture. */
int32_t SystemNative_GetOSArchitecture()
{
return -1;
}
| -1 |
dotnet/runtime | 66,006 | Revert "[mono][jit] Remove support for -O=-float32, i.e. treating r4 values a…" | Reverts this as x86 still uses it.
| vargaz | 2022-03-01T15:15:53Z | 2022-03-01T20:09:47Z | af71404d9a3b38e63408af47cc847ac1a1fcf4e1 | 2dd232a53c38ac874b15fe504df275b660988294 | Revert "[mono][jit] Remove support for -O=-float32, i.e. treating r4 values a…". Reverts this as x86 still uses it.
| ./src/native/libs/System.Native/pal_datetime.c | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
#include "pal_config.h"
#include "pal_datetime.h"
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
#if defined(TARGET_ANDROID)
#include <sys/system_properties.h>
#endif
#include <time.h>
static const int64_t TICKS_PER_SECOND = 10000000; /* 10^7 */
#if HAVE_CLOCK_REALTIME
static const int64_t NANOSECONDS_PER_TICK = 100;
#else
static const int64_t TICKS_PER_MICROSECOND = 10; /* 1000 / 100 */
#endif
//
// SystemNative_GetSystemTimeAsTicks returns the system time as ticks (100 nanoseconds)
// since 00:00 01 January 1970 UTC (Unix epoch)
//
int64_t SystemNative_GetSystemTimeAsTicks()
{
#if HAVE_CLOCK_REALTIME
struct timespec time;
if (clock_gettime(CLOCK_REALTIME, &time) == 0)
{
return (int64_t)(time.tv_sec) * TICKS_PER_SECOND + (time.tv_nsec / NANOSECONDS_PER_TICK);
}
#else
struct timeval time;
if (gettimeofday(&time, NULL) == 0)
{
return (int64_t)(time.tv_sec) * TICKS_PER_SECOND + (time.tv_usec * TICKS_PER_MICROSECOND);
}
#endif
// in failure we return 00:00 01 January 1970 UTC (Unix epoch)
return 0;
}
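/* Editor's example (hedged): converting the returned ticks. 62135596800
   seconds separate 0001-01-01 from the Unix epoch, so:
   int64_t ticks = SystemNative_GetSystemTimeAsTicks();
   int64_t unixSeconds = ticks / TICKS_PER_SECOND;                 // seconds since 1970
   int64_t dotnetTicks = ticks + 62135596800LL * TICKS_PER_SECOND; // DateTime-style ticks
*/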
#if defined(TARGET_ANDROID)
char* SystemNative_GetDefaultTimeZone()
{
char defaulttimezone[PROP_VALUE_MAX];
if (__system_property_get("persist.sys.timezone", defaulttimezone))
{
return strdup(defaulttimezone);
}
else
{
return NULL;
}
}
#endif
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
#include "pal_config.h"
#include "pal_datetime.h"
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
#if defined(TARGET_ANDROID)
#include <sys/system_properties.h>
#endif
#include <time.h>
static const int64_t TICKS_PER_SECOND = 10000000; /* 10^7 */
#if HAVE_CLOCK_REALTIME
static const int64_t NANOSECONDS_PER_TICK = 100;
#else
static const int64_t TICKS_PER_MICROSECOND = 10; /* 1000 / 100 */
#endif
//
// SystemNative_GetSystemTimeAsTicks returns the system time as ticks (100 nanoseconds)
// since 00:00 01 January 1970 UTC (Unix epoch)
//
int64_t SystemNative_GetSystemTimeAsTicks()
{
#if HAVE_CLOCK_REALTIME
struct timespec time;
if (clock_gettime(CLOCK_REALTIME, &time) == 0)
{
return (int64_t)(time.tv_sec) * TICKS_PER_SECOND + (time.tv_nsec / NANOSECONDS_PER_TICK);
}
#else
struct timeval time;
if (gettimeofday(&time, NULL) == 0)
{
return (int64_t)(time.tv_sec) * TICKS_PER_SECOND + (time.tv_usec * TICKS_PER_MICROSECOND);
}
#endif
// in failure we return 00:00 01 January 1970 UTC (Unix epoch)
return 0;
}
#if defined(TARGET_ANDROID)
char* SystemNative_GetDefaultTimeZone()
{
char defaulttimezone[PROP_VALUE_MAX];
if (__system_property_get("persist.sys.timezone", defaulttimezone))
{
return strdup(defaulttimezone);
}
else
{
return NULL;
}
}
#endif
| -1 |
dotnet/runtime | 66,006 | Revert "[mono][jit] Remove support for -O=-float32, i.e. treating r4 values a…" | Reverts this as x86 still uses it.
| vargaz | 2022-03-01T15:15:53Z | 2022-03-01T20:09:47Z | af71404d9a3b38e63408af47cc847ac1a1fcf4e1 | 2dd232a53c38ac874b15fe504df275b660988294 | Revert "[mono][jit] Remove support for -O=-float32, i.e. treating r4 values a…". Reverts this as x86 still uses it.
| ./src/coreclr/vm/field.h | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//
// COM+ Data Field Abstraction
//
#ifndef _FIELD_H_
#define _FIELD_H_
#include "excep.h"
// Temporary values stored in FieldDesc m_dwOffset during loading
// The high 5 bits must be zero (because in field.h we steal them for other uses), so we must choose values > 0
#define FIELD_OFFSET_MAX ((1<<27)-1)
#define FIELD_OFFSET_UNPLACED FIELD_OFFSET_MAX
#define FIELD_OFFSET_UNPLACED_GC_PTR (FIELD_OFFSET_MAX-1)
#define FIELD_OFFSET_VALUE_CLASS (FIELD_OFFSET_MAX-2)
#define FIELD_OFFSET_NOT_REAL_FIELD (FIELD_OFFSET_MAX-3)
// Offset to indicate an EnC added field. They don't have offsets as aren't placed in the object.
#define FIELD_OFFSET_NEW_ENC (FIELD_OFFSET_MAX-4)
#define FIELD_OFFSET_BIG_RVA (FIELD_OFFSET_MAX-5)
#define FIELD_OFFSET_LAST_REAL_OFFSET (FIELD_OFFSET_MAX-6) // real fields have to be smaller than this
//
// This describes a field - one of this is allocated for every field, so don't make this structure any larger.
//
// @GENERICS:
// Field descriptors for fields in instantiated types may be shared between compatible instantiations
// Hence for reflection it's necessary to pair a field desc with the exact owning type handle
class FieldDesc
{
friend class MethodTableBuilder;
protected:
PTR_MethodTable m_pMTOfEnclosingClass; // This is used to hold the log2 of the field size temporarily during class loading. Yuck.
// See also: FieldDesc::InitializeFrom method
#if defined(DACCESS_COMPILE)
union { //create a union so I can get the correct offset for ClrDump.
unsigned m_dword1;
struct {
#endif
// Note that we may store other information in the high bits if available --
// see enum_packedMBLayout and m_requiresFullMbValue for details.
unsigned m_mb : 24;
// 8 bits...
unsigned m_isStatic : 1;
unsigned m_isThreadLocal : 1;
unsigned m_isRVA : 1;
unsigned m_prot : 3;
// Does this field's mb require all 24 bits
unsigned m_requiresFullMbValue : 1;
#if defined(DACCESS_COMPILE)
};
};
#endif
#if defined(DACCESS_COMPILE)
union { //create a union so I can get the correct offset for ClrDump
unsigned m_dword2;
struct {
#endif
// Note: this has been as low as 22 bits in the past & seemed to be OK.
// we can steal some more bits here if we need them.
unsigned m_dwOffset : 27;
unsigned m_type : 5;
#if defined(DACCESS_COMPILE)
};
};
#endif
#ifdef _DEBUG
LPUTF8 m_debugName;
#endif
public:
// Allocated by special heap means, don't construct me
FieldDesc() =delete;
#ifndef DACCESS_COMPILE
void InitializeFrom(const FieldDesc& sourceField, MethodTable *pMT)
{
m_pMTOfEnclosingClass = pMT;
m_mb = sourceField.m_mb;
m_isStatic = sourceField.m_isStatic;
m_isThreadLocal = sourceField.m_isThreadLocal;
m_isRVA = sourceField.m_isRVA;
m_prot = sourceField.m_prot;
m_requiresFullMbValue = sourceField.m_requiresFullMbValue;
m_dwOffset = sourceField.m_dwOffset;
m_type = sourceField.m_type;
#ifdef _DEBUG
m_debugName = sourceField.m_debugName;
#endif // _DEBUG
}
#endif // !DACCESS_COMPILE
#ifdef _DEBUG
inline LPUTF8 GetDebugName()
{
LIMITED_METHOD_CONTRACT;
return m_debugName;
}
#endif // _DEBUG
#ifndef DACCESS_COMPILE
// This should be called. It was added so that Reflection
    // can create FieldDescs for the static primitive fields that aren't
// stored with the EEClass.
void SetMethodTable(MethodTable* mt)
{
LIMITED_METHOD_CONTRACT;
m_pMTOfEnclosingClass = mt;
}
#endif
VOID Init(mdFieldDef mb,
CorElementType FieldType,
DWORD dwMemberAttrs,
BOOL fIsStatic,
BOOL fIsRVA,
BOOL fIsThreadLocal,
LPCSTR pszFieldName);
enum {
enum_packedMbLayout_MbMask = 0x01FFFF,
enum_packedMbLayout_NameHashMask = 0xFE0000
};
void SetMemberDef(mdFieldDef mb)
{
WRAPPER_NO_CONTRACT;
// Check if we have to avoid using the packed mb layout
if (RidFromToken(mb) > enum_packedMbLayout_MbMask)
{
m_requiresFullMbValue = 1;
}
// Set only the portion of m_mb we are using
if (!m_requiresFullMbValue)
{
m_mb &= ~enum_packedMbLayout_MbMask;
m_mb |= RidFromToken(mb);
}
else
{
m_mb = RidFromToken(mb);
}
}
mdFieldDef GetMemberDef() const
{
LIMITED_METHOD_DAC_CONTRACT;
// Check if this FieldDesc is using the packed mb layout
if (!m_requiresFullMbValue)
{
return TokenFromRid(m_mb & enum_packedMbLayout_MbMask, mdtFieldDef);
}
return TokenFromRid(m_mb, mdtFieldDef);
}
CorElementType GetFieldType()
{
LIMITED_METHOD_DAC_CONTRACT;
// Set in code:FieldDesc.Init which in turn is called from
// code:MethodTableBuilder.InitializeFieldDescs#InitCall which in turn calls
// code:MethodTableBuilder.InitializeFieldDescs#FieldDescTypeMorph
return (CorElementType) m_type;
}
DWORD GetFieldProtection()
{
LIMITED_METHOD_CONTRACT;
// Set in code:FieldDesc.Init which in turn is called from code:MethodTableBuilder::InitializeFieldDescs#InitCall
return m_prot;
}
    // Please only use this in a path that you have already guaranteed
// the assert is true
DWORD GetOffsetUnsafe()
{
LIMITED_METHOD_CONTRACT;
g_IBCLogger.LogFieldDescsAccess(this);
_ASSERTE(m_dwOffset <= FIELD_OFFSET_LAST_REAL_OFFSET);
return m_dwOffset;
}
DWORD GetOffset()
{
LIMITED_METHOD_DAC_CONTRACT;
g_IBCLogger.LogFieldDescsAccess(this);
return GetOffset_NoLogging();
}
// During class load m_pMTOfEnclosingClass has the field size in it, so it has to use this version of
// GetOffset during that time
DWORD GetOffset_NoLogging()
{
LIMITED_METHOD_DAC_CONTRACT;
// Note FieldDescs are no longer on "hot" paths so the optimized code here
// does not look necessary.
if (m_dwOffset != FIELD_OFFSET_BIG_RVA) {
// Assert that the big RVA case handling doesn't get out of sync
// with the normal RVA case.
#ifdef _DEBUG
// The OutOfLine_BigRVAOffset() can't be correctly evaluated during the time
// that we repurposed m_pMTOfEnclosingClass for holding the field size
// I don't see any good way to determine when this is so hurray for
// heuristics!
//
// As of 4/11/2012 I could repro this by turning on the COMPLUS log and
// the LOG() at line methodtablebuilder.cpp:7845
// MethodTableBuilder::PlaceRegularStaticFields() calls GetOffset_NoLogging()
if((DWORD)(DWORD_PTR&)m_pMTOfEnclosingClass > 16)
{
_ASSERTE(!this->IsRVA() || (m_dwOffset == OutOfLine_BigRVAOffset()));
}
#endif
return m_dwOffset;
}
return OutOfLine_BigRVAOffset();
}
DWORD OutOfLine_BigRVAOffset()
{
LIMITED_METHOD_DAC_CONTRACT;
DWORD rva;
// <NICE>I'm discarding a potential error here. According to the code in MDInternalRO.cpp,
// we won't get an error if we initially found the RVA. So I'm going to just
// assert it never happens.
//
// This is a small sin, but I don't see a good alternative. --cwb.</NICE>
HRESULT hr;
hr = GetMDImport()->GetFieldRVA(GetMemberDef(), &rva);
_ASSERTE(SUCCEEDED(hr));
return rva;
}
HRESULT SetOffset(DWORD dwOffset)
{
LIMITED_METHOD_CONTRACT;
//
// value class fields must be aligned to pointer-sized boundaries
//
//
// This is commented out because it isn't valid in all cases.
// This is still here because it is useful for finding alignment
// problems on IA64.
//
//_ASSERTE((dwOffset > FIELD_OFFSET_LAST_REAL_OFFSET) ||
// (ELEMENT_TYPE_VALUETYPE != GetFieldType()) ||
// (IS_ALIGNED(dwOffset, sizeof(void*))));
m_dwOffset = dwOffset;
return((dwOffset > FIELD_OFFSET_LAST_REAL_OFFSET) ? COR_E_TYPELOAD : S_OK);
}
// Okay, we've stolen too many bits from FieldDescs. In the RVA case, there's no
// reason to believe they will be limited to 22 bits. So use a sentinel for the
// huge cases, and recover them from metadata on-demand.
void SetOffsetRVA(DWORD dwOffset)
{
LIMITED_METHOD_CONTRACT;
m_dwOffset = (dwOffset > FIELD_OFFSET_LAST_REAL_OFFSET)
? FIELD_OFFSET_BIG_RVA
: dwOffset;
}
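    // Round-trip sketch (editor's addition, hedged): a huge RVA is stored as the
    // sentinel and recovered lazily from metadata.
    //
    //   pFD->SetOffsetRVA(0x0FFFFFFF);   // exceeds FIELD_OFFSET_LAST_REAL_OFFSET
    //   // m_dwOffset == FIELD_OFFSET_BIG_RVA
    //   pFD->GetOffset_NoLogging();      // falls back to OutOfLine_BigRVAOffset(),
    //                                    // which re-reads the RVA from metadata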
DWORD IsStatic() const
{
LIMITED_METHOD_DAC_CONTRACT;
return m_isStatic;
}
BOOL IsSpecialStatic()
{
LIMITED_METHOD_CONTRACT;
return m_isStatic && (m_isRVA || m_isThreadLocal
);
}
BOOL IsRVA() const // Has an explicit RVA associated with it
{
LIMITED_METHOD_DAC_CONTRACT;
return m_isRVA;
}
BOOL IsThreadStatic() const // Static relative to a thread
{
LIMITED_METHOD_DAC_CONTRACT;
return m_isThreadLocal;
}
// Indicate that this field was added by EnC
// Must only be called on instances of EnCFieldDesc
void SetEnCNew()
{
WRAPPER_NO_CONTRACT;
// EnC added fields don't live in the actual object, so don't have a real offset
SetOffset(FIELD_OFFSET_NEW_ENC);
}
// Was this field added by EnC?
// If this is true, then this object is an instance of EnCFieldDesc
BOOL IsEnCNew()
{
LIMITED_METHOD_DAC_CONTRACT;
// EnC added fields don't have a real offset
return m_dwOffset == FIELD_OFFSET_NEW_ENC;
}
BOOL IsByValue()
{
LIMITED_METHOD_DAC_CONTRACT;
return GetFieldType() == ELEMENT_TYPE_VALUETYPE;
}
BOOL IsPrimitive()
{
LIMITED_METHOD_DAC_CONTRACT;
return (CorIsPrimitiveType(GetFieldType()) != FALSE);
}
BOOL IsObjRef();
BOOL IsByRef();
UINT LoadSize();
// Return -1 if the type isn't loaded yet (i.e. if LookupFieldTypeHandle() would return null)
UINT GetSize();
// These routines encapsulate the operation of getting and setting
// fields.
void GetInstanceField(OBJECTREF o, VOID * pOutVal);
void SetInstanceField(OBJECTREF o, const VOID * pInVal);
void* GetInstanceAddress(OBJECTREF o);
// Get the address of a field within object 'o'
PTR_VOID GetAddress(PTR_VOID o);
PTR_VOID GetAddressNoThrowNoGC(PTR_VOID o);
void* GetAddressGuaranteedInHeap(void *o);
void* GetValuePtr(OBJECTREF o);
VOID SetValuePtr(OBJECTREF o, void* pValue);
DWORD GetValue32(OBJECTREF o);
VOID SetValue32(OBJECTREF o, DWORD dwValue);
OBJECTREF GetRefValue(OBJECTREF o);
VOID SetRefValue(OBJECTREF o, OBJECTREF orValue);
USHORT GetValue16(OBJECTREF o);
VOID SetValue16(OBJECTREF o, DWORD dwValue);
BYTE GetValue8(OBJECTREF o);
VOID SetValue8(OBJECTREF o, DWORD dwValue);
__int64 GetValue64(OBJECTREF o);
VOID SetValue64(OBJECTREF o, __int64 value);
PTR_MethodTable GetApproxEnclosingMethodTable_NoLogging()
{
LIMITED_METHOD_DAC_CONTRACT;
return m_pMTOfEnclosingClass;
}
PTR_MethodTable GetApproxEnclosingMethodTable()
{
LIMITED_METHOD_DAC_CONTRACT;
g_IBCLogger.LogFieldDescsAccess(this);
return GetApproxEnclosingMethodTable_NoLogging();
}
PTR_MethodTable GetEnclosingMethodTable()
{
LIMITED_METHOD_DAC_CONTRACT;
_ASSERTE(!IsSharedByGenericInstantiations());
return GetApproxEnclosingMethodTable();
}
// FieldDesc can be shared between generic instantiations. So List<String>._items
// is really the same as List<__Canon>._items. Hence, the FieldDesc itself
// cannot know the exact enclosing type. You need to provide the exact owner
// like List<String> or a subtype like MyInheritedList<String>.
MethodTable * GetExactDeclaringType(MethodTable * ownerOrSubType);
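    // Editor's example (hedged): with shared generics one FieldDesc serves many
    // instantiations, so the caller supplies the exact owner.
    //
    //   // pFD describes List<__Canon>._items
    //   MethodTable *pExact = pFD->GetExactDeclaringType(pListOfStringMT);
    //   // pExact is List<String>'s MethodTable (or the declaring base for a subtype)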
BOOL IsSharedByGenericInstantiations()
{
LIMITED_METHOD_DAC_CONTRACT;
return (!IsStatic()) && GetApproxEnclosingMethodTable()->IsSharedByGenericInstantiations();
}
BOOL IsFieldOfValueType()
{
WRAPPER_NO_CONTRACT;
return GetApproxEnclosingMethodTable()->IsValueType();
}
DWORD GetNumGenericClassArgs()
{
WRAPPER_NO_CONTRACT;
return GetApproxEnclosingMethodTable()->GetNumGenericArgs();
}
PTR_BYTE GetBaseInDomainLocalModule(DomainLocalModule * pLocalModule)
{
WRAPPER_NO_CONTRACT;
if (GetFieldType() == ELEMENT_TYPE_CLASS || GetFieldType() == ELEMENT_TYPE_VALUETYPE)
{
return pLocalModule->GetGCStaticsBasePointer(GetEnclosingMethodTable());
}
else
{
return pLocalModule->GetNonGCStaticsBasePointer(GetEnclosingMethodTable());
}
}
PTR_BYTE GetBase()
{
CONTRACTL
{
NOTHROW;
GC_NOTRIGGER;
}
CONTRACTL_END
MethodTable *pMT = GetEnclosingMethodTable();
return GetBaseInDomainLocalModule(pMT->GetDomainLocalModule());
}
// returns the address of the field
void* GetStaticAddress(void *base);
// In all cases except Value classes, the AddressHandle is
// simply the address of the static. For the case of value
// types, however, it is the address of OBJECTREF that holds
// the boxed value used to hold the value type. This is needed
// because the OBJECTREF moves, and the JIT needs to embed something
// in the code that does not move. Thus the jit has to
// dereference and unbox before the access.
PTR_VOID GetStaticAddressHandle(PTR_VOID base);
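    // Editor's sketch of the value-type indirection described above (hedged):
    //
    //   PTR_VOID handle = pFD->GetStaticAddressHandle(base);
    //   // value-type static: OBJECTREF ref = *(OBJECTREF *)handle;
    //   // then read the field data inside the boxed instance (dereference + unbox)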
#ifndef DACCESS_COMPILE
OBJECTREF GetStaticOBJECTREF()
{
WRAPPER_NO_CONTRACT;
return *(OBJECTREF *)GetCurrentStaticAddress();
}
VOID SetStaticOBJECTREF(OBJECTREF objRef);
void* GetStaticValuePtr()
{
WRAPPER_NO_CONTRACT;
return *(void**)GetCurrentStaticAddress();
}
VOID SetStaticValuePtr(void *value)
{
WRAPPER_NO_CONTRACT;
*(void**)GetCurrentStaticAddress() = value;
}
DWORD GetStaticValue32()
{
WRAPPER_NO_CONTRACT;
return *(DWORD*)GetCurrentStaticAddress();
}
VOID SetStaticValue32(DWORD dwValue)
{
WRAPPER_NO_CONTRACT;
*(DWORD*)GetCurrentStaticAddress() = dwValue;
}
USHORT GetStaticValue16()
{
WRAPPER_NO_CONTRACT;
return *(USHORT*)GetCurrentStaticAddress();
}
VOID SetStaticValue16(DWORD dwValue)
{
WRAPPER_NO_CONTRACT;
*(USHORT*)GetCurrentStaticAddress() = (USHORT)dwValue;
}
BYTE GetStaticValue8()
{
WRAPPER_NO_CONTRACT;
return *(BYTE*)GetCurrentStaticAddress();
}
VOID SetStaticValue8(DWORD dwValue)
{
WRAPPER_NO_CONTRACT;
*(BYTE*)GetCurrentStaticAddress() = (BYTE)dwValue;
}
__int64 GetStaticValue64()
{
WRAPPER_NO_CONTRACT;
return *(__int64*)GetCurrentStaticAddress();
}
VOID SetStaticValue64(__int64 qwValue)
{
WRAPPER_NO_CONTRACT;
*(__int64*)GetCurrentStaticAddress() = qwValue;
}
void* GetCurrentStaticAddress()
{
CONTRACTL
{
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
INJECT_FAULT(COMPlusThrowOM());
}
CONTRACTL_END
_ASSERTE(IsStatic());
if (IsThreadStatic())
{
return Thread::GetStaticFieldAddress(this);
}
else {
PTR_BYTE base = 0;
if (!IsRVA()) // for RVA the base is ignored
base = GetBase();
return GetStaticAddress((void *)dac_cast<TADDR>(base));
}
}
VOID CheckRunClassInitThrowing()
{
CONTRACTL
{
THROWS;
GC_TRIGGERS;
INJECT_FAULT(COMPlusThrowOM());
}
CONTRACTL_END;
GetEnclosingMethodTable()->CheckRunClassInitThrowing();
}
#endif //DACCESS_COMPILE
Module *GetModule()
{
LIMITED_METHOD_DAC_CONTRACT;
return GetApproxEnclosingMethodTable()->GetModule();
}
Module *GetLoaderModule()
{
WRAPPER_NO_CONTRACT;
// Field Desc's are currently always saved into the same module as their
// corresponding method table.
return GetApproxEnclosingMethodTable()->GetLoaderModule();
}
void GetSig(PCCOR_SIGNATURE *ppSig, DWORD *pcSig)
{
CONTRACTL
{
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
}
CONTRACTL_END
if (FAILED(GetMDImport()->GetSigOfFieldDef(GetMemberDef(), pcSig, ppSig)))
{ // Class loader already asked for signature, so this should always succeed (unless there's a
// bug or a new code path)
_ASSERTE(!"If this ever fires, then this method should return HRESULT");
*ppSig = NULL;
*pcSig = 0;
}
}
SigPointer GetSigPointer()
{
WRAPPER_NO_CONTRACT;
PCCOR_SIGNATURE pSig;
DWORD cSig;
GetSig(&pSig, &cSig);
return SigPointer(pSig, cSig);
}
// This is slow (uses MetaData), don't use it!
LPCUTF8 GetName()
{
CONTRACTL
{
THROWS;
GC_NOTRIGGER;
MODE_ANY;
}
CONTRACTL_END
LPCSTR szName;
IfFailThrow(GetMDImport()->GetNameOfFieldDef(GetMemberDef(), &szName));
_ASSERTE(szName != NULL);
return szName;
}
// This is slow (uses MetaData), don't use it!
__checkReturn
HRESULT GetName_NoThrow(LPCUTF8 *pszName)
{
CONTRACTL
{
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
}
CONTRACTL_END
return GetMDImport()->GetNameOfFieldDef(GetMemberDef(), pszName);
}
BOOL MightHaveName(ULONG nameHashValue);
// <TODO>@TODO: </TODO>This is slow, don't use it!
DWORD GetAttributes()
{
CONTRACTL
{
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
}
CONTRACTL_END
DWORD dwAttributes;
if (FAILED(GetMDImport()->GetFieldDefProps(GetMemberDef(), &dwAttributes)))
{ // Class loader already asked for attributes, so this should always succeed (unless there's a
// bug or a new code path)
_ASSERTE(!"If this ever fires, then this method should return HRESULT");
return 0;
}
return dwAttributes;
}
// Mini-Helpers
DWORD IsPublic()
{
WRAPPER_NO_CONTRACT;
return IsFdPublic(GetFieldProtection());
}
DWORD IsProtected()
{
WRAPPER_NO_CONTRACT;
return IsFdFamily(GetFieldProtection());
}
DWORD IsPrivate()
{
WRAPPER_NO_CONTRACT;
return IsFdPrivate(GetFieldProtection());
}
IMDInternalImport *GetMDImport()
{
LIMITED_METHOD_DAC_CONTRACT;
return GetModule()->GetMDImport();
}
#ifndef DACCESS_COMPILE
IMetaDataImport *GetRWImporter()
{
WRAPPER_NO_CONTRACT;
return GetModule()->GetRWImporter();
}
#endif // DACCESS_COMPILE
TypeHandle LookupFieldTypeHandle(ClassLoadLevel level = CLASS_LOADED, BOOL dropGenericArgumentLevel = FALSE);
TypeHandle LookupApproxFieldTypeHandle()
{
WRAPPER_NO_CONTRACT;
return LookupFieldTypeHandle(CLASS_LOAD_APPROXPARENTS, TRUE);
}
// Instance FieldDesc can be shared between generic instantiations. So List<String>._items
// is really the same as List<__Canon>._items. Hence, the FieldDesc itself
// cannot know the exact field type. This function returns the approximate field type.
    // For example, this will return "__Canon[]" for List<String>._items.
TypeHandle GetFieldTypeHandleThrowing(ClassLoadLevel level = CLASS_LOADED, BOOL dropGenericArgumentLevel = FALSE);
TypeHandle GetApproxFieldTypeHandleThrowing()
{
WRAPPER_NO_CONTRACT;
return GetFieldTypeHandleThrowing(CLASS_LOAD_APPROXPARENTS, TRUE);
}
// Given a type handle of an object and a method that comes from some
// superclass of the class of that object, find the instantiation of
// that superclass, i.e. the class instantiation which will be relevant
// to interpreting the signature of the method. The type handle of
// the object does not need to be given in all circumstances, in
// particular it is only needed for FieldDescs pFD that
// return true for pFD->GetApproxEnclosingMethodTable()->IsSharedByGenericInstantiations().
// In other cases it is allowed to be null and will be ignored.
//
// Will return NULL if the field is not in a generic class.
Instantiation GetExactClassInstantiation(TypeHandle possibleObjType);
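    // Editor's usage sketch (hedged, with a hypothetical pObj): the object's type
    // handle disambiguates the instantiation for fields on shared generic types.
    //
    //   Instantiation inst = pFD->GetExactClassInstantiation(TypeHandle(pObj->GetMethodTable()));
    //   // inst is NULL when the enclosing class is not generic, per the comment above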
// Instance FieldDesc can be shared between generic instantiations. So List<String>._items
// is really the same as List<__Canon>._items. Hence, the FieldDesc itself
// cannot know the exact field type. You need to specify the owner
// like List<String> in order to get the exact type which would be "String[]"
TypeHandle GetExactFieldType(TypeHandle owner);
#ifdef DACCESS_COMPILE
void EnumMemoryRegions(CLRDataEnumMemoryFlags flags)
{
SUPPORTS_DAC;
DAC_ENUM_DTHIS();
}
#endif
#ifndef DACCESS_COMPILE
REFLECTFIELDREF GetStubFieldInfo();
#endif
};
#endif // _FIELD_H_
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//
// COM+ Data Field Abstraction
//
#ifndef _FIELD_H_
#define _FIELD_H_
#include "excep.h"
// Temporary values stored in FieldDesc m_dwOffset during loading
// The high 5 bits must be zero (because in field.h we steal them for other uses), so we must choose values > 0
#define FIELD_OFFSET_MAX ((1<<27)-1)
#define FIELD_OFFSET_UNPLACED FIELD_OFFSET_MAX
#define FIELD_OFFSET_UNPLACED_GC_PTR (FIELD_OFFSET_MAX-1)
#define FIELD_OFFSET_VALUE_CLASS (FIELD_OFFSET_MAX-2)
#define FIELD_OFFSET_NOT_REAL_FIELD (FIELD_OFFSET_MAX-3)
// Offset to indicate an EnC added field. They don't have offsets as aren't placed in the object.
#define FIELD_OFFSET_NEW_ENC (FIELD_OFFSET_MAX-4)
#define FIELD_OFFSET_BIG_RVA (FIELD_OFFSET_MAX-5)
#define FIELD_OFFSET_LAST_REAL_OFFSET (FIELD_OFFSET_MAX-6) // real fields have to be smaller than this
//
// This describes a field - one of this is allocated for every field, so don't make this structure any larger.
//
// @GENERICS:
// Field descriptors for fields in instantiated types may be shared between compatible instantiations
// Hence for reflection it's necessary to pair a field desc with the exact owning type handle
class FieldDesc
{
friend class MethodTableBuilder;
protected:
PTR_MethodTable m_pMTOfEnclosingClass; // This is used to hold the log2 of the field size temporarily during class loading. Yuck.
// See also: FieldDesc::InitializeFrom method
#if defined(DACCESS_COMPILE)
union { //create a union so I can get the correct offset for ClrDump.
unsigned m_dword1;
struct {
#endif
// Note that we may store other information in the high bits if available --
// see enum_packedMBLayout and m_requiresFullMbValue for details.
unsigned m_mb : 24;
// 8 bits...
unsigned m_isStatic : 1;
unsigned m_isThreadLocal : 1;
unsigned m_isRVA : 1;
unsigned m_prot : 3;
// Does this field's mb require all 24 bits
unsigned m_requiresFullMbValue : 1;
#if defined(DACCESS_COMPILE)
};
};
#endif
#if defined(DACCESS_COMPILE)
union { //create a union so I can get the correct offset for ClrDump
unsigned m_dword2;
struct {
#endif
// Note: this has been as low as 22 bits in the past & seemed to be OK.
// we can steal some more bits here if we need them.
unsigned m_dwOffset : 27;
unsigned m_type : 5;
#if defined(DACCESS_COMPILE)
};
};
#endif
#ifdef _DEBUG
LPUTF8 m_debugName;
#endif
public:
// Allocated by special heap means, don't construct me
FieldDesc() =delete;
#ifndef DACCESS_COMPILE
void InitializeFrom(const FieldDesc& sourceField, MethodTable *pMT)
{
m_pMTOfEnclosingClass = pMT;
m_mb = sourceField.m_mb;
m_isStatic = sourceField.m_isStatic;
m_isThreadLocal = sourceField.m_isThreadLocal;
m_isRVA = sourceField.m_isRVA;
m_prot = sourceField.m_prot;
m_requiresFullMbValue = sourceField.m_requiresFullMbValue;
m_dwOffset = sourceField.m_dwOffset;
m_type = sourceField.m_type;
#ifdef _DEBUG
m_debugName = sourceField.m_debugName;
#endif // _DEBUG
}
#endif // !DACCESS_COMPILE
#ifdef _DEBUG
inline LPUTF8 GetDebugName()
{
LIMITED_METHOD_CONTRACT;
return m_debugName;
}
#endif // _DEBUG
#ifndef DACCESS_COMPILE
// This should be called. It was added so that Reflection
    // can create FieldDescs for the static primitive fields that aren't
// stored with the EEClass.
void SetMethodTable(MethodTable* mt)
{
LIMITED_METHOD_CONTRACT;
m_pMTOfEnclosingClass = mt;
}
#endif
VOID Init(mdFieldDef mb,
CorElementType FieldType,
DWORD dwMemberAttrs,
BOOL fIsStatic,
BOOL fIsRVA,
BOOL fIsThreadLocal,
LPCSTR pszFieldName);
enum {
enum_packedMbLayout_MbMask = 0x01FFFF,
enum_packedMbLayout_NameHashMask = 0xFE0000
};
void SetMemberDef(mdFieldDef mb)
{
WRAPPER_NO_CONTRACT;
// Check if we have to avoid using the packed mb layout
if (RidFromToken(mb) > enum_packedMbLayout_MbMask)
{
m_requiresFullMbValue = 1;
}
// Set only the portion of m_mb we are using
if (!m_requiresFullMbValue)
{
m_mb &= ~enum_packedMbLayout_MbMask;
m_mb |= RidFromToken(mb);
}
else
{
m_mb = RidFromToken(mb);
}
}
mdFieldDef GetMemberDef() const
{
LIMITED_METHOD_DAC_CONTRACT;
// Check if this FieldDesc is using the packed mb layout
if (!m_requiresFullMbValue)
{
return TokenFromRid(m_mb & enum_packedMbLayout_MbMask, mdtFieldDef);
}
return TokenFromRid(m_mb, mdtFieldDef);
}
CorElementType GetFieldType()
{
LIMITED_METHOD_DAC_CONTRACT;
// Set in code:FieldDesc.Init which in turn is called from
// code:MethodTableBuilder.InitializeFieldDescs#InitCall which in turn calls
// code:MethodTableBuilder.InitializeFieldDescs#FieldDescTypeMorph
return (CorElementType) m_type;
}
DWORD GetFieldProtection()
{
LIMITED_METHOD_CONTRACT;
// Set in code:FieldDesc.Init which in turn is called from code:MethodTableBuilder::InitializeFieldDescs#InitCall
return m_prot;
}
    // Please only use this in a path that you have already guaranteed
// the assert is true
DWORD GetOffsetUnsafe()
{
LIMITED_METHOD_CONTRACT;
g_IBCLogger.LogFieldDescsAccess(this);
_ASSERTE(m_dwOffset <= FIELD_OFFSET_LAST_REAL_OFFSET);
return m_dwOffset;
}
DWORD GetOffset()
{
LIMITED_METHOD_DAC_CONTRACT;
g_IBCLogger.LogFieldDescsAccess(this);
return GetOffset_NoLogging();
}
// During class load m_pMTOfEnclosingClass has the field size in it, so it has to use this version of
// GetOffset during that time
DWORD GetOffset_NoLogging()
{
LIMITED_METHOD_DAC_CONTRACT;
// Note FieldDescs are no longer on "hot" paths so the optimized code here
// does not look necessary.
if (m_dwOffset != FIELD_OFFSET_BIG_RVA) {
// Assert that the big RVA case handling doesn't get out of sync
// with the normal RVA case.
#ifdef _DEBUG
// The OutOfLine_BigRVAOffset() can't be correctly evaluated during the time
// that we repurposed m_pMTOfEnclosingClass for holding the field size
// I don't see any good way to determine when this is so hurray for
// heuristics!
//
// As of 4/11/2012 I could repro this by turning on the COMPLUS log and
// the LOG() at line methodtablebuilder.cpp:7845
// MethodTableBuilder::PlaceRegularStaticFields() calls GetOffset_NoLogging()
if((DWORD)(DWORD_PTR&)m_pMTOfEnclosingClass > 16)
{
_ASSERTE(!this->IsRVA() || (m_dwOffset == OutOfLine_BigRVAOffset()));
}
#endif
return m_dwOffset;
}
return OutOfLine_BigRVAOffset();
}
DWORD OutOfLine_BigRVAOffset()
{
LIMITED_METHOD_DAC_CONTRACT;
DWORD rva;
// <NICE>I'm discarding a potential error here. According to the code in MDInternalRO.cpp,
// we won't get an error if we initially found the RVA. So I'm going to just
// assert it never happens.
//
// This is a small sin, but I don't see a good alternative. --cwb.</NICE>
HRESULT hr;
hr = GetMDImport()->GetFieldRVA(GetMemberDef(), &rva);
_ASSERTE(SUCCEEDED(hr));
return rva;
}
HRESULT SetOffset(DWORD dwOffset)
{
LIMITED_METHOD_CONTRACT;
//
// value class fields must be aligned to pointer-sized boundaries
//
//
// This is commented out because it isn't valid in all cases.
// This is still here because it is useful for finding alignment
// problems on IA64.
//
//_ASSERTE((dwOffset > FIELD_OFFSET_LAST_REAL_OFFSET) ||
// (ELEMENT_TYPE_VALUETYPE != GetFieldType()) ||
// (IS_ALIGNED(dwOffset, sizeof(void*))));
m_dwOffset = dwOffset;
return((dwOffset > FIELD_OFFSET_LAST_REAL_OFFSET) ? COR_E_TYPELOAD : S_OK);
}
// Okay, we've stolen too many bits from FieldDescs. In the RVA case, there's no
// reason to believe they will be limited to 22 bits. So use a sentinel for the
// huge cases, and recover them from metadata on-demand.
void SetOffsetRVA(DWORD dwOffset)
{
LIMITED_METHOD_CONTRACT;
m_dwOffset = (dwOffset > FIELD_OFFSET_LAST_REAL_OFFSET)
? FIELD_OFFSET_BIG_RVA
: dwOffset;
}
DWORD IsStatic() const
{
LIMITED_METHOD_DAC_CONTRACT;
return m_isStatic;
}
BOOL IsSpecialStatic()
{
LIMITED_METHOD_CONTRACT;
return m_isStatic && (m_isRVA || m_isThreadLocal
);
}
BOOL IsRVA() const // Has an explicit RVA associated with it
{
LIMITED_METHOD_DAC_CONTRACT;
return m_isRVA;
}
BOOL IsThreadStatic() const // Static relative to a thread
{
LIMITED_METHOD_DAC_CONTRACT;
return m_isThreadLocal;
}
// Indicate that this field was added by EnC
// Must only be called on instances of EnCFieldDesc
void SetEnCNew()
{
WRAPPER_NO_CONTRACT;
// EnC added fields don't live in the actual object, so don't have a real offset
SetOffset(FIELD_OFFSET_NEW_ENC);
}
// Was this field added by EnC?
// If this is true, then this object is an instance of EnCFieldDesc
BOOL IsEnCNew()
{
LIMITED_METHOD_DAC_CONTRACT;
// EnC added fields don't have a real offset
return m_dwOffset == FIELD_OFFSET_NEW_ENC;
}
BOOL IsByValue()
{
LIMITED_METHOD_DAC_CONTRACT;
return GetFieldType() == ELEMENT_TYPE_VALUETYPE;
}
BOOL IsPrimitive()
{
LIMITED_METHOD_DAC_CONTRACT;
return (CorIsPrimitiveType(GetFieldType()) != FALSE);
}
BOOL IsObjRef();
BOOL IsByRef();
UINT LoadSize();
// Return -1 if the type isn't loaded yet (i.e. if LookupFieldTypeHandle() would return null)
UINT GetSize();
// These routines encapsulate the operation of getting and setting
// fields.
void GetInstanceField(OBJECTREF o, VOID * pOutVal);
void SetInstanceField(OBJECTREF o, const VOID * pInVal);
void* GetInstanceAddress(OBJECTREF o);
// Get the address of a field within object 'o'
PTR_VOID GetAddress(PTR_VOID o);
PTR_VOID GetAddressNoThrowNoGC(PTR_VOID o);
void* GetAddressGuaranteedInHeap(void *o);
void* GetValuePtr(OBJECTREF o);
VOID SetValuePtr(OBJECTREF o, void* pValue);
DWORD GetValue32(OBJECTREF o);
VOID SetValue32(OBJECTREF o, DWORD dwValue);
OBJECTREF GetRefValue(OBJECTREF o);
VOID SetRefValue(OBJECTREF o, OBJECTREF orValue);
USHORT GetValue16(OBJECTREF o);
VOID SetValue16(OBJECTREF o, DWORD dwValue);
BYTE GetValue8(OBJECTREF o);
VOID SetValue8(OBJECTREF o, DWORD dwValue);
__int64 GetValue64(OBJECTREF o);
VOID SetValue64(OBJECTREF o, __int64 value);
PTR_MethodTable GetApproxEnclosingMethodTable_NoLogging()
{
LIMITED_METHOD_DAC_CONTRACT;
return m_pMTOfEnclosingClass;
}
PTR_MethodTable GetApproxEnclosingMethodTable()
{
LIMITED_METHOD_DAC_CONTRACT;
g_IBCLogger.LogFieldDescsAccess(this);
return GetApproxEnclosingMethodTable_NoLogging();
}
PTR_MethodTable GetEnclosingMethodTable()
{
LIMITED_METHOD_DAC_CONTRACT;
_ASSERTE(!IsSharedByGenericInstantiations());
return GetApproxEnclosingMethodTable();
}
// FieldDesc can be shared between generic instantiations. So List<String>._items
// is really the same as List<__Canon>._items. Hence, the FieldDesc itself
// cannot know the exact enclosing type. You need to provide the exact owner
// like List<String> or a subtype like MyInheritedList<String>.
MethodTable * GetExactDeclaringType(MethodTable * ownerOrSubType);
BOOL IsSharedByGenericInstantiations()
{
LIMITED_METHOD_DAC_CONTRACT;
return (!IsStatic()) && GetApproxEnclosingMethodTable()->IsSharedByGenericInstantiations();
}
BOOL IsFieldOfValueType()
{
WRAPPER_NO_CONTRACT;
return GetApproxEnclosingMethodTable()->IsValueType();
}
DWORD GetNumGenericClassArgs()
{
WRAPPER_NO_CONTRACT;
return GetApproxEnclosingMethodTable()->GetNumGenericArgs();
}
PTR_BYTE GetBaseInDomainLocalModule(DomainLocalModule * pLocalModule)
{
WRAPPER_NO_CONTRACT;
if (GetFieldType() == ELEMENT_TYPE_CLASS || GetFieldType() == ELEMENT_TYPE_VALUETYPE)
{
return pLocalModule->GetGCStaticsBasePointer(GetEnclosingMethodTable());
}
else
{
return pLocalModule->GetNonGCStaticsBasePointer(GetEnclosingMethodTable());
}
}
PTR_BYTE GetBase()
{
CONTRACTL
{
NOTHROW;
GC_NOTRIGGER;
}
CONTRACTL_END
MethodTable *pMT = GetEnclosingMethodTable();
return GetBaseInDomainLocalModule(pMT->GetDomainLocalModule());
}
// returns the address of the field
void* GetStaticAddress(void *base);
// In all cases except Value classes, the AddressHandle is
// simply the address of the static. For the case of value
// types, however, it is the address of OBJECTREF that holds
// the boxed value used to hold the value type. This is needed
// because the OBJECTREF moves, and the JIT needs to embed something
// in the code that does not move. Thus the jit has to
// dereference and unbox before the access.
PTR_VOID GetStaticAddressHandle(PTR_VOID base);
#ifndef DACCESS_COMPILE
OBJECTREF GetStaticOBJECTREF()
{
WRAPPER_NO_CONTRACT;
return *(OBJECTREF *)GetCurrentStaticAddress();
}
VOID SetStaticOBJECTREF(OBJECTREF objRef);
void* GetStaticValuePtr()
{
WRAPPER_NO_CONTRACT;
return *(void**)GetCurrentStaticAddress();
}
VOID SetStaticValuePtr(void *value)
{
WRAPPER_NO_CONTRACT;
*(void**)GetCurrentStaticAddress() = value;
}
DWORD GetStaticValue32()
{
WRAPPER_NO_CONTRACT;
return *(DWORD*)GetCurrentStaticAddress();
}
VOID SetStaticValue32(DWORD dwValue)
{
WRAPPER_NO_CONTRACT;
*(DWORD*)GetCurrentStaticAddress() = dwValue;
}
USHORT GetStaticValue16()
{
WRAPPER_NO_CONTRACT;
return *(USHORT*)GetCurrentStaticAddress();
}
VOID SetStaticValue16(DWORD dwValue)
{
WRAPPER_NO_CONTRACT;
*(USHORT*)GetCurrentStaticAddress() = (USHORT)dwValue;
}
BYTE GetStaticValue8()
{
WRAPPER_NO_CONTRACT;
return *(BYTE*)GetCurrentStaticAddress();
}
VOID SetStaticValue8(DWORD dwValue)
{
WRAPPER_NO_CONTRACT;
*(BYTE*)GetCurrentStaticAddress() = (BYTE)dwValue;
}
__int64 GetStaticValue64()
{
WRAPPER_NO_CONTRACT;
return *(__int64*)GetCurrentStaticAddress();
}
VOID SetStaticValue64(__int64 qwValue)
{
WRAPPER_NO_CONTRACT;
*(__int64*)GetCurrentStaticAddress() = qwValue;
}
void* GetCurrentStaticAddress()
{
CONTRACTL
{
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
INJECT_FAULT(COMPlusThrowOM());
}
CONTRACTL_END
_ASSERTE(IsStatic());
if (IsThreadStatic())
{
return Thread::GetStaticFieldAddress(this);
}
else {
PTR_BYTE base = 0;
if (!IsRVA()) // for RVA the base is ignored
base = GetBase();
return GetStaticAddress((void *)dac_cast<TADDR>(base));
}
}
VOID CheckRunClassInitThrowing()
{
CONTRACTL
{
THROWS;
GC_TRIGGERS;
INJECT_FAULT(COMPlusThrowOM());
}
CONTRACTL_END;
GetEnclosingMethodTable()->CheckRunClassInitThrowing();
}
#endif //DACCESS_COMPILE
Module *GetModule()
{
LIMITED_METHOD_DAC_CONTRACT;
return GetApproxEnclosingMethodTable()->GetModule();
}
Module *GetLoaderModule()
{
WRAPPER_NO_CONTRACT;
// Field Desc's are currently always saved into the same module as their
// corresponding method table.
return GetApproxEnclosingMethodTable()->GetLoaderModule();
}
void GetSig(PCCOR_SIGNATURE *ppSig, DWORD *pcSig)
{
CONTRACTL
{
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
}
CONTRACTL_END
if (FAILED(GetMDImport()->GetSigOfFieldDef(GetMemberDef(), pcSig, ppSig)))
{ // Class loader already asked for signature, so this should always succeed (unless there's a
// bug or a new code path)
_ASSERTE(!"If this ever fires, then this method should return HRESULT");
*ppSig = NULL;
*pcSig = 0;
}
}
SigPointer GetSigPointer()
{
WRAPPER_NO_CONTRACT;
PCCOR_SIGNATURE pSig;
DWORD cSig;
GetSig(&pSig, &cSig);
return SigPointer(pSig, cSig);
}
// This is slow (uses MetaData), don't use it!
LPCUTF8 GetName()
{
CONTRACTL
{
THROWS;
GC_NOTRIGGER;
MODE_ANY;
}
CONTRACTL_END
LPCSTR szName;
IfFailThrow(GetMDImport()->GetNameOfFieldDef(GetMemberDef(), &szName));
_ASSERTE(szName != NULL);
return szName;
}
// This is slow (uses MetaData), don't use it!
__checkReturn
HRESULT GetName_NoThrow(LPCUTF8 *pszName)
{
CONTRACTL
{
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
}
CONTRACTL_END
return GetMDImport()->GetNameOfFieldDef(GetMemberDef(), pszName);
}
BOOL MightHaveName(ULONG nameHashValue);
// <TODO>@TODO: </TODO>This is slow, don't use it!
DWORD GetAttributes()
{
CONTRACTL
{
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
}
CONTRACTL_END
DWORD dwAttributes;
if (FAILED(GetMDImport()->GetFieldDefProps(GetMemberDef(), &dwAttributes)))
{ // Class loader already asked for attributes, so this should always succeed (unless there's a
// bug or a new code path)
_ASSERTE(!"If this ever fires, then this method should return HRESULT");
return 0;
}
return dwAttributes;
}
// Mini-Helpers
DWORD IsPublic()
{
WRAPPER_NO_CONTRACT;
return IsFdPublic(GetFieldProtection());
}
DWORD IsProtected()
{
WRAPPER_NO_CONTRACT;
return IsFdFamily(GetFieldProtection());
}
DWORD IsPrivate()
{
WRAPPER_NO_CONTRACT;
return IsFdPrivate(GetFieldProtection());
}
IMDInternalImport *GetMDImport()
{
LIMITED_METHOD_DAC_CONTRACT;
return GetModule()->GetMDImport();
}
#ifndef DACCESS_COMPILE
IMetaDataImport *GetRWImporter()
{
WRAPPER_NO_CONTRACT;
return GetModule()->GetRWImporter();
}
#endif // DACCESS_COMPILE
TypeHandle LookupFieldTypeHandle(ClassLoadLevel level = CLASS_LOADED, BOOL dropGenericArgumentLevel = FALSE);
TypeHandle LookupApproxFieldTypeHandle()
{
WRAPPER_NO_CONTRACT;
return LookupFieldTypeHandle(CLASS_LOAD_APPROXPARENTS, TRUE);
}
// Instance FieldDesc can be shared between generic instantiations. So List<String>._items
// is really the same as List<__Canon>._items. Hence, the FieldDesc itself
// cannot know the exact field type. This function returns the approximate field type.
    // For example, this will return "__Canon[]" for List<String>._items.
TypeHandle GetFieldTypeHandleThrowing(ClassLoadLevel level = CLASS_LOADED, BOOL dropGenericArgumentLevel = FALSE);
TypeHandle GetApproxFieldTypeHandleThrowing()
{
WRAPPER_NO_CONTRACT;
return GetFieldTypeHandleThrowing(CLASS_LOAD_APPROXPARENTS, TRUE);
}
// Given a type handle of an object and a method that comes from some
// superclass of the class of that object, find the instantiation of
// that superclass, i.e. the class instantiation which will be relevant
// to interpreting the signature of the method. The type handle of
// the object does not need to be given in all circumstances, in
// particular it is only needed for FieldDescs pFD that
// return true for pFD->GetApproxEnclosingMethodTable()->IsSharedByGenericInstantiations().
// In other cases it is allowed to be null and will be ignored.
//
// Will return NULL if the field is not in a generic class.
Instantiation GetExactClassInstantiation(TypeHandle possibleObjType);
// Instance FieldDesc can be shared between generic instantiations. So List<String>._items
// is really the same as List<__Canon>._items. Hence, the FieldDesc itself
// cannot know the exact field type. You need to specify the owner
// like List<String> in order to get the exact type which would be "String[]"
TypeHandle GetExactFieldType(TypeHandle owner);
#ifdef DACCESS_COMPILE
void EnumMemoryRegions(CLRDataEnumMemoryFlags flags)
{
SUPPORTS_DAC;
DAC_ENUM_DTHIS();
}
#endif
#ifndef DACCESS_COMPILE
REFLECTFIELDREF GetStubFieldInfo();
#endif
};
#endif // _FIELD_H_
| -1 |
dotnet/runtime | 66,006 | Revert "[mono][jit] Remove support for -O=-float32, i.e. treating r4 values a…" | Reverts this as x86 still uses it.
| vargaz | 2022-03-01T15:15:53Z | 2022-03-01T20:09:47Z | af71404d9a3b38e63408af47cc847ac1a1fcf4e1 | 2dd232a53c38ac874b15fe504df275b660988294 | Revert "[mono][jit] Remove support for -O=-float32, i.e. treating r4 values a…". Reverts this as x86 still uses it.
| ./src/coreclr/pal/src/libunwind/src/arm/Lget_save_loc.c | #define UNW_LOCAL_ONLY
#include <libunwind.h>
#if defined(UNW_LOCAL_ONLY) && !defined(UNW_REMOTE_ONLY)
#include "Gget_save_loc.c"
#endif
| #define UNW_LOCAL_ONLY
#include <libunwind.h>
#if defined(UNW_LOCAL_ONLY) && !defined(UNW_REMOTE_ONLY)
#include "Gget_save_loc.c"
#endif
| -1 |
dotnet/runtime | 66,006 | Revert "[mono][jit] Remove support for -O=-float32, i.e. treating r4 values a…" | Reverts this as x86 still uses it.
| vargaz | 2022-03-01T15:15:53Z | 2022-03-01T20:09:47Z | af71404d9a3b38e63408af47cc847ac1a1fcf4e1 | 2dd232a53c38ac874b15fe504df275b660988294 | Revert "[mono][jit] Remove support for -O=-float32, i.e. treating r4 values a…". Reverts this as x86 still uses it.
| ./src/coreclr/nativeaot/Runtime/unix/UnixContext.h | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
#ifndef __UNIX_CONTEXT_H__
#define __UNIX_CONTEXT_H__
// Convert Unix native context to PAL_LIMITED_CONTEXT
void NativeContextToPalContext(const void* context, PAL_LIMITED_CONTEXT* palContext);
// Redirect Unix native context to the PAL_LIMITED_CONTEXT and also set the first two argument registers
void RedirectNativeContext(void* context, const PAL_LIMITED_CONTEXT* palContext, uintptr_t arg0Reg, uintptr_t arg1Reg);
// Find LSDA and start address for a function at address controlPC
bool FindProcInfo(uintptr_t controlPC, uintptr_t* startAddress, uintptr_t* lsda);
// Virtually unwind stack to the caller of the context specified by the REGDISPLAY
bool VirtualUnwind(REGDISPLAY* pRegisterSet);
#ifdef HOST_AMD64
// Get value of a register from the native context. The index is the processor specific
// register index stored in machine instructions.
uint64_t GetRegisterValueByIndex(void* context, uint32_t index);
// Get value of the program counter from the native context
uint64_t GetPC(void* context);
#endif // HOST_AMD64
#endif // __UNIX_CONTEXT_H__
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
#ifndef __UNIX_CONTEXT_H__
#define __UNIX_CONTEXT_H__
// Convert Unix native context to PAL_LIMITED_CONTEXT
void NativeContextToPalContext(const void* context, PAL_LIMITED_CONTEXT* palContext);
// Redirect Unix native context to the PAL_LIMITED_CONTEXT and also set the first two argument registers
void RedirectNativeContext(void* context, const PAL_LIMITED_CONTEXT* palContext, uintptr_t arg0Reg, uintptr_t arg1Reg);
// Find LSDA and start address for a function at address controlPC
bool FindProcInfo(uintptr_t controlPC, uintptr_t* startAddress, uintptr_t* lsda);
// Virtually unwind stack to the caller of the context specified by the REGDISPLAY
bool VirtualUnwind(REGDISPLAY* pRegisterSet);
#ifdef HOST_AMD64
// Get value of a register from the native context. The index is the processor specific
// register index stored in machine instructions.
uint64_t GetRegisterValueByIndex(void* context, uint32_t index);
// Get value of the program counter from the native context
uint64_t GetPC(void* context);
#endif // HOST_AMD64
#endif // __UNIX_CONTEXT_H__
| -1 |
dotnet/runtime | 66,006 | Revert "[mono][jit] Remove support for -O=-float32, i.e. treating r4 values a…" | Reverts this as x86 still uses it.
| vargaz | 2022-03-01T15:15:53Z | 2022-03-01T20:09:47Z | af71404d9a3b38e63408af47cc847ac1a1fcf4e1 | 2dd232a53c38ac874b15fe504df275b660988294 | Revert "[mono][jit] Remove support for -O=-float32, i.e. treating r4 values a…". Reverts this as x86 still uses it.
| ./src/coreclr/debug/createdump/threadinfo.h | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
class CrashInfo;
#if defined(__aarch64__)
// See src/pal/src/include/pal/context.h
#define MCREG_Fp(mc) ((mc).regs[29])
#define MCREG_Lr(mc) ((mc).regs[30])
#define MCREG_Sp(mc) ((mc).sp)
#define MCREG_Pc(mc) ((mc).pc)
#define MCREG_Cpsr(mc) ((mc).pstate)
#endif
#define FPREG_ErrorOffset(fpregs) *(DWORD*)&((fpregs).rip)
#define FPREG_ErrorSelector(fpregs) *(((WORD*)&((fpregs).rip)) + 2)
#define FPREG_DataOffset(fpregs) *(DWORD*)&((fpregs).rdp)
#define FPREG_DataSelector(fpregs) *(((WORD*)&((fpregs).rdp)) + 2)
#if defined(__arm__)
#define user_regs_struct user_regs
#define user_fpregs_struct user_fpregs
#endif
#if defined(__aarch64__)
#define user_fpregs_struct user_fpsimd_struct
#endif
#if defined(__arm__) && defined(__VFP_FP__) && !defined(__SOFTFP__)
struct user_vfpregs_struct
{
unsigned long long fpregs[32];
unsigned long fpscr;
} __attribute__((__packed__));
#endif
#define STACK_OVERFLOW_EXCEPTION 0x800703e9
class ThreadInfo
{
private:
CrashInfo& m_crashInfo; // crashinfo instance
pid_t m_tid; // thread id
pid_t m_ppid; // parent process
pid_t m_tgid; // thread group
bool m_managed; // if true, thread has managed code running
uint64_t m_exceptionObject; // exception object address
std::string m_exceptionType; // exception type
uint32_t m_exceptionHResult; // exception HRESULT
std::set<StackFrame> m_frames; // stack frames
int m_repeatedFrames; // number of repeated frames
std::set<StackFrame>::const_iterator m_beginRepeat; // beginning of stack overflow repeated frame sequence
std::set<StackFrame>::const_iterator m_endRepeat; // end of repeated frame sequence
#ifdef __APPLE__
mach_port_t m_port; // MacOS thread port
#if defined(__x86_64__)
x86_thread_state64_t m_gpRegisters; // MacOS general purpose registers
x86_float_state64_t m_fpRegisters; // MacOS floating point registers
#elif defined(__aarch64__)
arm_thread_state64_t m_gpRegisters; // MacOS general purpose arm64 registers
arm_neon_state64_t m_fpRegisters; // MacOS floating point arm64 registers
#endif
#else // __APPLE__
struct user_regs_struct m_gpRegisters; // general purpose registers
struct user_fpregs_struct m_fpRegisters; // floating point registers
#if defined(__i386__)
struct user_fpxregs_struct m_fpxRegisters; // x86 floating point registers
#elif defined(__arm__) && defined(__VFP_FP__) && !defined(__SOFTFP__)
struct user_vfpregs_struct m_vfpRegisters; // ARM VFP/NEON registers
#endif
#endif // __APPLE__
// no public copy constructor
ThreadInfo(const ThreadInfo&) = delete;
void operator=(const ThreadInfo&) = delete;
public:
#ifdef __APPLE__
ThreadInfo(CrashInfo& crashInfo, pid_t tid, mach_port_t port);
inline mach_port_t Port() const { return m_port; }
#else
ThreadInfo(CrashInfo& crashInfo, pid_t tid);
#endif
~ThreadInfo();
bool Initialize();
bool UnwindThread(IXCLRDataProcess* pClrDataProcess, ISOSDacInterface* pSos);
void GetThreadStack();
void GetThreadContext(uint32_t flags, CONTEXT* context) const;
inline pid_t Tid() const { return m_tid; }
inline pid_t Ppid() const { return m_ppid; }
inline pid_t Tgid() const { return m_tgid; }
inline bool IsManaged() const { return m_managed; }
inline uint64_t ManagedExceptionObject() const { return m_exceptionObject; }
inline uint32_t ManagedExceptionHResult() const { return m_exceptionHResult; }
inline std::string ManagedExceptionType() const { return m_exceptionType; }
inline const std::set<StackFrame>& StackFrames() const { return m_frames; }
inline int NumRepeatedFrames() const { return m_repeatedFrames; }
inline bool IsBeginRepeat(std::set<StackFrame>::const_iterator& iterator) const { return m_repeatedFrames > 0 && iterator == m_beginRepeat; }
inline bool IsEndRepeat(std::set<StackFrame>::const_iterator& iterator) const { return m_repeatedFrames > 0 && iterator == m_endRepeat; }
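// Usage sketch (illustrative, not part of this class): a dump writer walks
// StackFrames() and uses IsBeginRepeat/IsEndRepeat to print the repeated
// stack-overflow sequence only once, e.g. emitting
// "<frames repeated NumRepeatedFrames() times>" when IsBeginRepeat(it) is
// true and skipping frames until IsEndRepeat(it) becomes true.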
#ifdef __APPLE__
#if defined(__x86_64__)
inline const x86_thread_state64_t* GPRegisters() const { return &m_gpRegisters; }
inline const x86_float_state64_t* FPRegisters() const { return &m_fpRegisters; }
inline const uint64_t GetInstructionPointer() const { return m_gpRegisters.__rip; }
inline const uint64_t GetFramePointer() const { return m_gpRegisters.__rbp; }
inline const uint64_t GetStackPointer() const { return m_gpRegisters.__rsp; }
#elif defined(__aarch64__)
inline const arm_thread_state64_t* GPRegisters() const { return &m_gpRegisters; }
inline const arm_neon_state64_t* FPRegisters() const { return &m_fpRegisters; }
inline const uint64_t GetInstructionPointer() const { return arm_thread_state64_get_pc(m_gpRegisters); }
inline const uint64_t GetFramePointer() const { return arm_thread_state64_get_fp(m_gpRegisters); }
inline const uint64_t GetStackPointer() const { return arm_thread_state64_get_sp(m_gpRegisters); }
#endif
#else // __APPLE__
inline const user_regs_struct* GPRegisters() const { return &m_gpRegisters; }
inline const user_fpregs_struct* FPRegisters() const { return &m_fpRegisters; }
#if defined(__i386__)
inline const user_fpxregs_struct* FPXRegisters() const { return &m_fpxRegisters; }
#elif defined(__arm__) && defined(__VFP_FP__) && !defined(__SOFTFP__)
inline const user_vfpregs_struct* VFPRegisters() const { return &m_vfpRegisters; }
#endif
#if defined(__x86_64__)
inline const uint64_t GetInstructionPointer() const { return m_gpRegisters.rip; }
inline const uint64_t GetStackPointer() const { return m_gpRegisters.rsp; }
inline const uint64_t GetFramePointer() const { return m_gpRegisters.rbp; }
#elif defined(__aarch64__)
inline const uint64_t GetInstructionPointer() const { return MCREG_Pc(m_gpRegisters); }
inline const uint64_t GetStackPointer() const { return MCREG_Sp(m_gpRegisters); }
inline const uint64_t GetFramePointer() const { return MCREG_Fp(m_gpRegisters); }
#elif defined(__arm__)
inline const uint64_t GetInstructionPointer() const { return m_gpRegisters.ARM_pc; }
inline const uint64_t GetStackPointer() const { return m_gpRegisters.ARM_sp; }
inline const uint64_t GetFramePointer() const { return m_gpRegisters.ARM_fp; }
#endif
#endif // __APPLE__
private:
void UnwindNativeFrames(CONTEXT* pContext);
void GatherStackFrames(CONTEXT* pContext, IXCLRDataStackWalk* pStackwalk);
void AddStackFrame(const StackFrame& frame);
#ifndef __APPLE__
bool GetRegistersWithPTrace();
#endif
};
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
class CrashInfo;
#if defined(__aarch64__)
// See src/pal/src/include/pal/context.h
#define MCREG_Fp(mc) ((mc).regs[29])
#define MCREG_Lr(mc) ((mc).regs[30])
#define MCREG_Sp(mc) ((mc).sp)
#define MCREG_Pc(mc) ((mc).pc)
#define MCREG_Cpsr(mc) ((mc).pstate)
#endif
#define FPREG_ErrorOffset(fpregs) *(DWORD*)&((fpregs).rip)
#define FPREG_ErrorSelector(fpregs) *(((WORD*)&((fpregs).rip)) + 2)
#define FPREG_DataOffset(fpregs) *(DWORD*)&((fpregs).rdp)
#define FPREG_DataSelector(fpregs) *(((WORD*)&((fpregs).rdp)) + 2)
#if defined(__arm__)
#define user_regs_struct user_regs
#define user_fpregs_struct user_fpregs
#endif
#if defined(__aarch64__)
#define user_fpregs_struct user_fpsimd_struct
#endif
#if defined(__arm__) && defined(__VFP_FP__) && !defined(__SOFTFP__)
struct user_vfpregs_struct
{
unsigned long long fpregs[32];
unsigned long fpscr;
} __attribute__((__packed__));
#endif
#define STACK_OVERFLOW_EXCEPTION 0x800703e9
class ThreadInfo
{
private:
CrashInfo& m_crashInfo; // crashinfo instance
pid_t m_tid; // thread id
pid_t m_ppid; // parent process
pid_t m_tgid; // thread group
bool m_managed; // if true, thread has managed code running
uint64_t m_exceptionObject; // exception object address
std::string m_exceptionType; // exception type
uint32_t m_exceptionHResult; // exception HRESULT
std::set<StackFrame> m_frames; // stack frames
int m_repeatedFrames; // number of repeated frames
std::set<StackFrame>::const_iterator m_beginRepeat; // beginning of stack overflow repeated frame sequence
std::set<StackFrame>::const_iterator m_endRepeat; // end of repeated frame sequence
#ifdef __APPLE__
mach_port_t m_port; // MacOS thread port
#if defined(__x86_64__)
x86_thread_state64_t m_gpRegisters; // MacOS general purpose registers
x86_float_state64_t m_fpRegisters; // MacOS floating point registers
#elif defined(__aarch64__)
arm_thread_state64_t m_gpRegisters; // MacOS general purpose arm64 registers
arm_neon_state64_t m_fpRegisters; // MacOS floating point arm64 registers
#endif
#else // __APPLE__
struct user_regs_struct m_gpRegisters; // general purpose registers
struct user_fpregs_struct m_fpRegisters; // floating point registers
#if defined(__i386__)
struct user_fpxregs_struct m_fpxRegisters; // x86 floating point registers
#elif defined(__arm__) && defined(__VFP_FP__) && !defined(__SOFTFP__)
struct user_vfpregs_struct m_vfpRegisters; // ARM VFP/NEON registers
#endif
#endif // __APPLE__
// no public copy constructor
ThreadInfo(const ThreadInfo&) = delete;
void operator=(const ThreadInfo&) = delete;
public:
#ifdef __APPLE__
ThreadInfo(CrashInfo& crashInfo, pid_t tid, mach_port_t port);
inline mach_port_t Port() const { return m_port; }
#else
ThreadInfo(CrashInfo& crashInfo, pid_t tid);
#endif
~ThreadInfo();
bool Initialize();
bool UnwindThread(IXCLRDataProcess* pClrDataProcess, ISOSDacInterface* pSos);
void GetThreadStack();
void GetThreadContext(uint32_t flags, CONTEXT* context) const;
inline pid_t Tid() const { return m_tid; }
inline pid_t Ppid() const { return m_ppid; }
inline pid_t Tgid() const { return m_tgid; }
inline bool IsManaged() const { return m_managed; }
inline uint64_t ManagedExceptionObject() const { return m_exceptionObject; }
inline uint32_t ManagedExceptionHResult() const { return m_exceptionHResult; }
inline std::string ManagedExceptionType() const { return m_exceptionType; }
inline const std::set<StackFrame>& StackFrames() const { return m_frames; }
inline int NumRepeatedFrames() const { return m_repeatedFrames; }
inline bool IsBeginRepeat(std::set<StackFrame>::const_iterator& iterator) const { return m_repeatedFrames > 0 && iterator == m_beginRepeat; }
inline bool IsEndRepeat(std::set<StackFrame>::const_iterator& iterator) const { return m_repeatedFrames > 0 && iterator == m_endRepeat; }
#ifdef __APPLE__
#if defined(__x86_64__)
inline const x86_thread_state64_t* GPRegisters() const { return &m_gpRegisters; }
inline const x86_float_state64_t* FPRegisters() const { return &m_fpRegisters; }
inline const uint64_t GetInstructionPointer() const { return m_gpRegisters.__rip; }
inline const uint64_t GetFramePointer() const { return m_gpRegisters.__rbp; }
inline const uint64_t GetStackPointer() const { return m_gpRegisters.__rsp; }
#elif defined(__aarch64__)
inline const arm_thread_state64_t* GPRegisters() const { return &m_gpRegisters; }
inline const arm_neon_state64_t* FPRegisters() const { return &m_fpRegisters; }
inline const uint64_t GetInstructionPointer() const { return arm_thread_state64_get_pc(m_gpRegisters); }
inline const uint64_t GetFramePointer() const { return arm_thread_state64_get_fp(m_gpRegisters); }
inline const uint64_t GetStackPointer() const { return arm_thread_state64_get_sp(m_gpRegisters); }
#endif
#else // __APPLE__
inline const user_regs_struct* GPRegisters() const { return &m_gpRegisters; }
inline const user_fpregs_struct* FPRegisters() const { return &m_fpRegisters; }
#if defined(__i386__)
inline const user_fpxregs_struct* FPXRegisters() const { return &m_fpxRegisters; }
#elif defined(__arm__) && defined(__VFP_FP__) && !defined(__SOFTFP__)
inline const user_vfpregs_struct* VFPRegisters() const { return &m_vfpRegisters; }
#endif
#if defined(__x86_64__)
inline const uint64_t GetInstructionPointer() const { return m_gpRegisters.rip; }
inline const uint64_t GetStackPointer() const { return m_gpRegisters.rsp; }
inline const uint64_t GetFramePointer() const { return m_gpRegisters.rbp; }
#elif defined(__aarch64__)
inline const uint64_t GetInstructionPointer() const { return MCREG_Pc(m_gpRegisters); }
inline const uint64_t GetStackPointer() const { return MCREG_Sp(m_gpRegisters); }
inline const uint64_t GetFramePointer() const { return MCREG_Fp(m_gpRegisters); }
#elif defined(__arm__)
inline const uint64_t GetInstructionPointer() const { return m_gpRegisters.ARM_pc; }
inline const uint64_t GetStackPointer() const { return m_gpRegisters.ARM_sp; }
inline const uint64_t GetFramePointer() const { return m_gpRegisters.ARM_fp; }
#endif
#endif // __APPLE__
private:
void UnwindNativeFrames(CONTEXT* pContext);
void GatherStackFrames(CONTEXT* pContext, IXCLRDataStackWalk* pStackwalk);
void AddStackFrame(const StackFrame& frame);
#ifndef __APPLE__
bool GetRegistersWithPTrace();
#endif
};
| -1 |
dotnet/runtime | 66,006 | Revert "[mono][jit] Remove support for -O=-float32, i.e. treating r4 values a…" | Reverts this as x86 still uses it.
| vargaz | 2022-03-01T15:15:53Z | 2022-03-01T20:09:47Z | af71404d9a3b38e63408af47cc847ac1a1fcf4e1 | 2dd232a53c38ac874b15fe504df275b660988294 | Revert "[mono][jit] Remove support for -O=-float32, i.e. treating r4 values a…". Reverts this as x86 still uses it.
| ./src/native/external/brotli/enc/encoder_dict.c | /* Copyright 2017 Google Inc. All Rights Reserved.
Distributed under MIT license.
See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
*/
#include "./encoder_dict.h"
#include "../common/dictionary.h"
#include "../common/transform.h"
#include "./dictionary_hash.h"
#include "./hash.h"
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
void BrotliInitEncoderDictionary(BrotliEncoderDictionary* dict) {
dict->words = BrotliGetDictionary();
dict->num_transforms = (uint32_t)BrotliGetTransforms()->num_transforms;
dict->hash_table_words = kStaticDictionaryHashWords;
dict->hash_table_lengths = kStaticDictionaryHashLengths;
dict->buckets = kStaticDictionaryBuckets;
dict->dict_words = kStaticDictionaryWords;
dict->cutoffTransformsCount = kCutoffTransformsCount;
dict->cutoffTransforms = kCutoffTransforms;
}
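/* Usage sketch (illustrative): callers embed the dictionary in the encoder
   state and initialize it once, e.g.
     BrotliEncoderDictionary dict;
     BrotliInitEncoderDictionary(&dict);
   after which every field points at the shared static tables wired up above. */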
#if defined(__cplusplus) || defined(c_plusplus)
} /* extern "C" */
#endif
| /* Copyright 2017 Google Inc. All Rights Reserved.
Distributed under MIT license.
See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
*/
#include "./encoder_dict.h"
#include "../common/dictionary.h"
#include "../common/transform.h"
#include "./dictionary_hash.h"
#include "./hash.h"
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
void BrotliInitEncoderDictionary(BrotliEncoderDictionary* dict) {
dict->words = BrotliGetDictionary();
dict->num_transforms = (uint32_t)BrotliGetTransforms()->num_transforms;
dict->hash_table_words = kStaticDictionaryHashWords;
dict->hash_table_lengths = kStaticDictionaryHashLengths;
dict->buckets = kStaticDictionaryBuckets;
dict->dict_words = kStaticDictionaryWords;
dict->cutoffTransformsCount = kCutoffTransformsCount;
dict->cutoffTransforms = kCutoffTransforms;
}
#if defined(__cplusplus) || defined(c_plusplus)
} /* extern "C" */
#endif
| -1 |
dotnet/runtime | 66,006 | Revert "[mono][jit] Remove support for -O=-float32, i.e. treating r4 values a…" | Reverts this as x86 still uses it.
| vargaz | 2022-03-01T15:15:53Z | 2022-03-01T20:09:47Z | af71404d9a3b38e63408af47cc847ac1a1fcf4e1 | 2dd232a53c38ac874b15fe504df275b660988294 | Revert "[mono][jit] Remove support for -O=-float32, i.e. treating r4 values a…". Reverts this as x86 still uses it.
| ./src/native/eventpipe/ds-eventpipe-protocol.h | #ifndef __DIAGNOSTICS_EVENTPIPE_PROTOCOL_H__
#define __DIAGNOSTICS_EVENTPIPE_PROTOCOL_H__
#include "ds-rt-config.h"
#ifdef ENABLE_PERFTRACING
#include "ds-types.h"
#include "ds-ipc.h"
#undef DS_IMPL_GETTER_SETTER
#ifdef DS_IMPL_EVENTPIPE_PROTOCOL_GETTER_SETTER
#define DS_IMPL_GETTER_SETTER
#endif
#include "ds-getter-setter.h"
/*
* EventPipeCollectTracingCommandPayload
*/
// Command = 0x0202
#if defined(DS_INLINE_GETTER_SETTER) || defined(DS_IMPL_EVENTPIPE_PROTOCOL_GETTER_SETTER)
struct _EventPipeCollectTracingCommandPayload {
#else
struct _EventPipeCollectTracingCommandPayload_Internal {
#endif
// The protocol buffer is defined as:
// X, Y, Z means encode bytes for X followed by bytes for Y followed by bytes for Z
// message = uint circularBufferMB, uint format, array<provider_config> providers
// uint = 4 little endian bytes
// wchar = 2 little endian bytes, UTF16 encoding
// array<T> = uint length, length # of Ts
// string = (array<char> where the last char must = 0) or (length = 0)
// provider_config = ulong keywords, uint logLevel, string provider_name, string filter_data
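// Illustrative example (values assumed, not from this header): a request for a
// 256 MB buffer in NetTrace format (1) starts with the little-endian bytes
//   00 01 00 00 (circularBufferMB = 256) 01 00 00 00 (format = 1)
// followed by the provider array: a uint count, then for each provider the
// 8-byte keywords, the 4-byte logLevel, and the two length-prefixed UTF16 strings.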
uint8_t *incoming_buffer;
ep_rt_provider_config_array_t provider_configs;
uint32_t circular_buffer_size_in_mb;
EventPipeSerializationFormat serialization_format;
};
#if !defined(DS_INLINE_GETTER_SETTER) && !defined(DS_IMPL_EVENTPIPE_PROTOCOL_GETTER_SETTER)
struct _EventPipeCollectTracingCommandPayload {
uint8_t _internal [sizeof (struct _EventPipeCollectTracingCommandPayload_Internal)];
};
#endif
EventPipeCollectTracingCommandPayload *
ds_eventpipe_collect_tracing_command_payload_alloc (void);
void
ds_eventpipe_collect_tracing_command_payload_free (EventPipeCollectTracingCommandPayload *payload);
/*
* EventPipeCollectTracing2CommandPayload
*/
// Command = 0x0202
#if defined(DS_INLINE_GETTER_SETTER) || defined(DS_IMPL_EVENTPIPE_PROTOCOL_GETTER_SETTER)
struct _EventPipeCollectTracing2CommandPayload {
#else
struct _EventPipeCollectTracing2CommandPayload_Internal {
#endif
// The protocol buffer is defined as:
// X, Y, Z means encode bytes for X followed by bytes for Y followed by bytes for Z
// message = uint circularBufferMB, uint format, array<provider_config> providers
// uint = 4 little endian bytes
// wchar = 2 little endian bytes, UTF16 encoding
// array<T> = uint length, length # of Ts
// string = (array<char> where the last char must = 0) or (length = 0)
// provider_config = ulong keywords, uint logLevel, string provider_name, string filter_data
uint8_t *incoming_buffer;
ep_rt_provider_config_array_t provider_configs;
uint32_t circular_buffer_size_in_mb;
EventPipeSerializationFormat serialization_format;
bool rundown_requested;
};
#if !defined(DS_INLINE_GETTER_SETTER) && !defined(DS_IMPL_EVENTPIPE_PROTOCOL_GETTER_SETTER)
struct _EventPipeCollectTracing2CommandPayload {
uint8_t _internal [sizeof (struct _EventPipeCollectTracing2CommandPayload_Internal)];
};
#endif
EventPipeCollectTracing2CommandPayload *
ds_eventpipe_collect_tracing2_command_payload_alloc (void);
void
ds_eventpipe_collect_tracing2_command_payload_free (EventPipeCollectTracing2CommandPayload *payload);
/*
* EventPipeStopTracingCommandPayload
*/
// Command = 0x0201
#if defined(DS_INLINE_GETTER_SETTER) || defined(DS_IMPL_EVENTPIPE_PROTOCOL_GETTER_SETTER)
struct _EventPipeStopTracingCommandPayload {
#else
struct _EventPipeStopTracingCommandPayload_Internal {
#endif
EventPipeSessionID session_id;
};
#if !defined(DS_INLINE_GETTER_SETTER) && !defined(DS_IMPL_EVENTPIPE_PROTOCOL_GETTER_SETTER)
struct _EventPipeStopTracingCommandPayload {
uint8_t _internal [sizeof (struct _EventPipeStopTracingCommandPayload_Internal)];
};
#endif
void
ds_eventpipe_stop_tracing_command_payload_free (EventPipeStopTracingCommandPayload *payload);
/*
* EventPipeProtocolHelper
*/
bool
ds_eventpipe_protocol_helper_handle_ipc_message (
DiagnosticsIpcMessage *message,
DiagnosticsIpcStream *stream);
#endif /* ENABLE_PERFTRACING */
#endif /* __DIAGNOSTICS_EVENTPIPE_PROTOCOL_H__ */
| #ifndef __DIAGNOSTICS_EVENTPIPE_PROTOCOL_H__
#define __DIAGNOSTICS_EVENTPIPE_PROTOCOL_H__
#include "ds-rt-config.h"
#ifdef ENABLE_PERFTRACING
#include "ds-types.h"
#include "ds-ipc.h"
#undef DS_IMPL_GETTER_SETTER
#ifdef DS_IMPL_EVENTPIPE_PROTOCOL_GETTER_SETTER
#define DS_IMPL_GETTER_SETTER
#endif
#include "ds-getter-setter.h"
/*
* EventPipeCollectTracingCommandPayload
*/
// Command = 0x0202
#if defined(DS_INLINE_GETTER_SETTER) || defined(DS_IMPL_EVENTPIPE_PROTOCOL_GETTER_SETTER)
struct _EventPipeCollectTracingCommandPayload {
#else
struct _EventPipeCollectTracingCommandPayload_Internal {
#endif
// The protocol buffer is defined as:
// X, Y, Z means encode bytes for X followed by bytes for Y followed by bytes for Z
// message = uint circularBufferMB, uint format, array<provider_config> providers
// uint = 4 little endian bytes
// wchar = 2 little endian bytes, UTF16 encoding
// array<T> = uint length, length # of Ts
// string = (array<char> where the last char must = 0) or (length = 0)
// provider_config = ulong keywords, uint logLevel, string provider_name, string filter_data
uint8_t *incoming_buffer;
ep_rt_provider_config_array_t provider_configs;
uint32_t circular_buffer_size_in_mb;
EventPipeSerializationFormat serialization_format;
};
#if !defined(DS_INLINE_GETTER_SETTER) && !defined(DS_IMPL_EVENTPIPE_PROTOCOL_GETTER_SETTER)
struct _EventPipeCollectTracingCommandPayload {
uint8_t _internal [sizeof (struct _EventPipeCollectTracingCommandPayload_Internal)];
};
#endif
EventPipeCollectTracingCommandPayload *
ds_eventpipe_collect_tracing_command_payload_alloc (void);
void
ds_eventpipe_collect_tracing_command_payload_free (EventPipeCollectTracingCommandPayload *payload);
/*
* EventPipeCollectTracing2CommandPayload
*/
// Command = 0x0202
#if defined(DS_INLINE_GETTER_SETTER) || defined(DS_IMPL_EVENTPIPE_PROTOCOL_GETTER_SETTER)
struct _EventPipeCollectTracing2CommandPayload {
#else
struct _EventPipeCollectTracing2CommandPayload_Internal {
#endif
// The protocol buffer is defined as:
// X, Y, Z means encode bytes for X followed by bytes for Y followed by bytes for Z
// message = uint circularBufferMB, uint format, array<provider_config> providers
// uint = 4 little endian bytes
// wchar = 2 little endian bytes, UTF16 encoding
// array<T> = uint length, length # of Ts
// string = (array<char> where the last char must = 0) or (length = 0)
// provider_config = ulong keywords, uint logLevel, string provider_name, string filter_data
uint8_t *incoming_buffer;
ep_rt_provider_config_array_t provider_configs;
uint32_t circular_buffer_size_in_mb;
EventPipeSerializationFormat serialization_format;
bool rundown_requested;
};
#if !defined(DS_INLINE_GETTER_SETTER) && !defined(DS_IMPL_EVENTPIPE_PROTOCOL_GETTER_SETTER)
struct _EventPipeCollectTracing2CommandPayload {
uint8_t _internal [sizeof (struct _EventPipeCollectTracing2CommandPayload_Internal)];
};
#endif
EventPipeCollectTracing2CommandPayload *
ds_eventpipe_collect_tracing2_command_payload_alloc (void);
void
ds_eventpipe_collect_tracing2_command_payload_free (EventPipeCollectTracing2CommandPayload *payload);
/*
* EventPipeStopTracingCommandPayload
*/
// Command = 0x0201
#if defined(DS_INLINE_GETTER_SETTER) || defined(DS_IMPL_EVENTPIPE_PROTOCOL_GETTER_SETTER)
struct _EventPipeStopTracingCommandPayload {
#else
struct _EventPipeStopTracingCommandPayload_Internal {
#endif
EventPipeSessionID session_id;
};
#if !defined(DS_INLINE_GETTER_SETTER) && !defined(DS_IMPL_EVENTPIPE_PROTOCOL_GETTER_SETTER)
struct _EventPipeStopTracingCommandPayload {
uint8_t _internal [sizeof (struct _EventPipeStopTracingCommandPayload_Internal)];
};
#endif
void
ds_eventpipe_stop_tracing_command_payload_free (EventPipeStopTracingCommandPayload *payload);
/*
* EventPipeProtocolHelper
*/
bool
ds_eventpipe_protocol_helper_handle_ipc_message (
DiagnosticsIpcMessage *message,
DiagnosticsIpcStream *stream);
#endif /* ENABLE_PERFTRACING */
#endif /* __DIAGNOSTICS_EVENTPIPE_PROTOCOL_H__ */
| -1 |
dotnet/runtime | 66,006 | Revert "[mono][jit] Remove support for -O=-float32, i.e. treating r4 values a…" | Reverts this as x86 still uses it.
| vargaz | 2022-03-01T15:15:53Z | 2022-03-01T20:09:47Z | af71404d9a3b38e63408af47cc847ac1a1fcf4e1 | 2dd232a53c38ac874b15fe504df275b660988294 | Revert "[mono][jit] Remove support for -O=-float32, i.e. treating r4 values a…". Reverts this as x86 still uses it.
| ./src/coreclr/pal/src/libunwind/src/tilegx/Linit_remote.c | #define UNW_LOCAL_ONLY
#include <libunwind.h>
#if defined(UNW_LOCAL_ONLY) && !defined(UNW_REMOTE_ONLY)
#include "Ginit_remote.c"
#endif
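/* Note (assumed from libunwind's naming convention): an L-prefixed file
   recompiles its G-prefixed counterpart with UNW_LOCAL_ONLY defined, so this
   translation unit yields the local-only variants of the entry points that
   Ginit_remote.c provides in generic form. */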
| #define UNW_LOCAL_ONLY
#include <libunwind.h>
#if defined(UNW_LOCAL_ONLY) && !defined(UNW_REMOTE_ONLY)
#include "Ginit_remote.c"
#endif
| -1 |
dotnet/runtime | 66,006 | Revert "[mono][jit] Remove support for -O=-float32, i.e. treating r4 values a…" | Reverts this as x86 still uses it.
| vargaz | 2022-03-01T15:15:53Z | 2022-03-01T20:09:47Z | af71404d9a3b38e63408af47cc847ac1a1fcf4e1 | 2dd232a53c38ac874b15fe504df275b660988294 | Revert "[mono][jit] Remove support for -O=-float32, i.e. treating r4 values a…". Reverts this as x86 still uses it.
| ./src/mono/mono/utils/mono-threads-wasm.c | #include "config.h"
#include <mono/utils/mono-threads.h>
#include <mono/utils/mono-mmap.h>
#include <mono/utils/mono-compiler.h>
#if defined (USE_WASM_BACKEND)
#include <mono/utils/mono-threads.h>
#include <mono/utils/mono-mmap.h>
#include <glib.h>
#ifdef HOST_BROWSER
#include <emscripten.h>
#include <emscripten/stack.h>
#define round_down(addr, val) ((void*)((addr) & ~((val) - 1)))
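// For example, round_down(0x12345, 0x1000) == (void*)0x12000: the bits below
// the (power-of-two) alignment are masked off.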
EMSCRIPTEN_KEEPALIVE
static int
wasm_get_stack_base (void)
{
return emscripten_stack_get_end ();
}
EMSCRIPTEN_KEEPALIVE
static int
wasm_get_stack_size (void)
{
return (guint8*)emscripten_stack_get_base () - (guint8*)emscripten_stack_get_end ();
}
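// The Emscripten stack grows downward: emscripten_stack_get_base () returns
// the high address and emscripten_stack_get_end () the low one, so their
// difference is the stack size in bytes.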
#else /* WASI */
static int
wasm_get_stack_base (void)
{
// TODO: For WASI, we need to ensure the stack location makes sense and won't interfere with the heap.
// Currently these hardcoded values are sufficient for a working prototype. It's an arbitrary nonzero
// value that aligns to 32 bits.
return 4;
}
static int
wasm_get_stack_size (void)
{
// TODO: For WASI, we need to ensure the stack location makes sense and won't interfere with the heap.
// Currently these hardcoded values are sufficient for a working prototype. It's an arbitrary nonzero
// value that aligns to 32 bits.
return 4;
}
#endif
int
mono_thread_info_get_system_max_stack_size (void)
{
return wasm_get_stack_size ();
}
void
mono_threads_suspend_init_signals (void)
{
}
void
mono_threads_suspend_init (void)
{
}
void
mono_threads_suspend_register (MonoThreadInfo *info)
{
}
gboolean
mono_threads_suspend_begin_async_resume (MonoThreadInfo *info)
{
return TRUE;
}
void
mono_threads_suspend_free (MonoThreadInfo *info)
{
}
gboolean
mono_threads_suspend_begin_async_suspend (MonoThreadInfo *info, gboolean interrupt_kernel)
{
return TRUE;
}
gboolean
mono_threads_suspend_check_suspend_result (MonoThreadInfo *info)
{
return TRUE;
}
void
mono_threads_suspend_abort_syscall (MonoThreadInfo *info)
{
}
gboolean
mono_native_thread_id_equals (MonoNativeThreadId id1, MonoNativeThreadId id2)
{
return id1 == id2;
}
MonoNativeThreadId
mono_native_thread_id_get (void)
{
#ifdef __EMSCRIPTEN_PTHREADS__
return pthread_self ();
#else
return (MonoNativeThreadId)1;
#endif
}
guint64
mono_native_thread_os_id_get (void)
{
#ifdef __EMSCRIPTEN_PTHREADS__
return (guint64)pthread_self ();
#else
return 1;
#endif
}
gint32
mono_native_thread_processor_id_get (void)
{
return -1;
}
MONO_API gboolean
mono_native_thread_create (MonoNativeThreadId *tid, gpointer func, gpointer arg)
{
g_error ("WASM doesn't support threading");
}
static const char *thread_name;
void
mono_native_thread_set_name (MonoNativeThreadId tid, const char *name)
{
thread_name = g_strdup (name);
}
gboolean
mono_native_thread_join (MonoNativeThreadId tid)
{
#ifdef __EMSCRIPTEN_PTHREADS__
void *res;
return !pthread_join (tid, &res);
#else
g_assert_not_reached ();
#endif
}
gboolean
mono_threads_platform_yield (void)
{
return TRUE;
}
void
mono_threads_platform_get_stack_bounds (guint8 **staddr, size_t *stsize)
{
int tmp;
#ifdef __EMSCRIPTEN_PTHREADS__
pthread_attr_t attr;
gint res;
*staddr = NULL;
*stsize = (size_t)-1;
res = pthread_attr_init (&attr);
if (G_UNLIKELY (res != 0))
g_error ("%s: pthread_attr_init failed with \"%s\" (%d)", __func__, g_strerror (res), res);
res = pthread_getattr_np (pthread_self (), &attr);
if (G_UNLIKELY (res != 0))
g_error ("%s: pthread_getattr_np failed with \"%s\" (%d)", __func__, g_strerror (res), res);
res = pthread_attr_getstack (&attr, (void**)staddr, stsize);
if (G_UNLIKELY (res != 0))
g_error ("%s: pthread_attr_getstack failed with \"%s\" (%d)", __func__, g_strerror (res), res);
res = pthread_attr_destroy (&attr);
if (G_UNLIKELY (res != 0))
g_error ("%s: pthread_attr_destroy failed with \"%s\" (%d)", __func__, g_strerror (res), res);
if (*staddr == NULL) {
*staddr = (guint8*)wasm_get_stack_base ();
*stsize = wasm_get_stack_size ();
}
#else
*staddr = (guint8*)wasm_get_stack_base ();
*stsize = wasm_get_stack_size ();
#endif
#ifdef HOST_WASI
// TODO: For WASI, we need to ensure the stack is positioned correctly and reintroduce these assertions.
// Currently prototypes work anyway, even though these assertions would fail.
#else
g_assert ((guint8*)&tmp > *staddr);
g_assert ((guint8*)&tmp < (guint8*)*staddr + *stsize);
#endif
}
gboolean
mono_thread_platform_create_thread (MonoThreadStart thread_fn, gpointer thread_data, gsize* const stack_size, MonoNativeThreadId *tid)
{
#ifdef __EMSCRIPTEN_PTHREADS__
pthread_attr_t attr;
pthread_t thread;
gint res;
gsize set_stack_size;
res = pthread_attr_init (&attr);
if (res != 0)
g_error ("%s: pthread_attr_init failed, error: \"%s\" (%d)", __func__, g_strerror (res), res);
#if 0
if (stack_size)
set_stack_size = *stack_size;
else
set_stack_size = 0;
#ifdef HAVE_PTHREAD_ATTR_SETSTACKSIZE
if (set_stack_size == 0) {
#if HAVE_VALGRIND_MEMCHECK_H
if (RUNNING_ON_VALGRIND)
set_stack_size = 1 << 20;
else
set_stack_size = (SIZEOF_VOID_P / 4) * 1024 * 1024;
#else
set_stack_size = (SIZEOF_VOID_P / 4) * 1024 * 1024;
#endif
}
#ifdef PTHREAD_STACK_MIN
if (set_stack_size < PTHREAD_STACK_MIN)
set_stack_size = PTHREAD_STACK_MIN;
#endif
res = pthread_attr_setstacksize (&attr, set_stack_size);
if (res != 0)
g_error ("%s: pthread_attr_setstacksize failed, error: \"%s\" (%d)", __func__, g_strerror (res), res);
#endif /* HAVE_PTHREAD_ATTR_SETSTACKSIZE */
#endif
/* Actually start the thread */
res = pthread_create (&thread, &attr, (gpointer (*)(gpointer)) thread_fn, thread_data);
if (res) {
res = pthread_attr_destroy (&attr);
if (res != 0)
g_error ("%s: pthread_attr_destroy failed, error: \"%s\" (%d)", __func__, g_strerror (res), res);
return FALSE;
}
if (tid)
*tid = thread;
#if 0
if (stack_size) {
res = pthread_attr_getstacksize (&attr, stack_size);
if (res != 0)
g_error ("%s: pthread_attr_getstacksize failed, error: \"%s\" (%d)", __func__, g_strerror (res), res);
}
#endif
res = pthread_attr_destroy (&attr);
if (res != 0)
g_error ("%s: pthread_attr_destroy failed, error: \"%s\" (%d)", __func__, g_strerror (res), res);
return TRUE;
#elif defined(HOST_WASI)
return TRUE;
#else
g_assert_not_reached ();
#endif
}
void mono_threads_platform_init (void)
{
}
void
mono_threads_platform_exit (gsize exit_code)
{
#ifdef __EMSCRIPTEN_PTHREADS__
pthread_exit ((gpointer) exit_code);
#else
g_assert_not_reached ();
#endif
}
gboolean
mono_threads_platform_in_critical_region (THREAD_INFO_TYPE *info)
{
return FALSE;
}
void
mono_memory_barrier_process_wide (void)
{
}
#ifdef HOST_BROWSER
G_EXTERN_C
extern void schedule_background_exec (void);
static GSList *jobs;
void
mono_threads_schedule_background_job (background_job_cb cb)
{
if (!jobs)
schedule_background_exec ();
if (!g_slist_find (jobs, (gconstpointer)cb))
jobs = g_slist_prepend (jobs, (gpointer)cb);
}
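/* Usage sketch (illustrative): a callback registered here, e.g.
 *   mono_threads_schedule_background_job (flush_finalizers);
 * is queued at most once and runs the next time the host JS event loop
 * invokes mono_background_exec below; flush_finalizers is a hypothetical
 * background_job_cb. */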
G_EXTERN_C
EMSCRIPTEN_KEEPALIVE void
mono_background_exec (void);
G_EXTERN_C
EMSCRIPTEN_KEEPALIVE void
mono_background_exec (void)
{
GSList *j = jobs, *cur;
jobs = NULL;
for (cur = j; cur; cur = cur->next) {
background_job_cb cb = (background_job_cb)cur->data;
cb ();
}
g_slist_free (j);
}
#endif /* HOST_BROWSER */
#else
MONO_EMPTY_SOURCE_FILE (mono_threads_wasm);
#endif
| #include "config.h"
#include <mono/utils/mono-threads.h>
#include <mono/utils/mono-mmap.h>
#include <mono/utils/mono-compiler.h>
#if defined (USE_WASM_BACKEND)
#include <mono/utils/mono-threads.h>
#include <mono/utils/mono-mmap.h>
#include <glib.h>
#ifdef HOST_BROWSER
#include <emscripten.h>
#include <emscripten/stack.h>
#define round_down(addr, val) ((void*)((addr) & ~((val) - 1)))
EMSCRIPTEN_KEEPALIVE
static int
wasm_get_stack_base (void)
{
return emscripten_stack_get_end ();
}
EMSCRIPTEN_KEEPALIVE
static int
wasm_get_stack_size (void)
{
return (guint8*)emscripten_stack_get_base () - (guint8*)emscripten_stack_get_end ();
}
#else /* WASI */
static int
wasm_get_stack_base (void)
{
// TODO: For WASI, we need to ensure the stack location makes sense and won't interfere with the heap.
// Currently these hardcoded values are sufficient for a working prototype. It's an arbitrary nonzero
// value that aligns to 32 bits.
return 4;
}
static int
wasm_get_stack_size (void)
{
// TODO: For WASI, we need to ensure the stack location makes sense and won't interfere with the heap.
// Currently these hardcoded values are sufficient for a working prototype. It's an arbitrary nonzero
// value that aligns to 32 bits.
return 4;
}
#endif
int
mono_thread_info_get_system_max_stack_size (void)
{
return wasm_get_stack_size ();
}
void
mono_threads_suspend_init_signals (void)
{
}
void
mono_threads_suspend_init (void)
{
}
void
mono_threads_suspend_register (MonoThreadInfo *info)
{
}
gboolean
mono_threads_suspend_begin_async_resume (MonoThreadInfo *info)
{
return TRUE;
}
void
mono_threads_suspend_free (MonoThreadInfo *info)
{
}
gboolean
mono_threads_suspend_begin_async_suspend (MonoThreadInfo *info, gboolean interrupt_kernel)
{
return TRUE;
}
gboolean
mono_threads_suspend_check_suspend_result (MonoThreadInfo *info)
{
return TRUE;
}
void
mono_threads_suspend_abort_syscall (MonoThreadInfo *info)
{
}
gboolean
mono_native_thread_id_equals (MonoNativeThreadId id1, MonoNativeThreadId id2)
{
return id1 == id2;
}
MonoNativeThreadId
mono_native_thread_id_get (void)
{
#ifdef __EMSCRIPTEN_PTHREADS__
return pthread_self ();
#else
return (MonoNativeThreadId)1;
#endif
}
guint64
mono_native_thread_os_id_get (void)
{
#ifdef __EMSCRIPTEN_PTHREADS__
return (guint64)pthread_self ();
#else
return 1;
#endif
}
gint32
mono_native_thread_processor_id_get (void)
{
return -1;
}
MONO_API gboolean
mono_native_thread_create (MonoNativeThreadId *tid, gpointer func, gpointer arg)
{
g_error ("WASM doesn't support threading");
}
static const char *thread_name;
void
mono_native_thread_set_name (MonoNativeThreadId tid, const char *name)
{
thread_name = g_strdup (name);
}
gboolean
mono_native_thread_join (MonoNativeThreadId tid)
{
#ifdef __EMSCRIPTEN_PTHREADS__
void *res;
return !pthread_join (tid, &res);
#else
g_assert_not_reached ();
#endif
}
gboolean
mono_threads_platform_yield (void)
{
return TRUE;
}
void
mono_threads_platform_get_stack_bounds (guint8 **staddr, size_t *stsize)
{
int tmp;
#ifdef __EMSCRIPTEN_PTHREADS__
pthread_attr_t attr;
gint res;
*staddr = NULL;
*stsize = (size_t)-1;
res = pthread_attr_init (&attr);
if (G_UNLIKELY (res != 0))
g_error ("%s: pthread_attr_init failed with \"%s\" (%d)", __func__, g_strerror (res), res);
res = pthread_getattr_np (pthread_self (), &attr);
if (G_UNLIKELY (res != 0))
g_error ("%s: pthread_getattr_np failed with \"%s\" (%d)", __func__, g_strerror (res), res);
res = pthread_attr_getstack (&attr, (void**)staddr, stsize);
if (G_UNLIKELY (res != 0))
g_error ("%s: pthread_attr_getstack failed with \"%s\" (%d)", __func__, g_strerror (res), res);
res = pthread_attr_destroy (&attr);
if (G_UNLIKELY (res != 0))
g_error ("%s: pthread_attr_destroy failed with \"%s\" (%d)", __func__, g_strerror (res), res);
if (*staddr == NULL) {
*staddr = (guint8*)wasm_get_stack_base ();
*stsize = wasm_get_stack_size ();
}
#else
*staddr = (guint8*)wasm_get_stack_base ();
*stsize = wasm_get_stack_size ();
#endif
#ifdef HOST_WASI
// TODO: For WASI, we need to ensure the stack is positioned correctly and reintroduce these assertions.
// Currently prototypes work anyway, even though these assertions would fail.
#else
g_assert ((guint8*)&tmp > *staddr);
g_assert ((guint8*)&tmp < (guint8*)*staddr + *stsize);
#endif
}
gboolean
mono_thread_platform_create_thread (MonoThreadStart thread_fn, gpointer thread_data, gsize* const stack_size, MonoNativeThreadId *tid)
{
#ifdef __EMSCRIPTEN_PTHREADS__
pthread_attr_t attr;
pthread_t thread;
gint res;
gsize set_stack_size;
res = pthread_attr_init (&attr);
if (res != 0)
g_error ("%s: pthread_attr_init failed, error: \"%s\" (%d)", __func__, g_strerror (res), res);
#if 0
if (stack_size)
set_stack_size = *stack_size;
else
set_stack_size = 0;
#ifdef HAVE_PTHREAD_ATTR_SETSTACKSIZE
if (set_stack_size == 0) {
#if HAVE_VALGRIND_MEMCHECK_H
if (RUNNING_ON_VALGRIND)
set_stack_size = 1 << 20;
else
set_stack_size = (SIZEOF_VOID_P / 4) * 1024 * 1024;
#else
set_stack_size = (SIZEOF_VOID_P / 4) * 1024 * 1024;
#endif
}
#ifdef PTHREAD_STACK_MIN
if (set_stack_size < PTHREAD_STACK_MIN)
set_stack_size = PTHREAD_STACK_MIN;
#endif
res = pthread_attr_setstacksize (&attr, set_stack_size);
if (res != 0)
g_error ("%s: pthread_attr_setstacksize failed, error: \"%s\" (%d)", __func__, g_strerror (res), res);
#endif /* HAVE_PTHREAD_ATTR_SETSTACKSIZE */
#endif
/* Actually start the thread */
res = pthread_create (&thread, &attr, (gpointer (*)(gpointer)) thread_fn, thread_data);
if (res) {
res = pthread_attr_destroy (&attr);
if (res != 0)
g_error ("%s: pthread_attr_destroy failed, error: \"%s\" (%d)", __func__, g_strerror (res), res);
return FALSE;
}
if (tid)
*tid = thread;
#if 0
if (stack_size) {
res = pthread_attr_getstacksize (&attr, stack_size);
if (res != 0)
g_error ("%s: pthread_attr_getstacksize failed, error: \"%s\" (%d)", __func__, g_strerror (res), res);
}
#endif
res = pthread_attr_destroy (&attr);
if (res != 0)
g_error ("%s: pthread_attr_destroy failed, error: \"%s\" (%d)", __func__, g_strerror (res), res);
return TRUE;
#elif defined(HOST_WASI)
return TRUE;
#else
g_assert_not_reached ();
#endif
}
void mono_threads_platform_init (void)
{
}
void
mono_threads_platform_exit (gsize exit_code)
{
#ifdef __EMSCRIPTEN_PTHREADS__
pthread_exit ((gpointer) exit_code);
#else
g_assert_not_reached ();
#endif
}
gboolean
mono_threads_platform_in_critical_region (THREAD_INFO_TYPE *info)
{
return FALSE;
}
void
mono_memory_barrier_process_wide (void)
{
}
#ifdef HOST_BROWSER
G_EXTERN_C
extern void schedule_background_exec (void);
static GSList *jobs;
void
mono_threads_schedule_background_job (background_job_cb cb)
{
if (!jobs)
schedule_background_exec ();
if (!g_slist_find (jobs, (gconstpointer)cb))
jobs = g_slist_prepend (jobs, (gpointer)cb);
}
G_EXTERN_C
EMSCRIPTEN_KEEPALIVE void
mono_background_exec (void);
G_EXTERN_C
EMSCRIPTEN_KEEPALIVE void
mono_background_exec (void)
{
GSList *j = jobs, *cur;
jobs = NULL;
for (cur = j; cur; cur = cur->next) {
background_job_cb cb = (background_job_cb)cur->data;
cb ();
}
g_slist_free (j);
}
#endif /* HOST_BROWSER */
#else
MONO_EMPTY_SOURCE_FILE (mono_threads_wasm);
#endif
| -1 |
dotnet/runtime | 66,006 | Revert "[mono][jit] Remove support for -O=-float32, i.e. treating r4 values a…" | Reverts this as x86 still uses it.
| vargaz | 2022-03-01T15:15:53Z | 2022-03-01T20:09:47Z | af71404d9a3b38e63408af47cc847ac1a1fcf4e1 | 2dd232a53c38ac874b15fe504df275b660988294 | Revert "[mono][jit] Remove support for -O=-float32, i.e. treating r4 values a…". Reverts this as x86 still uses it.
| ./src/native/eventpipe/ep-event.c | #include "ep-rt-config.h"
#ifdef ENABLE_PERFTRACING
#if !defined(EP_INCLUDE_SOURCE_FILES) || defined(EP_FORCE_INCLUDE_SOURCE_FILES)
#define EP_IMPL_EVENT_GETTER_SETTER
#include "ep-event.h"
#include "ep-metadata-generator.h"
/*
* Forward declares of all static functions.
*/
static
void
event_build_minimum_metadata (
EventPipeEvent *ep_event,
uint8_t **metadata,
uint32_t *metadata_len);
/*
* EventPipeEvent.
*/
static
void
event_build_minimum_metadata (
EventPipeEvent *ep_event,
uint8_t **metadata,
uint32_t *metadata_len)
{
EP_ASSERT (ep_event != NULL);
EP_ASSERT (metadata != NULL);
EP_ASSERT (metadata_len != NULL);
size_t output_len = 0;
ep_char16_t empty_string [1] = { 0 };
*metadata = ep_metadata_generator_generate_event_metadata (
ep_event->event_id,
empty_string,
ep_event->keywords,
ep_event->event_version,
ep_event->level,
0,
NULL,
0,
&output_len);
*metadata_len = (uint32_t)output_len;
}
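/* The minimum metadata built above carries only the event id, an empty name,
 * the keywords, version, and level, with zero parameters -- just enough to
 * satisfy the serialization contract mentioned in ep_event_alloc below. */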
EventPipeEvent *
ep_event_alloc (
EventPipeProvider *provider,
uint64_t keywords,
uint32_t event_id,
uint32_t event_version,
EventPipeEventLevel level,
bool need_stack,
const uint8_t *metadata,
uint32_t metadata_len)
{
EP_ASSERT (provider != NULL);
EventPipeEvent *instance = ep_rt_object_alloc (EventPipeEvent);
ep_raise_error_if_nok (instance != NULL);
instance->provider = provider;
instance->keywords = keywords;
instance->event_id = event_id;
instance->event_version = event_version;
instance->level = level;
instance->need_stack = need_stack;
instance->enabled_mask = 0;
if (metadata != NULL) {
instance->metadata = ep_rt_byte_array_alloc (metadata_len);
ep_raise_error_if_nok (instance->metadata != NULL);
memcpy (instance->metadata, metadata, metadata_len);
instance->metadata_len = metadata_len;
} else {
// if metadata is not provided, we have to build the minimum version. It's required by the serialization contract.
event_build_minimum_metadata (instance, &(instance->metadata), &(instance->metadata_len));
}
ep_on_exit:
return instance;
ep_on_error:
ep_event_free (instance);
instance = NULL;
ep_exit_error_handler ();
}
void
ep_event_free (EventPipeEvent *ep_event)
{
ep_return_void_if_nok (ep_event != NULL);
ep_rt_byte_array_free (ep_event->metadata);
ep_rt_object_free (ep_event);
}
#endif /* !defined(EP_INCLUDE_SOURCE_FILES) || defined(EP_FORCE_INCLUDE_SOURCE_FILES) */
#endif /* ENABLE_PERFTRACING */
#ifndef EP_INCLUDE_SOURCE_FILES
extern const char quiet_linker_empty_file_warning_eventpipe_event_internals;
const char quiet_linker_empty_file_warning_eventpipe_event_internals = 0;
#endif
| #include "ep-rt-config.h"
#ifdef ENABLE_PERFTRACING
#if !defined(EP_INCLUDE_SOURCE_FILES) || defined(EP_FORCE_INCLUDE_SOURCE_FILES)
#define EP_IMPL_EVENT_GETTER_SETTER
#include "ep-event.h"
#include "ep-metadata-generator.h"
/*
* Forward declares of all static functions.
*/
static
void
event_build_minimum_metadata (
EventPipeEvent *ep_event,
uint8_t **metadata,
uint32_t *metadata_len);
/*
* EventPipeEvent.
*/
static
void
event_build_minimum_metadata (
EventPipeEvent *ep_event,
uint8_t **metadata,
uint32_t *metadata_len)
{
EP_ASSERT (ep_event != NULL);
EP_ASSERT (metadata != NULL);
EP_ASSERT (metadata_len != NULL);
size_t output_len = 0;
ep_char16_t empty_string [1] = { 0 };
*metadata = ep_metadata_generator_generate_event_metadata (
ep_event->event_id,
empty_string,
ep_event->keywords,
ep_event->event_version,
ep_event->level,
0,
NULL,
0,
&output_len);
*metadata_len = (uint32_t)output_len;
}
EventPipeEvent *
ep_event_alloc (
EventPipeProvider *provider,
uint64_t keywords,
uint32_t event_id,
uint32_t event_version,
EventPipeEventLevel level,
bool need_stack,
const uint8_t *metadata,
uint32_t metadata_len)
{
EP_ASSERT (provider != NULL);
EventPipeEvent *instance = ep_rt_object_alloc (EventPipeEvent);
ep_raise_error_if_nok (instance != NULL);
instance->provider = provider;
instance->keywords = keywords;
instance->event_id = event_id;
instance->event_version = event_version;
instance->level = level;
instance->need_stack = need_stack;
instance->enabled_mask = 0;
if (metadata != NULL) {
instance->metadata = ep_rt_byte_array_alloc (metadata_len);
ep_raise_error_if_nok (instance->metadata != NULL);
memcpy (instance->metadata, metadata, metadata_len);
instance->metadata_len = metadata_len;
} else {
// if metadata is not provided, we have to build the minimum version. It's required by the serialization contract.
event_build_minimum_metadata (instance, &(instance->metadata), &(instance->metadata_len));
}
ep_on_exit:
return instance;
ep_on_error:
ep_event_free (instance);
instance = NULL;
ep_exit_error_handler ();
}
void
ep_event_free (EventPipeEvent *ep_event)
{
ep_return_void_if_nok (ep_event != NULL);
ep_rt_byte_array_free (ep_event->metadata);
ep_rt_object_free (ep_event);
}
#endif /* !defined(EP_INCLUDE_SOURCE_FILES) || defined(EP_FORCE_INCLUDE_SOURCE_FILES) */
#endif /* ENABLE_PERFTRACING */
#ifndef EP_INCLUDE_SOURCE_FILES
extern const char quiet_linker_empty_file_warning_eventpipe_event_internals;
const char quiet_linker_empty_file_warning_eventpipe_event_internals = 0;
#endif
| -1 |
dotnet/runtime | 66,006 | Revert "[mono][jit] Remove support for -O=-float32, i.e. treating r4 values a…" | Reverts this as x86 still uses it.
| vargaz | 2022-03-01T15:15:53Z | 2022-03-01T20:09:47Z | af71404d9a3b38e63408af47cc847ac1a1fcf4e1 | 2dd232a53c38ac874b15fe504df275b660988294 | Revert "[mono][jit] Remove support for -O=-float32, i.e. treating r4 values a…". Reverts this as x86 still uses it.
| ./src/mono/mono/utils/freebsd-dwarf.h | /*-
* Copyright (c) 2007 John Birrell ([email protected])
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD: src/lib/libdwarf/dwarf.h,v 1.1.2.1 2008/08/27 04:41:15 jb Exp $
*/
#ifndef _DWARF_H_
#define _DWARF_H_
#define DW_TAG_array_type 0x01
#define DW_TAG_class_type 0x02
#define DW_TAG_entry_point 0x03
#define DW_TAG_enumeration_type 0x04
#define DW_TAG_formal_parameter 0x05
#define DW_TAG_imported_declaration 0x08
#define DW_TAG_label 0x0a
#define DW_TAG_lexical_block 0x0b
#define DW_TAG_member 0x0d
#define DW_TAG_pointer_type 0x0f
#define DW_TAG_reference_type 0x10
#define DW_TAG_compile_unit 0x11
#define DW_TAG_string_type 0x12
#define DW_TAG_structure_type 0x13
#define DW_TAG_subroutine_type 0x15
#define DW_TAG_typedef 0x16
#define DW_TAG_union_type 0x17
#define DW_TAG_unspecified_parameters 0x18
#define DW_TAG_variant 0x19
#define DW_TAG_common_block 0x1a
#define DW_TAG_common_inclusion 0x1b
#define DW_TAG_inheritance 0x1c
#define DW_TAG_inlined_subroutine 0x1d
#define DW_TAG_module 0x1e
#define DW_TAG_ptr_to_member_type 0x1f
#define DW_TAG_set_type 0x20
#define DW_TAG_subrange_type 0x21
#define DW_TAG_with_stmt 0x22
#define DW_TAG_access_declaration 0x23
#define DW_TAG_base_type 0x24
#define DW_TAG_catch_block 0x25
#define DW_TAG_const_type 0x26
#define DW_TAG_constant 0x27
#define DW_TAG_enumerator 0x28
#define DW_TAG_friend 0x2a
#define DW_TAG_namelist 0x2b
#define DW_TAG_namelist_item 0x2c
#define DW_TAG_packed_type 0x2d
#define DW_TAG_subprogram 0x2e
#define DW_TAG_template_type_parameter 0x2f
#define DW_TAG_template_type_param 0x2f
#define DW_TAG_template_value_parameter 0x30
#define DW_TAG_template_value_param 0x30
#define DW_TAG_thrown_type 0x31
#define DW_TAG_try_block 0x32
#define DW_TAG_variant_part 0x33
#define DW_TAG_variable 0x34
#define DW_TAG_volatile_type 0x35
#define DW_TAG_dwarf_procedure 0x36
#define DW_TAG_restrict_type 0x37
#define DW_TAG_interface_type 0x38
#define DW_TAG_namespace 0x39
#define DW_TAG_imported_module 0x3a
#define DW_TAG_unspecified_type 0x3b
#define DW_TAG_partial_unit 0x3c
#define DW_TAG_imported_unit 0x3d
#define DW_TAG_condition 0x3f
#define DW_TAG_shared_type 0x40
#define DW_TAG_lo_user 0x4080
#define DW_TAG_hi_user 0xffff
#define DW_CHILDREN_no 0x00
#define DW_CHILDREN_yes 0x01
#define DW_AT_sibling 0x01
#define DW_AT_location 0x02
#define DW_AT_name 0x03
#define DW_AT_ordering 0x09
#define DW_AT_subscr_data 0x0a
#define DW_AT_byte_size 0x0b
#define DW_AT_bit_offset 0x0c
#define DW_AT_bit_size 0x0d
#define DW_AT_element_list 0x0f
#define DW_AT_stmt_list 0x10
#define DW_AT_low_pc 0x11
#define DW_AT_high_pc 0x12
#define DW_AT_language 0x13
#define DW_AT_member 0x14
#define DW_AT_discr 0x15
#define DW_AT_discr_value 0x16
#define DW_AT_visibility 0x17
#define DW_AT_import 0x18
#define DW_AT_string_length 0x19
#define DW_AT_common_reference 0x1a
#define DW_AT_comp_dir 0x1b
#define DW_AT_const_value 0x1c
#define DW_AT_containing_type 0x1d
#define DW_AT_default_value 0x1e
#define DW_AT_inline 0x20
#define DW_AT_is_optional 0x21
#define DW_AT_lower_bound 0x22
#define DW_AT_producer 0x25
#define DW_AT_prototyped 0x27
#define DW_AT_return_addr 0x2a
#define DW_AT_start_scope 0x2c
#define DW_AT_bit_stride 0x2e
#define DW_AT_stride_size 0x2e
#define DW_AT_upper_bound 0x2f
#define DW_AT_abstract_origin 0x31
#define DW_AT_accessibility 0x32
#define DW_AT_address_class 0x33
#define DW_AT_artificial 0x34
#define DW_AT_base_types 0x35
#define DW_AT_calling_convention 0x36
#define DW_AT_count 0x37
#define DW_AT_data_member_location 0x38
#define DW_AT_decl_column 0x39
#define DW_AT_decl_file 0x3a
#define DW_AT_decl_line 0x3b
#define DW_AT_declaration 0x3c
#define DW_AT_discr_list 0x3d
#define DW_AT_encoding 0x3e
#define DW_AT_external 0x3f
#define DW_AT_frame_base 0x40
#define DW_AT_friend 0x41
#define DW_AT_identifier_case 0x42
#define DW_AT_macro_info 0x43
#define DW_AT_namelist_item 0x44
#define DW_AT_priority 0x45
#define DW_AT_segment 0x46
#define DW_AT_specification 0x47
#define DW_AT_static_link 0x48
#define DW_AT_type 0x49
#define DW_AT_use_location 0x4a
#define DW_AT_variable_parameter 0x4b
#define DW_AT_virtuality 0x4c
#define DW_AT_vtable_elem_location 0x4d
#define DW_AT_description 0x5a
#define DW_AT_lo_user 0x2000
#define DW_AT_hi_user 0x3fff
#define DW_FORM_addr 0x01
#define DW_FORM_block2 0x03
#define DW_FORM_block4 0x04
#define DW_FORM_data2 0x05
#define DW_FORM_data4 0x06
#define DW_FORM_data8 0x07
#define DW_FORM_string 0x08
#define DW_FORM_block 0x09
#define DW_FORM_block1 0x0a
#define DW_FORM_data1 0x0b
#define DW_FORM_flag 0x0c
#define DW_FORM_sdata 0x0d
#define DW_FORM_strp 0x0e
#define DW_FORM_udata 0x0f
#define DW_FORM_ref_addr 0x10
#define DW_FORM_ref1 0x11
#define DW_FORM_ref2 0x12
#define DW_FORM_ref4 0x13
#define DW_FORM_ref8 0x14
#define DW_FORM_ref_udata 0x15
#define DW_FORM_indirect 0x16
#define DW_OP_addr 0x03
#define DW_OP_deref 0x06
#define DW_OP_const1u 0x08
#define DW_OP_const1s 0x09
#define DW_OP_const2u 0x0a
#define DW_OP_const2s 0x0b
#define DW_OP_const4u 0x0c
#define DW_OP_const4s 0x0d
#define DW_OP_const8u 0x0e
#define DW_OP_const8s 0x0f
#define DW_OP_constu 0x10
#define DW_OP_consts 0x11
#define DW_OP_dup 0x12
#define DW_OP_drop 0x13
#define DW_OP_over 0x14
#define DW_OP_pick 0x15
#define DW_OP_swap 0x16
#define DW_OP_rot 0x17
#define DW_OP_xderef 0x18
#define DW_OP_abs 0x19
#define DW_OP_and 0x1a
#define DW_OP_div 0x1b
#define DW_OP_minus 0x1c
#define DW_OP_mod 0x1d
#define DW_OP_mul 0x1e
#define DW_OP_neg 0x1f
#define DW_OP_not 0x20
#define DW_OP_or 0x21
#define DW_OP_plus 0x22
#define DW_OP_plus_uconst 0x23
#define DW_OP_shl 0x24
#define DW_OP_shr 0x25
#define DW_OP_shra 0x26
#define DW_OP_xor 0x27
#define DW_OP_bra 0x28
#define DW_OP_eq 0x29
#define DW_OP_ge 0x2a
#define DW_OP_gt 0x2b
#define DW_OP_le 0x2c
#define DW_OP_lt 0x2d
#define DW_OP_ne 0x2e
#define DW_OP_skip 0x2f
#define DW_OP_lit0 0x30
#define DW_OP_lit1 0x31
#define DW_OP_lit2 0x32
#define DW_OP_lit3 0x33
#define DW_OP_lit4 0x34
#define DW_OP_lit5 0x35
#define DW_OP_lit6 0x36
#define DW_OP_lit7 0x37
#define DW_OP_lit8 0x38
#define DW_OP_lit9 0x39
#define DW_OP_lit10 0x3a
#define DW_OP_lit11 0x3b
#define DW_OP_lit12 0x3c
#define DW_OP_lit13 0x3d
#define DW_OP_lit14 0x3e
#define DW_OP_lit15 0x3f
#define DW_OP_lit16 0x40
#define DW_OP_lit17 0x41
#define DW_OP_lit18 0x42
#define DW_OP_lit19 0x43
#define DW_OP_lit20 0x44
#define DW_OP_lit21 0x45
#define DW_OP_lit22 0x46
#define DW_OP_lit23 0x47
#define DW_OP_lit24 0x48
#define DW_OP_lit25 0x49
#define DW_OP_lit26 0x4a
#define DW_OP_lit27 0x4b
#define DW_OP_lit28 0x4c
#define DW_OP_lit29 0x4d
#define DW_OP_lit30 0x4e
#define DW_OP_lit31 0x4f
#define DW_OP_reg0 0x50
#define DW_OP_reg1 0x51
#define DW_OP_reg2 0x52
#define DW_OP_reg3 0x53
#define DW_OP_reg4 0x54
#define DW_OP_reg5 0x55
#define DW_OP_reg6 0x56
#define DW_OP_reg7 0x57
#define DW_OP_reg8 0x58
#define DW_OP_reg9 0x59
#define DW_OP_reg10 0x5a
#define DW_OP_reg11 0x5b
#define DW_OP_reg12 0x5c
#define DW_OP_reg13 0x5d
#define DW_OP_reg14 0x5e
#define DW_OP_reg15 0x5f
#define DW_OP_reg16 0x60
#define DW_OP_reg17 0x61
#define DW_OP_reg18 0x62
#define DW_OP_reg19 0x63
#define DW_OP_reg20 0x64
#define DW_OP_reg21 0x65
#define DW_OP_reg22 0x66
#define DW_OP_reg23 0x67
#define DW_OP_reg24 0x68
#define DW_OP_reg25 0x69
#define DW_OP_reg26 0x6a
#define DW_OP_reg27 0x6b
#define DW_OP_reg28 0x6c
#define DW_OP_reg29 0x6d
#define DW_OP_reg30 0x6e
#define DW_OP_reg31 0x6f
#define DW_OP_breg0 0x70
#define DW_OP_breg1 0x71
#define DW_OP_breg2 0x72
#define DW_OP_breg3 0x73
#define DW_OP_breg4 0x74
#define DW_OP_breg5 0x75
#define DW_OP_breg6 0x76
#define DW_OP_breg7 0x77
#define DW_OP_breg8 0x78
#define DW_OP_breg9 0x79
#define DW_OP_breg10 0x7a
#define DW_OP_breg11 0x7b
#define DW_OP_breg12 0x7c
#define DW_OP_breg13 0x7d
#define DW_OP_breg14 0x7e
#define DW_OP_breg15 0x7f
#define DW_OP_breg16 0x80
#define DW_OP_breg17 0x81
#define DW_OP_breg18 0x82
#define DW_OP_breg19 0x83
#define DW_OP_breg20 0x84
#define DW_OP_breg21 0x85
#define DW_OP_breg22 0x86
#define DW_OP_breg23 0x87
#define DW_OP_breg24 0x88
#define DW_OP_breg25 0x89
#define DW_OP_breg26 0x8a
#define DW_OP_breg27 0x8b
#define DW_OP_breg28 0x8c
#define DW_OP_breg29 0x8d
#define DW_OP_breg30 0x8e
#define DW_OP_breg31 0x8f
#define DW_OP_regx 0x90
#define DW_OP_fbreg 0x91
#define DW_OP_bregx 0x92
#define DW_OP_piece 0x93
#define DW_OP_deref_size 0x94
#define DW_OP_xderef_size 0x95
#define DW_OP_nop 0x96
#define DW_OP_lo_user 0xe0
#define DW_OP_hi_user 0xff
#define DW_ATE_address 0x1
#define DW_ATE_boolean 0x2
#define DW_ATE_complex_float 0x3
#define DW_ATE_float 0x4
#define DW_ATE_signed 0x5
#define DW_ATE_signed_char 0x6
#define DW_ATE_unsigned 0x7
#define DW_ATE_unsigned_char 0x8
#define DW_ATE_imaginary_float 0x9
#define DW_ATE_packed_decimal 0xa
#define DW_ATE_numeric_string 0xb
#define DW_ATE_edited 0xc
#define DW_ATE_signed_fixed 0xd
#define DW_ATE_unsigned_fixed 0xe
#define DW_ATE_decimal_float 0xf
#define DW_ATE_lo_user 0x80
#define DW_ATE_hi_user 0xff
#define DW_ACCESS_public 0x01
#define DW_ACCESS_protected 0x02
#define DW_ACCESS_private 0x03
#define DW_VIS_local 0x01
#define DW_VIS_exported 0x02
#define DW_VIS_qualified 0x03
#define DW_VIRTUALITY_none 0x00
#define DW_VIRTUALITY_virtual 0x01
#define DW_VIRTUALITY_pure_virtual 0x02
#define DW_LANG_C89 0x0001
#define DW_LANG_C 0x0002
#define DW_LANG_Ada83 0x0003
#define DW_LANG_C_plus_plus 0x0004
#define DW_LANG_Cobol74 0x0005
#define DW_LANG_Cobol85 0x0006
#define DW_LANG_Fortran77 0x0007
#define DW_LANG_Fortran90 0x0008
#define DW_LANG_Pascal83 0x0009
#define DW_LANG_Modula2 0x000a
#define DW_LANG_Java 0x000b
#define DW_LANG_C99 0x000c
#define DW_LANG_Ada95 0x000d
#define DW_LANG_Fortran95 0x000e
#define DW_LANG_PLI 0x000f
#define DW_LANG_ObjC 0x0010
#define DW_LANG_ObjC_plus_plus 0x0011
#define DW_LANG_UPC 0x0012
#define DW_LANG_D 0x0013
#define DW_LANG_lo_user 0x8000
#define DW_LANG_hi_user 0xffff
#define DW_ID_case_sensitive 0x00
#define DW_ID_up_case 0x01
#define DW_ID_down_case 0x02
#define DW_ID_case_insensitive 0x03
#define DW_CC_normal 0x01
#define DW_CC_program 0x02
#define DW_CC_nocall 0x03
#define DW_CC_lo_user 0x40
#define DW_CC_hi_user 0xff
#define DW_INL_not_inlined 0x00
#define DW_INL_inlined 0x01
#define DW_INL_declared_not_inlined 0x02
#define DW_INL_declared_inlined 0x03
#define DW_ORD_row_major 0x00
#define DW_ORD_col_major 0x01
#define DW_DSC_label 0x00
#define DW_DSC_range 0x01
#define DW_LNS_copy 0x01
#define DW_LNS_advance_pc 0x02
#define DW_LNS_advance_line 0x03
#define DW_LNS_set_file 0x04
#define DW_LNS_set_column 0x05
#define DW_LNS_negate_stmt 0x06
#define DW_LNS_set_basic_block 0x07
#define DW_LNS_const_add_pc 0x08
#define DW_LNS_fixed_advance_pc 0x09
#define DW_LNS_set_prologue_end 0x0a
#define DW_LNS_set_epilogue_begin 0x0b
#define DW_LNS_set_isa 0x0c
#define DW_LNE_end_sequence 0x01
#define DW_LNE_set_address 0x02
#define DW_LNE_define_file 0x03
#define DW_LNE_lo_user 0x80
#define DW_LNE_hi_user 0xff
#define DW_MACINFO_define 0x01
#define DW_MACINFO_undef 0x02
#define DW_MACINFO_start_file 0x03
#define DW_MACINFO_end_file 0x04
#define DW_MACINFO_vendor_ext 0xff
#define DW_CFA_advance_loc 0x40
#define DW_CFA_offset 0x80
#define DW_CFA_restore 0xc0
#define DW_CFA_extended 0
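/* Note: DW_CFA_advance_loc, DW_CFA_offset and DW_CFA_restore are "primary"
   opcodes: the constant occupies the high two bits of the opcode byte and the
   low six bits carry the operand (e.g. 0x45 decodes as DW_CFA_advance_loc
   with delta 5); the remaining DW_CFA_* opcodes use the whole byte. */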
#define DW_CFA_nop 0x00
#define DW_CFA_set_loc 0x01
#define DW_CFA_advance_loc1 0x02
#define DW_CFA_advance_loc2 0x03
#define DW_CFA_advance_loc4 0x04
#define DW_CFA_offset_extended 0x05
#define DW_CFA_restore_extended 0x06
#define DW_CFA_undefined 0x07
#define DW_CFA_same_value 0x08
#define DW_CFA_register 0x09
#define DW_CFA_remember_state 0x0a
#define DW_CFA_restore_state 0x0b
#define DW_CFA_def_cfa 0x0c
#define DW_CFA_def_cfa_register 0x0d
#define DW_CFA_def_cfa_offset 0x0e
#define DW_CFA_def_cfa_expression 0x0f
#define DW_CFA_expression 0x10
#define DW_CFA_cfa_offset_extended_sf 0x11
#define DW_CFA_def_cfa_sf 0x12
#define DW_CFA_def_cfa_offset_sf 0x13
#define DW_CFA_val_offset 0x14
#define DW_CFA_val_offset_sf 0x15
#define DW_CFA_val_expression 0x16
#define DW_CFA_lo_user 0x1c
#define DW_CFA_high_user 0x3f
#endif /* !_DWARF_H_ */
| /*-
* Copyright (c) 2007 John Birrell ([email protected])
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD: src/lib/libdwarf/dwarf.h,v 1.1.2.1 2008/08/27 04:41:15 jb Exp $
*/
#ifndef _DWARF_H_
#define _DWARF_H_
#define DW_TAG_array_type 0x01
#define DW_TAG_class_type 0x02
#define DW_TAG_entry_point 0x03
#define DW_TAG_enumeration_type 0x04
#define DW_TAG_formal_parameter 0x05
#define DW_TAG_imported_declaration 0x08
#define DW_TAG_label 0x0a
#define DW_TAG_lexical_block 0x0b
#define DW_TAG_member 0x0d
#define DW_TAG_pointer_type 0x0f
#define DW_TAG_reference_type 0x10
#define DW_TAG_compile_unit 0x11
#define DW_TAG_string_type 0x12
#define DW_TAG_structure_type 0x13
#define DW_TAG_subroutine_type 0x15
#define DW_TAG_typedef 0x16
#define DW_TAG_union_type 0x17
#define DW_TAG_unspecified_parameters 0x18
#define DW_TAG_variant 0x19
#define DW_TAG_common_block 0x1a
#define DW_TAG_common_inclusion 0x1b
#define DW_TAG_inheritance 0x1c
#define DW_TAG_inlined_subroutine 0x1d
#define DW_TAG_module 0x1e
#define DW_TAG_ptr_to_member_type 0x1f
#define DW_TAG_set_type 0x20
#define DW_TAG_subrange_type 0x21
#define DW_TAG_with_stmt 0x22
#define DW_TAG_access_declaration 0x23
#define DW_TAG_base_type 0x24
#define DW_TAG_catch_block 0x25
#define DW_TAG_const_type 0x26
#define DW_TAG_constant 0x27
#define DW_TAG_enumerator 0x28
#define DW_TAG_friend 0x2a
#define DW_TAG_namelist 0x2b
#define DW_TAG_namelist_item 0x2c
#define DW_TAG_packed_type 0x2d
#define DW_TAG_subprogram 0x2e
#define DW_TAG_template_type_parameter 0x2f
#define DW_TAG_template_type_param 0x2f
#define DW_TAG_template_value_parameter 0x30
#define DW_TAG_template_value_param 0x30
#define DW_TAG_thrown_type 0x31
#define DW_TAG_try_block 0x32
#define DW_TAG_variant_part 0x33
#define DW_TAG_variable 0x34
#define DW_TAG_volatile_type 0x35
#define DW_TAG_dwarf_procedure 0x36
#define DW_TAG_restrict_type 0x37
#define DW_TAG_interface_type 0x38
#define DW_TAG_namespace 0x39
#define DW_TAG_imported_module 0x3a
#define DW_TAG_unspecified_type 0x3b
#define DW_TAG_partial_unit 0x3c
#define DW_TAG_imported_unit 0x3d
#define DW_TAG_condition 0x3f
#define DW_TAG_shared_type 0x40
#define DW_TAG_lo_user 0x4080
#define DW_TAG_hi_user 0xffff
#define DW_CHILDREN_no 0x00
#define DW_CHILDREN_yes 0x01
#define DW_AT_sibling 0x01
#define DW_AT_location 0x02
#define DW_AT_name 0x03
#define DW_AT_ordering 0x09
#define DW_AT_subscr_data 0x0a
#define DW_AT_byte_size 0x0b
#define DW_AT_bit_offset 0x0c
#define DW_AT_bit_size 0x0d
#define DW_AT_element_list 0x0f
#define DW_AT_stmt_list 0x10
#define DW_AT_low_pc 0x11
#define DW_AT_high_pc 0x12
#define DW_AT_language 0x13
#define DW_AT_member 0x14
#define DW_AT_discr 0x15
#define DW_AT_discr_value 0x16
#define DW_AT_visibility 0x17
#define DW_AT_import 0x18
#define DW_AT_string_length 0x19
#define DW_AT_common_reference 0x1a
#define DW_AT_comp_dir 0x1b
#define DW_AT_const_value 0x1c
#define DW_AT_containing_type 0x1d
#define DW_AT_default_value 0x1e
#define DW_AT_inline 0x20
#define DW_AT_is_optional 0x21
#define DW_AT_lower_bound 0x22
#define DW_AT_producer 0x25
#define DW_AT_prototyped 0x27
#define DW_AT_return_addr 0x2a
#define DW_AT_start_scope 0x2c
#define DW_AT_bit_stride 0x2e
#define DW_AT_stride_size 0x2e
#define DW_AT_upper_bound 0x2f
#define DW_AT_abstract_origin 0x31
#define DW_AT_accessibility 0x32
#define DW_AT_address_class 0x33
#define DW_AT_artificial 0x34
#define DW_AT_base_types 0x35
#define DW_AT_calling_convention 0x36
#define DW_AT_count 0x37
#define DW_AT_data_member_location 0x38
#define DW_AT_decl_column 0x39
#define DW_AT_decl_file 0x3a
#define DW_AT_decl_line 0x3b
#define DW_AT_declaration 0x3c
#define DW_AT_discr_list 0x3d
#define DW_AT_encoding 0x3e
#define DW_AT_external 0x3f
#define DW_AT_frame_base 0x40
#define DW_AT_friend 0x41
#define DW_AT_identifier_case 0x42
#define DW_AT_macro_info 0x43
#define DW_AT_namelist_item 0x44
#define DW_AT_priority 0x45
#define DW_AT_segment 0x46
#define DW_AT_specification 0x47
#define DW_AT_static_link 0x48
#define DW_AT_type 0x49
#define DW_AT_use_location 0x4a
#define DW_AT_variable_parameter 0x4b
#define DW_AT_virtuality 0x4c
#define DW_AT_vtable_elem_location 0x4d
#define DW_AT_description 0x5a
#define DW_AT_lo_user 0x2000
#define DW_AT_hi_user 0x3fff
#define DW_FORM_addr 0x01
#define DW_FORM_block2 0x03
#define DW_FORM_block4 0x04
#define DW_FORM_data2 0x05
#define DW_FORM_data4 0x06
#define DW_FORM_data8 0x07
#define DW_FORM_string 0x08
#define DW_FORM_block 0x09
#define DW_FORM_block1 0x0a
#define DW_FORM_data1 0x0b
#define DW_FORM_flag 0x0c
#define DW_FORM_sdata 0x0d
#define DW_FORM_strp 0x0e
#define DW_FORM_udata 0x0f
#define DW_FORM_ref_addr 0x10
#define DW_FORM_ref1 0x11
#define DW_FORM_ref2 0x12
#define DW_FORM_ref4 0x13
#define DW_FORM_ref8 0x14
#define DW_FORM_ref_udata 0x15
#define DW_FORM_indirect 0x16
#define DW_OP_addr 0x03
#define DW_OP_deref 0x06
#define DW_OP_const1u 0x08
#define DW_OP_const1s 0x09
#define DW_OP_const2u 0x0a
#define DW_OP_const2s 0x0b
#define DW_OP_const4u 0x0c
#define DW_OP_const4s 0x0d
#define DW_OP_const8u 0x0e
#define DW_OP_const8s 0x0f
#define DW_OP_constu 0x10
#define DW_OP_consts 0x11
#define DW_OP_dup 0x12
#define DW_OP_drop 0x13
#define DW_OP_over 0x14
#define DW_OP_pick 0x15
#define DW_OP_swap 0x16
#define DW_OP_rot 0x17
#define DW_OP_xderef 0x18
#define DW_OP_abs 0x19
#define DW_OP_and 0x1a
#define DW_OP_div 0x1b
#define DW_OP_minus 0x1c
#define DW_OP_mod 0x1d
#define DW_OP_mul 0x1e
#define DW_OP_neg 0x1f
#define DW_OP_not 0x20
#define DW_OP_or 0x21
#define DW_OP_plus 0x22
#define DW_OP_plus_uconst 0x23
#define DW_OP_shl 0x24
#define DW_OP_shr 0x25
#define DW_OP_shra 0x26
#define DW_OP_xor 0x27
#define DW_OP_bra 0x28
#define DW_OP_eq 0x29
#define DW_OP_ge 0x2a
#define DW_OP_gt 0x2b
#define DW_OP_le 0x2c
#define DW_OP_lt 0x2d
#define DW_OP_ne 0x2e
#define DW_OP_skip 0x2f
#define DW_OP_lit0 0x30
#define DW_OP_lit1 0x31
#define DW_OP_lit2 0x32
#define DW_OP_lit3 0x33
#define DW_OP_lit4 0x34
#define DW_OP_lit5 0x35
#define DW_OP_lit6 0x36
#define DW_OP_lit7 0x37
#define DW_OP_lit8 0x38
#define DW_OP_lit9 0x39
#define DW_OP_lit10 0x3a
#define DW_OP_lit11 0x3b
#define DW_OP_lit12 0x3c
#define DW_OP_lit13 0x3d
#define DW_OP_lit14 0x3e
#define DW_OP_lit15 0x3f
#define DW_OP_lit16 0x40
#define DW_OP_lit17 0x41
#define DW_OP_lit18 0x42
#define DW_OP_lit19 0x43
#define DW_OP_lit20 0x44
#define DW_OP_lit21 0x45
#define DW_OP_lit22 0x46
#define DW_OP_lit23 0x47
#define DW_OP_lit24 0x48
#define DW_OP_lit25 0x49
#define DW_OP_lit26 0x4a
#define DW_OP_lit27 0x4b
#define DW_OP_lit28 0x4c
#define DW_OP_lit29 0x4d
#define DW_OP_lit30 0x4e
#define DW_OP_lit31 0x4f
#define DW_OP_reg0 0x50
#define DW_OP_reg1 0x51
#define DW_OP_reg2 0x52
#define DW_OP_reg3 0x53
#define DW_OP_reg4 0x54
#define DW_OP_reg5 0x55
#define DW_OP_reg6 0x56
#define DW_OP_reg7 0x57
#define DW_OP_reg8 0x58
#define DW_OP_reg9 0x59
#define DW_OP_reg10 0x5a
#define DW_OP_reg11 0x5b
#define DW_OP_reg12 0x5c
#define DW_OP_reg13 0x5d
#define DW_OP_reg14 0x5e
#define DW_OP_reg15 0x5f
#define DW_OP_reg16 0x60
#define DW_OP_reg17 0x61
#define DW_OP_reg18 0x62
#define DW_OP_reg19 0x63
#define DW_OP_reg20 0x64
#define DW_OP_reg21 0x65
#define DW_OP_reg22 0x66
#define DW_OP_reg23 0x67
#define DW_OP_reg24 0x68
#define DW_OP_reg25 0x69
#define DW_OP_reg26 0x6a
#define DW_OP_reg27 0x6b
#define DW_OP_reg28 0x6c
#define DW_OP_reg29 0x6d
#define DW_OP_reg30 0x6e
#define DW_OP_reg31 0x6f
#define DW_OP_breg0 0x70
#define DW_OP_breg1 0x71
#define DW_OP_breg2 0x72
#define DW_OP_breg3 0x73
#define DW_OP_breg4 0x74
#define DW_OP_breg5 0x75
#define DW_OP_breg6 0x76
#define DW_OP_breg7 0x77
#define DW_OP_breg8 0x78
#define DW_OP_breg9 0x79
#define DW_OP_breg10 0x7a
#define DW_OP_breg11 0x7b
#define DW_OP_breg12 0x7c
#define DW_OP_breg13 0x7d
#define DW_OP_breg14 0x7e
#define DW_OP_breg15 0x7f
#define DW_OP_breg16 0x80
#define DW_OP_breg17 0x81
#define DW_OP_breg18 0x82
#define DW_OP_breg19 0x83
#define DW_OP_breg20 0x84
#define DW_OP_breg21 0x85
#define DW_OP_breg22 0x86
#define DW_OP_breg23 0x87
#define DW_OP_breg24 0x88
#define DW_OP_breg25 0x89
#define DW_OP_breg26 0x8a
#define DW_OP_breg27 0x8b
#define DW_OP_breg28 0x8c
#define DW_OP_breg29 0x8d
#define DW_OP_breg30 0x8e
#define DW_OP_breg31 0x8f
#define DW_OP_regx 0x90
#define DW_OP_fbreg 0x91
#define DW_OP_bregx 0x92
#define DW_OP_piece 0x93
#define DW_OP_deref_size 0x94
#define DW_OP_xderef_size 0x95
#define DW_OP_nop 0x96
#define DW_OP_lo_user 0xe0
#define DW_OP_hi_user 0xff
#define DW_ATE_address 0x1
#define DW_ATE_boolean 0x2
#define DW_ATE_complex_float 0x3
#define DW_ATE_float 0x4
#define DW_ATE_signed 0x5
#define DW_ATE_signed_char 0x6
#define DW_ATE_unsigned 0x7
#define DW_ATE_unsigned_char 0x8
#define DW_ATE_imaginary_float 0x9
#define DW_ATE_packed_decimal 0xa
#define DW_ATE_numeric_string 0xb
#define DW_ATE_edited 0xc
#define DW_ATE_signed_fixed 0xd
#define DW_ATE_unsigned_fixed 0xe
#define DW_ATE_decimal_float 0xf
#define DW_ATE_lo_user 0x80
#define DW_ATE_hi_user 0xff
#define DW_ACCESS_public 0x01
#define DW_ACCESS_protected 0x02
#define DW_ACCESS_private 0x03
#define DW_VIS_local 0x01
#define DW_VIS_exported 0x02
#define DW_VIS_qualified 0x03
#define DW_VIRTUALITY_none 0x00
#define DW_VIRTUALITY_virtual 0x01
#define DW_VIRTUALITY_pure_virtual 0x02
#define DW_LANG_C89 0x0001
#define DW_LANG_C 0x0002
#define DW_LANG_Ada83 0x0003
#define DW_LANG_C_plus_plus 0x0004
#define DW_LANG_Cobol74 0x0005
#define DW_LANG_Cobol85 0x0006
#define DW_LANG_Fortran77 0x0007
#define DW_LANG_Fortran90 0x0008
#define DW_LANG_Pascal83 0x0009
#define DW_LANG_Modula2 0x000a
#define DW_LANG_Java 0x000b
#define DW_LANG_C99 0x000c
#define DW_LANG_Ada95 0x000d
#define DW_LANG_Fortran95 0x000e
#define DW_LANG_PLI 0x000f
#define DW_LANG_ObjC 0x0010
#define DW_LANG_ObjC_plus_plus 0x0011
#define DW_LANG_UPC 0x0012
#define DW_LANG_D 0x0013
#define DW_LANG_lo_user 0x8000
#define DW_LANG_hi_user 0xffff
#define DW_ID_case_sensitive 0x00
#define DW_ID_up_case 0x01
#define DW_ID_down_case 0x02
#define DW_ID_case_insensitive 0x03
#define DW_CC_normal 0x01
#define DW_CC_program 0x02
#define DW_CC_nocall 0x03
#define DW_CC_lo_user 0x40
#define DW_CC_hi_user 0xff
#define DW_INL_not_inlined 0x00
#define DW_INL_inlined 0x01
#define DW_INL_declared_not_inlined 0x02
#define DW_INL_declared_inlined 0x03
#define DW_ORD_row_major 0x00
#define DW_ORD_col_major 0x01
#define DW_DSC_label 0x00
#define DW_DSC_range 0x01
#define DW_LNS_copy 0x01
#define DW_LNS_advance_pc 0x02
#define DW_LNS_advance_line 0x03
#define DW_LNS_set_file 0x04
#define DW_LNS_set_column 0x05
#define DW_LNS_negate_stmt 0x06
#define DW_LNS_set_basic_block 0x07
#define DW_LNS_const_add_pc 0x08
#define DW_LNS_fixed_advance_pc 0x09
#define DW_LNS_set_prologue_end 0x0a
#define DW_LNS_set_epilogue_begin 0x0b
#define DW_LNS_set_isa 0x0c
#define DW_LNE_end_sequence 0x01
#define DW_LNE_set_address 0x02
#define DW_LNE_define_file 0x03
#define DW_LNE_lo_user 0x80
#define DW_LNE_hi_user 0xff
#define DW_MACINFO_define 0x01
#define DW_MACINFO_undef 0x02
#define DW_MACINFO_start_file 0x03
#define DW_MACINFO_end_file 0x04
#define DW_MACINFO_vendor_ext 0xff
#define DW_CFA_advance_loc 0x40
#define DW_CFA_offset 0x80
#define DW_CFA_restore 0xc0
#define DW_CFA_extended 0
#define DW_CFA_nop 0x00
#define DW_CFA_set_loc 0x01
#define DW_CFA_advance_loc1 0x02
#define DW_CFA_advance_loc2 0x03
#define DW_CFA_advance_loc4 0x04
#define DW_CFA_offset_extended 0x05
#define DW_CFA_restore_extended 0x06
#define DW_CFA_undefined 0x07
#define DW_CFA_same_value 0x08
#define DW_CFA_register 0x09
#define DW_CFA_remember_state 0x0a
#define DW_CFA_restore_state 0x0b
#define DW_CFA_def_cfa 0x0c
#define DW_CFA_def_cfa_register 0x0d
#define DW_CFA_def_cfa_offset 0x0e
#define DW_CFA_def_cfa_expression 0x0f
#define DW_CFA_expression 0x10
#define DW_CFA_cfa_offset_extended_sf 0x11
#define DW_CFA_def_cfa_sf 0x12
#define DW_CFA_def_cfa_offset_sf 0x13
#define DW_CFA_val_offset 0x14
#define DW_CFA_val_offset_sf 0x15
#define DW_CFA_val_expression 0x16
#define DW_CFA_lo_user 0x1c
#define DW_CFA_high_user 0x3f
#endif /* !_DWARF_H_ */
| -1 |
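The DW_CFA_* block in the header above mixes two kinds of call-frame opcodes: three "primary" opcodes (DW_CFA_advance_loc 0x40, DW_CFA_offset 0x80, DW_CFA_restore 0xc0) that pack their operand into the low six bits of the opcode byte, and "extended" opcodes selected by the low bits when the high two bits are zero. A standalone C sketch (not part of the header; the helper and example bytes are illustrative) of how a consumer splits the byte:

#include <stdint.h>
#include <stdio.h>

#define DW_CFA_advance_loc 0x40
#define DW_CFA_offset      0x80
#define DW_CFA_restore     0xc0

static void describe_cfa_opcode(uint8_t op)
{
    unsigned primary = op & 0xc0;  /* high two bits select a primary opcode */
    unsigned operand = op & 0x3f;  /* low six bits carry its operand */

    switch (primary)
      {
      case DW_CFA_advance_loc:
        printf("DW_CFA_advance_loc delta=%u\n", operand);
        break;
      case DW_CFA_offset:
        printf("DW_CFA_offset reg=%u (ULEB128 factored offset follows)\n", operand);
        break;
      case DW_CFA_restore:
        printf("DW_CFA_restore reg=%u\n", operand);
        break;
      default:
        /* High bits zero: the low six bits are an extended opcode,
           e.g. DW_CFA_def_cfa (0x0c) or DW_CFA_nop (0x00). */
        printf("extended opcode 0x%02x\n", operand);
        break;
      }
}

int main(void)
{
    describe_cfa_opcode(0x44); /* DW_CFA_advance_loc, delta 4 */
    describe_cfa_opcode(0x8d); /* DW_CFA_offset, register 13  */
    describe_cfa_opcode(0x0c); /* extended: DW_CFA_def_cfa    */
    return 0;
}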
dotnet/runtime | 66,006 | Revert "[mono][jit] Remove support for -O=-float32, i.e. treating r4 values a…" | Reverts this as x86 still uses it.
| vargaz | 2022-03-01T15:15:53Z | 2022-03-01T20:09:47Z | af71404d9a3b38e63408af47cc847ac1a1fcf4e1 | 2dd232a53c38ac874b15fe504df275b660988294 | Revert "[mono][jit] Remove support for -O=-float32, i.e. treating r4 values a…". Reverts this as x86 still uses it.
| ./src/coreclr/pal/src/libunwind/src/mi/_ReadULEB.c | #include <libunwind.h>
unw_word_t
_ReadULEB (unsigned char **dpp)
{
unsigned shift = 0;
unw_word_t byte, result = 0;
unsigned char *bp = *dpp;
while (1)
{
byte = *bp++;
result |= (byte & 0x7f) << shift;
if ((byte & 0x80) == 0)
break;
shift += 7;
}
*dpp = bp;
return result;
}
| #include <libunwind.h>
unw_word_t
_ReadULEB (unsigned char **dpp)
{
unsigned shift = 0;
unw_word_t byte, result = 0;
unsigned char *bp = *dpp;
while (1)
{
byte = *bp++;
result |= (byte & 0x7f) << shift;
if ((byte & 0x80) == 0)
break;
shift += 7;
}
*dpp = bp;
return result;
}
| -1 |
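_ReadULEB above decodes the unsigned LEB128 variable-length integers DWARF uses for operands such as register numbers and offsets: seven payload bits per byte, low-order group first, with the top bit as a continuation flag. A self-contained round-trip sketch follows (the encoder is an illustrative counterpart, not part of libunwind); the byte sequence E5 8E 26 decoding to 624485 is the worked example from the DWARF specification:

#include <stdint.h>
#include <stdio.h>

/* Illustrative encoder counterpart to _ReadULEB. */
static size_t write_uleb(uint64_t value, unsigned char *out)
{
    size_t n = 0;
    do
      {
        unsigned char byte = value & 0x7f;
        value >>= 7;
        if (value != 0)
          byte |= 0x80;  /* continuation bit: more bytes follow */
        out[n++] = byte;
      }
    while (value != 0);
    return n;
}

static uint64_t read_uleb(unsigned char **dpp)
{
    unsigned shift = 0;
    uint64_t byte, result = 0;
    unsigned char *bp = *dpp;
    while (1)
      {
        byte = *bp++;
        result |= (byte & 0x7f) << shift;
        if ((byte & 0x80) == 0)
          break;
        shift += 7;
      }
    *dpp = bp;
    return result;
}

int main(void)
{
    unsigned char buf[10];
    unsigned char *p = buf;
    size_t n = write_uleb(624485, buf);
    printf("encoded %zu bytes: %02x %02x %02x\n", n, buf[0], buf[1], buf[2]);
    printf("decoded: %llu\n", (unsigned long long)read_uleb(&p));  /* 624485 */
    return 0;
}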
dotnet/runtime | 66,006 | Revert "[mono][jit] Remove support for -O=-float32, i.e. treating r4 values a…" | Reverts this as x86 still uses it.
| vargaz | 2022-03-01T15:15:53Z | 2022-03-01T20:09:47Z | af71404d9a3b38e63408af47cc847ac1a1fcf4e1 | 2dd232a53c38ac874b15fe504df275b660988294 | Revert "[mono][jit] Remove support for -O=-float32, i.e. treating r4 values a…". Reverts this as x86 still uses it.
| ./src/coreclr/tools/superpmi/mcs/verbtoc.h | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//----------------------------------------------------------
// verbTOC.h - verb that creates a Table of Contents for an MCH file
//----------------------------------------------------------
#ifndef _verbTOC
#define _verbTOC
class verbTOC
{
public:
static int DoWork(const char* nameOfInput1);
};
#endif
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//----------------------------------------------------------
// verbTOC.h - verb that creates a Table of Contents for an MCH file
//----------------------------------------------------------
#ifndef _verbTOC
#define _verbTOC
class verbTOC
{
public:
static int DoWork(const char* nameOfInput1);
};
#endif
| -1 |
dotnet/runtime | 66,006 | Revert "[mono][jit] Remove support for -O=-float32, i.e. treating r4 values a…" | Reverts this as x86 still uses it.
| vargaz | 2022-03-01T15:15:53Z | 2022-03-01T20:09:47Z | af71404d9a3b38e63408af47cc847ac1a1fcf4e1 | 2dd232a53c38ac874b15fe504df275b660988294 | Revert "[mono][jit] Remove support for -O=-float32, i.e. treating r4 values a…". Reverts this as x86 still uses it.
| ./src/native/libs/System.Security.Cryptography.Native/pal_x509.h | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
#include "opensslshim.h"
#include "pal_compiler.h"
#include "pal_crypto_types.h"
/*
These values should be kept in sync with System.Security.Cryptography.X509Certificates.X509RevocationFlag.
*/
typedef enum {
EndCertificateOnly = 0,
EntireChain = 1,
ExcludeRoot = 2,
} X509RevocationFlag;
/*
The error codes used when verifying X509 certificate chains.
These values should be kept in sync with Interop.Crypto.X509VerifyStatusCodeUniversal.
Codes specific to specific versions of OpenSSL can also be returned,
but are not represented in this enum due to their non-constant nature.
*/
typedef enum {
PAL_X509_V_OK = 0,
PAL_X509_V_ERR_UNABLE_TO_GET_ISSUER_CERT = 2,
PAL_X509_V_ERR_UNABLE_TO_GET_CRL = 3,
PAL_X509_V_ERR_UNABLE_TO_DECRYPT_CRL_SIGNATURE = 5,
PAL_X509_V_ERR_UNABLE_TO_DECODE_ISSUER_PUBLIC_KEY = 6,
PAL_X509_V_ERR_CERT_SIGNATURE_FAILURE = 7,
PAL_X509_V_ERR_CRL_SIGNATURE_FAILURE = 8,
PAL_X509_V_ERR_CERT_NOT_YET_VALID = 9,
PAL_X509_V_ERR_CERT_HAS_EXPIRED = 10,
PAL_X509_V_ERR_CRL_NOT_YET_VALID = 11,
PAL_X509_V_ERR_CRL_HAS_EXPIRED = 12,
PAL_X509_V_ERR_ERROR_IN_CERT_NOT_BEFORE_FIELD = 13,
PAL_X509_V_ERR_ERROR_IN_CERT_NOT_AFTER_FIELD = 14,
PAL_X509_V_ERR_ERROR_IN_CRL_LAST_UPDATE_FIELD = 15,
PAL_X509_V_ERR_ERROR_IN_CRL_NEXT_UPDATE_FIELD = 16,
PAL_X509_V_ERR_OUT_OF_MEM = 17,
PAL_X509_V_ERR_DEPTH_ZERO_SELF_SIGNED_CERT = 18,
PAL_X509_V_ERR_SELF_SIGNED_CERT_IN_CHAIN = 19,
PAL_X509_V_ERR_UNABLE_TO_GET_ISSUER_CERT_LOCALLY = 20,
PAL_X509_V_ERR_UNABLE_TO_VERIFY_LEAF_SIGNATURE = 21,
PAL_X509_V_ERR_CERT_CHAIN_TOO_LONG = 22,
PAL_X509_V_ERR_CERT_REVOKED = 23,
// Code 24 varies
PAL_X509_V_ERR_PATH_LENGTH_EXCEEDED = 25,
PAL_X509_V_ERR_INVALID_PURPOSE = 26,
PAL_X509_V_ERR_CERT_UNTRUSTED = 27,
PAL_X509_V_ERR_CERT_REJECTED = 28,
PAL_X509_V_ERR_KEYUSAGE_NO_CERTSIGN = 32,
PAL_X509_V_ERR_UNABLE_TO_GET_CRL_ISSUER = 33,
PAL_X509_V_ERR_UNHANDLED_CRITICAL_EXTENSION = 34,
PAL_X509_V_ERR_KEYUSAGE_NO_CRL_SIGN = 35,
PAL_X509_V_ERR_UNHANDLED_CRITICAL_CRL_EXTENSION = 36,
PAL_X509_V_ERR_INVALID_NON_CA = 37,
PAL_X509_V_ERR_KEYUSAGE_NO_DIGITAL_SIGNATURE = 39,
PAL_X509_V_ERR_INVALID_EXTENSION = 41,
PAL_X509_V_ERR_INVALID_POLICY_EXTENSION = 42,
PAL_X509_V_ERR_NO_EXPLICIT_POLICY = 43,
PAL_X509_V_ERR_DIFFERENT_CRL_SCOPE = 44,
PAL_X509_V_ERR_UNSUPPORTED_EXTENSION_FEATURE = 45,
PAL_X509_V_ERR_UNNESTED_RESOURCE = 46,
PAL_X509_V_ERR_PERMITTED_VIOLATION = 47,
PAL_X509_V_ERR_EXCLUDED_VIOLATION = 48,
PAL_X509_V_ERR_SUBTREE_MINMAX = 49,
PAL_X509_V_ERR_APPLICATION_VERIFICATION = 50,
PAL_X509_V_ERR_UNSUPPORTED_CONSTRAINT_TYPE = 51,
PAL_X509_V_ERR_UNSUPPORTED_CONSTRAINT_SYNTAX = 52,
PAL_X509_V_ERR_UNSUPPORTED_NAME_SYNTAX = 53,
PAL_X509_V_ERR_CRL_PATH_VALIDATION_ERROR = 54,
PAL_X509_V_ERR_SUITE_B_INVALID_VERSION = 56,
PAL_X509_V_ERR_SUITE_B_INVALID_ALGORITHM = 57,
PAL_X509_V_ERR_SUITE_B_INVALID_CURVE = 58,
PAL_X509_V_ERR_SUITE_B_INVALID_SIGNATURE_ALGORITHM = 59,
PAL_X509_V_ERR_SUITE_B_LOS_NOT_ALLOWED = 60,
PAL_X509_V_ERR_SUITE_B_CANNOT_SIGN_P_384_WITH_P_256 = 61,
PAL_X509_V_ERR_HOSTNAME_MISMATCH = 62,
PAL_X509_V_ERR_EMAIL_MISMATCH = 63,
PAL_X509_V_ERR_IP_ADDRESS_MISMATCH = 64,
} X509VerifyStatusCode;
typedef int32_t (*X509StoreVerifyCallback)(int32_t, X509_STORE_CTX*);
/*
Function:
GetX509EvpPublicKey
Returns an EVP_PKEY* equivalent to the public key of the certificate.
*/
PALEXPORT EVP_PKEY* CryptoNative_GetX509EvpPublicKey(X509* x509);
/*
Shims the d2i_X509_CRL method and makes it easier to invoke from managed code.
*/
PALEXPORT X509_CRL* CryptoNative_DecodeX509Crl(const uint8_t* buf, int32_t len);
/*
Shims the d2i_X509 method and makes it easier to invoke from managed code.
*/
PALEXPORT X509* CryptoNative_DecodeX509(const uint8_t* buf, int32_t len);
/*
Returns the number of bytes it will take to convert
the X509 to a DER format.
*/
PALEXPORT int32_t CryptoNative_GetX509DerSize(X509* x);
/*
Shims the i2d_X509 method.
Returns the number of bytes written to buf.
*/
PALEXPORT int32_t CryptoNative_EncodeX509(X509* x, uint8_t* buf);
/*
Cleans up and deletes an X509 instance.
Implemented by calling X509_free.
No-op if a is null.
The given X509 pointer is invalid after this call.
Always succeeds.
*/
PALEXPORT void CryptoNative_X509Destroy(X509* a);
/*
Shims the X509_dup method.
Returns the duplicated X509 instance.
*/
PALEXPORT X509* CryptoNative_X509Duplicate(X509* x509);
/*
Shims the PEM_read_bio_X509 method.
Returns the read X509 instance.
*/
PALEXPORT X509* CryptoNative_PemReadX509FromBio(BIO* bio);
/*
Shims the PEM_read_bio_X509_AUX method.
Returns the read X509 instance.
*/
PALEXPORT X509* CryptoNative_PemReadX509FromBioAux(BIO* bio);
/*
Shims the X509_get_serialNumber method.
Returns the ASN1_INTEGER for the serial number.
*/
PALEXPORT ASN1_INTEGER* CryptoNative_X509GetSerialNumber(X509* x509);
/*
Shims the X509_get_issuer_name method.
Returns the X509_NAME for the issuer name.
*/
PALEXPORT X509_NAME* CryptoNative_X509GetIssuerName(X509* x509);
/*
Shims the X509_get_subject_name method.
Returns the X509_NAME for the subject name.
*/
PALEXPORT X509_NAME* CryptoNative_X509GetSubjectName(X509* x509);
/*
Shims the X509_check_purpose method.
*/
PALEXPORT int32_t CryptoNative_X509CheckPurpose(X509* x, int32_t id, int32_t ca);
/*
Shims the X509_issuer_name_hash method.
*/
PALEXPORT uint64_t CryptoNative_X509IssuerNameHash(X509* x);
/*
Shims the X509_get_ext_count method.
*/
PALEXPORT int32_t CryptoNative_X509GetExtCount(X509* x);
/*
Shims the X509_get_ext method.
*/
PALEXPORT X509_EXTENSION* CryptoNative_X509GetExt(X509* x, int32_t loc);
/*
Shims the X509_EXTENSION_get_object method.
*/
PALEXPORT ASN1_OBJECT* CryptoNative_X509ExtensionGetOid(X509_EXTENSION* x);
/*
Shims the X509_EXTENSION_get_data method.
*/
PALEXPORT ASN1_OCTET_STRING* CryptoNative_X509ExtensionGetData(X509_EXTENSION* x);
/*
Shims the X509_EXTENSION_get_critical method.
*/
PALEXPORT int32_t CryptoNative_X509ExtensionGetCritical(X509_EXTENSION* x);
/*
Returns the data portion of the first matched extension.
*/
PALEXPORT ASN1_OCTET_STRING* CryptoNative_X509FindExtensionData(X509* x, int32_t nid);
/*
Shims the X509_STORE_free method.
*/
PALEXPORT void CryptoNative_X509StoreDestory(X509_STORE* v);
/*
Shims the X509_STORE_add_crl method.
*/
PALEXPORT int32_t CryptoNative_X509StoreAddCrl(X509_STORE* ctx, X509_CRL* x);
/*
Sets the correct flags on the X509_STORE for the specified X509RevocationFlag.
Shims the X509_STORE_set_flags method.
*/
PALEXPORT int32_t CryptoNative_X509StoreSetRevocationFlag(X509_STORE* ctx, X509RevocationFlag revocationFlag);
/*
Shims the X509_STORE_CTX_new method.
*/
PALEXPORT X509_STORE_CTX* CryptoNative_X509StoreCtxCreate(void);
/*
Shims the X509_STORE_CTX_free method.
*/
PALEXPORT void CryptoNative_X509StoreCtxDestroy(X509_STORE_CTX* v);
/*
Shims the X509_STORE_CTX_init method.
*/
PALEXPORT int32_t CryptoNative_X509StoreCtxInit(X509_STORE_CTX* ctx,
X509_STORE* store,
X509* x509,
X509Stack* extraStore);
/*
Shims the X509_verify_cert method.
*/
PALEXPORT int32_t CryptoNative_X509VerifyCert(X509_STORE_CTX* ctx);
/*
Shims the X509_STORE_CTX_get1_chain method.
*/
PALEXPORT X509Stack* CryptoNative_X509StoreCtxGetChain(X509_STORE_CTX* ctx);
/*
Shims the X509_STORE_CTX_get_current_cert function.
*/
PALEXPORT X509* CryptoNative_X509StoreCtxGetCurrentCert(X509_STORE_CTX* ctx);
/*
Returns the interior pointer to the "untrusted" certificates collection for this X509_STORE_CTX
*/
PALEXPORT X509Stack* CryptoNative_X509StoreCtxGetSharedUntrusted(X509_STORE_CTX* ctx);
/*
Shims the X509_STORE_CTX_get_error method.
*/
PALEXPORT int32_t CryptoNative_X509StoreCtxGetError(X509_STORE_CTX* ctx);
/*
Resets ctx to before the chain was built, preserving the target cert, trust store, extra cert context,
and verify parameters.
*/
PALEXPORT int32_t CryptoNative_X509StoreCtxReset(X509_STORE_CTX* ctx);
/*
Reset ctx and rebuild the chain.
Returns -1 if CryptoNative_X509StoreCtxReset failed, otherwise returns the result of
X509_verify_cert.
*/
PALEXPORT int32_t CryptoNative_X509StoreCtxRebuildChain(X509_STORE_CTX* ctx);
/*
Shims the X509_STORE_CTX_get_error_depth method.
*/
PALEXPORT int32_t CryptoNative_X509StoreCtxGetErrorDepth(X509_STORE_CTX* ctx);
/*
Shims the X509_STORE_CTX_set_verify_cb function.
*/
PALEXPORT void CryptoNative_X509StoreCtxSetVerifyCallback(X509_STORE_CTX* ctx, X509StoreVerifyCallback callback);
/*
Shims the X509_verify_cert_error_string method.
*/
PALEXPORT const char* CryptoNative_X509VerifyCertErrorString(int32_t n);
/*
Shims the X509_CRL_free method.
*/
PALEXPORT void CryptoNative_X509CrlDestroy(X509_CRL* a);
/*
Shims the PEM_write_bio_X509_CRL method.
Returns the number of bytes written.
*/
PALEXPORT int32_t CryptoNative_PemWriteBioX509Crl(BIO* bio, X509_CRL* crl);
/*
Shims the PEM_read_bio_X509_CRL method.
Returns the new X509_CRL instance.
*/
PALEXPORT X509_CRL* CryptoNative_PemReadBioX509Crl(BIO* bio);
/*
Returns the number of bytes it will take to convert the SubjectPublicKeyInfo
portion of the X509 to DER format.
*/
PALEXPORT int32_t CryptoNative_GetX509SubjectPublicKeyInfoDerSize(X509* x);
/*
Shims the i2d_X509_PUBKEY method, providing X509_get_X509_PUBKEY(x) as the input.
Returns the number of bytes written to buf.
*/
PALEXPORT int32_t CryptoNative_EncodeX509SubjectPublicKeyInfo(X509* x, uint8_t* buf);
/*
Increases the reference count of the X509*, thereby increasing the number of calls
required to the free function.
Unlike X509Duplicate, this modifies an existing object, so no new memory is allocated.
Returns the input value.
*/
PALEXPORT X509* CryptoNative_X509UpRef(X509* x509);
/*
Create a new X509_STORE, considering the certificates from systemTrust and userTrust
*/
PALEXPORT X509_STORE* CryptoNative_X509ChainNew(X509Stack* systemTrust, X509Stack* userTrust);
/*
Adds all of the simple certificates from null-or-empty-password PFX files in storePath to stack.
*/
PALEXPORT int32_t CryptoNative_X509StackAddDirectoryStore(X509Stack* stack, char* storePath);
/*
Adds all of the certificates in src to dest and increases their reference count.
*/
PALEXPORT int32_t CryptoNative_X509StackAddMultiple(X509Stack* dest, X509Stack* src);
/*
Removes any untrusted/extra certificates from the untrusted collection that are not part of
the current chain to make chain builds after Reset faster.
*/
PALEXPORT int32_t CryptoNative_X509StoreCtxCommitToChain(X509_STORE_CTX* storeCtx);
/*
Duplicates any certificate at or below the level where the error marker is.
Outputs a new store with a clone of the root, if necessary.
The new store does not have any properties set other than the trust. (Mainly, CRLs are lost)
*/
PALEXPORT int32_t CryptoNative_X509StoreCtxResetForSignatureError(X509_STORE_CTX* storeCtx, X509_STORE** newStore);
/*
Look for a cached OCSP response appropriate to the end-entity certificate using the issuer as
determined by the chain in storeCtx.
*/
PALEXPORT int32_t CryptoNative_X509ChainGetCachedOcspStatus(X509_STORE_CTX* storeCtx, char* cachePath, int chainDepth);
/*
Build an OCSP request appropriate for the end-entity certificate using the issuer (and trust) as
determined by the chain in storeCtx.
*/
PALEXPORT OCSP_REQUEST* CryptoNative_X509ChainBuildOcspRequest(X509_STORE_CTX* storeCtx, int chainDepth);
/*
Determine if the OCSP response is acceptable, and if acceptable report the status and
cache the result (if appropriate)
*/
PALEXPORT int32_t CryptoNative_X509ChainVerifyOcsp(X509_STORE_CTX* storeCtx,
OCSP_REQUEST* req,
OCSP_RESPONSE* resp,
char* cachePath,
int chainDepth);
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
#include "opensslshim.h"
#include "pal_compiler.h"
#include "pal_crypto_types.h"
/*
These values should be kept in sync with System.Security.Cryptography.X509Certificates.X509RevocationFlag.
*/
typedef enum {
EndCertificateOnly = 0,
EntireChain = 1,
ExcludeRoot = 2,
} X509RevocationFlag;
/*
The error codes used when verifying X509 certificate chains.
These values should be kept in sync with Interop.Crypto.X509VerifyStatusCodeUniversal.
Codes specific to specific versions of OpenSSL can also be returned,
but are not represented in this enum due to their non-constant nature.
*/
typedef enum {
PAL_X509_V_OK = 0,
PAL_X509_V_ERR_UNABLE_TO_GET_ISSUER_CERT = 2,
PAL_X509_V_ERR_UNABLE_TO_GET_CRL = 3,
PAL_X509_V_ERR_UNABLE_TO_DECRYPT_CRL_SIGNATURE = 5,
PAL_X509_V_ERR_UNABLE_TO_DECODE_ISSUER_PUBLIC_KEY = 6,
PAL_X509_V_ERR_CERT_SIGNATURE_FAILURE = 7,
PAL_X509_V_ERR_CRL_SIGNATURE_FAILURE = 8,
PAL_X509_V_ERR_CERT_NOT_YET_VALID = 9,
PAL_X509_V_ERR_CERT_HAS_EXPIRED = 10,
PAL_X509_V_ERR_CRL_NOT_YET_VALID = 11,
PAL_X509_V_ERR_CRL_HAS_EXPIRED = 12,
PAL_X509_V_ERR_ERROR_IN_CERT_NOT_BEFORE_FIELD = 13,
PAL_X509_V_ERR_ERROR_IN_CERT_NOT_AFTER_FIELD = 14,
PAL_X509_V_ERR_ERROR_IN_CRL_LAST_UPDATE_FIELD = 15,
PAL_X509_V_ERR_ERROR_IN_CRL_NEXT_UPDATE_FIELD = 16,
PAL_X509_V_ERR_OUT_OF_MEM = 17,
PAL_X509_V_ERR_DEPTH_ZERO_SELF_SIGNED_CERT = 18,
PAL_X509_V_ERR_SELF_SIGNED_CERT_IN_CHAIN = 19,
PAL_X509_V_ERR_UNABLE_TO_GET_ISSUER_CERT_LOCALLY = 20,
PAL_X509_V_ERR_UNABLE_TO_VERIFY_LEAF_SIGNATURE = 21,
PAL_X509_V_ERR_CERT_CHAIN_TOO_LONG = 22,
PAL_X509_V_ERR_CERT_REVOKED = 23,
// Code 24 varies
PAL_X509_V_ERR_PATH_LENGTH_EXCEEDED = 25,
PAL_X509_V_ERR_INVALID_PURPOSE = 26,
PAL_X509_V_ERR_CERT_UNTRUSTED = 27,
PAL_X509_V_ERR_CERT_REJECTED = 28,
PAL_X509_V_ERR_KEYUSAGE_NO_CERTSIGN = 32,
PAL_X509_V_ERR_UNABLE_TO_GET_CRL_ISSUER = 33,
PAL_X509_V_ERR_UNHANDLED_CRITICAL_EXTENSION = 34,
PAL_X509_V_ERR_KEYUSAGE_NO_CRL_SIGN = 35,
PAL_X509_V_ERR_UNHANDLED_CRITICAL_CRL_EXTENSION = 36,
PAL_X509_V_ERR_INVALID_NON_CA = 37,
PAL_X509_V_ERR_KEYUSAGE_NO_DIGITAL_SIGNATURE = 39,
PAL_X509_V_ERR_INVALID_EXTENSION = 41,
PAL_X509_V_ERR_INVALID_POLICY_EXTENSION = 42,
PAL_X509_V_ERR_NO_EXPLICIT_POLICY = 43,
PAL_X509_V_ERR_DIFFERENT_CRL_SCOPE = 44,
PAL_X509_V_ERR_UNSUPPORTED_EXTENSION_FEATURE = 45,
PAL_X509_V_ERR_UNNESTED_RESOURCE = 46,
PAL_X509_V_ERR_PERMITTED_VIOLATION = 47,
PAL_X509_V_ERR_EXCLUDED_VIOLATION = 48,
PAL_X509_V_ERR_SUBTREE_MINMAX = 49,
PAL_X509_V_ERR_APPLICATION_VERIFICATION = 50,
PAL_X509_V_ERR_UNSUPPORTED_CONSTRAINT_TYPE = 51,
PAL_X509_V_ERR_UNSUPPORTED_CONSTRAINT_SYNTAX = 52,
PAL_X509_V_ERR_UNSUPPORTED_NAME_SYNTAX = 53,
PAL_X509_V_ERR_CRL_PATH_VALIDATION_ERROR = 54,
PAL_X509_V_ERR_SUITE_B_INVALID_VERSION = 56,
PAL_X509_V_ERR_SUITE_B_INVALID_ALGORITHM = 57,
PAL_X509_V_ERR_SUITE_B_INVALID_CURVE = 58,
PAL_X509_V_ERR_SUITE_B_INVALID_SIGNATURE_ALGORITHM = 59,
PAL_X509_V_ERR_SUITE_B_LOS_NOT_ALLOWED = 60,
PAL_X509_V_ERR_SUITE_B_CANNOT_SIGN_P_384_WITH_P_256 = 61,
PAL_X509_V_ERR_HOSTNAME_MISMATCH = 62,
PAL_X509_V_ERR_EMAIL_MISMATCH = 63,
PAL_X509_V_ERR_IP_ADDRESS_MISMATCH = 64,
} X509VerifyStatusCode;
typedef int32_t (*X509StoreVerifyCallback)(int32_t, X509_STORE_CTX*);
/*
Function:
GetX509EvpPublicKey
Returns an EVP_PKEY* equivalent to the public key of the certificate.
*/
PALEXPORT EVP_PKEY* CryptoNative_GetX509EvpPublicKey(X509* x509);
/*
Shims the d2i_X509_CRL method and makes it easier to invoke from managed code.
*/
PALEXPORT X509_CRL* CryptoNative_DecodeX509Crl(const uint8_t* buf, int32_t len);
/*
Shims the d2i_X509 method and makes it easier to invoke from managed code.
*/
PALEXPORT X509* CryptoNative_DecodeX509(const uint8_t* buf, int32_t len);
/*
Returns the number of bytes it will take to convert
the X509 to a DER format.
*/
PALEXPORT int32_t CryptoNative_GetX509DerSize(X509* x);
/*
Shims the i2d_X509 method.
Returns the number of bytes written to buf.
*/
PALEXPORT int32_t CryptoNative_EncodeX509(X509* x, uint8_t* buf);
/*
Cleans up and deletes an X509 instance.
Implemented by calling X509_free.
No-op if a is null.
The given X509 pointer is invalid after this call.
Always succeeds.
*/
PALEXPORT void CryptoNative_X509Destroy(X509* a);
/*
Shims the X509_dup method.
Returns the duplicated X509 instance.
*/
PALEXPORT X509* CryptoNative_X509Duplicate(X509* x509);
/*
Shims the PEM_read_bio_X509 method.
Returns the read X509 instance.
*/
PALEXPORT X509* CryptoNative_PemReadX509FromBio(BIO* bio);
/*
Shims the PEM_read_bio_X509_AUX method.
Returns the read X509 instance.
*/
PALEXPORT X509* CryptoNative_PemReadX509FromBioAux(BIO* bio);
/*
Shims the X509_get_serialNumber method.
Returns the ASN1_INTEGER for the serial number.
*/
PALEXPORT ASN1_INTEGER* CryptoNative_X509GetSerialNumber(X509* x509);
/*
Shims the X509_get_issuer_name method.
Returns the X509_NAME for the issuer name.
*/
PALEXPORT X509_NAME* CryptoNative_X509GetIssuerName(X509* x509);
/*
Shims the X509_get_subject_name method.
Returns the X509_NAME for the subject name.
*/
PALEXPORT X509_NAME* CryptoNative_X509GetSubjectName(X509* x509);
/*
Shims the X509_check_purpose method.
*/
PALEXPORT int32_t CryptoNative_X509CheckPurpose(X509* x, int32_t id, int32_t ca);
/*
Shims the X509_issuer_name_hash method.
*/
PALEXPORT uint64_t CryptoNative_X509IssuerNameHash(X509* x);
/*
Shims the X509_get_ext_count method.
*/
PALEXPORT int32_t CryptoNative_X509GetExtCount(X509* x);
/*
Shims the X509_get_ext method.
*/
PALEXPORT X509_EXTENSION* CryptoNative_X509GetExt(X509* x, int32_t loc);
/*
Shims the X509_EXTENSION_get_object method.
*/
PALEXPORT ASN1_OBJECT* CryptoNative_X509ExtensionGetOid(X509_EXTENSION* x);
/*
Shims the X509_EXTENSION_get_data method.
*/
PALEXPORT ASN1_OCTET_STRING* CryptoNative_X509ExtensionGetData(X509_EXTENSION* x);
/*
Shims the X509_EXTENSION_get_critical method.
*/
PALEXPORT int32_t CryptoNative_X509ExtensionGetCritical(X509_EXTENSION* x);
/*
Returns the data portion of the first matched extension.
*/
PALEXPORT ASN1_OCTET_STRING* CryptoNative_X509FindExtensionData(X509* x, int32_t nid);
/*
Shims the X509_STORE_free method.
*/
PALEXPORT void CryptoNative_X509StoreDestory(X509_STORE* v);
/*
Shims the X509_STORE_add_crl method.
*/
PALEXPORT int32_t CryptoNative_X509StoreAddCrl(X509_STORE* ctx, X509_CRL* x);
/*
Sets the correct flags on the X509_STORE for the specified X509RevocationFlag.
Shims the X509_STORE_set_flags method.
*/
PALEXPORT int32_t CryptoNative_X509StoreSetRevocationFlag(X509_STORE* ctx, X509RevocationFlag revocationFlag);
/*
Shims the X509_STORE_CTX_new method.
*/
PALEXPORT X509_STORE_CTX* CryptoNative_X509StoreCtxCreate(void);
/*
Shims the X509_STORE_CTX_free method.
*/
PALEXPORT void CryptoNative_X509StoreCtxDestroy(X509_STORE_CTX* v);
/*
Shims the X509_STORE_CTX_init method.
*/
PALEXPORT int32_t CryptoNative_X509StoreCtxInit(X509_STORE_CTX* ctx,
X509_STORE* store,
X509* x509,
X509Stack* extraStore);
/*
Shims the X509_verify_cert method.
*/
PALEXPORT int32_t CryptoNative_X509VerifyCert(X509_STORE_CTX* ctx);
/*
Shims the X509_STORE_CTX_get1_chain method.
*/
PALEXPORT X509Stack* CryptoNative_X509StoreCtxGetChain(X509_STORE_CTX* ctx);
/*
Shims the X509_STORE_CTX_get_current_cert function.
*/
PALEXPORT X509* CryptoNative_X509StoreCtxGetCurrentCert(X509_STORE_CTX* ctx);
/*
Returns the interior pointer to the "untrusted" certificates collection for this X509_STORE_CTX
*/
PALEXPORT X509Stack* CryptoNative_X509StoreCtxGetSharedUntrusted(X509_STORE_CTX* ctx);
/*
Shims the X509_STORE_CTX_get_error method.
*/
PALEXPORT int32_t CryptoNative_X509StoreCtxGetError(X509_STORE_CTX* ctx);
/*
Resets ctx to before the chain was built, preserving the target cert, trust store, extra cert context,
and verify parameters.
*/
PALEXPORT int32_t CryptoNative_X509StoreCtxReset(X509_STORE_CTX* ctx);
/*
Reset ctx and rebuild the chain.
Returns -1 if CryptoNative_X509StoreCtxReset failed, otherwise returns the result of
X509_verify_cert.
*/
PALEXPORT int32_t CryptoNative_X509StoreCtxRebuildChain(X509_STORE_CTX* ctx);
/*
Shims the X509_STORE_CTX_get_error_depth method.
*/
PALEXPORT int32_t CryptoNative_X509StoreCtxGetErrorDepth(X509_STORE_CTX* ctx);
/*
Shims the X509_STORE_CTX_set_verify_cb function.
*/
PALEXPORT void CryptoNative_X509StoreCtxSetVerifyCallback(X509_STORE_CTX* ctx, X509StoreVerifyCallback callback);
/*
Shims the X509_verify_cert_error_string method.
*/
PALEXPORT const char* CryptoNative_X509VerifyCertErrorString(int32_t n);
/*
Shims the X509_CRL_free method.
*/
PALEXPORT void CryptoNative_X509CrlDestroy(X509_CRL* a);
/*
Shims the PEM_write_bio_X509_CRL method.
Returns the number of bytes written.
*/
PALEXPORT int32_t CryptoNative_PemWriteBioX509Crl(BIO* bio, X509_CRL* crl);
/*
Shims the PEM_read_bio_X509_CRL method.
Returns the new X509_CRL instance.
*/
PALEXPORT X509_CRL* CryptoNative_PemReadBioX509Crl(BIO* bio);
/*
Returns the number of bytes it will take to convert the SubjectPublicKeyInfo
portion of the X509 to DER format.
*/
PALEXPORT int32_t CryptoNative_GetX509SubjectPublicKeyInfoDerSize(X509* x);
/*
Shims the i2d_X509_PUBKEY method, providing X509_get_X509_PUBKEY(x) as the input.
Returns the number of bytes written to buf.
*/
PALEXPORT int32_t CryptoNative_EncodeX509SubjectPublicKeyInfo(X509* x, uint8_t* buf);
/*
Increases the reference count of the X509*, thereby increasing the number of calls
required to the free function.
Unlike X509Duplicate, this modifies an existing object, so no new memory is allocated.
Returns the input value.
*/
PALEXPORT X509* CryptoNative_X509UpRef(X509* x509);
/*
Create a new X509_STORE, considering the certificates from systemTrust and userTrust
*/
PALEXPORT X509_STORE* CryptoNative_X509ChainNew(X509Stack* systemTrust, X509Stack* userTrust);
/*
Adds all of the simple certificates from null-or-empty-password PFX files in storePath to stack.
*/
PALEXPORT int32_t CryptoNative_X509StackAddDirectoryStore(X509Stack* stack, char* storePath);
/*
Adds all of the certificates in src to dest and increases their reference count.
*/
PALEXPORT int32_t CryptoNative_X509StackAddMultiple(X509Stack* dest, X509Stack* src);
/*
Removes any untrusted/extra certificates from the untrusted collection that are not part of
the current chain to make chain builds after Reset faster.
*/
PALEXPORT int32_t CryptoNative_X509StoreCtxCommitToChain(X509_STORE_CTX* storeCtx);
/*
Duplicates any certificate at or below the level where the error marker is.
Outputs a new store with a clone of the root, if necessary.
The new store does not have any properties set other than the trust. (Mainly, CRLs are lost)
*/
PALEXPORT int32_t CryptoNative_X509StoreCtxResetForSignatureError(X509_STORE_CTX* storeCtx, X509_STORE** newStore);
/*
Look for a cached OCSP response appropriate to the end-entity certificate using the issuer as
determined by the chain in storeCtx.
*/
PALEXPORT int32_t CryptoNative_X509ChainGetCachedOcspStatus(X509_STORE_CTX* storeCtx, char* cachePath, int chainDepth);
/*
Build an OCSP request appropriate for the end-entity certificate using the issuer (and trust) as
determined by the chain in storeCtx.
*/
PALEXPORT OCSP_REQUEST* CryptoNative_X509ChainBuildOcspRequest(X509_STORE_CTX* storeCtx, int chainDepth);
/*
Determine if the OCSP response is acceptable, and if acceptable report the status and
cache the result (if appropriate)
*/
PALEXPORT int32_t CryptoNative_X509ChainVerifyOcsp(X509_STORE_CTX* storeCtx,
OCSP_REQUEST* req,
OCSP_RESPONSE* resp,
char* cachePath,
int chainDepth);
| -1 |
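The CryptoNative_X509StoreCtx* shims above are thin wrappers over OpenSSL's chain-verification flow: create a context, bind it to a trust store, the leaf certificate, and any extra certificates, run X509_verify_cert, and read the error code on failure. A minimal sketch of that underlying flow (error handling abbreviated; store, leaf, and extra are assumed to be populated elsewhere):

#include <stdio.h>
#include <openssl/x509.h>
#include <openssl/x509_vfy.h>

/* Returns 1 when the chain verifies, <= 0 on verification failure,
   -1 on allocation/initialization failure. */
int verify_leaf(X509_STORE *store, X509 *leaf, STACK_OF(X509) *extra)
{
    X509_STORE_CTX *ctx = X509_STORE_CTX_new();
    if (ctx == NULL)
        return -1;

    int ok = -1;
    if (X509_STORE_CTX_init(ctx, store, leaf, extra) == 1)
      {
        ok = X509_verify_cert(ctx);  /* 1 on success */
        if (ok != 1)
            fprintf(stderr, "verify failed: %s\n",
                    X509_verify_cert_error_string(
                        X509_STORE_CTX_get_error(ctx)));
      }
    X509_STORE_CTX_free(ctx);
    return ok;
}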
dotnet/runtime | 66,006 | Revert "[mono][jit] Remove support for -O=-float32, i.e. treating r4 values a…" | Reverts this as x86 still uses it.
| vargaz | 2022-03-01T15:15:53Z | 2022-03-01T20:09:47Z | af71404d9a3b38e63408af47cc847ac1a1fcf4e1 | 2dd232a53c38ac874b15fe504df275b660988294 | Revert "[mono][jit] Remove support for -O=-float32, i.e. treating r4 values a…". Reverts this as x86 still uses it.
| ./src/coreclr/pal/src/libunwind/src/aarch64/offsets.h | /* Linux-specific definitions: */
/* Define various structure offsets to simplify cross-compilation. */
/* Offsets for AArch64 Linux "ucontext_t": */
#define LINUX_UC_FLAGS_OFF 0x0
#define LINUX_UC_LINK_OFF 0x8
#define LINUX_UC_STACK_OFF 0x10
#define LINUX_UC_SIGMASK_OFF 0x28
#define LINUX_UC_MCONTEXT_OFF 0xb0
/* Offsets for AArch64 Linux "struct sigcontext": */
#define LINUX_SC_FAULTADDRESS_OFF 0x00
#define LINUX_SC_X0_OFF 0x008
#define LINUX_SC_X1_OFF 0x010
#define LINUX_SC_X2_OFF 0x018
#define LINUX_SC_X3_OFF 0x020
#define LINUX_SC_X4_OFF 0x028
#define LINUX_SC_X5_OFF 0x030
#define LINUX_SC_X6_OFF 0x038
#define LINUX_SC_X7_OFF 0x040
#define LINUX_SC_X8_OFF 0x048
#define LINUX_SC_X9_OFF 0x050
#define LINUX_SC_X10_OFF 0x058
#define LINUX_SC_X11_OFF 0x060
#define LINUX_SC_X12_OFF 0x068
#define LINUX_SC_X13_OFF 0x070
#define LINUX_SC_X14_OFF 0x078
#define LINUX_SC_X15_OFF 0x080
#define LINUX_SC_X16_OFF 0x088
#define LINUX_SC_X17_OFF 0x090
#define LINUX_SC_X18_OFF 0x098
#define LINUX_SC_X19_OFF 0x0a0
#define LINUX_SC_X20_OFF 0x0a8
#define LINUX_SC_X21_OFF 0x0b0
#define LINUX_SC_X22_OFF 0x0b8
#define LINUX_SC_X23_OFF 0x0c0
#define LINUX_SC_X24_OFF 0x0c8
#define LINUX_SC_X25_OFF 0x0d0
#define LINUX_SC_X26_OFF 0x0d8
#define LINUX_SC_X27_OFF 0x0e0
#define LINUX_SC_X28_OFF 0x0e8
#define LINUX_SC_X29_OFF 0x0f0
#define LINUX_SC_X30_OFF 0x0f8
#define LINUX_SC_SP_OFF 0x100
#define LINUX_SC_PC_OFF 0x108
#define LINUX_SC_PSTATE_OFF 0x110
| /* Linux-specific definitions: */
/* Define various structure offsets to simplify cross-compilation. */
/* Offsets for AArch64 Linux "ucontext_t": */
#define LINUX_UC_FLAGS_OFF 0x0
#define LINUX_UC_LINK_OFF 0x8
#define LINUX_UC_STACK_OFF 0x10
#define LINUX_UC_SIGMASK_OFF 0x28
#define LINUX_UC_MCONTEXT_OFF 0xb0
/* Offsets for AArch64 Linux "struct sigcontext": */
#define LINUX_SC_FAULTADDRESS_OFF 0x00
#define LINUX_SC_X0_OFF 0x008
#define LINUX_SC_X1_OFF 0x010
#define LINUX_SC_X2_OFF 0x018
#define LINUX_SC_X3_OFF 0x020
#define LINUX_SC_X4_OFF 0x028
#define LINUX_SC_X5_OFF 0x030
#define LINUX_SC_X6_OFF 0x038
#define LINUX_SC_X7_OFF 0x040
#define LINUX_SC_X8_OFF 0x048
#define LINUX_SC_X9_OFF 0x050
#define LINUX_SC_X10_OFF 0x058
#define LINUX_SC_X11_OFF 0x060
#define LINUX_SC_X12_OFF 0x068
#define LINUX_SC_X13_OFF 0x070
#define LINUX_SC_X14_OFF 0x078
#define LINUX_SC_X15_OFF 0x080
#define LINUX_SC_X16_OFF 0x088
#define LINUX_SC_X17_OFF 0x090
#define LINUX_SC_X18_OFF 0x098
#define LINUX_SC_X19_OFF 0x0a0
#define LINUX_SC_X20_OFF 0x0a8
#define LINUX_SC_X21_OFF 0x0b0
#define LINUX_SC_X22_OFF 0x0b8
#define LINUX_SC_X23_OFF 0x0c0
#define LINUX_SC_X24_OFF 0x0c8
#define LINUX_SC_X25_OFF 0x0d0
#define LINUX_SC_X26_OFF 0x0d8
#define LINUX_SC_X27_OFF 0x0e0
#define LINUX_SC_X28_OFF 0x0e8
#define LINUX_SC_X29_OFF 0x0f0
#define LINUX_SC_X30_OFF 0x0f8
#define LINUX_SC_SP_OFF 0x100
#define LINUX_SC_PC_OFF 0x108
#define LINUX_SC_PSTATE_OFF 0x110
| -1 |
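Hard-coded offsets such as LINUX_SC_X0_OFF exist so the unwinder can be cross-compiled without the target's kernel headers; when those headers are available, the values can be checked against the real layout. A sketch using C11 static assertions, assuming an AArch64 Linux target where <asm/sigcontext.h> defines struct sigcontext (fault_address, regs[31], sp, pc, pstate):

#include <stddef.h>
#include <asm/sigcontext.h>

_Static_assert(offsetof(struct sigcontext, fault_address) == 0x000,
               "LINUX_SC_FAULTADDRESS_OFF");
_Static_assert(offsetof(struct sigcontext, regs[0]) == 0x008,
               "LINUX_SC_X0_OFF");
_Static_assert(offsetof(struct sigcontext, sp) == 0x100,
               "LINUX_SC_SP_OFF");
_Static_assert(offsetof(struct sigcontext, pc) == 0x108,
               "LINUX_SC_PC_OFF");
_Static_assert(offsetof(struct sigcontext, pstate) == 0x110,
               "LINUX_SC_PSTATE_OFF");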
dotnet/runtime | 66,006 | Revert "[mono][jit] Remove support for -O=-float32, i.e. treating r4 values a…" | Reverts this as x86 still uses it.
| vargaz | 2022-03-01T15:15:53Z | 2022-03-01T20:09:47Z | af71404d9a3b38e63408af47cc847ac1a1fcf4e1 | 2dd232a53c38ac874b15fe504df275b660988294 | Revert "[mono][jit] Remove support for -O=-float32, i.e. treating r4 values a…". Reverts this as x86 still uses it.
| ./src/native/external/brotli/enc/hash_longest_match_quickly_inc.h | /* NOLINT(build/header_guard) */
/* Copyright 2010 Google Inc. All Rights Reserved.
Distributed under MIT license.
See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
*/
/* template parameters: FN, BUCKET_BITS, BUCKET_SWEEP_BITS, HASH_LEN,
USE_DICTIONARY
*/
#define HashLongestMatchQuickly HASHER()
#define BUCKET_SIZE (1 << BUCKET_BITS)
#define BUCKET_MASK (BUCKET_SIZE - 1)
#define BUCKET_SWEEP (1 << BUCKET_SWEEP_BITS)
#define BUCKET_SWEEP_MASK ((BUCKET_SWEEP - 1) << 3)
static BROTLI_INLINE size_t FN(HashTypeLength)(void) { return 8; }
static BROTLI_INLINE size_t FN(StoreLookahead)(void) { return 8; }
/* HashBytes is the function that chooses the bucket to place
the address in. The HashLongestMatch and HashLongestMatchQuickly
classes have separate, different implementations of hashing. */
static uint32_t FN(HashBytes)(const uint8_t* data) {
const uint64_t h = ((BROTLI_UNALIGNED_LOAD64LE(data) << (64 - 8 * HASH_LEN)) *
kHashMul64);
/* The higher bits contain more mixture from the multiplication,
so we take our results from there. */
return (uint32_t)(h >> (64 - BUCKET_BITS));
}
/* A (forgetful) hash table to the data seen by the compressor, to
help create backward references to previous data.
This is a hash map of fixed size (BUCKET_SIZE). */
typedef struct HashLongestMatchQuickly {
/* Shortcuts. */
HasherCommon* common;
/* --- Dynamic size members --- */
uint32_t* buckets_; /* uint32_t[BUCKET_SIZE]; */
} HashLongestMatchQuickly;
static void FN(Initialize)(
HasherCommon* common, HashLongestMatchQuickly* BROTLI_RESTRICT self,
const BrotliEncoderParams* params) {
self->common = common;
BROTLI_UNUSED(params);
self->buckets_ = (uint32_t*)common->extra;
}
static void FN(Prepare)(
HashLongestMatchQuickly* BROTLI_RESTRICT self, BROTLI_BOOL one_shot,
size_t input_size, const uint8_t* BROTLI_RESTRICT data) {
uint32_t* BROTLI_RESTRICT buckets = self->buckets_;
/* Partial preparation is 100 times slower (per socket). */
size_t partial_prepare_threshold = BUCKET_SIZE >> 5;
if (one_shot && input_size <= partial_prepare_threshold) {
size_t i;
for (i = 0; i < input_size; ++i) {
const uint32_t key = FN(HashBytes)(&data[i]);
if (BUCKET_SWEEP == 1) {
buckets[key] = 0;
} else {
uint32_t j;
for (j = 0; j < BUCKET_SWEEP; ++j) {
buckets[(key + (j << 3)) & BUCKET_MASK] = 0;
}
}
}
} else {
/* It is not strictly necessary to fill this buffer here, but
not filling will make the results of the compression stochastic
(but correct). This is because random data would cause the
system to find accidentally good backward references here and there. */
memset(buckets, 0, sizeof(uint32_t) * BUCKET_SIZE);
}
}
static BROTLI_INLINE size_t FN(HashMemAllocInBytes)(
const BrotliEncoderParams* params, BROTLI_BOOL one_shot,
size_t input_size) {
BROTLI_UNUSED(params);
BROTLI_UNUSED(one_shot);
BROTLI_UNUSED(input_size);
return sizeof(uint32_t) * BUCKET_SIZE;
}
/* Look at 5 bytes at &data[ix & mask].
Compute a hash from these, and store the value somewhere within
[ix .. ix+3]. */
static BROTLI_INLINE void FN(Store)(
HashLongestMatchQuickly* BROTLI_RESTRICT self,
const uint8_t* BROTLI_RESTRICT data, const size_t mask, const size_t ix) {
const uint32_t key = FN(HashBytes)(&data[ix & mask]);
if (BUCKET_SWEEP == 1) {
self->buckets_[key] = (uint32_t)ix;
} else {
/* Wiggle the value with the bucket sweep range. */
const uint32_t off = ix & BUCKET_SWEEP_MASK;
self->buckets_[(key + off) & BUCKET_MASK] = (uint32_t)ix;
}
}
static BROTLI_INLINE void FN(StoreRange)(
HashLongestMatchQuickly* BROTLI_RESTRICT self,
const uint8_t* BROTLI_RESTRICT data, const size_t mask,
const size_t ix_start, const size_t ix_end) {
size_t i;
for (i = ix_start; i < ix_end; ++i) {
FN(Store)(self, data, mask, i);
}
}
static BROTLI_INLINE void FN(StitchToPreviousBlock)(
HashLongestMatchQuickly* BROTLI_RESTRICT self,
size_t num_bytes, size_t position,
const uint8_t* ringbuffer, size_t ringbuffer_mask) {
if (num_bytes >= FN(HashTypeLength)() - 1 && position >= 3) {
/* Prepare the hashes for three last bytes of the last write.
These could not be calculated before, since they require knowledge
of both the previous and the current block. */
FN(Store)(self, ringbuffer, ringbuffer_mask, position - 3);
FN(Store)(self, ringbuffer, ringbuffer_mask, position - 2);
FN(Store)(self, ringbuffer, ringbuffer_mask, position - 1);
}
}
static BROTLI_INLINE void FN(PrepareDistanceCache)(
HashLongestMatchQuickly* BROTLI_RESTRICT self,
int* BROTLI_RESTRICT distance_cache) {
BROTLI_UNUSED(self);
BROTLI_UNUSED(distance_cache);
}
/* Finds a longest backward match of &data[cur_ix & ring_buffer_mask]
up to the length of max_length and stores the position cur_ix in the
hash table.
Does not look for matches longer than max_length.
Does not look for matches further away than max_backward.
Writes the best match into |out|.
|out|->score is updated only if a better match is found. */
static BROTLI_INLINE void FN(FindLongestMatch)(
HashLongestMatchQuickly* BROTLI_RESTRICT self,
const BrotliEncoderDictionary* dictionary,
const uint8_t* BROTLI_RESTRICT data,
const size_t ring_buffer_mask, const int* BROTLI_RESTRICT distance_cache,
const size_t cur_ix, const size_t max_length, const size_t max_backward,
const size_t dictionary_distance, const size_t max_distance,
HasherSearchResult* BROTLI_RESTRICT out) {
uint32_t* BROTLI_RESTRICT buckets = self->buckets_;
const size_t best_len_in = out->len;
const size_t cur_ix_masked = cur_ix & ring_buffer_mask;
int compare_char = data[cur_ix_masked + best_len_in];
size_t key = FN(HashBytes)(&data[cur_ix_masked]);
size_t key_out;
score_t min_score = out->score;
score_t best_score = out->score;
size_t best_len = best_len_in;
size_t cached_backward = (size_t)distance_cache[0];
size_t prev_ix = cur_ix - cached_backward;
out->len_code_delta = 0;
if (prev_ix < cur_ix) {
prev_ix &= (uint32_t)ring_buffer_mask;
if (compare_char == data[prev_ix + best_len]) {
const size_t len = FindMatchLengthWithLimit(
&data[prev_ix], &data[cur_ix_masked], max_length);
if (len >= 4) {
const score_t score = BackwardReferenceScoreUsingLastDistance(len);
if (best_score < score) {
out->len = len;
out->distance = cached_backward;
out->score = score;
if (BUCKET_SWEEP == 1) {
buckets[key] = (uint32_t)cur_ix;
return;
} else {
best_len = len;
best_score = score;
compare_char = data[cur_ix_masked + len];
}
}
}
}
}
if (BUCKET_SWEEP == 1) {
size_t backward;
size_t len;
/* Only one to look for, don't bother to prepare for a loop. */
prev_ix = buckets[key];
buckets[key] = (uint32_t)cur_ix;
backward = cur_ix - prev_ix;
prev_ix &= (uint32_t)ring_buffer_mask;
if (compare_char != data[prev_ix + best_len_in]) {
return;
}
if (BROTLI_PREDICT_FALSE(backward == 0 || backward > max_backward)) {
return;
}
len = FindMatchLengthWithLimit(&data[prev_ix],
&data[cur_ix_masked],
max_length);
if (len >= 4) {
const score_t score = BackwardReferenceScore(len, backward);
if (best_score < score) {
out->len = len;
out->distance = backward;
out->score = score;
return;
}
}
} else {
size_t keys[BUCKET_SWEEP];
size_t i;
for (i = 0; i < BUCKET_SWEEP; ++i) {
keys[i] = (key + (i << 3)) & BUCKET_MASK;
}
key_out = keys[(cur_ix & BUCKET_SWEEP_MASK) >> 3];
for (i = 0; i < BUCKET_SWEEP; ++i) {
size_t len;
size_t backward;
prev_ix = buckets[keys[i]];
backward = cur_ix - prev_ix;
prev_ix &= (uint32_t)ring_buffer_mask;
if (compare_char != data[prev_ix + best_len]) {
continue;
}
if (BROTLI_PREDICT_FALSE(backward == 0 || backward > max_backward)) {
continue;
}
len = FindMatchLengthWithLimit(&data[prev_ix],
&data[cur_ix_masked],
max_length);
if (len >= 4) {
const score_t score = BackwardReferenceScore(len, backward);
if (best_score < score) {
best_len = len;
out->len = len;
compare_char = data[cur_ix_masked + len];
best_score = score;
out->score = score;
out->distance = backward;
}
}
}
}
if (USE_DICTIONARY && min_score == out->score) {
SearchInStaticDictionary(dictionary,
self->common, &data[cur_ix_masked], max_length, dictionary_distance,
max_distance, out, BROTLI_TRUE);
}
if (BUCKET_SWEEP != 1) {
buckets[key_out] = (uint32_t)cur_ix;
}
}
#undef BUCKET_SWEEP_MASK
#undef BUCKET_SWEEP
#undef BUCKET_MASK
#undef BUCKET_SIZE
#undef HashLongestMatchQuickly
| /* NOLINT(build/header_guard) */
/* Copyright 2010 Google Inc. All Rights Reserved.
Distributed under MIT license.
See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
*/
/* template parameters: FN, BUCKET_BITS, BUCKET_SWEEP_BITS, HASH_LEN,
USE_DICTIONARY
*/
#define HashLongestMatchQuickly HASHER()
#define BUCKET_SIZE (1 << BUCKET_BITS)
#define BUCKET_MASK (BUCKET_SIZE - 1)
#define BUCKET_SWEEP (1 << BUCKET_SWEEP_BITS)
#define BUCKET_SWEEP_MASK ((BUCKET_SWEEP - 1) << 3)
static BROTLI_INLINE size_t FN(HashTypeLength)(void) { return 8; }
static BROTLI_INLINE size_t FN(StoreLookahead)(void) { return 8; }
/* HashBytes is the function that chooses the bucket to place
the address in. The HashLongestMatch and HashLongestMatchQuickly
classes have separate, different implementations of hashing. */
static uint32_t FN(HashBytes)(const uint8_t* data) {
const uint64_t h = ((BROTLI_UNALIGNED_LOAD64LE(data) << (64 - 8 * HASH_LEN)) *
kHashMul64);
/* The higher bits contain more mixture from the multiplication,
so we take our results from there. */
return (uint32_t)(h >> (64 - BUCKET_BITS));
}
/* A (forgetful) hash table to the data seen by the compressor, to
help create backward references to previous data.
This is a hash map of fixed size (BUCKET_SIZE). */
typedef struct HashLongestMatchQuickly {
/* Shortcuts. */
HasherCommon* common;
/* --- Dynamic size members --- */
uint32_t* buckets_; /* uint32_t[BUCKET_SIZE]; */
} HashLongestMatchQuickly;
static void FN(Initialize)(
HasherCommon* common, HashLongestMatchQuickly* BROTLI_RESTRICT self,
const BrotliEncoderParams* params) {
self->common = common;
BROTLI_UNUSED(params);
self->buckets_ = (uint32_t*)common->extra;
}
static void FN(Prepare)(
HashLongestMatchQuickly* BROTLI_RESTRICT self, BROTLI_BOOL one_shot,
size_t input_size, const uint8_t* BROTLI_RESTRICT data) {
uint32_t* BROTLI_RESTRICT buckets = self->buckets_;
/* Partial preparation is 100 times slower (per socket). */
size_t partial_prepare_threshold = BUCKET_SIZE >> 5;
if (one_shot && input_size <= partial_prepare_threshold) {
size_t i;
for (i = 0; i < input_size; ++i) {
const uint32_t key = FN(HashBytes)(&data[i]);
if (BUCKET_SWEEP == 1) {
buckets[key] = 0;
} else {
uint32_t j;
for (j = 0; j < BUCKET_SWEEP; ++j) {
buckets[(key + (j << 3)) & BUCKET_MASK] = 0;
}
}
}
} else {
/* It is not strictly necessary to fill this buffer here, but
not filling will make the results of the compression stochastic
(but correct). This is because random data would cause the
system to find accidentally good backward references here and there. */
memset(buckets, 0, sizeof(uint32_t) * BUCKET_SIZE);
}
}
static BROTLI_INLINE size_t FN(HashMemAllocInBytes)(
const BrotliEncoderParams* params, BROTLI_BOOL one_shot,
size_t input_size) {
BROTLI_UNUSED(params);
BROTLI_UNUSED(one_shot);
BROTLI_UNUSED(input_size);
return sizeof(uint32_t) * BUCKET_SIZE;
}
/* Look at 5 bytes at &data[ix & mask].
Compute a hash from these, and store the value somewhere within
[ix .. ix+3]. */
static BROTLI_INLINE void FN(Store)(
HashLongestMatchQuickly* BROTLI_RESTRICT self,
const uint8_t* BROTLI_RESTRICT data, const size_t mask, const size_t ix) {
const uint32_t key = FN(HashBytes)(&data[ix & mask]);
if (BUCKET_SWEEP == 1) {
self->buckets_[key] = (uint32_t)ix;
} else {
/* Wiggle the value with the bucket sweep range. */
const uint32_t off = ix & BUCKET_SWEEP_MASK;
self->buckets_[(key + off) & BUCKET_MASK] = (uint32_t)ix;
}
}
static BROTLI_INLINE void FN(StoreRange)(
HashLongestMatchQuickly* BROTLI_RESTRICT self,
const uint8_t* BROTLI_RESTRICT data, const size_t mask,
const size_t ix_start, const size_t ix_end) {
size_t i;
for (i = ix_start; i < ix_end; ++i) {
FN(Store)(self, data, mask, i);
}
}
static BROTLI_INLINE void FN(StitchToPreviousBlock)(
HashLongestMatchQuickly* BROTLI_RESTRICT self,
size_t num_bytes, size_t position,
const uint8_t* ringbuffer, size_t ringbuffer_mask) {
if (num_bytes >= FN(HashTypeLength)() - 1 && position >= 3) {
/* Prepare the hashes for three last bytes of the last write.
These could not be calculated before, since they require knowledge
of both the previous and the current block. */
FN(Store)(self, ringbuffer, ringbuffer_mask, position - 3);
FN(Store)(self, ringbuffer, ringbuffer_mask, position - 2);
FN(Store)(self, ringbuffer, ringbuffer_mask, position - 1);
}
}
static BROTLI_INLINE void FN(PrepareDistanceCache)(
HashLongestMatchQuickly* BROTLI_RESTRICT self,
int* BROTLI_RESTRICT distance_cache) {
BROTLI_UNUSED(self);
BROTLI_UNUSED(distance_cache);
}
/* Finds a longest backward match of &data[cur_ix & ring_buffer_mask]
up to the length of max_length and stores the position cur_ix in the
hash table.
Does not look for matches longer than max_length.
Does not look for matches further away than max_backward.
Writes the best match into |out|.
|out|->score is updated only if a better match is found. */
static BROTLI_INLINE void FN(FindLongestMatch)(
HashLongestMatchQuickly* BROTLI_RESTRICT self,
const BrotliEncoderDictionary* dictionary,
const uint8_t* BROTLI_RESTRICT data,
const size_t ring_buffer_mask, const int* BROTLI_RESTRICT distance_cache,
const size_t cur_ix, const size_t max_length, const size_t max_backward,
const size_t dictionary_distance, const size_t max_distance,
HasherSearchResult* BROTLI_RESTRICT out) {
uint32_t* BROTLI_RESTRICT buckets = self->buckets_;
const size_t best_len_in = out->len;
const size_t cur_ix_masked = cur_ix & ring_buffer_mask;
int compare_char = data[cur_ix_masked + best_len_in];
size_t key = FN(HashBytes)(&data[cur_ix_masked]);
size_t key_out;
score_t min_score = out->score;
score_t best_score = out->score;
size_t best_len = best_len_in;
size_t cached_backward = (size_t)distance_cache[0];
size_t prev_ix = cur_ix - cached_backward;
out->len_code_delta = 0;
if (prev_ix < cur_ix) {
prev_ix &= (uint32_t)ring_buffer_mask;
if (compare_char == data[prev_ix + best_len]) {
const size_t len = FindMatchLengthWithLimit(
&data[prev_ix], &data[cur_ix_masked], max_length);
if (len >= 4) {
const score_t score = BackwardReferenceScoreUsingLastDistance(len);
if (best_score < score) {
out->len = len;
out->distance = cached_backward;
out->score = score;
if (BUCKET_SWEEP == 1) {
buckets[key] = (uint32_t)cur_ix;
return;
} else {
best_len = len;
best_score = score;
compare_char = data[cur_ix_masked + len];
}
}
}
}
}
if (BUCKET_SWEEP == 1) {
size_t backward;
size_t len;
/* Only one to look for, don't bother to prepare for a loop. */
prev_ix = buckets[key];
buckets[key] = (uint32_t)cur_ix;
backward = cur_ix - prev_ix;
prev_ix &= (uint32_t)ring_buffer_mask;
if (compare_char != data[prev_ix + best_len_in]) {
return;
}
if (BROTLI_PREDICT_FALSE(backward == 0 || backward > max_backward)) {
return;
}
len = FindMatchLengthWithLimit(&data[prev_ix],
&data[cur_ix_masked],
max_length);
if (len >= 4) {
const score_t score = BackwardReferenceScore(len, backward);
if (best_score < score) {
out->len = len;
out->distance = backward;
out->score = score;
return;
}
}
} else {
size_t keys[BUCKET_SWEEP];
size_t i;
for (i = 0; i < BUCKET_SWEEP; ++i) {
keys[i] = (key + (i << 3)) & BUCKET_MASK;
}
key_out = keys[(cur_ix & BUCKET_SWEEP_MASK) >> 3];
for (i = 0; i < BUCKET_SWEEP; ++i) {
size_t len;
size_t backward;
prev_ix = buckets[keys[i]];
backward = cur_ix - prev_ix;
prev_ix &= (uint32_t)ring_buffer_mask;
if (compare_char != data[prev_ix + best_len]) {
continue;
}
if (BROTLI_PREDICT_FALSE(backward == 0 || backward > max_backward)) {
continue;
}
len = FindMatchLengthWithLimit(&data[prev_ix],
&data[cur_ix_masked],
max_length);
if (len >= 4) {
const score_t score = BackwardReferenceScore(len, backward);
if (best_score < score) {
best_len = len;
out->len = len;
compare_char = data[cur_ix_masked + len];
best_score = score;
out->score = score;
out->distance = backward;
}
}
}
}
if (USE_DICTIONARY && min_score == out->score) {
SearchInStaticDictionary(dictionary,
self->common, &data[cur_ix_masked], max_length, dictionary_distance,
max_distance, out, BROTLI_TRUE);
}
if (BUCKET_SWEEP != 1) {
buckets[key_out] = (uint32_t)cur_ix;
}
}
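/* Minimal standalone sketch (assumption: this mirrors what the upstream
   FindMatchLengthWithLimit helper computes; the real implementation is
   word-at-a-time optimized). It returns the length of the common prefix of
   two byte runs, capped at limit, which is the quantity scored above. */
#if 0
#include <stddef.h>
#include <stdint.h>
static size_t DemoMatchLength(const uint8_t* s1, const uint8_t* s2,
                              size_t limit) {
  size_t matched = 0;
  while (matched < limit && s1[matched] == s2[matched]) ++matched;
  return matched;
}
#endif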
#undef BUCKET_SWEEP_MASK
#undef BUCKET_SWEEP
#undef BUCKET_MASK
#undef BUCKET_SIZE
#undef HashLongestMatchQuickly
| -1 |
dotnet/runtime | 66,006 | Revert "[mono][jit] Remove support for -O=-float32, i.e. treating r4 values a…" | Reverts this as x86 still uses it.
| vargaz | 2022-03-01T15:15:53Z | 2022-03-01T20:09:47Z | af71404d9a3b38e63408af47cc847ac1a1fcf4e1 | 2dd232a53c38ac874b15fe504df275b660988294 | Revert "[mono][jit] Remove support for -O=-float32, i.e. treating r4 values a…". Reverts this as x86 still uses it.
| ./src/native/external/brotli/enc/entropy_encode.c | /* Copyright 2010 Google Inc. All Rights Reserved.
Distributed under MIT license.
See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
*/
/* Entropy encoding (Huffman) utilities. */
#include "./entropy_encode.h"
#include <string.h> /* memset */
#include "../common/constants.h"
#include "../common/platform.h"
#include <brotli/types.h>
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
const size_t kBrotliShellGaps[] = {132, 57, 23, 10, 4, 1};
BROTLI_BOOL BrotliSetDepth(
int p0, HuffmanTree* pool, uint8_t* depth, int max_depth) {
int stack[16];
int level = 0;
int p = p0;
BROTLI_DCHECK(max_depth <= 15);
stack[0] = -1;
while (BROTLI_TRUE) {
if (pool[p].index_left_ >= 0) {
level++;
if (level > max_depth) return BROTLI_FALSE;
stack[level] = pool[p].index_right_or_value_;
p = pool[p].index_left_;
continue;
} else {
depth[pool[p].index_right_or_value_] = (uint8_t)level;
}
while (level >= 0 && stack[level] == -1) level--;
if (level < 0) return BROTLI_TRUE;
p = stack[level];
stack[level] = -1;
}
}
/* Sort the root nodes, least popular first. */
static BROTLI_INLINE BROTLI_BOOL SortHuffmanTree(
const HuffmanTree* v0, const HuffmanTree* v1) {
if (v0->total_count_ != v1->total_count_) {
return TO_BROTLI_BOOL(v0->total_count_ < v1->total_count_);
}
return TO_BROTLI_BOOL(v0->index_right_or_value_ > v1->index_right_or_value_);
}
/* This function will create a Huffman tree.
The catch here is that the tree cannot be arbitrarily deep.
Brotli specifies a maximum depth of 15 bits for "code trees"
and 7 bits for "code length code trees."
   count_limit is a minimum value that every population count is faked up
   to; it is doubled until the resulting tree meets the
   maximum-depth requirement.
   This algorithm does not perform well for very long data blocks,
   especially when population counts are larger than 2**tree_limit, but
   we are not planning to use this with extremely long blocks.
See http://en.wikipedia.org/wiki/Huffman_coding */
void BrotliCreateHuffmanTree(const uint32_t* data,
const size_t length,
const int tree_limit,
HuffmanTree* tree,
uint8_t* depth) {
uint32_t count_limit;
HuffmanTree sentinel;
InitHuffmanTree(&sentinel, BROTLI_UINT32_MAX, -1, -1);
/* For block sizes below 64 kB, we never need to do a second iteration
of this loop. Probably all of our block sizes will be smaller than
that, so this loop is mostly of academic interest. If we actually
would need this, we would be better off with the Katajainen algorithm. */
for (count_limit = 1; ; count_limit *= 2) {
size_t n = 0;
size_t i;
size_t j;
size_t k;
for (i = length; i != 0;) {
--i;
if (data[i]) {
const uint32_t count = BROTLI_MAX(uint32_t, data[i], count_limit);
InitHuffmanTree(&tree[n++], count, -1, (int16_t)i);
}
}
if (n == 1) {
depth[tree[0].index_right_or_value_] = 1; /* Only one element. */
break;
}
SortHuffmanTreeItems(tree, n, SortHuffmanTree);
/* The nodes are:
[0, n): the sorted leaf nodes that we start with.
[n]: we add a sentinel here.
[n + 1, 2n): new parent nodes are added here, starting from
(n+1). These are naturally in ascending order.
[2n]: we add a sentinel at the end as well.
There will be (2n+1) elements at the end. */
tree[n] = sentinel;
tree[n + 1] = sentinel;
i = 0; /* Points to the next leaf node. */
j = n + 1; /* Points to the next non-leaf node. */
for (k = n - 1; k != 0; --k) {
size_t left, right;
if (tree[i].total_count_ <= tree[j].total_count_) {
left = i;
++i;
} else {
left = j;
++j;
}
if (tree[i].total_count_ <= tree[j].total_count_) {
right = i;
++i;
} else {
right = j;
++j;
}
{
/* The sentinel node becomes the parent node. */
size_t j_end = 2 * n - k;
tree[j_end].total_count_ =
tree[left].total_count_ + tree[right].total_count_;
tree[j_end].index_left_ = (int16_t)left;
tree[j_end].index_right_or_value_ = (int16_t)right;
/* Add back the last sentinel node. */
tree[j_end + 1] = sentinel;
}
}
if (BrotliSetDepth((int)(2 * n - 1), &tree[0], depth, tree_limit)) {
/* We need to pack the Huffman tree in tree_limit bits. If this was not
successful, add fake entities to the lowest values and retry. */
break;
}
}
}
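/* Worked example (illustrative, not from the Brotli spec): counts
   {5, 9, 12, 13, 16, 45} merge as (5,9)->14, (12,13)->25, (14,16)->30,
   (25,30)->55, (45,55)->100, giving code lengths {4, 4, 3, 3, 3, 1}.
   Kraft sum: 2*2^-4 + 3*2^-3 + 2^-1 = 1, i.e. a complete prefix code.
   Had some length exceeded tree_limit, the count_limit doubling above would
   flatten the distribution until all lengths fit. */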
static void Reverse(uint8_t* v, size_t start, size_t end) {
--end;
while (start < end) {
uint8_t tmp = v[start];
v[start] = v[end];
v[end] = tmp;
++start;
--end;
}
}
static void BrotliWriteHuffmanTreeRepetitions(
const uint8_t previous_value,
const uint8_t value,
size_t repetitions,
size_t* tree_size,
uint8_t* tree,
uint8_t* extra_bits_data) {
BROTLI_DCHECK(repetitions > 0);
if (previous_value != value) {
tree[*tree_size] = value;
extra_bits_data[*tree_size] = 0;
++(*tree_size);
--repetitions;
}
if (repetitions == 7) {
tree[*tree_size] = value;
extra_bits_data[*tree_size] = 0;
++(*tree_size);
--repetitions;
}
if (repetitions < 3) {
size_t i;
for (i = 0; i < repetitions; ++i) {
tree[*tree_size] = value;
extra_bits_data[*tree_size] = 0;
++(*tree_size);
}
} else {
size_t start = *tree_size;
repetitions -= 3;
while (BROTLI_TRUE) {
tree[*tree_size] = BROTLI_REPEAT_PREVIOUS_CODE_LENGTH;
extra_bits_data[*tree_size] = repetitions & 0x3;
++(*tree_size);
repetitions >>= 2;
if (repetitions == 0) {
break;
}
--repetitions;
}
Reverse(tree, start, *tree_size);
Reverse(extra_bits_data, start, *tree_size);
}
}
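/* Worked example (illustrative): a run of 10 repeats of the previous code
   length enters the loop above with repetitions = 10 - 3 = 7 and emits two
   code-16 symbols with extra bits 3 then 0; Reverse() puts them on the wire
   most-significant digit first, i.e. extras {0, 3}. Decoding per the format
   (RFC 7932): the first 16 gives 3 + 0 = 3 repeats; a second consecutive 16
   gives 4 * (3 - 2) + 3 + 3 = 10 repeats in total, recovering the run. */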
static void BrotliWriteHuffmanTreeRepetitionsZeros(
size_t repetitions,
size_t* tree_size,
uint8_t* tree,
uint8_t* extra_bits_data) {
if (repetitions == 11) {
tree[*tree_size] = 0;
extra_bits_data[*tree_size] = 0;
++(*tree_size);
--repetitions;
}
if (repetitions < 3) {
size_t i;
for (i = 0; i < repetitions; ++i) {
tree[*tree_size] = 0;
extra_bits_data[*tree_size] = 0;
++(*tree_size);
}
} else {
size_t start = *tree_size;
repetitions -= 3;
while (BROTLI_TRUE) {
tree[*tree_size] = BROTLI_REPEAT_ZERO_CODE_LENGTH;
extra_bits_data[*tree_size] = repetitions & 0x7;
++(*tree_size);
repetitions >>= 3;
if (repetitions == 0) {
break;
}
--repetitions;
}
Reverse(tree, start, *tree_size);
Reverse(extra_bits_data, start, *tree_size);
}
}
void BrotliOptimizeHuffmanCountsForRle(size_t length, uint32_t* counts,
uint8_t* good_for_rle) {
size_t nonzero_count = 0;
size_t stride;
size_t limit;
size_t sum;
const size_t streak_limit = 1240;
/* Let's make the Huffman code more compatible with RLE encoding. */
size_t i;
for (i = 0; i < length; i++) {
if (counts[i]) {
++nonzero_count;
}
}
if (nonzero_count < 16) {
return;
}
while (length != 0 && counts[length - 1] == 0) {
--length;
}
if (length == 0) {
return; /* All zeros. */
}
/* Now counts[0..length - 1] does not have trailing zeros. */
{
size_t nonzeros = 0;
uint32_t smallest_nonzero = 1 << 30;
for (i = 0; i < length; ++i) {
if (counts[i] != 0) {
++nonzeros;
if (smallest_nonzero > counts[i]) {
smallest_nonzero = counts[i];
}
}
}
if (nonzeros < 5) {
/* Small histogram will model it well. */
return;
}
if (smallest_nonzero < 4) {
size_t zeros = length - nonzeros;
if (zeros < 6) {
for (i = 1; i < length - 1; ++i) {
if (counts[i - 1] != 0 && counts[i] == 0 && counts[i + 1] != 0) {
counts[i] = 1;
}
}
}
}
if (nonzeros < 28) {
return;
}
}
/* 2) Let's mark all population counts that already can be encoded
with an RLE code. */
memset(good_for_rle, 0, length);
{
    /* Let's not spoil any of the existing good RLE codes.
       Mark any seq of 0's of length at least 5 as good_for_rle.
       Mark any seq of non-0's of length at least 7 as good_for_rle. */
uint32_t symbol = counts[0];
size_t step = 0;
for (i = 0; i <= length; ++i) {
if (i == length || counts[i] != symbol) {
if ((symbol == 0 && step >= 5) ||
(symbol != 0 && step >= 7)) {
size_t k;
for (k = 0; k < step; ++k) {
good_for_rle[i - k - 1] = 1;
}
}
step = 1;
if (i != length) {
symbol = counts[i];
}
} else {
++step;
}
}
}
/* 3) Let's replace those population counts that lead to more RLE codes.
Math here is in 24.8 fixed point representation. */
stride = 0;
limit = 256 * (counts[0] + counts[1] + counts[2]) / 3 + 420;
sum = 0;
for (i = 0; i <= length; ++i) {
if (i == length || good_for_rle[i] ||
(i != 0 && good_for_rle[i - 1]) ||
(256 * counts[i] - limit + streak_limit) >= 2 * streak_limit) {
if (stride >= 4 || (stride >= 3 && sum == 0)) {
size_t k;
/* The stride must end, collapse what we have, if we have enough (4). */
size_t count = (sum + stride / 2) / stride;
if (count == 0) {
count = 1;
}
if (sum == 0) {
          /* Don't let an all-zeros stride be upgraded to ones. */
count = 0;
}
for (k = 0; k < stride; ++k) {
          /* We don't want to change the value at counts[i];
             it already belongs to the next stride. Hence the - 1. */
counts[i - k - 1] = (uint32_t)count;
}
}
stride = 0;
sum = 0;
if (i < length - 2) {
        /* All interesting strides have a count of at least 4,
           at least when non-zero. */
limit = 256 * (counts[i] + counts[i + 1] + counts[i + 2]) / 3 + 420;
} else if (i < length) {
limit = 256 * counts[i];
} else {
limit = 0;
}
}
++stride;
if (i != length) {
sum += counts[i];
if (stride >= 4) {
limit = (256 * sum + stride / 2) / stride;
}
if (stride == 4) {
limit += 120;
}
}
}
}
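/* Illustrative example (made-up counts): in
     counts = {3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 9}
   the run of seven 3's (step >= 7, non-zero) and the run of five 0's
   (step >= 5, zero) are both marked good_for_rle, so stage 3 leaves them
   untouched; only the remaining entries are candidates for collapsing. */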
static void DecideOverRleUse(const uint8_t* depth, const size_t length,
BROTLI_BOOL* use_rle_for_non_zero,
BROTLI_BOOL* use_rle_for_zero) {
size_t total_reps_zero = 0;
size_t total_reps_non_zero = 0;
size_t count_reps_zero = 1;
size_t count_reps_non_zero = 1;
size_t i;
for (i = 0; i < length;) {
const uint8_t value = depth[i];
size_t reps = 1;
size_t k;
for (k = i + 1; k < length && depth[k] == value; ++k) {
++reps;
}
if (reps >= 3 && value == 0) {
total_reps_zero += reps;
++count_reps_zero;
}
if (reps >= 4 && value != 0) {
total_reps_non_zero += reps;
++count_reps_non_zero;
}
i += reps;
}
*use_rle_for_non_zero =
TO_BROTLI_BOOL(total_reps_non_zero > count_reps_non_zero * 2);
*use_rle_for_zero = TO_BROTLI_BOOL(total_reps_zero > count_reps_zero * 2);
}
void BrotliWriteHuffmanTree(const uint8_t* depth,
size_t length,
size_t* tree_size,
uint8_t* tree,
uint8_t* extra_bits_data) {
uint8_t previous_value = BROTLI_INITIAL_REPEATED_CODE_LENGTH;
size_t i;
BROTLI_BOOL use_rle_for_non_zero = BROTLI_FALSE;
BROTLI_BOOL use_rle_for_zero = BROTLI_FALSE;
/* Throw away trailing zeros. */
size_t new_length = length;
for (i = 0; i < length; ++i) {
if (depth[length - i - 1] == 0) {
--new_length;
} else {
break;
}
}
  /* First gather statistics on whether it is a good idea to do RLE. */
if (length > 50) {
/* Find RLE coding for longer codes.
Shorter codes seem not to benefit from RLE. */
DecideOverRleUse(depth, new_length,
&use_rle_for_non_zero, &use_rle_for_zero);
}
/* Actual RLE coding. */
for (i = 0; i < new_length;) {
const uint8_t value = depth[i];
size_t reps = 1;
if ((value != 0 && use_rle_for_non_zero) ||
(value == 0 && use_rle_for_zero)) {
size_t k;
for (k = i + 1; k < new_length && depth[k] == value; ++k) {
++reps;
}
}
if (value == 0) {
BrotliWriteHuffmanTreeRepetitionsZeros(
reps, tree_size, tree, extra_bits_data);
} else {
BrotliWriteHuffmanTreeRepetitions(previous_value,
value, reps, tree_size,
tree, extra_bits_data);
previous_value = value;
}
i += reps;
}
}
static uint16_t BrotliReverseBits(size_t num_bits, uint16_t bits) {
static const size_t kLut[16] = { /* Pre-reversed 4-bit values. */
0x00, 0x08, 0x04, 0x0C, 0x02, 0x0A, 0x06, 0x0E,
0x01, 0x09, 0x05, 0x0D, 0x03, 0x0B, 0x07, 0x0F
};
size_t retval = kLut[bits & 0x0F];
size_t i;
for (i = 4; i < num_bits; i += 4) {
retval <<= 4;
bits = (uint16_t)(bits >> 4);
retval |= kLut[bits & 0x0F];
}
retval >>= ((0 - num_bits) & 0x03);
return (uint16_t)retval;
}
/* 0..15 are values for bits */
#define MAX_HUFFMAN_BITS 16
void BrotliConvertBitDepthsToSymbols(const uint8_t* depth,
size_t len,
uint16_t* bits) {
/* In Brotli, all bit depths are [1..15]
0 bit depth means that the symbol does not exist. */
uint16_t bl_count[MAX_HUFFMAN_BITS] = { 0 };
uint16_t next_code[MAX_HUFFMAN_BITS];
size_t i;
int code = 0;
for (i = 0; i < len; ++i) {
++bl_count[depth[i]];
}
bl_count[0] = 0;
next_code[0] = 0;
for (i = 1; i < MAX_HUFFMAN_BITS; ++i) {
code = (code + bl_count[i - 1]) << 1;
next_code[i] = (uint16_t)code;
}
for (i = 0; i < len; ++i) {
if (depth[i]) {
bits[i] = BrotliReverseBits(depth[i], next_code[depth[i]]++);
}
}
}
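/* Worked example (illustrative): depths {2, 1, 3, 3} give bl_count
   {0, 1, 1, 2}, hence next_code {_, 0, 2, 6}. Assigning in symbol order:
     sym0: len 2, code 10b  -> bit-reversed 01b  = 1
     sym1: len 1, code 0b   -> 0
     sym2: len 3, code 110b -> bit-reversed 011b = 3
     sym3: len 3, code 111b -> 111b = 7
   The reversal is needed because the bit stream consumes code bits starting
   from the least-significant bit, which is why BrotliReverseBits exists. */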
#if defined(__cplusplus) || defined(c_plusplus)
} /* extern "C" */
#endif
|
| -1 |
dotnet/runtime | 66,006 | Revert "[mono][jit] Remove support for -O=-float32, i.e. treating r4 values a…" | Reverts this as x86 still uses it.
| vargaz | 2022-03-01T15:15:53Z | 2022-03-01T20:09:47Z | af71404d9a3b38e63408af47cc847ac1a1fcf4e1 | 2dd232a53c38ac874b15fe504df275b660988294 | Revert "[mono][jit] Remove support for -O=-float32, i.e. treating r4 values a…". Reverts this as x86 still uses it.
| ./src/mono/mono/mini/alias-analysis.c | /**
* \file
* Implement simple alias analysis for local variables.
*
* Author:
* Rodrigo Kumpera ([email protected])
*
* (C) 2013 Xamarin
*/
#include <config.h>
#include <stdio.h>
#include "mini.h"
#include "ir-emit.h"
#include "glib.h"
#include <mono/utils/mono-compiler.h>
#ifndef DISABLE_JIT
static gboolean
is_int_stack_size (int type)
{
#if TARGET_SIZEOF_VOID_P == 4
return type == STACK_I4 || type == STACK_MP || type == STACK_PTR;
#else
return type == STACK_I4;
#endif
}
static gboolean
is_long_stack_size (int type)
{
#if TARGET_SIZEOF_VOID_P == 8
return type == STACK_I8 || type == STACK_MP || type == STACK_PTR;
#else
return type == STACK_I8;
#endif
}
static gboolean
lower_load (MonoCompile *cfg, MonoInst *load, MonoInst *ldaddr)
{
MonoInst *var = (MonoInst *)ldaddr->inst_p0;
MonoType *type = m_class_get_byval_arg (var->klass);
int replaced_op = mono_type_to_load_membase (cfg, type);
if (load->opcode == OP_LOADV_MEMBASE && load->klass != var->klass) {
if (cfg->verbose_level > 2)
printf ("Incompatible load_vtype classes %s x %s\n", m_class_get_name (load->klass), m_class_get_name (var->klass));
return FALSE;
}
if (replaced_op != load->opcode) {
if (cfg->verbose_level > 2)
printf ("Incompatible load type: expected %s but got %s\n",
mono_inst_name (replaced_op),
mono_inst_name (load->opcode));
return FALSE;
} else {
if (cfg->verbose_level > 2) { printf ("mem2reg replacing: "); mono_print_ins (load); }
}
load->opcode = mono_type_to_regmove (cfg, type);
mini_type_to_eval_stack_type (cfg, type, load);
load->sreg1 = var->dreg;
mono_atomic_inc_i32 (&mono_jit_stats.loads_eliminated);
return TRUE;
}
static gboolean
lower_store (MonoCompile *cfg, MonoInst *store, MonoInst *ldaddr)
{
MonoInst *var = (MonoInst *)ldaddr->inst_p0;
MonoType *type = m_class_get_byval_arg (var->klass);
int replaced_op = mono_type_to_store_membase (cfg, type);
if (store->opcode == OP_STOREV_MEMBASE && store->klass != var->klass) {
if (cfg->verbose_level > 2)
printf ("Incompatible store_vtype classes %s x %s\n", m_class_get_name (store->klass), m_class_get_name (store->klass));
return FALSE;
}
if (replaced_op != store->opcode) {
if (cfg->verbose_level > 2)
printf ("Incompatible store_reg type: expected %s but got %s\n",
mono_inst_name (replaced_op),
mono_inst_name (store->opcode));
return FALSE;
} else {
if (cfg->verbose_level > 2) { printf ("mem2reg replacing: "); mono_print_ins (store); }
}
int coerce_op = mono_type_to_stloc_coerce (type);
if (coerce_op)
store->opcode = coerce_op;
else
store->opcode = mono_type_to_regmove (cfg, type);
mini_type_to_eval_stack_type (cfg, type, store);
store->dreg = var->dreg;
mono_atomic_inc_i32 (&mono_jit_stats.stores_eliminated);
return TRUE;
}
static gboolean
lower_store_imm (MonoCompile *cfg, MonoInst *store, MonoInst *ldaddr)
{
MonoInst *var = (MonoInst *)ldaddr->inst_p0;
MonoType *type = m_class_get_byval_arg (var->klass);
int store_op = mono_type_to_store_membase (cfg, type);
if (store_op == OP_STOREV_MEMBASE || store_op == OP_STOREX_MEMBASE)
return FALSE;
switch (store->opcode) {
#if TARGET_SIZEOF_VOID_P == 4
case OP_STORE_MEMBASE_IMM:
#endif
case OP_STOREI4_MEMBASE_IMM:
if (!is_int_stack_size (var->type)) {
if (cfg->verbose_level > 2) printf ("Incompatible variable of size != 4\n");
return FALSE;
}
if (cfg->verbose_level > 2) { printf ("mem2reg replacing: "); mono_print_ins (store); }
store->opcode = OP_ICONST;
store->type = STACK_I4;
store->dreg = var->dreg;
store->inst_c0 = store->inst_imm;
break;
#if TARGET_SIZEOF_VOID_P == 8
case OP_STORE_MEMBASE_IMM:
#endif
case OP_STOREI8_MEMBASE_IMM:
if (!is_long_stack_size (var->type)) {
if (cfg->verbose_level > 2) printf ("Incompatible variable of size != 8\n");
return FALSE;
}
if (cfg->verbose_level > 2) { printf ("mem2reg replacing: "); mono_print_ins (store); }
store->opcode = OP_I8CONST;
store->type = STACK_I8;
store->dreg = var->dreg;
store->inst_l = store->inst_imm;
break;
default:
return FALSE;
}
mono_atomic_inc_i32 (&mono_jit_stats.stores_eliminated);
return TRUE;
}
static void
kill_call_arg_alias (MonoCompile *cfg, GHashTable *addr_loads, GSList *l)
{
for (; l; l = l->next) {
MonoInst *tmp;
guint32 regpair, reg;
regpair = (guint32)(gssize)(l->data);
reg = regpair & 0xffffff;
tmp = (MonoInst *)g_hash_table_lookup (addr_loads, GINT_TO_POINTER (reg));
if (tmp) {
// This call passes an alias as an argument. This means that the contents
// of the passed pointer can change. If the content is also an alias then
// we need to forget it as we do for moves.
if (g_hash_table_remove (addr_loads, GINT_TO_POINTER (((MonoInst*)tmp->inst_p0)->dreg))) {
if (cfg->verbose_level > 2)
printf ("Killed alias %d\n", ((MonoInst*)tmp->inst_p0)->dreg);
}
}
}
}
static gboolean
lower_memory_access (MonoCompile *cfg)
{
MonoBasicBlock *bb;
MonoInst *ins, *tmp;
gboolean needs_dce = FALSE;
GHashTable *addr_loads = g_hash_table_new (NULL, NULL);
//FIXME optimize
for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
g_hash_table_remove_all (addr_loads);
for (ins = bb->code; ins; ins = ins->next) {
handle_instruction:
switch (ins->opcode) {
case OP_LDADDR: {
MonoInst *var = (MonoInst*)ins->inst_p0;
if (var->flags & MONO_INST_VOLATILE) {
if (cfg->verbose_level > 2) { printf ("Found address to volatile var, can't take it: "); mono_print_ins (ins); }
} else {
g_hash_table_insert (addr_loads, GINT_TO_POINTER (ins->dreg), ins);
if (cfg->verbose_level > 2) { printf ("New address: "); mono_print_ins (ins); }
}
break;
}
case OP_MOVE:
tmp = (MonoInst*)g_hash_table_lookup (addr_loads, GINT_TO_POINTER (ins->sreg1));
/*
Forward propagate known aliases
ldaddr R10 <- R8
mov R11 <- R10
*/
if (tmp) {
g_hash_table_insert (addr_loads, GINT_TO_POINTER (ins->dreg), tmp);
if (cfg->verbose_level > 2) { printf ("New alias: "); mono_print_ins (ins); }
} else {
/*
					Source value is not a known address; kill the variable.
*/
if (g_hash_table_remove (addr_loads, GINT_TO_POINTER (ins->dreg))) {
if (cfg->verbose_level > 2) { printf ("Killed alias: "); mono_print_ins (ins); }
}
}
break;
case OP_LOADV_MEMBASE:
case OP_LOAD_MEMBASE:
case OP_LOADU1_MEMBASE:
case OP_LOADI2_MEMBASE:
case OP_LOADU2_MEMBASE:
case OP_LOADI4_MEMBASE:
case OP_LOADU4_MEMBASE:
case OP_LOADI1_MEMBASE:
case OP_LOADI8_MEMBASE:
#ifndef MONO_ARCH_SOFT_FLOAT_FALLBACK
case OP_LOADR4_MEMBASE:
#endif
case OP_LOADR8_MEMBASE:
if (ins->inst_offset != 0)
continue;
tmp = (MonoInst *)g_hash_table_lookup (addr_loads, GINT_TO_POINTER (ins->sreg1));
if (tmp) {
if (cfg->verbose_level > 2) { printf ("Found candidate load:"); mono_print_ins (ins); }
if (lower_load (cfg, ins, tmp)) {
needs_dce = TRUE;
/* Try to propagate known aliases if an OP_MOVE was inserted */
goto handle_instruction;
}
}
break;
case OP_STORE_MEMBASE_REG:
case OP_STOREI1_MEMBASE_REG:
case OP_STOREI2_MEMBASE_REG:
case OP_STOREI4_MEMBASE_REG:
case OP_STOREI8_MEMBASE_REG:
#ifndef MONO_ARCH_SOFT_FLOAT_FALLBACK
case OP_STORER4_MEMBASE_REG:
#endif
case OP_STORER8_MEMBASE_REG:
case OP_STOREV_MEMBASE:
tmp = NULL;
if (ins->opcode == OP_STOREV_MEMBASE) {
tmp = (MonoInst *)g_hash_table_lookup (addr_loads, GINT_TO_POINTER (ins->dreg));
if (tmp)
ins->flags |= MONO_INST_STACK_STORE;
}
if (ins->inst_offset != 0)
continue;
if (!tmp)
tmp = (MonoInst *)g_hash_table_lookup (addr_loads, GINT_TO_POINTER (ins->dreg));
if (tmp) {
if (cfg->verbose_level > 2) { printf ("Found candidate store:"); mono_print_ins (ins); }
if (lower_store (cfg, ins, tmp)) {
needs_dce = TRUE;
/* Try to propagate known aliases if an OP_MOVE was inserted */
goto handle_instruction;
}
}
break;
//FIXME missing storei1_membase_imm and storei2_membase_imm
case OP_STORE_MEMBASE_IMM:
case OP_STOREI4_MEMBASE_IMM:
case OP_STOREI8_MEMBASE_IMM:
if (ins->inst_offset != 0)
continue;
tmp = (MonoInst *)g_hash_table_lookup (addr_loads, GINT_TO_POINTER (ins->dreg));
if (tmp) {
if (cfg->verbose_level > 2) { printf ("Found candidate store-imm:"); mono_print_ins (ins); }
needs_dce |= lower_store_imm (cfg, ins, tmp);
}
break;
case OP_CHECK_THIS:
case OP_NOT_NULL:
tmp = (MonoInst *)g_hash_table_lookup (addr_loads, GINT_TO_POINTER (ins->sreg1));
if (tmp) {
if (cfg->verbose_level > 2) { printf ("Found null check over local: "); mono_print_ins (ins); }
NULLIFY_INS (ins);
needs_dce = TRUE;
}
break;
default: {
if (MONO_IS_CALL (ins)) {
MonoCallInst *call = (MonoCallInst*)ins;
kill_call_arg_alias (cfg, addr_loads, call->out_ireg_args);
}
				// FIXME Kill more aliases if used as dreg, since we are not in SSA form.
				// This would need some optimization so we don't look up the hash
				// table for every instruction.
break;
}
}
}
}
g_hash_table_destroy (addr_loads);
/* There could be ldaddr instructions which already got eliminated */
if (COMPILE_LLVM (cfg))
return TRUE;
return needs_dce;
}
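/* Illustrative example (made-up vreg/var numbers): given

     ldaddr R10 <- V8
     storei4_membase_reg [R10 + 0] <- R11

   lower_store rewrites the store into a plain register move

     move V8 <- R11

   after which the ldaddr is dead and mono_local_deadce removes it. */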
static gboolean
recompute_aliased_variables (MonoCompile *cfg, int *restored_vars)
{
int i;
MonoBasicBlock *bb;
MonoInst *ins;
int kills = 0;
int adds = 0;
*restored_vars = 0;
for (i = 0; i < cfg->num_varinfo; i++) {
MonoInst *var = cfg->varinfo [i];
if (var->flags & MONO_INST_INDIRECT) {
if (cfg->verbose_level > 2) {
printf ("Killing :"); mono_print_ins (var);
}
++kills;
}
var->flags &= ~MONO_INST_INDIRECT;
}
if (!kills)
return FALSE;
for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
for (ins = bb->code; ins; ins = ins->next) {
if (ins->opcode == OP_LDADDR) {
MonoInst *var;
if (cfg->verbose_level > 2) { printf ("Found op :"); mono_print_ins (ins); }
var = (MonoInst*)ins->inst_p0;
if (!(var->flags & MONO_INST_INDIRECT)) {
if (cfg->verbose_level > 1) { printf ("Restoring :"); mono_print_ins (var); }
++adds;
}
var->flags |= MONO_INST_INDIRECT;
}
}
}
*restored_vars = adds;
mono_atomic_fetch_add_i32 (&mono_jit_stats.alias_found, kills);
mono_atomic_fetch_add_i32 (&mono_jit_stats.alias_removed, kills - adds);
if (kills > adds) {
if (cfg->verbose_level > 2) {
printf ("Method: %s\n", mono_method_full_name (cfg->method, 1));
printf ("Kills %d Adds %d\n", kills, adds);
}
return TRUE;
}
return FALSE;
}
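/* Example of the kill/restore accounting above (illustrative): if the only
   OP_LDADDR of a variable was rewritten away by lower_memory_access and then
   removed by DCE, the MONO_INST_INDIRECT flag cleared in the first loop is
   never restored, so the variable can later be treated as a plain vreg. */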
/*
FIXME:
Don't DCE on the whole CFG, only the BBs that have changed.
TODO:
SRVT of small types can fix cases of mismatch for fields of a different type than the component.
Handle aliasing of byrefs in call conventions.
*/
void
mono_local_alias_analysis (MonoCompile *cfg)
{
int i, restored_vars = 1;
if (!cfg->has_indirection)
return;
if (cfg->verbose_level > 2)
mono_print_code (cfg, "BEFORE ALIAS_ANALYSIS");
/*
Remove indirection and memory access of known variables.
*/
if (!lower_memory_access (cfg))
goto done;
/*
By replacing indirect access with direct operations, some LDADDR ops become dead. Kill them.
*/
if (cfg->opt & MONO_OPT_DEADCE)
mono_local_deadce (cfg);
/*
	Some variables no longer need to be flagged as indirect; find them.
	Since indirect vars are converted into global vregs, each pass eliminates only one level of indirection.
	Most cases need only one pass; a few need two.
*/
for (i = 0; i < 3 && restored_vars > 0 && recompute_aliased_variables (cfg, &restored_vars); ++i) {
/*
		A lot of simplification just took place; recompute local variables
		and run DCE to fully profit from the previous gains.
*/
mono_handle_global_vregs (cfg);
if (cfg->opt & MONO_OPT_DEADCE)
mono_local_deadce (cfg);
}
done:
if (cfg->verbose_level > 2)
mono_print_code (cfg, "AFTER ALIAS_ANALYSIS");
}
#else /* !DISABLE_JIT */
MONO_EMPTY_SOURCE_FILE (alias_analysis);
#endif /* !DISABLE_JIT */
|
| -1 |
dotnet/runtime | 66,006 | Revert "[mono][jit] Remove support for -O=-float32, i.e. treating r4 values a…" | Reverts this as x86 still uses it.
| vargaz | 2022-03-01T15:15:53Z | 2022-03-01T20:09:47Z | af71404d9a3b38e63408af47cc847ac1a1fcf4e1 | 2dd232a53c38ac874b15fe504df275b660988294 | Revert "[mono][jit] Remove support for -O=-float32, i.e. treating r4 values a…". Reverts this as x86 still uses it.
| ./src/mono/mono/component/diagnostics_server.c | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//
#include <config.h>
#include <mono/component/diagnostics_server.h>
#include <mono/utils/mono-publib.h>
#include <mono/utils/mono-compiler.h>
#include <eventpipe/ds-server.h>
static bool
diagnostics_server_available (void);
static MonoComponentDiagnosticsServer fn_table = {
{ MONO_COMPONENT_ITF_VERSION, &diagnostics_server_available },
&ds_server_init,
&ds_server_shutdown,
&ds_server_pause_for_diagnostics_monitor,
&ds_server_disable
};
static bool
diagnostics_server_available (void)
{
return true;
}
MonoComponentDiagnosticsServer *
mono_component_diagnostics_server_init (void)
{
return &fn_table;
}
|
| -1 |
dotnet/runtime | 66,006 | Revert "[mono][jit] Remove support for -O=-float32, i.e. treating r4 values a…" | Reverts this as x86 still uses it.
| vargaz | 2022-03-01T15:15:53Z | 2022-03-01T20:09:47Z | af71404d9a3b38e63408af47cc847ac1a1fcf4e1 | 2dd232a53c38ac874b15fe504df275b660988294 | Revert "[mono][jit] Remove support for -O=-float32, i.e. treating r4 values a…". Reverts this as x86 still uses it.
| ./src/native/external/rapidjson/internal/strtod.h | // Tencent is pleased to support the open source community by making RapidJSON available.
//
// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
//
// Licensed under the MIT License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// http://opensource.org/licenses/MIT
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#ifndef RAPIDJSON_STRTOD_
#define RAPIDJSON_STRTOD_
#include "ieee754.h"
#include "biginteger.h"
#include "diyfp.h"
#include "pow10.h"
#include <climits>
#include <limits>
RAPIDJSON_NAMESPACE_BEGIN
namespace internal {
inline double FastPath(double significand, int exp) {
if (exp < -308)
return 0.0;
else if (exp >= 0)
return significand * internal::Pow10(exp);
else
return significand / internal::Pow10(-exp);
}
inline double StrtodNormalPrecision(double d, int p) {
if (p < -308) {
// Prevent expSum < -308, making Pow10(p) = 0
d = FastPath(d, -308);
d = FastPath(d, p + 308);
}
else
d = FastPath(d, p);
return d;
}
template <typename T>
inline T Min3(T a, T b, T c) {
T m = a;
if (m > b) m = b;
if (m > c) m = c;
return m;
}
inline int CheckWithinHalfULP(double b, const BigInteger& d, int dExp) {
const Double db(b);
const uint64_t bInt = db.IntegerSignificand();
const int bExp = db.IntegerExponent();
const int hExp = bExp - 1;
int dS_Exp2 = 0, dS_Exp5 = 0, bS_Exp2 = 0, bS_Exp5 = 0, hS_Exp2 = 0, hS_Exp5 = 0;
// Adjust for decimal exponent
if (dExp >= 0) {
dS_Exp2 += dExp;
dS_Exp5 += dExp;
}
else {
bS_Exp2 -= dExp;
bS_Exp5 -= dExp;
hS_Exp2 -= dExp;
hS_Exp5 -= dExp;
}
// Adjust for binary exponent
if (bExp >= 0)
bS_Exp2 += bExp;
else {
dS_Exp2 -= bExp;
hS_Exp2 -= bExp;
}
// Adjust for half ulp exponent
if (hExp >= 0)
hS_Exp2 += hExp;
else {
dS_Exp2 -= hExp;
bS_Exp2 -= hExp;
}
// Remove common power of two factor from all three scaled values
int common_Exp2 = Min3(dS_Exp2, bS_Exp2, hS_Exp2);
dS_Exp2 -= common_Exp2;
bS_Exp2 -= common_Exp2;
hS_Exp2 -= common_Exp2;
BigInteger dS = d;
dS.MultiplyPow5(static_cast<unsigned>(dS_Exp5)) <<= static_cast<unsigned>(dS_Exp2);
BigInteger bS(bInt);
bS.MultiplyPow5(static_cast<unsigned>(bS_Exp5)) <<= static_cast<unsigned>(bS_Exp2);
BigInteger hS(1);
hS.MultiplyPow5(static_cast<unsigned>(hS_Exp5)) <<= static_cast<unsigned>(hS_Exp2);
BigInteger delta(0);
dS.Difference(bS, &delta);
return delta.Compare(hS);
}
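// Sketch of the identity used above (a reading of the code, not an official
// derivation): the question is whether
//     d * 10^dExp
// lies within half an ulp of b * 2^bExp, where half an ulp is 2^hExp with
// hExp = bExp - 1. Writing 10^k = 2^k * 5^k and moving every negative power
// of 2 and 5 to the other sides makes all three quantities integers
// (dS, bS, hS), so the answer reduces to comparing |dS - bS| with hS.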
inline bool StrtodFast(double d, int p, double* result) {
// Use fast path for string-to-double conversion if possible
// see http://www.exploringbinary.com/fast-path-decimal-to-floating-point-conversion/
if (p > 22 && p < 22 + 16) {
// Fast Path Cases In Disguise
d *= internal::Pow10(p - 22);
p = 22;
}
if (p >= -22 && p <= 22 && d <= 9007199254740991.0) { // 2^53 - 1
*result = FastPath(d, p);
return true;
}
else
return false;
}
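// The guard above works because every power of ten up to 10^22 = 2^22 * 5^22
// is exact in binary64 (5^22 < 2^53), and any integer significand up to
// 2^53 - 1 is exact as well, so FastPath performs a single correctly rounded
// multiply or divide. A tiny standalone check of the 2^53 boundary
// (illustrative, not part of RapidJSON):
#if 0
#include <assert.h>
int main(void) {
    double below = 9007199254740991.0;  /* 2^53 - 1, exactly representable */
    /* 2^53 + 1 is not representable; it rounds (ties-to-even) back to 2^53,
       so both additions below land on the same double. */
    assert(below + 1.0 == below + 2.0);
    return 0;
}
#endif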
// Compute an approximation and see if it is within 1/2 ULP
inline bool StrtodDiyFp(const char* decimals, int dLen, int dExp, double* result) {
uint64_t significand = 0;
int i = 0; // 2^64 - 1 = 18446744073709551615, 1844674407370955161 = 0x1999999999999999
for (; i < dLen; i++) {
if (significand > RAPIDJSON_UINT64_C2(0x19999999, 0x99999999) ||
(significand == RAPIDJSON_UINT64_C2(0x19999999, 0x99999999) && decimals[i] > '5'))
break;
significand = significand * 10u + static_cast<unsigned>(decimals[i] - '0');
}
if (i < dLen && decimals[i] >= '5') // Rounding
significand++;
int remaining = dLen - i;
const int kUlpShift = 3;
const int kUlp = 1 << kUlpShift;
int64_t error = (remaining == 0) ? 0 : kUlp / 2;
DiyFp v(significand, 0);
v = v.Normalize();
error <<= -v.e;
dExp += remaining;
int actualExp;
DiyFp cachedPower = GetCachedPower10(dExp, &actualExp);
if (actualExp != dExp) {
static const DiyFp kPow10[] = {
DiyFp(RAPIDJSON_UINT64_C2(0xa0000000, 0x00000000), -60), // 10^1
DiyFp(RAPIDJSON_UINT64_C2(0xc8000000, 0x00000000), -57), // 10^2
DiyFp(RAPIDJSON_UINT64_C2(0xfa000000, 0x00000000), -54), // 10^3
DiyFp(RAPIDJSON_UINT64_C2(0x9c400000, 0x00000000), -50), // 10^4
DiyFp(RAPIDJSON_UINT64_C2(0xc3500000, 0x00000000), -47), // 10^5
DiyFp(RAPIDJSON_UINT64_C2(0xf4240000, 0x00000000), -44), // 10^6
DiyFp(RAPIDJSON_UINT64_C2(0x98968000, 0x00000000), -40) // 10^7
};
int adjustment = dExp - actualExp;
RAPIDJSON_ASSERT(adjustment >= 1 && adjustment < 8);
v = v * kPow10[adjustment - 1];
if (dLen + adjustment > 19) // has more digits than decimal digits in 64-bit
error += kUlp / 2;
}
v = v * cachedPower;
error += kUlp + (error == 0 ? 0 : 1);
const int oldExp = v.e;
v = v.Normalize();
error <<= oldExp - v.e;
const int effectiveSignificandSize = Double::EffectiveSignificandSize(64 + v.e);
int precisionSize = 64 - effectiveSignificandSize;
if (precisionSize + kUlpShift >= 64) {
int scaleExp = (precisionSize + kUlpShift) - 63;
v.f >>= scaleExp;
v.e += scaleExp;
error = (error >> scaleExp) + 1 + kUlp;
precisionSize -= scaleExp;
}
DiyFp rounded(v.f >> precisionSize, v.e + precisionSize);
const uint64_t precisionBits = (v.f & ((uint64_t(1) << precisionSize) - 1)) * kUlp;
const uint64_t halfWay = (uint64_t(1) << (precisionSize - 1)) * kUlp;
if (precisionBits >= halfWay + static_cast<unsigned>(error)) {
rounded.f++;
if (rounded.f & (DiyFp::kDpHiddenBit << 1)) { // rounding overflows mantissa (issue #340)
rounded.f >>= 1;
rounded.e++;
}
}
*result = rounded.ToDouble();
return halfWay - static_cast<unsigned>(error) >= precisionBits || precisionBits >= halfWay + static_cast<unsigned>(error);
}
inline double StrtodBigInteger(double approx, const char* decimals, int dLen, int dExp) {
RAPIDJSON_ASSERT(dLen >= 0);
const BigInteger dInt(decimals, static_cast<unsigned>(dLen));
Double a(approx);
int cmp = CheckWithinHalfULP(a.Value(), dInt, dExp);
if (cmp < 0)
return a.Value(); // within half ULP
else if (cmp == 0) {
// Round towards even
if (a.Significand() & 1)
return a.NextPositiveDouble();
else
return a.Value();
}
else // approximation errs low by more than half an ulp; take the next double
return a.NextPositiveDouble();
}
inline double StrtodFullPrecision(double d, int p, const char* decimals, size_t length, size_t decimalPosition, int exp) {
RAPIDJSON_ASSERT(d >= 0.0);
RAPIDJSON_ASSERT(length >= 1);
double result = 0.0;
if (StrtodFast(d, p, &result))
return result;
RAPIDJSON_ASSERT(length <= INT_MAX);
int dLen = static_cast<int>(length);
RAPIDJSON_ASSERT(length >= decimalPosition);
RAPIDJSON_ASSERT(length - decimalPosition <= INT_MAX);
int dExpAdjust = static_cast<int>(length - decimalPosition);
RAPIDJSON_ASSERT(exp >= INT_MIN + dExpAdjust);
int dExp = exp - dExpAdjust;
// Make sure length+dExp does not overflow
RAPIDJSON_ASSERT(dExp <= INT_MAX - dLen);
// Trim leading zeros
while (dLen > 0 && *decimals == '0') {
dLen--;
decimals++;
}
// Trim trailing zeros
while (dLen > 0 && decimals[dLen - 1] == '0') {
dLen--;
dExp++;
}
if (dLen == 0) { // Buffer only contains zeros.
return 0.0;
}
// Trim right-most digits
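// 768 digits are enough to decide correct rounding: the exact decimal
// expansion of any double has at most 767 significant digits.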
const int kMaxDecimalDigit = 767 + 1;
if (dLen > kMaxDecimalDigit) {
dExp += dLen - kMaxDecimalDigit;
dLen = kMaxDecimalDigit;
}
// If too small, underflow to zero.
// Any x <= 10^-324 is interpreted as zero.
if (dLen + dExp <= -324)
return 0.0;
// If too large, overflow to infinity.
// Any x >= 10^309 is interpreted as +infinity.
if (dLen + dExp > 309)
return std::numeric_limits<double>::infinity();
if (StrtodDiyFp(decimals, dLen, dExp, &result))
return result;
// Use approximation from StrtodDiyFp and make adjustment with BigInteger comparison
return StrtodBigInteger(result, decimals, dLen, dExp);
}
} // namespace internal
RAPIDJSON_NAMESPACE_END
#endif // RAPIDJSON_STRTOD_
| // Tencent is pleased to support the open source community by making RapidJSON available.
//
// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
//
// Licensed under the MIT License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// http://opensource.org/licenses/MIT
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#ifndef RAPIDJSON_STRTOD_
#define RAPIDJSON_STRTOD_
#include "ieee754.h"
#include "biginteger.h"
#include "diyfp.h"
#include "pow10.h"
#include <climits>
#include <limits>
RAPIDJSON_NAMESPACE_BEGIN
namespace internal {
inline double FastPath(double significand, int exp) {
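// One multiply or divide by a power of ten; correctly rounded whenever both
// the significand and 10^|exp| are exactly representable (the classic fast path).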
if (exp < -308)
return 0.0;
else if (exp >= 0)
return significand * internal::Pow10(exp);
else
return significand / internal::Pow10(-exp);
}
inline double StrtodNormalPrecision(double d, int p) {
if (p < -308) {
// Prevent expSum < -308, making Pow10(p) = 0
d = FastPath(d, -308);
d = FastPath(d, p + 308);
}
else
d = FastPath(d, p);
return d;
}
template <typename T>
inline T Min3(T a, T b, T c) {
T m = a;
if (m > b) m = b;
if (m > c) m = c;
return m;
}
inline int CheckWithinHalfULP(double b, const BigInteger& d, int dExp) {
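// Exactly compare the decimal value d * 10^dExp against the double b: scale d,
// b and b's half ulp to a common integer basis (powers of 2 and 5 are tracked
// separately below), then return the sign of |d - b| minus the half ulp.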
const Double db(b);
const uint64_t bInt = db.IntegerSignificand();
const int bExp = db.IntegerExponent();
const int hExp = bExp - 1;
int dS_Exp2 = 0, dS_Exp5 = 0, bS_Exp2 = 0, bS_Exp5 = 0, hS_Exp2 = 0, hS_Exp5 = 0;
// Adjust for decimal exponent
if (dExp >= 0) {
dS_Exp2 += dExp;
dS_Exp5 += dExp;
}
else {
bS_Exp2 -= dExp;
bS_Exp5 -= dExp;
hS_Exp2 -= dExp;
hS_Exp5 -= dExp;
}
// Adjust for binary exponent
if (bExp >= 0)
bS_Exp2 += bExp;
else {
dS_Exp2 -= bExp;
hS_Exp2 -= bExp;
}
// Adjust for half ulp exponent
if (hExp >= 0)
hS_Exp2 += hExp;
else {
dS_Exp2 -= hExp;
bS_Exp2 -= hExp;
}
// Remove common power of two factor from all three scaled values
int common_Exp2 = Min3(dS_Exp2, bS_Exp2, hS_Exp2);
dS_Exp2 -= common_Exp2;
bS_Exp2 -= common_Exp2;
hS_Exp2 -= common_Exp2;
BigInteger dS = d;
dS.MultiplyPow5(static_cast<unsigned>(dS_Exp5)) <<= static_cast<unsigned>(dS_Exp2);
BigInteger bS(bInt);
bS.MultiplyPow5(static_cast<unsigned>(bS_Exp5)) <<= static_cast<unsigned>(bS_Exp2);
BigInteger hS(1);
hS.MultiplyPow5(static_cast<unsigned>(hS_Exp5)) <<= static_cast<unsigned>(hS_Exp2);
BigInteger delta(0);
dS.Difference(bS, &delta);
return delta.Compare(hS);
}
inline bool StrtodFast(double d, int p, double* result) {
// Use fast path for string-to-double conversion if possible
// see http://www.exploringbinary.com/fast-path-decimal-to-floating-point-conversion/
if (p > 22 && p < 22 + 16) {
// Fast Path Cases In Disguise
d *= internal::Pow10(p - 22);
p = 22;
}
if (p >= -22 && p <= 22 && d <= 9007199254740991.0) { // 2^53 - 1
*result = FastPath(d, p);
return true;
}
else
return false;
}
// Compute an approximation and see if it is within 1/2 ULP
inline bool StrtodDiyFp(const char* decimals, int dLen, int dExp, double* result) {
uint64_t significand = 0;
int i = 0; // Accumulate digits only while significand * 10 + digit still fits in
// 64 bits: 2^64 - 1 = 18446744073709551615, 1844674407370955161 = 0x1999999999999999
for (; i < dLen; i++) {
if (significand > RAPIDJSON_UINT64_C2(0x19999999, 0x99999999) ||
(significand == RAPIDJSON_UINT64_C2(0x19999999, 0x99999999) && decimals[i] > '5'))
break;
significand = significand * 10u + static_cast<unsigned>(decimals[i] - '0');
}
if (i < dLen && decimals[i] >= '5') // Rounding
significand++;
int remaining = dLen - i;
const int kUlpShift = 3;
const int kUlp = 1 << kUlpShift;
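// The accumulated rounding error is tracked in 1/8 ulp units (kUlpShift = 3)
// so that half-ulp error terms stay integral.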
int64_t error = (remaining == 0) ? 0 : kUlp / 2;
DiyFp v(significand, 0);
v = v.Normalize();
error <<= -v.e;
dExp += remaining;
int actualExp;
DiyFp cachedPower = GetCachedPower10(dExp, &actualExp);
if (actualExp != dExp) {
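// The cached powers of ten are spaced eight decimal exponents apart, so
// bridge the remaining gap with one extra multiplication by 10^1 .. 10^7.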
static const DiyFp kPow10[] = {
DiyFp(RAPIDJSON_UINT64_C2(0xa0000000, 0x00000000), -60), // 10^1
DiyFp(RAPIDJSON_UINT64_C2(0xc8000000, 0x00000000), -57), // 10^2
DiyFp(RAPIDJSON_UINT64_C2(0xfa000000, 0x00000000), -54), // 10^3
DiyFp(RAPIDJSON_UINT64_C2(0x9c400000, 0x00000000), -50), // 10^4
DiyFp(RAPIDJSON_UINT64_C2(0xc3500000, 0x00000000), -47), // 10^5
DiyFp(RAPIDJSON_UINT64_C2(0xf4240000, 0x00000000), -44), // 10^6
DiyFp(RAPIDJSON_UINT64_C2(0x98968000, 0x00000000), -40) // 10^7
};
int adjustment = dExp - actualExp;
RAPIDJSON_ASSERT(adjustment >= 1 && adjustment < 8);
v = v * kPow10[adjustment - 1];
if (dLen + adjustment > 19) // has more digits than decimal digits in 64-bit
error += kUlp / 2;
}
v = v * cachedPower;
error += kUlp + (error == 0 ? 0 : 1);
const int oldExp = v.e;
v = v.Normalize();
error <<= oldExp - v.e;
const int effectiveSignificandSize = Double::EffectiveSignificandSize(64 + v.e);
int precisionSize = 64 - effectiveSignificandSize;
if (precisionSize + kUlpShift >= 64) {
int scaleExp = (precisionSize + kUlpShift) - 63;
v.f >>= scaleExp;
v.e += scaleExp;
error = (error >> scaleExp) + 1 + kUlp;
precisionSize -= scaleExp;
}
DiyFp rounded(v.f >> precisionSize, v.e + precisionSize);
const uint64_t precisionBits = (v.f & ((uint64_t(1) << precisionSize) - 1)) * kUlp;
const uint64_t halfWay = (uint64_t(1) << (precisionSize - 1)) * kUlp;
if (precisionBits >= halfWay + static_cast<unsigned>(error)) {
rounded.f++;
if (rounded.f & (DiyFp::kDpHiddenBit << 1)) { // rounding overflows mantissa (issue #340)
rounded.f >>= 1;
rounded.e++;
}
}
*result = rounded.ToDouble();
return halfWay - static_cast<unsigned>(error) >= precisionBits || precisionBits >= halfWay + static_cast<unsigned>(error);
}
inline double StrtodBigInteger(double approx, const char* decimals, int dLen, int dExp) {
RAPIDJSON_ASSERT(dLen >= 0);
const BigInteger dInt(decimals, static_cast<unsigned>(dLen));
Double a(approx);
int cmp = CheckWithinHalfULP(a.Value(), dInt, dExp);
if (cmp < 0)
return a.Value(); // within half ULP
else if (cmp == 0) {
// Round towards even
if (a.Significand() & 1)
return a.NextPositiveDouble();
else
return a.Value();
}
else // approximation errs low by more than half an ulp; take the next double
return a.NextPositiveDouble();
}
inline double StrtodFullPrecision(double d, int p, const char* decimals, size_t length, size_t decimalPosition, int exp) {
RAPIDJSON_ASSERT(d >= 0.0);
RAPIDJSON_ASSERT(length >= 1);
double result = 0.0;
if (StrtodFast(d, p, &result))
return result;
RAPIDJSON_ASSERT(length <= INT_MAX);
int dLen = static_cast<int>(length);
RAPIDJSON_ASSERT(length >= decimalPosition);
RAPIDJSON_ASSERT(length - decimalPosition <= INT_MAX);
int dExpAdjust = static_cast<int>(length - decimalPosition);
RAPIDJSON_ASSERT(exp >= INT_MIN + dExpAdjust);
int dExp = exp - dExpAdjust;
// Make sure length+dExp does not overflow
RAPIDJSON_ASSERT(dExp <= INT_MAX - dLen);
// Trim leading zeros
while (dLen > 0 && *decimals == '0') {
dLen--;
decimals++;
}
// Trim trailing zeros
while (dLen > 0 && decimals[dLen - 1] == '0') {
dLen--;
dExp++;
}
if (dLen == 0) { // Buffer only contains zeros.
return 0.0;
}
// Trim right-most digits
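// 768 digits are enough to decide correct rounding: the exact decimal
// expansion of any double has at most 767 significant digits.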
const int kMaxDecimalDigit = 767 + 1;
if (dLen > kMaxDecimalDigit) {
dExp += dLen - kMaxDecimalDigit;
dLen = kMaxDecimalDigit;
}
// If too small, underflow to zero.
// Any x <= 10^-324 is interpreted as zero.
if (dLen + dExp <= -324)
return 0.0;
// If too large, overflow to infinity.
// Any x >= 10^309 is interpreted as +infinity.
if (dLen + dExp > 309)
return std::numeric_limits<double>::infinity();
if (StrtodDiyFp(decimals, dLen, dExp, &result))
return result;
// Use approximation from StrtodDiyFp and make adjustment with BigInteger comparison
return StrtodBigInteger(result, decimals, dLen, dExp);
}
} // namespace internal
RAPIDJSON_NAMESPACE_END
#endif // RAPIDJSON_STRTOD_
| -1 |
dotnet/runtime | 66,006 | Revert "[mono][jit] Remove support for -O=-float32, i.e. treating r4 values a…" | Reverts this as x86 still uses it.
| vargaz | 2022-03-01T15:15:53Z | 2022-03-01T20:09:47Z | af71404d9a3b38e63408af47cc847ac1a1fcf4e1 | 2dd232a53c38ac874b15fe504df275b660988294 | Revert "[mono][jit] Remove support for -O=-float32, i.e. treating r4 values a…". Reverts this as x86 still uses it.
| ./src/mono/mono/mini/mini-riscv.c | /*
* Licensed to the .NET Foundation under one or more agreements.
* The .NET Foundation licenses this file to you under the MIT license.
*/
#include <mono/utils/mono-hwcap.h>
#include "mini-runtime.h"
#ifdef TARGET_RISCV64
#include "cpu-riscv64.h"
#else
#include "cpu-riscv32.h"
#endif
static gboolean riscv_stdext_a, riscv_stdext_b, riscv_stdext_c,
riscv_stdext_d, riscv_stdext_f, riscv_stdext_j,
riscv_stdext_l, riscv_stdext_m, riscv_stdext_n,
riscv_stdext_p, riscv_stdext_q, riscv_stdext_t,
riscv_stdext_v;
void
mono_arch_cpu_init (void)
{
}
void
mono_arch_init (void)
{
riscv_stdext_a = mono_hwcap_riscv_has_stdext_a;
riscv_stdext_c = mono_hwcap_riscv_has_stdext_c;
riscv_stdext_d = mono_hwcap_riscv_has_stdext_d;
riscv_stdext_f = mono_hwcap_riscv_has_stdext_f;
riscv_stdext_m = mono_hwcap_riscv_has_stdext_m;
}
void
mono_arch_finish_init (void)
{
}
void
mono_arch_register_lowlevel_calls (void)
{
}
void
mono_arch_cleanup (void)
{
}
void
mono_arch_set_target (char *mtriple)
{
// riscv{32,64}[extensions]-[<vendor>-]<system>-<abi>
size_t len = strlen (MONO_RISCV_ARCHITECTURE);
if (!strncmp (mtriple, MONO_RISCV_ARCHITECTURE, len)) {
mtriple += len;
for (;;) {
char c = *mtriple;
if (!c || c == '-')
break;
// ISA manual says upper and lower case are both OK.
switch (c) {
case 'A':
case 'a':
riscv_stdext_a = TRUE;
break;
case 'B':
case 'b':
riscv_stdext_b = TRUE;
break;
case 'C':
case 'c':
riscv_stdext_c = TRUE;
break;
case 'D':
case 'd':
riscv_stdext_d = TRUE;
break;
case 'F':
case 'f':
riscv_stdext_f = TRUE;
break;
case 'J':
case 'j':
riscv_stdext_j = TRUE;
break;
case 'L':
case 'l':
riscv_stdext_l = TRUE;
break;
case 'M':
case 'm':
riscv_stdext_m = TRUE;
break;
case 'N':
case 'n':
riscv_stdext_n = TRUE;
break;
case 'P':
case 'p':
riscv_stdext_p = TRUE;
break;
case 'Q':
case 'q':
riscv_stdext_q = TRUE;
break;
case 'T':
case 't':
riscv_stdext_t = TRUE;
break;
case 'V':
case 'v':
riscv_stdext_v = TRUE;
break;
default:
break;
}
mtriple++;
}
}
}
guint32
mono_arch_cpu_optimizations (guint32 *exclude_mask)
{
*exclude_mask = 0;
return 0;
}
gboolean
mono_arch_have_fast_tls (void)
{
return TRUE;
}
gboolean
mono_arch_opcode_supported (int opcode)
{
switch (opcode) {
case OP_ATOMIC_ADD_I4:
case OP_ATOMIC_EXCHANGE_I4:
case OP_ATOMIC_CAS_I4:
case OP_ATOMIC_LOAD_I1:
case OP_ATOMIC_LOAD_I2:
case OP_ATOMIC_LOAD_I4:
case OP_ATOMIC_LOAD_U1:
case OP_ATOMIC_LOAD_U2:
case OP_ATOMIC_LOAD_U4:
case OP_ATOMIC_STORE_I1:
case OP_ATOMIC_STORE_I2:
case OP_ATOMIC_STORE_I4:
case OP_ATOMIC_STORE_U1:
case OP_ATOMIC_STORE_U2:
case OP_ATOMIC_STORE_U4:
#ifdef TARGET_RISCV64
case OP_ATOMIC_ADD_I8:
case OP_ATOMIC_EXCHANGE_I8:
case OP_ATOMIC_CAS_I8:
case OP_ATOMIC_LOAD_I8:
case OP_ATOMIC_LOAD_U8:
case OP_ATOMIC_STORE_I8:
case OP_ATOMIC_STORE_U8:
#endif
return riscv_stdext_a;
case OP_ATOMIC_LOAD_R4:
case OP_ATOMIC_STORE_R4:
#ifdef TARGET_RISCV64
case OP_ATOMIC_LOAD_R8:
case OP_ATOMIC_STORE_R8:
#endif
return riscv_stdext_a && riscv_stdext_d;
default:
return FALSE;
}
}
const char *
mono_arch_regname (int reg)
{
static const char *names [RISCV_N_GREGS] = {
"zero", "ra", "sp", "gp", "tp", "t0", "t1", "t2",
"s0", "s1", "a0", "a1", "a2", "a3", "a4", "a5",
"a6", "a7", "s2", "s3", "s4", "s5", "s6", "s7",
"s8", "s9", "s10", "s11", "t3", "t4", "t5", "t6",
};
if (reg >= 0 && reg < G_N_ELEMENTS (names))
return names [reg];
return "x?";
}
const char*
mono_arch_fregname (int reg)
{
static const char *names [RISCV_N_FREGS] = {
"ft0", "ft1", "ft2", "ft3", "ft4", "ft5", "ft6", "ft7",
"fs0", "fs1", "fa0", "fa1", "fa2", "fa3", "fa4", "fa5",
"fa6", "fa7", "fs2", "fs3", "fs4", "fs5", "fs6", "fs7",
"fs8", "fs9", "fs10", "fs11", "ft8", "ft9", "ft10", "ft11",
};
if (reg >= 0 && reg < G_N_ELEMENTS (names))
return names [reg];
return "f?";
}
gpointer
mono_arch_get_this_arg_from_call (host_mgreg_t *regs, guint8 *code)
{
return (gpointer) regs [RISCV_A0];
}
MonoMethod *
mono_arch_find_imt_method (host_mgreg_t *regs, guint8 *code)
{
return (MonoMethod *) regs [MONO_ARCH_IMT_REG];
}
MonoVTable *
mono_arch_find_static_call_vtable (host_mgreg_t *regs, guint8 *code)
{
return (MonoVTable *) regs [MONO_ARCH_VTABLE_REG];
}
GSList*
mono_arch_get_cie_program (void)
{
GSList *l = NULL;
mono_add_unwind_op_def_cfa (l, (guint8*)NULL, (guint8*)NULL, RISCV_SP, 0);
return l;
}
host_mgreg_t
mono_arch_context_get_int_reg (MonoContext *ctx, int reg)
{
return ctx->gregs [reg];
}
host_mgreg_t*
mono_arch_context_get_int_reg_address (MonoContext *ctx, int reg)
{
return &ctx->gregs [reg];
}
void
mono_arch_context_set_int_reg (MonoContext *ctx, int reg, host_mgreg_t val)
{
ctx->gregs [reg] = val;
}
void
mono_arch_flush_register_windows (void)
{
}
void
mono_arch_flush_icache (guint8 *code, gint size)
{
#ifndef MONO_CROSS_COMPILE
__builtin___clear_cache (code, code + size);
#endif
}
MonoDynCallInfo *
mono_arch_dyn_call_prepare (MonoMethodSignature *sig)
{
NOT_IMPLEMENTED;
return NULL;
}
void
mono_arch_dyn_call_free (MonoDynCallInfo *info)
{
NOT_IMPLEMENTED;
}
int
mono_arch_dyn_call_get_buf_size (MonoDynCallInfo *info)
{
NOT_IMPLEMENTED;
return 0;
}
void
mono_arch_start_dyn_call (MonoDynCallInfo *info, gpointer **args, guint8 *ret,
guint8 *buf)
{
NOT_IMPLEMENTED;
}
void
mono_arch_finish_dyn_call (MonoDynCallInfo *info, guint8 *buf)
{
NOT_IMPLEMENTED;
}
int
mono_arch_get_argument_info (MonoMethodSignature *csig, int param_count,
MonoJitArgumentInfo *arg_info)
{
NOT_IMPLEMENTED;
return 0;
}
void
mono_arch_patch_code_new (MonoCompile *cfg, guint8 *code,
MonoJumpInfo *ji, gpointer target)
{
NOT_IMPLEMENTED;
}
/* Set arguments in the ccontext (for i2n entry) */
void
mono_arch_set_native_call_context_args (CallContext *ccontext, gpointer frame, MonoMethodSignature *sig)
{
NOT_IMPLEMENTED;
}
/* Set return value in the ccontext (for n2i return) */
void
mono_arch_set_native_call_context_ret (CallContext *ccontext, gpointer frame, MonoMethodSignature *sig, gpointer retp)
{
NOT_IMPLEMENTED;
}
/* Gets the arguments from ccontext (for n2i entry) */
gpointer
mono_arch_get_native_call_context_args (CallContext *ccontext, gpointer frame, MonoMethodSignature *sig)
{
NOT_IMPLEMENTED;
}
/* Gets the return value from ccontext (for i2n exit) */
void
mono_arch_get_native_call_context_ret (CallContext *ccontext, gpointer frame, MonoMethodSignature *sig)
{
NOT_IMPLEMENTED;
}
#ifndef DISABLE_JIT
#ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
gboolean
mono_arch_is_soft_float (void)
{
return !riscv_stdext_d;
}
#endif
gboolean
mono_arch_opcode_needs_emulation (MonoCompile *cfg, int opcode)
{
switch (opcode) {
case OP_IDIV:
case OP_IDIV_UN:
case OP_IREM:
case OP_IREM_UN:
#ifdef TARGET_RISCV64
case OP_LDIV:
case OP_LDIV_UN:
case OP_LREM:
case OP_LREM_UN:
#endif
return !riscv_stdext_m;
default:
return TRUE;
}
}
gboolean
mono_arch_tailcall_supported (MonoCompile *cfg, MonoMethodSignature *caller_sig, MonoMethodSignature *callee_sig, gboolean virtual_)
{
NOT_IMPLEMENTED;
}
gboolean
mono_arch_is_inst_imm (int opcode, int imm_opcode, gint64 imm)
{
// TODO: Make a proper decision based on opcode.
return TRUE;
}
GList *
mono_arch_get_allocatable_int_vars (MonoCompile *cfg)
{
GList *vars = NULL;
for (guint i = 0; i < cfg->num_varinfo; i++) {
MonoInst *ins = cfg->varinfo [i];
MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
if (vmv->range.first_use.abs_pos >= vmv->range.last_use.abs_pos)
continue;
if ((ins->flags & (MONO_INST_IS_DEAD | MONO_INST_VOLATILE | MONO_INST_INDIRECT)) ||
(ins->opcode != OP_LOCAL && ins->opcode != OP_ARG))
continue;
if (!mono_is_regsize_var (ins->inst_vtype))
continue;
vars = g_list_prepend (vars, vmv);
}
vars = mono_varlist_sort (cfg, vars, 0);
return vars;
}
GList *
mono_arch_get_global_int_regs (MonoCompile *cfg)
{
GList *regs = NULL;
for (int i = RISCV_S0; i <= RISCV_S11; i++)
regs = g_list_prepend (regs, GUINT_TO_POINTER (i));
return regs;
}
guint32
mono_arch_regalloc_cost (MonoCompile *cfg, MonoMethodVar *vmv)
{
return cfg->varinfo [vmv->idx]->opcode == OP_ARG ? 1 : 2;
}
#ifdef ENABLE_LLVM
LLVMCallInfo*
mono_arch_get_llvm_call_info (MonoCompile *cfg, MonoMethodSignature *sig)
{
NOT_IMPLEMENTED;
}
#endif
void
mono_arch_create_vars (MonoCompile *cfg)
{
NOT_IMPLEMENTED;
}
MonoInst *
mono_arch_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod,
MonoMethodSignature *fsig, MonoInst **args)
{
return NULL;
}
void
mono_arch_emit_call (MonoCompile *cfg, MonoCallInst *call)
{
NOT_IMPLEMENTED;
}
void
mono_arch_emit_outarg_vt (MonoCompile *cfg, MonoInst *ins, MonoInst *src)
{
NOT_IMPLEMENTED;
}
void
mono_arch_emit_setret (MonoCompile *cfg, MonoMethod *method, MonoInst *val)
{
NOT_IMPLEMENTED;
}
void
mono_arch_decompose_opts (MonoCompile *cfg, MonoInst *ins)
{
NOT_IMPLEMENTED;
}
void
mono_arch_decompose_long_opts (MonoCompile *cfg, MonoInst *long_ins)
{
#ifdef TARGET_RISCV32
NOT_IMPLEMENTED;
#endif
}
void
mono_arch_allocate_vars (MonoCompile *cfg)
{
NOT_IMPLEMENTED;
}
void
mono_arch_lowering_pass (MonoCompile *cfg, MonoBasicBlock *bb)
{
NOT_IMPLEMENTED;
}
void
mono_arch_peephole_pass_1 (MonoCompile *cfg, MonoBasicBlock *bb)
{
}
void
mono_arch_peephole_pass_2 (MonoCompile *cfg, MonoBasicBlock *bb)
{
}
// Uses at most 8 bytes on RV32I and 16 bytes on RV64I.
guint8 *
mono_riscv_emit_imm (guint8 *code, int rd, gsize imm)
{
#ifdef TARGET_RISCV64
if (RISCV_VALID_I_IMM (imm)) {
riscv_addi (code, rd, RISCV_ZERO, imm);
return code;
}
/*
* This is not pretty, but RV64I doesn't make it easy to load constants.
* Need to figure out something better.
*/
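/* jal with a non-zero rd links: rd receives the address of the 8-byte
 * literal emitted next, and execution jumps over the literal so the ld
 * below can fetch the constant through rd. */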
riscv_jal (code, rd, sizeof (guint64));
*(guint64 *) code = imm;
code += sizeof (guint64);
riscv_ld (code, rd, rd, 0);
#else
if (RISCV_VALID_I_IMM (imm)) {
riscv_addi (code, rd, RISCV_ZERO, imm);
return code;
}
riscv_lui (code, rd, RISCV_BITS (imm, 12, 20));
if (!RISCV_VALID_U_IMM (imm))
riscv_ori (code, rd, rd, RISCV_BITS (imm, 0, 12));
#endif
return code;
}
// Uses at most 16 bytes on RV32I and 24 bytes on RV64I.
guint8 *
mono_riscv_emit_load (guint8 *code, int rd, int rs1, gint32 imm)
{
if (RISCV_VALID_I_IMM (imm)) {
#ifdef TARGET_RISCV64
riscv_ld (code, rd, rs1, imm);
#else
riscv_lw (code, rd, rs1, imm);
#endif
} else {
code = mono_riscv_emit_imm (code, rd, imm);
riscv_add (code, rd, rs1, rd);
#ifdef TARGET_RISCV64
riscv_ld (code, rd, rd, 0);
#else
riscv_lw (code, rd, rd, 0);
#endif
}
return code;
}
// May clobber t1. Uses at most 16 bytes on RV32I and 24 bytes on RV64I.
guint8 *
mono_riscv_emit_store (guint8 *code, int rs1, int rs2, gint32 imm)
{
if (RISCV_VALID_S_IMM (imm)) {
#ifdef TARGET_RISCV64
riscv_sd (code, rs1, rs2, imm);
#else
riscv_sw (code, rs1, rs2, imm);
#endif
} else {
code = mono_riscv_emit_imm (code, RISCV_T1, imm);
riscv_add (code, RISCV_T1, rs2, RISCV_T1);
#ifdef TARGET_RISCV64
riscv_sd (code, rs1, RISCV_T1, 0);
#else
riscv_sw (code, rs1, RISCV_T1, 0);
#endif
}
return code;
}
guint8 *
mono_arch_emit_prolog (MonoCompile *cfg)
{
NOT_IMPLEMENTED;
}
void
mono_arch_emit_epilog (MonoCompile *cfg)
{
NOT_IMPLEMENTED;
}
void
mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
{
NOT_IMPLEMENTED;
}
void
mono_arch_emit_exceptions (MonoCompile *cfg)
{
NOT_IMPLEMENTED;
}
guint32
mono_arch_get_patch_offset (guint8 *code)
{
NOT_IMPLEMENTED;
return 0;
}
GSList *
mono_arch_get_trampolines (gboolean aot)
{
NOT_IMPLEMENTED;
return NULL;
}
#endif
#if defined(MONO_ARCH_SOFT_DEBUG_SUPPORTED)
void
mono_arch_set_breakpoint (MonoJitInfo *ji, guint8 *ip)
{
NOT_IMPLEMENTED;
}
void
mono_arch_clear_breakpoint (MonoJitInfo *ji, guint8 *ip)
{
NOT_IMPLEMENTED;
}
void
mono_arch_start_single_stepping (void)
{
NOT_IMPLEMENTED;
}
void
mono_arch_stop_single_stepping (void)
{
NOT_IMPLEMENTED;
}
gboolean
mono_arch_is_single_step_event (void *info, void *sigctx)
{
NOT_IMPLEMENTED;
return FALSE;
}
gboolean
mono_arch_is_breakpoint_event (void *info, void *sigctx)
{
NOT_IMPLEMENTED;
return FALSE;
}
void
mono_arch_skip_breakpoint (MonoContext *ctx, MonoJitInfo *ji)
{
NOT_IMPLEMENTED;
}
void
mono_arch_skip_single_step (MonoContext *ctx)
{
NOT_IMPLEMENTED;
}
gpointer
mono_arch_get_seq_point_info (guint8 *code)
{
NOT_IMPLEMENTED;
return NULL;
}
#endif /* MONO_ARCH_SOFT_DEBUG_SUPPORTED */
gpointer
mono_arch_load_function (MonoJitICallId jit_icall_id)
{
return NULL;
}
| /*
* Licensed to the .NET Foundation under one or more agreements.
* The .NET Foundation licenses this file to you under the MIT license.
*/
#include <mono/utils/mono-hwcap.h>
#include "mini-runtime.h"
#ifdef TARGET_RISCV64
#include "cpu-riscv64.h"
#else
#include "cpu-riscv32.h"
#endif
static gboolean riscv_stdext_a, riscv_stdext_b, riscv_stdext_c,
riscv_stdext_d, riscv_stdext_f, riscv_stdext_j,
riscv_stdext_l, riscv_stdext_m, riscv_stdext_n,
riscv_stdext_p, riscv_stdext_q, riscv_stdext_t,
riscv_stdext_v;
void
mono_arch_cpu_init (void)
{
}
void
mono_arch_init (void)
{
riscv_stdext_a = mono_hwcap_riscv_has_stdext_a;
riscv_stdext_c = mono_hwcap_riscv_has_stdext_c;
riscv_stdext_d = mono_hwcap_riscv_has_stdext_d;
riscv_stdext_f = mono_hwcap_riscv_has_stdext_f;
riscv_stdext_m = mono_hwcap_riscv_has_stdext_m;
}
void
mono_arch_finish_init (void)
{
}
void
mono_arch_register_lowlevel_calls (void)
{
}
void
mono_arch_cleanup (void)
{
}
void
mono_arch_set_target (char *mtriple)
{
// riscv{32,64}[extensions]-[<vendor>-]<system>-<abi>
size_t len = strlen (MONO_RISCV_ARCHITECTURE);
if (!strncmp (mtriple, MONO_RISCV_ARCHITECTURE, len)) {
mtriple += len;
for (;;) {
char c = *mtriple;
if (!c || c == '-')
break;
// ISA manual says upper and lower case are both OK.
switch (c) {
case 'A':
case 'a':
riscv_stdext_a = TRUE;
break;
case 'B':
case 'b':
riscv_stdext_b = TRUE;
break;
case 'C':
case 'c':
riscv_stdext_c = TRUE;
break;
case 'D':
case 'd':
riscv_stdext_d = TRUE;
break;
case 'F':
case 'f':
riscv_stdext_f = TRUE;
break;
case 'J':
case 'j':
riscv_stdext_j = TRUE;
break;
case 'L':
case 'l':
riscv_stdext_l = TRUE;
break;
case 'M':
case 'm':
riscv_stdext_m = TRUE;
break;
case 'N':
case 'n':
riscv_stdext_n = TRUE;
break;
case 'P':
case 'p':
riscv_stdext_p = TRUE;
break;
case 'Q':
case 'q':
riscv_stdext_q = TRUE;
break;
case 'T':
case 't':
riscv_stdext_t = TRUE;
break;
case 'V':
case 'v':
riscv_stdext_v = TRUE;
break;
default:
break;
}
mtriple++;
}
}
}
guint32
mono_arch_cpu_optimizations (guint32 *exclude_mask)
{
*exclude_mask = 0;
return 0;
}
gboolean
mono_arch_have_fast_tls (void)
{
return TRUE;
}
gboolean
mono_arch_opcode_supported (int opcode)
{
switch (opcode) {
case OP_ATOMIC_ADD_I4:
case OP_ATOMIC_EXCHANGE_I4:
case OP_ATOMIC_CAS_I4:
case OP_ATOMIC_LOAD_I1:
case OP_ATOMIC_LOAD_I2:
case OP_ATOMIC_LOAD_I4:
case OP_ATOMIC_LOAD_U1:
case OP_ATOMIC_LOAD_U2:
case OP_ATOMIC_LOAD_U4:
case OP_ATOMIC_STORE_I1:
case OP_ATOMIC_STORE_I2:
case OP_ATOMIC_STORE_I4:
case OP_ATOMIC_STORE_U1:
case OP_ATOMIC_STORE_U2:
case OP_ATOMIC_STORE_U4:
#ifdef TARGET_RISCV64
case OP_ATOMIC_ADD_I8:
case OP_ATOMIC_EXCHANGE_I8:
case OP_ATOMIC_CAS_I8:
case OP_ATOMIC_LOAD_I8:
case OP_ATOMIC_LOAD_U8:
case OP_ATOMIC_STORE_I8:
case OP_ATOMIC_STORE_U8:
#endif
return riscv_stdext_a;
case OP_ATOMIC_LOAD_R4:
case OP_ATOMIC_STORE_R4:
#ifdef TARGET_RISCV64
case OP_ATOMIC_LOAD_R8:
case OP_ATOMIC_STORE_R8:
#endif
return riscv_stdext_a && riscv_stdext_d;
default:
return FALSE;
}
}
const char *
mono_arch_regname (int reg)
{
static const char *names [RISCV_N_GREGS] = {
"zero", "ra", "sp", "gp", "tp", "t0", "t1", "t2",
"s0", "s1", "a0", "a1", "a2", "a3", "a4", "a5",
"a6", "a7", "s2", "s3", "s4", "s5", "s6", "s7",
"s8", "s9", "s10", "s11", "t3", "t4", "t5", "t6",
};
if (reg >= 0 && reg < G_N_ELEMENTS (names))
return names [reg];
return "x?";
}
const char*
mono_arch_fregname (int reg)
{
static const char *names [RISCV_N_FREGS] = {
"ft0", "ft1", "ft2", "ft3", "ft4", "ft5", "ft6", "ft7",
"fs0", "fs1", "fa0", "fa1", "fa2", "fa3", "fa4", "fa5",
"fa6", "fa7", "fs2", "fs3", "fs4", "fs5", "fs6", "fs7",
"fs8", "fs9", "fs10", "fs11", "ft8", "ft9", "ft10", "ft11",
};
if (reg >= 0 && reg < G_N_ELEMENTS (names))
return names [reg];
return "f?";
}
gpointer
mono_arch_get_this_arg_from_call (host_mgreg_t *regs, guint8 *code)
{
return (gpointer) regs [RISCV_A0];
}
MonoMethod *
mono_arch_find_imt_method (host_mgreg_t *regs, guint8 *code)
{
return (MonoMethod *) regs [MONO_ARCH_IMT_REG];
}
MonoVTable *
mono_arch_find_static_call_vtable (host_mgreg_t *regs, guint8 *code)
{
return (MonoVTable *) regs [MONO_ARCH_VTABLE_REG];
}
GSList*
mono_arch_get_cie_program (void)
{
GSList *l = NULL;
mono_add_unwind_op_def_cfa (l, (guint8*)NULL, (guint8*)NULL, RISCV_SP, 0);
return l;
}
host_mgreg_t
mono_arch_context_get_int_reg (MonoContext *ctx, int reg)
{
return ctx->gregs [reg];
}
host_mgreg_t*
mono_arch_context_get_int_reg_address (MonoContext *ctx, int reg)
{
return &ctx->gregs [reg];
}
void
mono_arch_context_set_int_reg (MonoContext *ctx, int reg, host_mgreg_t val)
{
ctx->gregs [reg] = val;
}
void
mono_arch_flush_register_windows (void)
{
}
void
mono_arch_flush_icache (guint8 *code, gint size)
{
#ifndef MONO_CROSS_COMPILE
__builtin___clear_cache (code, code + size);
#endif
}
MonoDynCallInfo *
mono_arch_dyn_call_prepare (MonoMethodSignature *sig)
{
NOT_IMPLEMENTED;
return NULL;
}
void
mono_arch_dyn_call_free (MonoDynCallInfo *info)
{
NOT_IMPLEMENTED;
}
int
mono_arch_dyn_call_get_buf_size (MonoDynCallInfo *info)
{
NOT_IMPLEMENTED;
return 0;
}
void
mono_arch_start_dyn_call (MonoDynCallInfo *info, gpointer **args, guint8 *ret,
guint8 *buf)
{
NOT_IMPLEMENTED;
}
void
mono_arch_finish_dyn_call (MonoDynCallInfo *info, guint8 *buf)
{
NOT_IMPLEMENTED;
}
int
mono_arch_get_argument_info (MonoMethodSignature *csig, int param_count,
MonoJitArgumentInfo *arg_info)
{
NOT_IMPLEMENTED;
return 0;
}
void
mono_arch_patch_code_new (MonoCompile *cfg, guint8 *code,
MonoJumpInfo *ji, gpointer target)
{
NOT_IMPLEMENTED;
}
/* Set arguments in the ccontext (for i2n entry) */
void
mono_arch_set_native_call_context_args (CallContext *ccontext, gpointer frame, MonoMethodSignature *sig)
{
NOT_IMPLEMENTED;
}
/* Set return value in the ccontext (for n2i return) */
void
mono_arch_set_native_call_context_ret (CallContext *ccontext, gpointer frame, MonoMethodSignature *sig, gpointer retp)
{
NOT_IMPLEMENTED;
}
/* Gets the arguments from ccontext (for n2i entry) */
gpointer
mono_arch_get_native_call_context_args (CallContext *ccontext, gpointer frame, MonoMethodSignature *sig)
{
NOT_IMPLEMENTED;
}
/* Gets the return value from ccontext (for i2n exit) */
void
mono_arch_get_native_call_context_ret (CallContext *ccontext, gpointer frame, MonoMethodSignature *sig)
{
NOT_IMPLEMENTED;
}
#ifndef DISABLE_JIT
#ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
gboolean
mono_arch_is_soft_float (void)
{
return !riscv_stdext_d;
}
#endif
gboolean
mono_arch_opcode_needs_emulation (MonoCompile *cfg, int opcode)
{
switch (opcode) {
case OP_IDIV:
case OP_IDIV_UN:
case OP_IREM:
case OP_IREM_UN:
#ifdef TARGET_RISCV64
case OP_LDIV:
case OP_LDIV_UN:
case OP_LREM:
case OP_LREM_UN:
#endif
return !riscv_stdext_m;
default:
return TRUE;
}
}
gboolean
mono_arch_tailcall_supported (MonoCompile *cfg, MonoMethodSignature *caller_sig, MonoMethodSignature *callee_sig, gboolean virtual_)
{
NOT_IMPLEMENTED;
}
gboolean
mono_arch_is_inst_imm (int opcode, int imm_opcode, gint64 imm)
{
// TODO: Make a proper decision based on opcode.
return TRUE;
}
GList *
mono_arch_get_allocatable_int_vars (MonoCompile *cfg)
{
GList *vars = NULL;
for (guint i = 0; i < cfg->num_varinfo; i++) {
MonoInst *ins = cfg->varinfo [i];
MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
if (vmv->range.first_use.abs_pos >= vmv->range.last_use.abs_pos)
continue;
if ((ins->flags & (MONO_INST_IS_DEAD | MONO_INST_VOLATILE | MONO_INST_INDIRECT)) ||
(ins->opcode != OP_LOCAL && ins->opcode != OP_ARG))
continue;
if (!mono_is_regsize_var (ins->inst_vtype))
continue;
vars = g_list_prepend (vars, vmv);
}
vars = mono_varlist_sort (cfg, vars, 0);
return vars;
}
GList *
mono_arch_get_global_int_regs (MonoCompile *cfg)
{
GList *regs = NULL;
for (int i = RISCV_S0; i <= RISCV_S11; i++)
regs = g_list_prepend (regs, GUINT_TO_POINTER (i));
return regs;
}
guint32
mono_arch_regalloc_cost (MonoCompile *cfg, MonoMethodVar *vmv)
{
return cfg->varinfo [vmv->idx]->opcode == OP_ARG ? 1 : 2;
}
#ifdef ENABLE_LLVM
LLVMCallInfo*
mono_arch_get_llvm_call_info (MonoCompile *cfg, MonoMethodSignature *sig)
{
NOT_IMPLEMENTED;
}
#endif
void
mono_arch_create_vars (MonoCompile *cfg)
{
NOT_IMPLEMENTED;
}
MonoInst *
mono_arch_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod,
MonoMethodSignature *fsig, MonoInst **args)
{
return NULL;
}
void
mono_arch_emit_call (MonoCompile *cfg, MonoCallInst *call)
{
NOT_IMPLEMENTED;
}
void
mono_arch_emit_outarg_vt (MonoCompile *cfg, MonoInst *ins, MonoInst *src)
{
NOT_IMPLEMENTED;
}
void
mono_arch_emit_setret (MonoCompile *cfg, MonoMethod *method, MonoInst *val)
{
NOT_IMPLEMENTED;
}
void
mono_arch_decompose_opts (MonoCompile *cfg, MonoInst *ins)
{
NOT_IMPLEMENTED;
}
void
mono_arch_decompose_long_opts (MonoCompile *cfg, MonoInst *long_ins)
{
#ifdef TARGET_RISCV32
NOT_IMPLEMENTED;
#endif
}
void
mono_arch_allocate_vars (MonoCompile *cfg)
{
NOT_IMPLEMENTED;
}
void
mono_arch_lowering_pass (MonoCompile *cfg, MonoBasicBlock *bb)
{
NOT_IMPLEMENTED;
}
void
mono_arch_peephole_pass_1 (MonoCompile *cfg, MonoBasicBlock *bb)
{
}
void
mono_arch_peephole_pass_2 (MonoCompile *cfg, MonoBasicBlock *bb)
{
}
// Uses at most 8 bytes on RV32I and 16 bytes on RV64I.
guint8 *
mono_riscv_emit_imm (guint8 *code, int rd, gsize imm)
{
#ifdef TARGET_RISCV64
if (RISCV_VALID_I_IMM (imm)) {
riscv_addi (code, rd, RISCV_ZERO, imm);
return code;
}
/*
* This is not pretty, but RV64I doesn't make it easy to load constants.
* Need to figure out something better.
*/
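/* jal with a non-zero rd links: rd receives the address of the 8-byte
 * literal emitted next, and execution jumps over the literal so the ld
 * below can fetch the constant through rd. */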
riscv_jal (code, rd, sizeof (guint64));
*(guint64 *) code = imm;
code += sizeof (guint64);
riscv_ld (code, rd, rd, 0);
#else
if (RISCV_VALID_I_IMM (imm)) {
riscv_addi (code, rd, RISCV_ZERO, imm);
return code;
}
riscv_lui (code, rd, RISCV_BITS (imm, 12, 20));
if (!RISCV_VALID_U_IMM (imm))
riscv_ori (code, rd, rd, RISCV_BITS (imm, 0, 12));
#endif
return code;
}
// Uses at most 16 bytes on RV32I and 24 bytes on RV64I.
guint8 *
mono_riscv_emit_load (guint8 *code, int rd, int rs1, gint32 imm)
{
if (RISCV_VALID_I_IMM (imm)) {
#ifdef TARGET_RISCV64
riscv_ld (code, rd, rs1, imm);
#else
riscv_lw (code, rd, rs1, imm);
#endif
} else {
code = mono_riscv_emit_imm (code, rd, imm);
riscv_add (code, rd, rs1, rd);
#ifdef TARGET_RISCV64
riscv_ld (code, rd, rd, 0);
#else
riscv_lw (code, rd, rd, 0);
#endif
}
return code;
}
// May clobber t1. Uses at most 16 bytes on RV32I and 24 bytes on RV64I.
guint8 *
mono_riscv_emit_store (guint8 *code, int rs1, int rs2, gint32 imm)
{
if (RISCV_VALID_S_IMM (imm)) {
#ifdef TARGET_RISCV64
riscv_sd (code, rs1, rs2, imm);
#else
riscv_sw (code, rs1, rs2, imm);
#endif
} else {
code = mono_riscv_emit_imm (code, RISCV_T1, imm);
riscv_add (code, RISCV_T1, rs2, RISCV_T1);
#ifdef TARGET_RISCV64
riscv_sd (code, rs1, RISCV_T1, 0);
#else
riscv_sw (code, rs1, RISCV_T1, 0);
#endif
}
return code;
}
guint8 *
mono_arch_emit_prolog (MonoCompile *cfg)
{
NOT_IMPLEMENTED;
}
void
mono_arch_emit_epilog (MonoCompile *cfg)
{
NOT_IMPLEMENTED;
}
void
mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
{
NOT_IMPLEMENTED;
}
void
mono_arch_emit_exceptions (MonoCompile *cfg)
{
NOT_IMPLEMENTED;
}
guint32
mono_arch_get_patch_offset (guint8 *code)
{
NOT_IMPLEMENTED;
return 0;
}
GSList *
mono_arch_get_trampolines (gboolean aot)
{
NOT_IMPLEMENTED;
return NULL;
}
#endif
#if defined(MONO_ARCH_SOFT_DEBUG_SUPPORTED)
void
mono_arch_set_breakpoint (MonoJitInfo *ji, guint8 *ip)
{
NOT_IMPLEMENTED;
}
void
mono_arch_clear_breakpoint (MonoJitInfo *ji, guint8 *ip)
{
NOT_IMPLEMENTED;
}
void
mono_arch_start_single_stepping (void)
{
NOT_IMPLEMENTED;
}
void
mono_arch_stop_single_stepping (void)
{
NOT_IMPLEMENTED;
}
gboolean
mono_arch_is_single_step_event (void *info, void *sigctx)
{
NOT_IMPLEMENTED;
return FALSE;
}
gboolean
mono_arch_is_breakpoint_event (void *info, void *sigctx)
{
NOT_IMPLEMENTED;
return FALSE;
}
void
mono_arch_skip_breakpoint (MonoContext *ctx, MonoJitInfo *ji)
{
NOT_IMPLEMENTED;
}
void
mono_arch_skip_single_step (MonoContext *ctx)
{
NOT_IMPLEMENTED;
}
gpointer
mono_arch_get_seq_point_info (guint8 *code)
{
NOT_IMPLEMENTED;
return NULL;
}
#endif /* MONO_ARCH_SOFT_DEBUG_SUPPORTED */
gpointer
mono_arch_load_function (MonoJitICallId jit_icall_id)
{
return NULL;
}
| -1 |
dotnet/runtime | 66,006 | Revert "[mono][jit] Remove support for -O=-float32, i.e. treating r4 values a…" | Reverts this as x86 still uses it.
| vargaz | 2022-03-01T15:15:53Z | 2022-03-01T20:09:47Z | af71404d9a3b38e63408af47cc847ac1a1fcf4e1 | 2dd232a53c38ac874b15fe504df275b660988294 | Revert "[mono][jit] Remove support for -O=-float32, i.e. treating r4 values a…". Reverts this as x86 still uses it.
| ./src/mono/mono/sgen/sgen-pointer-queue.h | /**
* \file
* A pointer queue that can be sorted.
*
* Copyright (C) 2014 Xamarin Inc
*
* Licensed under the MIT license. See LICENSE file in the project root for full license information.
*/
#ifndef __MONO_SGEN_POINTER_QUEUE_H__
#define __MONO_SGEN_POINTER_QUEUE_H__
#include <glib.h>
typedef struct {
void **data;
size_t size;
size_t next_slot;
int mem_type;
} SgenPointerQueue;
#define SGEN_POINTER_QUEUE_INIT(mem_type) { NULL, 0, 0, (mem_type) }
void sgen_pointer_queue_add (SgenPointerQueue *queue, void *ptr);
void sgen_pointer_queue_clear (SgenPointerQueue *queue);
void sgen_pointer_queue_remove_nulls (SgenPointerQueue *queue);
void sgen_pointer_queue_sort_uniq (SgenPointerQueue *queue);
size_t sgen_pointer_queue_search (SgenPointerQueue *queue, void *addr);
size_t sgen_pointer_queue_find (SgenPointerQueue *queue, void *ptr);
void sgen_pointer_queue_init (SgenPointerQueue *queue, int mem_type);
void* sgen_pointer_queue_pop (SgenPointerQueue *queue);
gboolean sgen_pointer_queue_is_empty (SgenPointerQueue *queue);
void sgen_pointer_queue_free (SgenPointerQueue *queue);
gboolean sgen_pointer_queue_will_grow (SgenPointerQueue *queue);
#endif
| /**
* \file
* A pointer queue that can be sorted.
*
* Copyright (C) 2014 Xamarin Inc
*
* Licensed under the MIT license. See LICENSE file in the project root for full license information.
*/
#ifndef __MONO_SGEN_POINTER_QUEUE_H__
#define __MONO_SGEN_POINTER_QUEUE_H__
#include <glib.h>
typedef struct {
void **data;
size_t size;
size_t next_slot;
int mem_type;
} SgenPointerQueue;
#define SGEN_POINTER_QUEUE_INIT(mem_type) { NULL, 0, 0, (mem_type) }
void sgen_pointer_queue_add (SgenPointerQueue *queue, void *ptr);
void sgen_pointer_queue_clear (SgenPointerQueue *queue);
void sgen_pointer_queue_remove_nulls (SgenPointerQueue *queue);
void sgen_pointer_queue_sort_uniq (SgenPointerQueue *queue);
size_t sgen_pointer_queue_search (SgenPointerQueue *queue, void *addr);
size_t sgen_pointer_queue_find (SgenPointerQueue *queue, void *ptr);
void sgen_pointer_queue_init (SgenPointerQueue *queue, int mem_type);
void* sgen_pointer_queue_pop (SgenPointerQueue *queue);
gboolean sgen_pointer_queue_is_empty (SgenPointerQueue *queue);
void sgen_pointer_queue_free (SgenPointerQueue *queue);
gboolean sgen_pointer_queue_will_grow (SgenPointerQueue *queue);
#endif
| -1 |
dotnet/runtime | 66,006 | Revert "[mono][jit] Remove support for -O=-float32, i.e. treating r4 values a…" | Reverts this as x86 still uses it.
| vargaz | 2022-03-01T15:15:53Z | 2022-03-01T20:09:47Z | af71404d9a3b38e63408af47cc847ac1a1fcf4e1 | 2dd232a53c38ac874b15fe504df275b660988294 | Revert "[mono][jit] Remove support for -O=-float32, i.e. treating r4 values a…". Reverts this as x86 still uses it.
| ./src/coreclr/debug/inc/arm64/primitives.h | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//*****************************************************************************
// File: primitives.h
//
//
// Platform-specific debugger primitives
//
//*****************************************************************************
#ifndef PRIMITIVES_H_
#define PRIMITIVES_H_
#if !defined(DBI_COMPILE) && !defined(DACCESS_COMPILE)
#include "executableallocator.h"
#endif
typedef NEON128 FPRegister64;
typedef const BYTE CORDB_ADDRESS_TYPE;
typedef DPTR(CORDB_ADDRESS_TYPE) PTR_CORDB_ADDRESS_TYPE;
#define MAX_INSTRUCTION_LENGTH 4
// Given a return address retrieved during stackwalk,
// this is the offset by which it should be decremented to land at the call instruction.
#define STACKWALK_CONTROLPC_ADJUST_OFFSET 4
#define PRD_TYPE LONG
#define CORDbg_BREAK_INSTRUCTION_SIZE 4
#define CORDbg_BREAK_INSTRUCTION (LONG)0xD43E0000
#define NZCV_N 0x80000000
#define NZCV_Z 0x40000000
#define NZCV_C 0x20000000
#define NZCV_V 0x10000000
#define NZCV_N_BIT 0x1f
#define NZCV_Z_BIT 0x1e
#define NZCV_C_BIT 0x1d
#define NZCV_V_BIT 0x1c
inline CORDB_ADDRESS GetPatchEndAddr(CORDB_ADDRESS patchAddr)
{
LIMITED_METHOD_DAC_CONTRACT;
return patchAddr + CORDbg_BREAK_INSTRUCTION_SIZE;
}
#define InitializePRDToBreakInst(_pPRD) *(_pPRD) = CORDbg_BREAK_INSTRUCTION
#define PRDIsBreakInst(_pPRD) (*(_pPRD) == CORDbg_BREAK_INSTRUCTION)
#define CORDbgGetInstructionEx(_buffer, _requestedAddr, _patchAddr, _dummy1, _dummy2) \
CORDbgGetInstructionExImpl((CORDB_ADDRESS_TYPE *)((_buffer) + (_patchAddr) - (_requestedAddr)));
#define CORDbgSetInstructionEx(_buffer, _requestedAddr, _patchAddr, _opcode, _dummy2) \
CORDbgSetInstructionExImpl((CORDB_ADDRESS_TYPE *)((_buffer) + (_patchAddr) - (_requestedAddr)), (_opcode));
#define CORDbgInsertBreakpointEx(_buffer, _requestedAddr, _patchAddr, _dummy1, _dummy2) \
CORDbgInsertBreakpointExImpl((CORDB_ADDRESS_TYPE *)((_buffer) + (_patchAddr) - (_requestedAddr)));
constexpr CorDebugRegister g_JITToCorDbgReg[] =
{
REGISTER_ARM64_X0,
REGISTER_ARM64_X1,
REGISTER_ARM64_X2,
REGISTER_ARM64_X3,
REGISTER_ARM64_X4,
REGISTER_ARM64_X5,
REGISTER_ARM64_X6,
REGISTER_ARM64_X7,
REGISTER_ARM64_X8,
REGISTER_ARM64_X9,
REGISTER_ARM64_X10,
REGISTER_ARM64_X11,
REGISTER_ARM64_X12,
REGISTER_ARM64_X13,
REGISTER_ARM64_X14,
REGISTER_ARM64_X15,
REGISTER_ARM64_X16,
REGISTER_ARM64_X17,
REGISTER_ARM64_X18,
REGISTER_ARM64_X19,
REGISTER_ARM64_X20,
REGISTER_ARM64_X21,
REGISTER_ARM64_X22,
REGISTER_ARM64_X23,
REGISTER_ARM64_X24,
REGISTER_ARM64_X25,
REGISTER_ARM64_X26,
REGISTER_ARM64_X27,
REGISTER_ARM64_X28,
REGISTER_ARM64_FP,
REGISTER_ARM64_LR,
REGISTER_ARM64_SP,
REGISTER_ARM64_PC
};
inline void CORDbgSetIP(DT_CONTEXT *context, LPVOID eip) {
LIMITED_METHOD_CONTRACT;
context->Pc = (DWORD64)eip;
}
inline LPVOID CORDbgGetSP(const DT_CONTEXT * context) {
LIMITED_METHOD_CONTRACT;
return (LPVOID)(size_t)(context->Sp);
}
inline void CORDbgSetSP(DT_CONTEXT *context, LPVOID esp) {
LIMITED_METHOD_CONTRACT;
context->Sp = (DWORD64)esp;
}
inline LPVOID CORDbgGetFP(const DT_CONTEXT * context) {
LIMITED_METHOD_CONTRACT;
return (LPVOID)(size_t)(context->Fp);
}
inline void CORDbgSetFP(DT_CONTEXT *context, LPVOID fp) {
LIMITED_METHOD_CONTRACT;
context->Fp = (DWORD64)fp;
}
inline BOOL CompareControlRegisters(const DT_CONTEXT * pCtx1, const DT_CONTEXT * pCtx2)
{
LIMITED_METHOD_DAC_CONTRACT;
// @ARMTODO: Sort out frame registers
if ((pCtx1->Pc == pCtx2->Pc) &&
(pCtx1->Sp == pCtx2->Sp) &&
(pCtx1->Fp == pCtx2->Fp))
{
return TRUE;
}
return FALSE;
}
inline void CORDbgSetInstruction(CORDB_ADDRESS_TYPE* address,
PRD_TYPE instruction)
{
// In a DAC build, this function assumes the input is a host address.
LIMITED_METHOD_DAC_CONTRACT;
#if !defined(DBI_COMPILE) && !defined(DACCESS_COMPILE) && defined(HOST_OSX)
ExecutableWriterHolder<void> instructionWriterHolder((LPVOID)address, sizeof(PRD_TYPE));
ULONGLONG ptraddr = dac_cast<ULONGLONG>(instructionWriterHolder.GetRW());
#else // !DBI_COMPILE && !DACCESS_COMPILE && HOST_OSX
ULONGLONG ptraddr = dac_cast<ULONGLONG>(address);
#endif // !DBI_COMPILE && !DACCESS_COMPILE && HOST_OSX
*(PRD_TYPE *)ptraddr = instruction;
FlushInstructionCache(GetCurrentProcess(),
address,
sizeof(PRD_TYPE));
}
inline PRD_TYPE CORDbgGetInstruction(UNALIGNED CORDB_ADDRESS_TYPE* address)
{
LIMITED_METHOD_CONTRACT;
ULONGLONG ptraddr = dac_cast<ULONGLONG>(address);
return *(PRD_TYPE *)ptraddr;
}
inline void SetSSFlag(DT_CONTEXT *pContext)
{
_ASSERTE(pContext != NULL);
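// Bit 21 of the saved PSTATE (Cpsr) is the AArch64 software-step (SS) flag.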
pContext->Cpsr |= 0x00200000;
}
inline void UnsetSSFlag(DT_CONTEXT *pContext)
{
_ASSERTE(pContext != NULL);
pContext->Cpsr &= ~0x00200000;
}
inline bool IsSSFlagEnabled(DT_CONTEXT * pContext)
{
_ASSERTE(pContext != NULL);
return (pContext->Cpsr & 0x00200000) != 0;
}
#include "arm_primitives.h"
#endif // PRIMITIVES_H_
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//*****************************************************************************
// File: primitives.h
//
//
// Platform-specific debugger primitives
//
//*****************************************************************************
#ifndef PRIMITIVES_H_
#define PRIMITIVES_H_
#if !defined(DBI_COMPILE) && !defined(DACCESS_COMPILE)
#include "executableallocator.h"
#endif
typedef NEON128 FPRegister64;
typedef const BYTE CORDB_ADDRESS_TYPE;
typedef DPTR(CORDB_ADDRESS_TYPE) PTR_CORDB_ADDRESS_TYPE;
#define MAX_INSTRUCTION_LENGTH 4
// Given a return address retrieved during stackwalk,
// this is the offset by which it should be decremented to land at the call instruction.
#define STACKWALK_CONTROLPC_ADJUST_OFFSET 4
#define PRD_TYPE LONG
#define CORDbg_BREAK_INSTRUCTION_SIZE 4
#define CORDbg_BREAK_INSTRUCTION (LONG)0xD43E0000
#define NZCV_N 0x80000000
#define NZCV_Z 0x40000000
#define NZCV_C 0x20000000
#define NZCV_V 0x10000000
#define NZCV_N_BIT 0x1f
#define NZCV_Z_BIT 0x1e
#define NZCV_C_BIT 0x1d
#define NZCV_V_BIT 0x1c
inline CORDB_ADDRESS GetPatchEndAddr(CORDB_ADDRESS patchAddr)
{
LIMITED_METHOD_DAC_CONTRACT;
return patchAddr + CORDbg_BREAK_INSTRUCTION_SIZE;
}
#define InitializePRDToBreakInst(_pPRD) *(_pPRD) = CORDbg_BREAK_INSTRUCTION
#define PRDIsBreakInst(_pPRD) (*(_pPRD) == CORDbg_BREAK_INSTRUCTION)
#define CORDbgGetInstructionEx(_buffer, _requestedAddr, _patchAddr, _dummy1, _dummy2) \
CORDbgGetInstructionExImpl((CORDB_ADDRESS_TYPE *)((_buffer) + (_patchAddr) - (_requestedAddr)));
#define CORDbgSetInstructionEx(_buffer, _requestedAddr, _patchAddr, _opcode, _dummy2) \
CORDbgSetInstructionExImpl((CORDB_ADDRESS_TYPE *)((_buffer) + (_patchAddr) - (_requestedAddr)), (_opcode));
#define CORDbgInsertBreakpointEx(_buffer, _requestedAddr, _patchAddr, _dummy1, _dummy2) \
CORDbgInsertBreakpointExImpl((CORDB_ADDRESS_TYPE *)((_buffer) + (_patchAddr) - (_requestedAddr)));
constexpr CorDebugRegister g_JITToCorDbgReg[] =
{
REGISTER_ARM64_X0,
REGISTER_ARM64_X1,
REGISTER_ARM64_X2,
REGISTER_ARM64_X3,
REGISTER_ARM64_X4,
REGISTER_ARM64_X5,
REGISTER_ARM64_X6,
REGISTER_ARM64_X7,
REGISTER_ARM64_X8,
REGISTER_ARM64_X9,
REGISTER_ARM64_X10,
REGISTER_ARM64_X11,
REGISTER_ARM64_X12,
REGISTER_ARM64_X13,
REGISTER_ARM64_X14,
REGISTER_ARM64_X15,
REGISTER_ARM64_X16,
REGISTER_ARM64_X17,
REGISTER_ARM64_X18,
REGISTER_ARM64_X19,
REGISTER_ARM64_X20,
REGISTER_ARM64_X21,
REGISTER_ARM64_X22,
REGISTER_ARM64_X23,
REGISTER_ARM64_X24,
REGISTER_ARM64_X25,
REGISTER_ARM64_X26,
REGISTER_ARM64_X27,
REGISTER_ARM64_X28,
REGISTER_ARM64_FP,
REGISTER_ARM64_LR,
REGISTER_ARM64_SP,
REGISTER_ARM64_PC
};
inline void CORDbgSetIP(DT_CONTEXT *context, LPVOID eip) {
LIMITED_METHOD_CONTRACT;
context->Pc = (DWORD64)eip;
}
inline LPVOID CORDbgGetSP(const DT_CONTEXT * context) {
LIMITED_METHOD_CONTRACT;
return (LPVOID)(size_t)(context->Sp);
}
inline void CORDbgSetSP(DT_CONTEXT *context, LPVOID esp) {
LIMITED_METHOD_CONTRACT;
context->Sp = (DWORD64)esp;
}
inline LPVOID CORDbgGetFP(const DT_CONTEXT * context) {
LIMITED_METHOD_CONTRACT;
return (LPVOID)(size_t)(context->Fp);
}
inline void CORDbgSetFP(DT_CONTEXT *context, LPVOID fp) {
LIMITED_METHOD_CONTRACT;
context->Fp = (DWORD64)fp;
}
inline BOOL CompareControlRegisters(const DT_CONTEXT * pCtx1, const DT_CONTEXT * pCtx2)
{
LIMITED_METHOD_DAC_CONTRACT;
// @ARMTODO: Sort out frame registers
if ((pCtx1->Pc == pCtx2->Pc) &&
(pCtx1->Sp == pCtx2->Sp) &&
(pCtx1->Fp == pCtx2->Fp))
{
return TRUE;
}
return FALSE;
}
inline void CORDbgSetInstruction(CORDB_ADDRESS_TYPE* address,
PRD_TYPE instruction)
{
// In a DAC build, this function assumes the input is a host address.
LIMITED_METHOD_DAC_CONTRACT;
#if !defined(DBI_COMPILE) && !defined(DACCESS_COMPILE) && defined(HOST_OSX)
ExecutableWriterHolder<void> instructionWriterHolder((LPVOID)address, sizeof(PRD_TYPE));
ULONGLONG ptraddr = dac_cast<ULONGLONG>(instructionWriterHolder.GetRW());
#else // !DBI_COMPILE && !DACCESS_COMPILE && HOST_OSX
ULONGLONG ptraddr = dac_cast<ULONGLONG>(address);
#endif // !DBI_COMPILE && !DACCESS_COMPILE && HOST_OSX
*(PRD_TYPE *)ptraddr = instruction;
FlushInstructionCache(GetCurrentProcess(),
address,
sizeof(PRD_TYPE));
}
inline PRD_TYPE CORDbgGetInstruction(UNALIGNED CORDB_ADDRESS_TYPE* address)
{
LIMITED_METHOD_CONTRACT;
ULONGLONG ptraddr = dac_cast<ULONGLONG>(address);
return *(PRD_TYPE *)ptraddr;
}
inline void SetSSFlag(DT_CONTEXT *pContext)
{
_ASSERTE(pContext != NULL);
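// Bit 21 of the saved PSTATE (Cpsr) is the AArch64 software-step (SS) flag.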
pContext->Cpsr |= 0x00200000;
}
inline void UnsetSSFlag(DT_CONTEXT *pContext)
{
_ASSERTE(pContext != NULL);
pContext->Cpsr &= ~0x00200000;
}
inline bool IsSSFlagEnabled(DT_CONTEXT * pContext)
{
_ASSERTE(pContext != NULL);
return (pContext->Cpsr & 0x00200000) != 0;
}
#include "arm_primitives.h"
#endif // PRIMITIVES_H_
| -1 |
dotnet/runtime | 66,006 | Revert "[mono][jit] Remove support for -O=-float32, i.e. treating r4 values a…" | Reverts this as x86 still uses it.
| vargaz | 2022-03-01T15:15:53Z | 2022-03-01T20:09:47Z | af71404d9a3b38e63408af47cc847ac1a1fcf4e1 | 2dd232a53c38ac874b15fe504df275b660988294 | Revert "[mono][jit] Remove support for -O=-float32, i.e. treating r4 values a…". Reverts this as x86 still uses it.
| ./src/native/public/mono/utils/details/mono-counters-functions.h | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//
// This file does not have ifdef guards, it is meant to be included multiple times with different definitions of MONO_API_FUNCTION
#ifndef MONO_API_FUNCTION
#error "MONO_API_FUNCTION(ret,name,args) macro not defined before including function declaration header"
#endif
MONO_API_FUNCTION(void, mono_counters_enable, (int section_mask))
MONO_API_FUNCTION(void, mono_counters_init, (void))
/*
* register addr as the address of a counter of type type.
* It may be a function pointer if MONO_COUNTER_CALLBACK is specified:
* the function should return the value and take no arguments.
*/
MONO_API_FUNCTION(void, mono_counters_register, (const char* descr, int type, void *addr))
MONO_API_FUNCTION(void, mono_counters_register_with_size, (const char *name, int type, void *addr, int size))
MONO_API_FUNCTION(void, mono_counters_on_register, (MonoCounterRegisterCallback callback))
/*
* Create a readable dump of the counters for section_mask sections (ORed section values)
*/
MONO_API_FUNCTION(void, mono_counters_dump, (int section_mask, FILE *outfile))
MONO_API_FUNCTION(void, mono_counters_cleanup, (void))
MONO_API_FUNCTION(void, mono_counters_foreach, (CountersEnumCallback cb, void *user_data))
MONO_API_FUNCTION(int, mono_counters_sample, (MonoCounter *counter, void *buffer, int buffer_size))
MONO_API_FUNCTION(const char*, mono_counter_get_name, (MonoCounter *name))
MONO_API_FUNCTION(int, mono_counter_get_type, (MonoCounter *counter))
MONO_API_FUNCTION(int, mono_counter_get_section, (MonoCounter *counter))
MONO_API_FUNCTION(int, mono_counter_get_unit, (MonoCounter *counter))
MONO_API_FUNCTION(int, mono_counter_get_variance, (MonoCounter *counter))
MONO_API_FUNCTION(size_t, mono_counter_get_size, (MonoCounter *counter))
MONO_API_FUNCTION(int, mono_runtime_resource_limit, (int resource_type, uintptr_t soft_limit, uintptr_t hard_limit))
MONO_API_FUNCTION(void, mono_runtime_resource_set_callback, (MonoResourceCallback callback))
MONO_API_FUNCTION(void, mono_runtime_resource_check_limit, (int resource_type, uintptr_t value))
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//
// This file does not have ifdef guards, it is meant to be included multiple times with different definitions of MONO_API_FUNCTION
#ifndef MONO_API_FUNCTION
#error "MONO_API_FUNCTION(ret,name,args) macro not defined before including function declaration header"
#endif
MONO_API_FUNCTION(void, mono_counters_enable, (int section_mask))
MONO_API_FUNCTION(void, mono_counters_init, (void))
/*
* register addr as the address of a counter of type type.
* It may be a function pointer if MONO_COUNTER_CALLBACK is specified:
* the function should return the value and take no arguments.
*/
MONO_API_FUNCTION(void, mono_counters_register, (const char* descr, int type, void *addr))
MONO_API_FUNCTION(void, mono_counters_register_with_size, (const char *name, int type, void *addr, int size))
MONO_API_FUNCTION(void, mono_counters_on_register, (MonoCounterRegisterCallback callback))
/*
* Create a readable dump of the counters for section_mask sections (ORed section values)
*/
MONO_API_FUNCTION(void, mono_counters_dump, (int section_mask, FILE *outfile))
MONO_API_FUNCTION(void, mono_counters_cleanup, (void))
MONO_API_FUNCTION(void, mono_counters_foreach, (CountersEnumCallback cb, void *user_data))
MONO_API_FUNCTION(int, mono_counters_sample, (MonoCounter *counter, void *buffer, int buffer_size))
MONO_API_FUNCTION(const char*, mono_counter_get_name, (MonoCounter *name))
MONO_API_FUNCTION(int, mono_counter_get_type, (MonoCounter *counter))
MONO_API_FUNCTION(int, mono_counter_get_section, (MonoCounter *counter))
MONO_API_FUNCTION(int, mono_counter_get_unit, (MonoCounter *counter))
MONO_API_FUNCTION(int, mono_counter_get_variance, (MonoCounter *counter))
MONO_API_FUNCTION(size_t, mono_counter_get_size, (MonoCounter *counter))
MONO_API_FUNCTION(int, mono_runtime_resource_limit, (int resource_type, uintptr_t soft_limit, uintptr_t hard_limit))
MONO_API_FUNCTION(void, mono_runtime_resource_set_callback, (MonoResourceCallback callback))
MONO_API_FUNCTION(void, mono_runtime_resource_check_limit, (int resource_type, uintptr_t value))
| -1 |
dotnet/runtime | 66,006 | Revert "[mono][jit] Remove support for -O=-float32, i.e. treating r4 values a…" | Reverts this as x86 still uses it.
| vargaz | 2022-03-01T15:15:53Z | 2022-03-01T20:09:47Z | af71404d9a3b38e63408af47cc847ac1a1fcf4e1 | 2dd232a53c38ac874b15fe504df275b660988294 | Revert "[mono][jit] Remove support for -O=-float32, i.e. treating r4 values a…". Reverts this as x86 still uses it.
| ./src/mono/mono/metadata/class-accessors.c | /**
* \file
* Copyright 2016 Microsoft
* Licensed under the MIT license. See LICENSE file in the project root for full license information.
*/
#include <config.h>
#include <mono/metadata/class-internals.h>
#include <mono/metadata/marshal.h>
#include <mono/metadata/tabledefs.h>
#include <mono/metadata/class-abi-details.h>
#ifdef MONO_CLASS_DEF_PRIVATE
#include <mono/metadata/abi-details.h>
#define REALLY_INCLUDE_CLASS_DEF 1
#include <mono/metadata/class-private-definition.h>
#undef REALLY_INCLUDE_CLASS_DEF
#endif
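/* Property identifiers for rarely-set per-class data that is stored out of
 * line in the class's infrequent-data property bag. */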
typedef enum {
PROP_MARSHAL_INFO = 1, /* MonoMarshalType */
PROP_REF_INFO_HANDLE = 2, /* gchandle */
PROP_EXCEPTION_DATA = 3, /* MonoErrorBoxed* */
PROP_NESTED_CLASSES = 4, /* GList* */
PROP_PROPERTY_INFO = 5, /* MonoClassPropertyInfo* */
PROP_EVENT_INFO = 6, /* MonoClassEventInfo* */
PROP_FIELD_DEF_VALUES = 7, /* MonoFieldDefaultValue* */
PROP_DECLSEC_FLAGS = 8, /* guint32 */
PROP_WEAK_BITMAP = 9,
PROP_DIM_CONFLICTS = 10, /* GSList of MonoMethod* */
PROP_FIELD_DEF_VALUES_2BYTESWIZZLE = 11, /* MonoFieldDefaultValue* with default values swizzled at 2-byte boundaries */
PROP_FIELD_DEF_VALUES_4BYTESWIZZLE = 12, /* MonoFieldDefaultValue* with default values swizzled at 4-byte boundaries */
PROP_FIELD_DEF_VALUES_8BYTESWIZZLE = 13, /* MonoFieldDefaultValue* with default values swizzled at 8-byte boundaries */
PROP_METADATA_UPDATE_INFO = 14, /* MonoClassMetadataUpdateInfo* */
} InfrequentDataKind;
/* Accessors based on class kind */
/*
* mono_class_get_generic_class:
*
* Return the MonoGenericClass of @klass, which MUST be a generic instance.
*/
MonoGenericClass*
mono_class_get_generic_class (MonoClass *klass)
{
g_assert (mono_class_is_ginst (klass));
return m_classgenericinst_get_generic_class ((MonoClassGenericInst*)klass);
}
/*
* mono_class_try_get_generic_class:
*
* Return the MonoGenericClass if @klass is a ginst, NULL otherwise
*/
MonoGenericClass*
mono_class_try_get_generic_class (MonoClass *klass)
{
if (mono_class_is_ginst (klass))
return m_classgenericinst_get_generic_class ((MonoClassGenericInst*)klass);
return NULL;
}
/**
* mono_class_get_flags:
* \param klass the MonoClass to act on
* \returns the \c TypeAttributes flags of \p klass.
* See the \c TYPE_ATTRIBUTE_* definitions in \c tabledefs.h for the different values.
*/
guint32
mono_class_get_flags (MonoClass *klass)
{
g_assert (klass);
guint32 kind = m_class_get_class_kind (klass);
switch (kind) {
case MONO_CLASS_DEF:
case MONO_CLASS_GTD:
return m_classdef_get_flags ((MonoClassDef*)klass);
case MONO_CLASS_GINST:
return mono_class_get_flags (m_classgenericinst_get_generic_class ((MonoClassGenericInst*)klass)->container_class);
case MONO_CLASS_GPARAM:
return TYPE_ATTRIBUTE_PUBLIC;
case MONO_CLASS_ARRAY:
/* all arrays are marked serializable and sealed, bug #42779 */
return TYPE_ATTRIBUTE_CLASS | TYPE_ATTRIBUTE_SERIALIZABLE | TYPE_ATTRIBUTE_SEALED | TYPE_ATTRIBUTE_PUBLIC;
case MONO_CLASS_POINTER:
if (m_class_get_byval_arg (klass)->type == MONO_TYPE_FNPTR)
return TYPE_ATTRIBUTE_SEALED | TYPE_ATTRIBUTE_PUBLIC;
return TYPE_ATTRIBUTE_CLASS | (mono_class_get_flags (m_class_get_element_class (klass)) & TYPE_ATTRIBUTE_VISIBILITY_MASK);
case MONO_CLASS_GC_FILLER:
g_assertf (0, "%s: unexpected GC filler class", __func__);
break;
}
g_assert_not_reached ();
}
void
mono_class_set_flags (MonoClass *klass, guint32 flags)
{
g_assert (m_class_get_class_kind (klass) == MONO_CLASS_DEF || m_class_get_class_kind (klass) == MONO_CLASS_GTD);
((MonoClassDef*)klass)->flags = flags;
}
/*
* mono_class_get_generic_container:
*
* Return the generic container of KLASS which should be a generic type definition.
*/
MonoGenericContainer*
mono_class_get_generic_container (MonoClass *klass)
{
g_assert (mono_class_is_gtd (klass));
return m_classgtd_get_generic_container ((MonoClassGtd*)klass);
}
MonoGenericContainer*
mono_class_try_get_generic_container (MonoClass *klass)
{
if (mono_class_is_gtd (klass))
return m_classgtd_get_generic_container ((MonoClassGtd*)klass);
return NULL;
}
void
mono_class_set_generic_container (MonoClass *klass, MonoGenericContainer *container)
{
g_assert (mono_class_is_gtd (klass));
((MonoClassGtd*)klass)->generic_container = container;
}
/*
* mono_class_get_first_method_idx:
*
* Return the table index of the first method for metadata classes.
*/
guint32
mono_class_get_first_method_idx (MonoClass *klass)
{
g_assert (mono_class_has_static_metadata (klass));
return m_classdef_get_first_method_idx ((MonoClassDef*)klass);
}
void
mono_class_set_first_method_idx (MonoClass *klass, guint32 idx)
{
g_assert (mono_class_has_static_metadata (klass));
((MonoClassDef*)klass)->first_method_idx = idx;
}
guint32
mono_class_get_first_field_idx (MonoClass *klass)
{
if (mono_class_is_ginst (klass))
return mono_class_get_first_field_idx (mono_class_get_generic_class (klass)->container_class);
g_assert (klass->type_token && !mono_class_is_ginst (klass));
return m_classdef_get_first_field_idx ((MonoClassDef*)klass);
}
void
mono_class_set_first_field_idx (MonoClass *klass, guint32 idx)
{
g_assert (klass->type_token && !mono_class_is_ginst (klass));
((MonoClassDef*)klass)->first_field_idx = idx;
}
guint32
mono_class_get_method_count (MonoClass *klass)
{
switch (m_class_get_class_kind (klass)) {
case MONO_CLASS_DEF:
case MONO_CLASS_GTD:
return m_classdef_get_method_count ((MonoClassDef*)klass);
case MONO_CLASS_GINST:
return mono_class_get_method_count (m_classgenericinst_get_generic_class ((MonoClassGenericInst*)klass)->container_class);
case MONO_CLASS_GPARAM:
return 0;
case MONO_CLASS_ARRAY:
return m_classarray_get_method_count ((MonoClassArray*)klass);
case MONO_CLASS_POINTER:
return 0;
case MONO_CLASS_GC_FILLER:
g_assertf (0, "%s: unexpected GC filler class", __func__);
return 0;
default:
g_assert_not_reached ();
return 0;
}
}
void
mono_class_set_method_count (MonoClass *klass, guint32 count)
{
switch (m_class_get_class_kind (klass)) {
case MONO_CLASS_DEF:
case MONO_CLASS_GTD:
((MonoClassDef*)klass)->method_count = count;
break;
case MONO_CLASS_GINST:
break;
case MONO_CLASS_GPARAM:
case MONO_CLASS_POINTER:
g_assert (count == 0);
break;
case MONO_CLASS_ARRAY:
((MonoClassArray*)klass)->method_count = count;
break;
case MONO_CLASS_GC_FILLER:
g_assertf (0, "%s: unexpected GC filler class", __func__);
break;
default:
g_assert_not_reached ();
break;
}
}
guint32
mono_class_get_field_count (MonoClass *klass)
{
switch (m_class_get_class_kind (klass)) {
case MONO_CLASS_DEF:
case MONO_CLASS_GTD:
return m_classdef_get_field_count ((MonoClassDef*)klass);
case MONO_CLASS_GINST:
return mono_class_get_field_count (m_classgenericinst_get_generic_class ((MonoClassGenericInst*)klass)->container_class);
case MONO_CLASS_GPARAM:
case MONO_CLASS_ARRAY:
case MONO_CLASS_POINTER:
return 0;
case MONO_CLASS_GC_FILLER:
g_assertf (0, "%s: unexpected GC filler class", __func__);
return 0;
default:
g_assert_not_reached ();
return 0;
}
}
void
mono_class_set_field_count (MonoClass *klass, guint32 count)
{
switch (m_class_get_class_kind (klass)) {
case MONO_CLASS_DEF:
case MONO_CLASS_GTD:
((MonoClassDef*)klass)->field_count = count;
break;
case MONO_CLASS_GINST:
break;
case MONO_CLASS_GPARAM:
case MONO_CLASS_ARRAY:
case MONO_CLASS_POINTER:
g_assert (count == 0);
break;
case MONO_CLASS_GC_FILLER:
g_assertf (0, "%s: unexpected GC filler class", __func__);
break;
default:
g_assert_not_reached ();
break;
}
}
MonoMarshalType*
mono_class_get_marshal_info (MonoClass *klass)
{
return (MonoMarshalType*)mono_property_bag_get (m_class_get_infrequent_data (klass), PROP_MARSHAL_INFO);
}
void
mono_class_set_marshal_info (MonoClass *klass, MonoMarshalType *marshal_info)
{
marshal_info->head.tag = PROP_MARSHAL_INFO;
mono_property_bag_add (m_class_get_infrequent_data (klass), marshal_info);
}
typedef struct {
MonoPropertyBagItem head;
guint32 value;
} Uint32Property;
typedef struct {
MonoPropertyBagItem head;
MonoGCHandle value;
} GCHandleProperty;
MonoGCHandle
mono_class_get_ref_info_handle (MonoClass *klass)
{
GCHandleProperty *prop = (GCHandleProperty*)mono_property_bag_get (m_class_get_infrequent_data (klass), PROP_REF_INFO_HANDLE);
return prop ? prop->value : NULL;
}
MonoGCHandle
mono_class_set_ref_info_handle (MonoClass *klass, gpointer value)
{
if (!value) {
GCHandleProperty *prop = (GCHandleProperty*)mono_property_bag_get (m_class_get_infrequent_data (klass), PROP_REF_INFO_HANDLE);
if (prop)
prop->value = NULL;
return NULL;
}
GCHandleProperty *prop = (GCHandleProperty*)mono_class_alloc (klass, sizeof (GCHandleProperty));
prop->head.tag = PROP_REF_INFO_HANDLE;
prop->value = value;
prop = (GCHandleProperty*)mono_property_bag_add (m_class_get_infrequent_data (klass), prop);
return prop->value;
}
typedef struct {
MonoPropertyBagItem head;
gpointer value;
} PointerProperty;
static void
set_pointer_property (MonoClass *klass, InfrequentDataKind property, gpointer value)
{
PointerProperty *prop = (PointerProperty*)mono_class_alloc (klass, sizeof (PointerProperty));
prop->head.tag = property;
prop->value = value;
mono_property_bag_add (m_class_get_infrequent_data (klass), prop);
}
static gpointer
get_pointer_property (MonoClass *klass, InfrequentDataKind property)
{
PointerProperty *prop = (PointerProperty*)mono_property_bag_get (m_class_get_infrequent_data (klass), property);
return prop ? prop->value : NULL;
}
MonoErrorBoxed*
mono_class_get_exception_data (MonoClass *klass)
{
return (MonoErrorBoxed*)get_pointer_property (klass, PROP_EXCEPTION_DATA);
}
void
mono_class_set_exception_data (MonoClass *klass, MonoErrorBoxed *value)
{
set_pointer_property (klass, PROP_EXCEPTION_DATA, value);
}
GList*
mono_class_get_nested_classes_property (MonoClass *klass)
{
return (GList*)get_pointer_property (klass, PROP_NESTED_CLASSES);
}
void
mono_class_set_nested_classes_property (MonoClass *klass, GList *value)
{
set_pointer_property (klass, PROP_NESTED_CLASSES, value);
}
MonoClassPropertyInfo*
mono_class_get_property_info (MonoClass *klass)
{
return (MonoClassPropertyInfo*)mono_property_bag_get (m_class_get_infrequent_data (klass), PROP_PROPERTY_INFO);
}
void
mono_class_set_property_info (MonoClass *klass, MonoClassPropertyInfo *info)
{
info->head.tag = PROP_PROPERTY_INFO;
mono_property_bag_add (m_class_get_infrequent_data (klass), info);
}
MonoClassEventInfo*
mono_class_get_event_info (MonoClass *klass)
{
return (MonoClassEventInfo*)mono_property_bag_get (m_class_get_infrequent_data (klass), PROP_EVENT_INFO);
}
void
mono_class_set_event_info (MonoClass *klass, MonoClassEventInfo *info)
{
info->head.tag = PROP_EVENT_INFO;
mono_property_bag_add (m_class_get_infrequent_data (klass), info);
}
MonoFieldDefaultValue*
mono_class_get_field_def_values (MonoClass *klass)
{
return (MonoFieldDefaultValue*)get_pointer_property (klass, PROP_FIELD_DEF_VALUES);
}
MonoFieldDefaultValue*
mono_class_get_field_def_values_with_swizzle (MonoClass *klass, int swizzle)
{
InfrequentDataKind dataKind = PROP_FIELD_DEF_VALUES;
if (swizzle == 2)
dataKind = PROP_FIELD_DEF_VALUES_2BYTESWIZZLE;
else if (swizzle == 4)
dataKind = PROP_FIELD_DEF_VALUES_4BYTESWIZZLE;
else if (swizzle == 8)
dataKind = PROP_FIELD_DEF_VALUES_8BYTESWIZZLE;
return (MonoFieldDefaultValue*)get_pointer_property (klass, dataKind);
}
void
mono_class_set_field_def_values (MonoClass *klass, MonoFieldDefaultValue *values)
{
set_pointer_property (klass, PROP_FIELD_DEF_VALUES, values);
}
void
mono_class_set_field_def_values_with_swizzle (MonoClass *klass, MonoFieldDefaultValue *values, int swizzle)
{
InfrequentDataKind dataKind = PROP_FIELD_DEF_VALUES;
if (swizzle == 2)
dataKind = PROP_FIELD_DEF_VALUES_2BYTESWIZZLE;
else if (swizzle == 4)
dataKind = PROP_FIELD_DEF_VALUES_4BYTESWIZZLE;
else if (swizzle == 8)
dataKind = PROP_FIELD_DEF_VALUES_8BYTESWIZZLE;
set_pointer_property (klass, dataKind, values);
}
guint32
mono_class_get_declsec_flags (MonoClass *klass)
{
Uint32Property *prop = (Uint32Property*)mono_property_bag_get (m_class_get_infrequent_data (klass), PROP_DECLSEC_FLAGS);
return prop ? prop->value : 0;
}
void
mono_class_set_declsec_flags (MonoClass *klass, guint32 value)
{
Uint32Property *prop = (Uint32Property*)mono_class_alloc (klass, sizeof (Uint32Property));
prop->head.tag = PROP_DECLSEC_FLAGS;
prop->value = value;
mono_property_bag_add (m_class_get_infrequent_data (klass), prop);
}
void
mono_class_set_is_com_object (MonoClass *klass)
{
#ifndef DISABLE_COM
mono_loader_lock ();
klass->is_com_object = 1;
mono_loader_unlock ();
#endif
}
MonoType*
mono_class_gtd_get_canonical_inst (MonoClass *klass)
{
g_assert (mono_class_is_gtd (klass));
return m_classgtd_get_canonical_inst ((MonoClassGtd*)klass);
}
typedef struct {
MonoPropertyBagItem head;
int nbits;
gsize *bits;
} WeakBitmapData;
void
mono_class_set_weak_bitmap (MonoClass *klass, int nbits, gsize *bits)
{
WeakBitmapData *info = (WeakBitmapData *)mono_class_alloc (klass, sizeof (WeakBitmapData));
info->nbits = nbits;
info->bits = bits;
info->head.tag = PROP_WEAK_BITMAP;
mono_property_bag_add (m_class_get_infrequent_data (klass), info);
}
gsize*
mono_class_get_weak_bitmap (MonoClass *klass, int *nbits)
{
WeakBitmapData *prop = (WeakBitmapData*)mono_property_bag_get (m_class_get_infrequent_data (klass), PROP_WEAK_BITMAP);
g_assert (prop);
*nbits = prop->nbits;
return prop->bits;
}
gboolean
mono_class_has_dim_conflicts (MonoClass *klass)
{
if (klass->has_dim_conflicts)
return TRUE;
if (mono_class_is_ginst (klass)) {
MonoClass *gklass = mono_class_get_generic_class (klass)->container_class;
return gklass->has_dim_conflicts;
}
return FALSE;
}
typedef struct {
MonoPropertyBagItem head;
GSList *data;
} DimConflictData;
void
mono_class_set_dim_conflicts (MonoClass *klass, GSList *conflicts)
{
DimConflictData *info = (DimConflictData*)mono_class_alloc (klass, sizeof (DimConflictData));
info->data = conflicts;
g_assert (!mono_class_is_ginst (klass));
info->head.tag = PROP_DIM_CONFLICTS;
mono_property_bag_add (&klass->infrequent_data, info);
}
GSList*
mono_class_get_dim_conflicts (MonoClass *klass)
{
if (mono_class_is_ginst (klass))
return mono_class_get_dim_conflicts (mono_class_get_generic_class (klass)->container_class);
DimConflictData *info = (DimConflictData*)mono_property_bag_get (&klass->infrequent_data, PROP_DIM_CONFLICTS);
g_assert (info);
return info->data;
}
/**
* mono_class_set_failure:
* \param klass class in which the failure was detected
 * \param boxed_error a boxed \c MonoError describing the failure
 *
 * Keep the detected failure information in the class for later processing.
* Note that only the first failure is kept.
*
* LOCKING: Acquires the loader lock.
*/
gboolean
mono_class_set_failure (MonoClass *klass, MonoErrorBoxed *boxed_error)
{
g_assert (boxed_error != NULL);
if (mono_class_has_failure (klass))
return FALSE;
mono_loader_lock ();
klass->has_failure = 1;
mono_class_set_exception_data (klass, boxed_error);
mono_loader_unlock ();
return TRUE;
}
/**
* mono_class_set_nonblittable:
* \param klass class which will be marked as not blittable.
*
* Mark \c klass as not blittable.
*
* LOCKING: Acquires the loader lock.
*/
void
mono_class_set_nonblittable (MonoClass *klass) {
mono_loader_lock ();
klass->blittable = FALSE;
mono_loader_unlock ();
}
/**
* mono_class_publish_gc_descriptor:
* \param klass the \c MonoClass whose GC descriptor is to be set
* \param gc_descr the GC descriptor for \p klass
*
* Sets the \c gc_descr_inited and \c gc_descr fields of \p klass.
* \returns previous value of \c klass->gc_descr_inited
*
* LOCKING: Acquires the loader lock.
*/
gboolean
mono_class_publish_gc_descriptor (MonoClass *klass, MonoGCDescriptor gc_descr)
{
gboolean ret;
mono_loader_lock ();
ret = klass->gc_descr_inited;
klass->gc_descr = gc_descr;
mono_memory_barrier ();
klass->gc_descr_inited = TRUE;
mono_loader_unlock ();
return ret;
}
MonoClassMetadataUpdateInfo*
mono_class_get_metadata_update_info (MonoClass *klass)
{
switch (m_class_get_class_kind (klass)) {
case MONO_CLASS_GTD:
return NULL;
case MONO_CLASS_DEF:
return (MonoClassMetadataUpdateInfo *)get_pointer_property (klass, PROP_METADATA_UPDATE_INFO);
case MONO_CLASS_GINST:
case MONO_CLASS_GPARAM:
case MONO_CLASS_POINTER:
case MONO_CLASS_GC_FILLER:
return NULL;
default:
g_assert_not_reached ();
}
}
/*
* LOCKING: assumes the loader lock is held
*/
void
mono_class_set_metadata_update_info (MonoClass *klass, MonoClassMetadataUpdateInfo *value)
{
switch (m_class_get_class_kind (klass)) {
case MONO_CLASS_GTD:
g_assertf (0, "%s: EnC metadata update info on generic types is not supported", __func__);
break;
case MONO_CLASS_DEF:
set_pointer_property (klass, PROP_METADATA_UPDATE_INFO, value);
return;
case MONO_CLASS_GINST:
case MONO_CLASS_GPARAM:
case MONO_CLASS_POINTER:
case MONO_CLASS_GC_FILLER:
g_assert_not_reached ();
break;
default:
g_assert_not_reached ();
}
}
gboolean
mono_class_has_metadata_update_info (MonoClass *klass)
{
switch (m_class_get_class_kind (klass)) {
case MONO_CLASS_GTD:
return FALSE;
case MONO_CLASS_DEF:
return get_pointer_property (klass, PROP_METADATA_UPDATE_INFO) != NULL;
case MONO_CLASS_GINST:
case MONO_CLASS_GPARAM:
case MONO_CLASS_POINTER:
case MONO_CLASS_GC_FILLER:
return FALSE;
default:
g_assert_not_reached ();
}
}
#ifdef MONO_CLASS_DEF_PRIVATE
#define MONO_CLASS_GETTER(funcname, rettype, optref, argtype, fieldname) rettype funcname (argtype *klass) { return optref klass-> fieldname ; }
#define MONO_CLASS_OFFSET(funcname, argtype, fieldname) intptr_t funcname (void) { return MONO_STRUCT_OFFSET (argtype, fieldname); }
#include "class-getters.h"
#undef MONO_CLASS_GETTER
#undef MONO_CLASS_OFFSET
#endif /* MONO_CLASS_DEF_PRIVATE */
| /**
* \file
* Copyright 2016 Microsoft
* Licensed under the MIT license. See LICENSE file in the project root for full license information.
*/
#include <config.h>
#include <mono/metadata/class-internals.h>
#include <mono/metadata/marshal.h>
#include <mono/metadata/tabledefs.h>
#include <mono/metadata/class-abi-details.h>
#ifdef MONO_CLASS_DEF_PRIVATE
#include <mono/metadata/abi-details.h>
#define REALLY_INCLUDE_CLASS_DEF 1
#include <mono/metadata/class-private-definition.h>
#undef REALLY_INCLUDE_CLASS_DEF
#endif
typedef enum {
PROP_MARSHAL_INFO = 1, /* MonoMarshalType */
PROP_REF_INFO_HANDLE = 2, /* gchandle */
PROP_EXCEPTION_DATA = 3, /* MonoErrorBoxed* */
PROP_NESTED_CLASSES = 4, /* GList* */
PROP_PROPERTY_INFO = 5, /* MonoClassPropertyInfo* */
PROP_EVENT_INFO = 6, /* MonoClassEventInfo* */
PROP_FIELD_DEF_VALUES = 7, /* MonoFieldDefaultValue* */
PROP_DECLSEC_FLAGS = 8, /* guint32 */
PROP_WEAK_BITMAP = 9,
PROP_DIM_CONFLICTS = 10, /* GSList of MonoMethod* */
PROP_FIELD_DEF_VALUES_2BYTESWIZZLE = 11, /* MonoFieldDefaultValue* with default values swizzled at 2 byte boundaries*/
PROP_FIELD_DEF_VALUES_4BYTESWIZZLE = 12, /* MonoFieldDefaultValue* with default values swizzled at 4 byte boundaries*/
PROP_FIELD_DEF_VALUES_8BYTESWIZZLE = 13, /* MonoFieldDefaultValue* with default values swizzled at 8 byte boundaries*/
PROP_METADATA_UPDATE_INFO = 14, /* MonoClassMetadataUpdateInfo* */
} InfrequentDataKind;
/* Accessors based on class kind */
/*
* mono_class_get_generic_class:
*
* Return the MonoGenericClass of @klass, which MUST be a generic instance.
*/
MonoGenericClass*
mono_class_get_generic_class (MonoClass *klass)
{
g_assert (mono_class_is_ginst (klass));
return m_classgenericinst_get_generic_class ((MonoClassGenericInst*)klass);
}
/*
* mono_class_try_get_generic_class:
*
* Return the MonoGenericClass if @klass is a ginst, NULL otherwise
*/
MonoGenericClass*
mono_class_try_get_generic_class (MonoClass *klass)
{
if (mono_class_is_ginst (klass))
return m_classgenericinst_get_generic_class ((MonoClassGenericInst*)klass);
return NULL;
}
/**
* mono_class_get_flags:
* \param klass the MonoClass to act on
* \returns the \c TypeAttributes flags of \p klass.
* See the \c TYPE_ATTRIBUTE_* definitions in \c tabledefs.h for the different values.
*/
guint32
mono_class_get_flags (MonoClass *klass)
{
g_assert (klass);
guint32 kind = m_class_get_class_kind (klass);
switch (kind) {
case MONO_CLASS_DEF:
case MONO_CLASS_GTD:
return m_classdef_get_flags ((MonoClassDef*)klass);
case MONO_CLASS_GINST:
return mono_class_get_flags (m_classgenericinst_get_generic_class ((MonoClassGenericInst*)klass)->container_class);
case MONO_CLASS_GPARAM:
return TYPE_ATTRIBUTE_PUBLIC;
case MONO_CLASS_ARRAY:
/* all arrays are marked serializable and sealed, bug #42779 */
return TYPE_ATTRIBUTE_CLASS | TYPE_ATTRIBUTE_SERIALIZABLE | TYPE_ATTRIBUTE_SEALED | TYPE_ATTRIBUTE_PUBLIC;
case MONO_CLASS_POINTER:
if (m_class_get_byval_arg (klass)->type == MONO_TYPE_FNPTR)
return TYPE_ATTRIBUTE_SEALED | TYPE_ATTRIBUTE_PUBLIC;
return TYPE_ATTRIBUTE_CLASS | (mono_class_get_flags (m_class_get_element_class (klass)) & TYPE_ATTRIBUTE_VISIBILITY_MASK);
case MONO_CLASS_GC_FILLER:
g_assertf (0, "%s: unexpected GC filler class", __func__);
break;
}
g_assert_not_reached ();
}
void
mono_class_set_flags (MonoClass *klass, guint32 flags)
{
g_assert (m_class_get_class_kind (klass) == MONO_CLASS_DEF || m_class_get_class_kind (klass) == MONO_CLASS_GTD);
((MonoClassDef*)klass)->flags = flags;
}
/*
* mono_class_get_generic_container:
*
* Return the generic container of KLASS which should be a generic type definition.
*/
MonoGenericContainer*
mono_class_get_generic_container (MonoClass *klass)
{
g_assert (mono_class_is_gtd (klass));
return m_classgtd_get_generic_container ((MonoClassGtd*)klass);
}
MonoGenericContainer*
mono_class_try_get_generic_container (MonoClass *klass)
{
if (mono_class_is_gtd (klass))
return m_classgtd_get_generic_container ((MonoClassGtd*)klass);
return NULL;
}
void
mono_class_set_generic_container (MonoClass *klass, MonoGenericContainer *container)
{
g_assert (mono_class_is_gtd (klass));
((MonoClassGtd*)klass)->generic_container = container;
}
/*
* mono_class_get_first_method_idx:
*
* Return the table index of the first method for metadata classes.
*/
guint32
mono_class_get_first_method_idx (MonoClass *klass)
{
g_assert (mono_class_has_static_metadata (klass));
return m_classdef_get_first_method_idx ((MonoClassDef*)klass);
}
void
mono_class_set_first_method_idx (MonoClass *klass, guint32 idx)
{
g_assert (mono_class_has_static_metadata (klass));
((MonoClassDef*)klass)->first_method_idx = idx;
}
guint32
mono_class_get_first_field_idx (MonoClass *klass)
{
if (mono_class_is_ginst (klass))
return mono_class_get_first_field_idx (mono_class_get_generic_class (klass)->container_class);
g_assert (klass->type_token && !mono_class_is_ginst (klass));
return m_classdef_get_first_field_idx ((MonoClassDef*)klass);
}
void
mono_class_set_first_field_idx (MonoClass *klass, guint32 idx)
{
g_assert (klass->type_token && !mono_class_is_ginst (klass));
((MonoClassDef*)klass)->first_field_idx = idx;
}
guint32
mono_class_get_method_count (MonoClass *klass)
{
switch (m_class_get_class_kind (klass)) {
case MONO_CLASS_DEF:
case MONO_CLASS_GTD:
return m_classdef_get_method_count ((MonoClassDef*)klass);
case MONO_CLASS_GINST:
return mono_class_get_method_count (m_classgenericinst_get_generic_class ((MonoClassGenericInst*)klass)->container_class);
case MONO_CLASS_GPARAM:
return 0;
case MONO_CLASS_ARRAY:
return m_classarray_get_method_count ((MonoClassArray*)klass);
case MONO_CLASS_POINTER:
return 0;
case MONO_CLASS_GC_FILLER:
g_assertf (0, "%s: unexpected GC filler class", __func__);
return 0;
default:
g_assert_not_reached ();
return 0;
}
}
void
mono_class_set_method_count (MonoClass *klass, guint32 count)
{
switch (m_class_get_class_kind (klass)) {
case MONO_CLASS_DEF:
case MONO_CLASS_GTD:
((MonoClassDef*)klass)->method_count = count;
break;
case MONO_CLASS_GINST:
break;
case MONO_CLASS_GPARAM:
case MONO_CLASS_POINTER:
g_assert (count == 0);
break;
case MONO_CLASS_ARRAY:
((MonoClassArray*)klass)->method_count = count;
break;
case MONO_CLASS_GC_FILLER:
g_assertf (0, "%s: unexpected GC filler class", __func__);
break;
default:
g_assert_not_reached ();
break;
}
}
guint32
mono_class_get_field_count (MonoClass *klass)
{
switch (m_class_get_class_kind (klass)) {
case MONO_CLASS_DEF:
case MONO_CLASS_GTD:
return m_classdef_get_field_count ((MonoClassDef*)klass);
case MONO_CLASS_GINST:
return mono_class_get_field_count (m_classgenericinst_get_generic_class ((MonoClassGenericInst*)klass)->container_class);
case MONO_CLASS_GPARAM:
case MONO_CLASS_ARRAY:
case MONO_CLASS_POINTER:
return 0;
case MONO_CLASS_GC_FILLER:
g_assertf (0, "%s: unexpected GC filler class", __func__);
return 0;
default:
g_assert_not_reached ();
return 0;
}
}
void
mono_class_set_field_count (MonoClass *klass, guint32 count)
{
switch (m_class_get_class_kind (klass)) {
case MONO_CLASS_DEF:
case MONO_CLASS_GTD:
((MonoClassDef*)klass)->field_count = count;
break;
case MONO_CLASS_GINST:
break;
case MONO_CLASS_GPARAM:
case MONO_CLASS_ARRAY:
case MONO_CLASS_POINTER:
g_assert (count == 0);
break;
case MONO_CLASS_GC_FILLER:
g_assertf (0, "%s: unexpected GC filler class", __func__);
break;
default:
g_assert_not_reached ();
break;
}
}
MonoMarshalType*
mono_class_get_marshal_info (MonoClass *klass)
{
return (MonoMarshalType*)mono_property_bag_get (m_class_get_infrequent_data (klass), PROP_MARSHAL_INFO);
}
void
mono_class_set_marshal_info (MonoClass *klass, MonoMarshalType *marshal_info)
{
marshal_info->head.tag = PROP_MARSHAL_INFO;
mono_property_bag_add (m_class_get_infrequent_data (klass), marshal_info);
}
typedef struct {
MonoPropertyBagItem head;
guint32 value;
} Uint32Property;
typedef struct {
MonoPropertyBagItem head;
MonoGCHandle value;
} GCHandleProperty;
MonoGCHandle
mono_class_get_ref_info_handle (MonoClass *klass)
{
GCHandleProperty *prop = (GCHandleProperty*)mono_property_bag_get (m_class_get_infrequent_data (klass), PROP_REF_INFO_HANDLE);
return prop ? prop->value : NULL;
}
MonoGCHandle
mono_class_set_ref_info_handle (MonoClass *klass, gpointer value)
{
if (!value) {
GCHandleProperty *prop = (GCHandleProperty*)mono_property_bag_get (m_class_get_infrequent_data (klass), PROP_REF_INFO_HANDLE);
if (prop)
prop->value = NULL;
return NULL;
}
GCHandleProperty *prop = (GCHandleProperty*)mono_class_alloc (klass, sizeof (GCHandleProperty));
prop->head.tag = PROP_REF_INFO_HANDLE;
prop->value = value;
prop = (GCHandleProperty*)mono_property_bag_add (m_class_get_infrequent_data (klass), prop);
return prop->value;
}
typedef struct {
MonoPropertyBagItem head;
gpointer value;
} PointerProperty;
static void
set_pointer_property (MonoClass *klass, InfrequentDataKind property, gpointer value)
{
PointerProperty *prop = (PointerProperty*)mono_class_alloc (klass, sizeof (PointerProperty));
prop->head.tag = property;
prop->value = value;
mono_property_bag_add (m_class_get_infrequent_data (klass), prop);
}
static gpointer
get_pointer_property (MonoClass *klass, InfrequentDataKind property)
{
PointerProperty *prop = (PointerProperty*)mono_property_bag_get (m_class_get_infrequent_data (klass), property);
return prop ? prop->value : NULL;
}
MonoErrorBoxed*
mono_class_get_exception_data (MonoClass *klass)
{
return (MonoErrorBoxed*)get_pointer_property (klass, PROP_EXCEPTION_DATA);
}
void
mono_class_set_exception_data (MonoClass *klass, MonoErrorBoxed *value)
{
set_pointer_property (klass, PROP_EXCEPTION_DATA, value);
}
GList*
mono_class_get_nested_classes_property (MonoClass *klass)
{
return (GList*)get_pointer_property (klass, PROP_NESTED_CLASSES);
}
void
mono_class_set_nested_classes_property (MonoClass *klass, GList *value)
{
set_pointer_property (klass, PROP_NESTED_CLASSES, value);
}
MonoClassPropertyInfo*
mono_class_get_property_info (MonoClass *klass)
{
return (MonoClassPropertyInfo*)mono_property_bag_get (m_class_get_infrequent_data (klass), PROP_PROPERTY_INFO);
}
void
mono_class_set_property_info (MonoClass *klass, MonoClassPropertyInfo *info)
{
info->head.tag = PROP_PROPERTY_INFO;
mono_property_bag_add (m_class_get_infrequent_data (klass), info);
}
MonoClassEventInfo*
mono_class_get_event_info (MonoClass *klass)
{
return (MonoClassEventInfo*)mono_property_bag_get (m_class_get_infrequent_data (klass), PROP_EVENT_INFO);
}
void
mono_class_set_event_info (MonoClass *klass, MonoClassEventInfo *info)
{
info->head.tag = PROP_EVENT_INFO;
mono_property_bag_add (m_class_get_infrequent_data (klass), info);
}
MonoFieldDefaultValue*
mono_class_get_field_def_values (MonoClass *klass)
{
return (MonoFieldDefaultValue*)get_pointer_property (klass, PROP_FIELD_DEF_VALUES);
}
MonoFieldDefaultValue*
mono_class_get_field_def_values_with_swizzle (MonoClass *klass, int swizzle)
{
InfrequentDataKind dataKind = PROP_FIELD_DEF_VALUES;
if (swizzle == 2)
dataKind = PROP_FIELD_DEF_VALUES_2BYTESWIZZLE;
else if (swizzle == 4)
dataKind = PROP_FIELD_DEF_VALUES_4BYTESWIZZLE;
else if (swizzle == 8)
dataKind = PROP_FIELD_DEF_VALUES_8BYTESWIZZLE;
return (MonoFieldDefaultValue*)get_pointer_property (klass, dataKind);
}
void
mono_class_set_field_def_values (MonoClass *klass, MonoFieldDefaultValue *values)
{
set_pointer_property (klass, PROP_FIELD_DEF_VALUES, values);
}
void
mono_class_set_field_def_values_with_swizzle (MonoClass *klass, MonoFieldDefaultValue *values, int swizzle)
{
InfrequentDataKind dataKind = PROP_FIELD_DEF_VALUES;
if (swizzle == 2)
dataKind = PROP_FIELD_DEF_VALUES_2BYTESWIZZLE;
else if (swizzle == 4)
dataKind = PROP_FIELD_DEF_VALUES_4BYTESWIZZLE;
else if (swizzle == 8)
dataKind = PROP_FIELD_DEF_VALUES_8BYTESWIZZLE;
set_pointer_property (klass, dataKind, values);
}
guint32
mono_class_get_declsec_flags (MonoClass *klass)
{
Uint32Property *prop = (Uint32Property*)mono_property_bag_get (m_class_get_infrequent_data (klass), PROP_DECLSEC_FLAGS);
return prop ? prop->value : 0;
}
void
mono_class_set_declsec_flags (MonoClass *klass, guint32 value)
{
Uint32Property *prop = (Uint32Property*)mono_class_alloc (klass, sizeof (Uint32Property));
prop->head.tag = PROP_DECLSEC_FLAGS;
prop->value = value;
mono_property_bag_add (m_class_get_infrequent_data (klass), prop);
}
void
mono_class_set_is_com_object (MonoClass *klass)
{
#ifndef DISABLE_COM
mono_loader_lock ();
klass->is_com_object = 1;
mono_loader_unlock ();
#endif
}
MonoType*
mono_class_gtd_get_canonical_inst (MonoClass *klass)
{
g_assert (mono_class_is_gtd (klass));
return m_classgtd_get_canonical_inst ((MonoClassGtd*)klass);
}
typedef struct {
MonoPropertyBagItem head;
int nbits;
gsize *bits;
} WeakBitmapData;
void
mono_class_set_weak_bitmap (MonoClass *klass, int nbits, gsize *bits)
{
WeakBitmapData *info = (WeakBitmapData *)mono_class_alloc (klass, sizeof (WeakBitmapData));
info->nbits = nbits;
info->bits = bits;
info->head.tag = PROP_WEAK_BITMAP;
mono_property_bag_add (m_class_get_infrequent_data (klass), info);
}
gsize*
mono_class_get_weak_bitmap (MonoClass *klass, int *nbits)
{
WeakBitmapData *prop = (WeakBitmapData*)mono_property_bag_get (m_class_get_infrequent_data (klass), PROP_WEAK_BITMAP);
g_assert (prop);
*nbits = prop->nbits;
return prop->bits;
}
gboolean
mono_class_has_dim_conflicts (MonoClass *klass)
{
if (klass->has_dim_conflicts)
return TRUE;
if (mono_class_is_ginst (klass)) {
MonoClass *gklass = mono_class_get_generic_class (klass)->container_class;
return gklass->has_dim_conflicts;
}
return FALSE;
}
typedef struct {
MonoPropertyBagItem head;
GSList *data;
} DimConflictData;
void
mono_class_set_dim_conflicts (MonoClass *klass, GSList *conflicts)
{
DimConflictData *info = (DimConflictData*)mono_class_alloc (klass, sizeof (DimConflictData));
info->data = conflicts;
g_assert (!mono_class_is_ginst (klass));
info->head.tag = PROP_DIM_CONFLICTS;
mono_property_bag_add (&klass->infrequent_data, info);
}
GSList*
mono_class_get_dim_conflicts (MonoClass *klass)
{
if (mono_class_is_ginst (klass))
return mono_class_get_dim_conflicts (mono_class_get_generic_class (klass)->container_class);
DimConflictData *info = (DimConflictData*)mono_property_bag_get (&klass->infrequent_data, PROP_DIM_CONFLICTS);
g_assert (info);
return info->data;
}
/**
* mono_class_set_failure:
* \param klass class in which the failure was detected
 * \param boxed_error a boxed \c MonoError describing the failure
 *
 * Keep the detected failure information in the class for later processing.
* Note that only the first failure is kept.
*
* LOCKING: Acquires the loader lock.
*/
gboolean
mono_class_set_failure (MonoClass *klass, MonoErrorBoxed *boxed_error)
{
g_assert (boxed_error != NULL);
if (mono_class_has_failure (klass))
return FALSE;
mono_loader_lock ();
klass->has_failure = 1;
mono_class_set_exception_data (klass, boxed_error);
mono_loader_unlock ();
return TRUE;
}
/**
* mono_class_set_nonblittable:
* \param klass class which will be marked as not blittable.
*
* Mark \c klass as not blittable.
*
* LOCKING: Acquires the loader lock.
*/
void
mono_class_set_nonblittable (MonoClass *klass) {
mono_loader_lock ();
klass->blittable = FALSE;
mono_loader_unlock ();
}
/**
* mono_class_publish_gc_descriptor:
* \param klass the \c MonoClass whose GC descriptor is to be set
* \param gc_descr the GC descriptor for \p klass
*
* Sets the \c gc_descr_inited and \c gc_descr fields of \p klass.
* \returns previous value of \c klass->gc_descr_inited
*
* LOCKING: Acquires the loader lock.
*/
gboolean
mono_class_publish_gc_descriptor (MonoClass *klass, MonoGCDescriptor gc_descr)
{
gboolean ret;
mono_loader_lock ();
ret = klass->gc_descr_inited;
klass->gc_descr = gc_descr;
mono_memory_barrier ();
klass->gc_descr_inited = TRUE;
mono_loader_unlock ();
return ret;
}
MonoClassMetadataUpdateInfo*
mono_class_get_metadata_update_info (MonoClass *klass)
{
switch (m_class_get_class_kind (klass)) {
case MONO_CLASS_GTD:
return NULL;
case MONO_CLASS_DEF:
return (MonoClassMetadataUpdateInfo *)get_pointer_property (klass, PROP_METADATA_UPDATE_INFO);
case MONO_CLASS_GINST:
case MONO_CLASS_GPARAM:
case MONO_CLASS_POINTER:
case MONO_CLASS_GC_FILLER:
return NULL;
default:
g_assert_not_reached ();
}
}
/*
* LOCKING: assumes the loader lock is held
*/
void
mono_class_set_metadata_update_info (MonoClass *klass, MonoClassMetadataUpdateInfo *value)
{
switch (m_class_get_class_kind (klass)) {
case MONO_CLASS_GTD:
g_assertf (0, "%s: EnC metadata update info on generic types is not supported", __func__);
break;
case MONO_CLASS_DEF:
set_pointer_property (klass, PROP_METADATA_UPDATE_INFO, value);
return;
case MONO_CLASS_GINST:
case MONO_CLASS_GPARAM:
case MONO_CLASS_POINTER:
case MONO_CLASS_GC_FILLER:
g_assert_not_reached ();
break;
default:
g_assert_not_reached ();
}
}
gboolean
mono_class_has_metadata_update_info (MonoClass *klass)
{
switch (m_class_get_class_kind (klass)) {
case MONO_CLASS_GTD:
return FALSE;
case MONO_CLASS_DEF:
return get_pointer_property (klass, PROP_METADATA_UPDATE_INFO) != NULL;
case MONO_CLASS_GINST:
case MONO_CLASS_GPARAM:
case MONO_CLASS_POINTER:
case MONO_CLASS_GC_FILLER:
return FALSE;
default:
g_assert_not_reached ();
}
}
#ifdef MONO_CLASS_DEF_PRIVATE
#define MONO_CLASS_GETTER(funcname, rettype, optref, argtype, fieldname) rettype funcname (argtype *klass) { return optref klass-> fieldname ; }
#define MONO_CLASS_OFFSET(funcname, argtype, fieldname) intptr_t funcname (void) { return MONO_STRUCT_OFFSET (argtype, fieldname); }
#include "class-getters.h"
#undef MONO_CLASS_GETTER
#undef MONO_CLASS_OFFSET
#endif /* MONO_CLASS_DEF_PRIVATE */
| -1 |
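A note on the pattern running through class-accessors.c above: data that most classes never need (marshal info, declsec flags, weak bitmaps, DIM conflicts) is kept out of MonoClass itself and parked in a per-class "property bag" of tagged items, so each accessor is a tag lookup plus a cast. The sketch below is a minimal single-threaded illustration of that shape; the names PropItem, prop_bag_get and prop_bag_add are invented here, and mono's real bag is append-only and safe for concurrent readers.

#include <stddef.h>

/* Every bag item starts with a tag saying which property it carries. */
typedef struct PropItem {
    int tag;
    struct PropItem *next;
} PropItem;

/* A concrete item: embedded header first, then the payload. */
typedef struct {
    PropItem head;
    void *value;
} PointerItem;

/* Walk the list and return the item carrying `tag`, or NULL if absent. */
static PropItem *
prop_bag_get (PropItem *bag, int tag)
{
    for (PropItem *it = bag; it; it = it->next)
        if (it->tag == tag)
            return it;
    return NULL;
}

/* Prepend an item whose tag the caller has already filled in. */
static void
prop_bag_add (PropItem **bag, PropItem *item)
{
    item->next = *bag;
    *bag = item;
}

A getter in this style is then a null-checked cast (PointerItem *p = (PointerItem *) prop_bag_get (bag, tag); return p ? p->value : NULL;), which is exactly the shape of get_pointer_property above.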
dotnet/runtime | 66,006 | Revert "[mono][jit] Remove support for -O=-float32, i.e. treating r4 values a…" | Reverts this as x86 still uses it.
| vargaz | 2022-03-01T15:15:53Z | 2022-03-01T20:09:47Z | af71404d9a3b38e63408af47cc847ac1a1fcf4e1 | 2dd232a53c38ac874b15fe504df275b660988294 | Revert "[mono][jit] Remove support for -O=-float32, i.e. treating r4 values a…". Reverts this as x86 still uses it.
| ./src/coreclr/pal/src/libunwind/src/x86_64/Lcreate_addr_space.c | #define UNW_LOCAL_ONLY
#include <libunwind.h>
#if defined(UNW_LOCAL_ONLY) && !defined(UNW_REMOTE_ONLY)
#include "Gcreate_addr_space.c"
#endif
| #define UNW_LOCAL_ONLY
#include <libunwind.h>
#if defined(UNW_LOCAL_ONLY) && !defined(UNW_REMOTE_ONLY)
#include "Gcreate_addr_space.c"
#endif
| -1 |
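The Lcreate_addr_space.c row above shows libunwind's source layout in miniature: the logic lives once in a G (generic) file, and the corresponding L (local-only) file defines UNW_LOCAL_ONLY and re-includes it, so one source compiles into two variants. A generic sketch of the same build trick, with file and symbol names invented for illustration:

/* step_generic.c - the real implementation, parameterized by a macro. */
#ifdef LOCAL_ONLY
#define VARIANT(name) local_##name
#else
#define VARIANT(name) generic_##name
#endif

int VARIANT(step) (void)
{
    /* ... advance one unwind frame ... */
    return 0;
}

/* step_local.c - thin wrapper that produces the local-only variant. */
#define LOCAL_ONLY
#include "step_generic.c"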
dotnet/runtime | 66,006 | Revert "[mono][jit] Remove support for -O=-float32, i.e. treating r4 values a…" | Reverts this as x86 still uses it.
| vargaz | 2022-03-01T15:15:53Z | 2022-03-01T20:09:47Z | af71404d9a3b38e63408af47cc847ac1a1fcf4e1 | 2dd232a53c38ac874b15fe504df275b660988294 | Revert "[mono][jit] Remove support for -O=-float32, i.e. treating r4 values a…". Reverts this as x86 still uses it.
| ./src/coreclr/tools/superpmi/superpmi/commandline.h | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//----------------------------------------------------------
// CommandLine.h - tiny very specific command line parser
//----------------------------------------------------------
#ifndef _CommandLine
#define _CommandLine
class CommandLine
{
public:
class Options
{
public:
Options()
: nameOfJit(nullptr)
, nameOfJit2(nullptr)
, nameOfInputMethodContextFile(nullptr)
, writeLogFile(nullptr)
, reproName(nullptr)
, breakOnError(false)
, breakOnAssert(false)
, breakOnException(false)
, applyDiff(false)
, parallel(false)
#if !defined(USE_MSVCDIS) && defined(USE_COREDISTOOLS)
, useCoreDisTools(true) // if CoreDisTools is available (but MSVCDIS is not), use it.
#else
, useCoreDisTools(false) // Otherwise, use MSVCDIS if that is available (else no diffs are available).
#endif
, skipCleanup(false)
, workerCount(-1)
, indexCount(-1)
, failureLimit(-1)
, indexes(nullptr)
, hash(nullptr)
, methodStatsTypes(nullptr)
, baseMetricsSummaryFile(nullptr)
, diffMetricsSummaryFile(nullptr)
, mclFilename(nullptr)
, diffMCLFilename(nullptr)
, targetArchitecture(nullptr)
, compileList(nullptr)
, offset(-1)
, increment(-1)
, forceJitOptions(nullptr)
, forceJit2Options(nullptr)
, jitOptions(nullptr)
, jit2Options(nullptr)
{
}
char* nameOfJit;
char* nameOfJit2;
char* nameOfInputMethodContextFile;
char* writeLogFile;
char* reproName;
bool breakOnError;
bool breakOnAssert;
bool breakOnException;
bool applyDiff;
bool parallel; // User specified to use /parallel mode.
bool useCoreDisTools; // Use CoreDisTools library instead of Msvcdis
bool skipCleanup; // In /parallel mode, do we skip cleanup of temporary files? Used for debugging /parallel.
int workerCount; // Number of workers to use for /parallel mode. -1 (or 1) means don't use parallel mode.
int indexCount; // If indexCount is -1 and hash points to nullptr it means compile all.
int failureLimit; // Number of failures after which bail out the replay/asmdiffs.
int* indexes;
char* hash;
char* methodStatsTypes;
char* baseMetricsSummaryFile;
char* diffMetricsSummaryFile;
char* mclFilename;
char* diffMCLFilename;
char* targetArchitecture;
char* compileList;
int offset;
int increment;
LightWeightMap<DWORD, DWORD>* forceJitOptions;
LightWeightMap<DWORD, DWORD>* forceJit2Options;
LightWeightMap<DWORD, DWORD>* jitOptions;
LightWeightMap<DWORD, DWORD>* jit2Options;
};
static bool Parse(int argc, char* argv[], /* OUT */ Options* o);
static bool AddJitOption(int& currArgument,
int argc,
char* argv[],
LightWeightMap<DWORD, DWORD>** pJitOptions,
LightWeightMap<DWORD, DWORD>** pForceJitOptions);
private:
static void DumpHelp(const char* program);
};
#endif
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//----------------------------------------------------------
// CommandLine.h - tiny very specific command line parser
//----------------------------------------------------------
#ifndef _CommandLine
#define _CommandLine
class CommandLine
{
public:
class Options
{
public:
Options()
: nameOfJit(nullptr)
, nameOfJit2(nullptr)
, nameOfInputMethodContextFile(nullptr)
, writeLogFile(nullptr)
, reproName(nullptr)
, breakOnError(false)
, breakOnAssert(false)
, breakOnException(false)
, applyDiff(false)
, parallel(false)
#if !defined(USE_MSVCDIS) && defined(USE_COREDISTOOLS)
, useCoreDisTools(true) // if CoreDisTools is available (but MSVCDIS is not), use it.
#else
, useCoreDisTools(false) // Otherwise, use MSVCDIS if that is available (else no diffs are available).
#endif
, skipCleanup(false)
, workerCount(-1)
, indexCount(-1)
, failureLimit(-1)
, indexes(nullptr)
, hash(nullptr)
, methodStatsTypes(nullptr)
, baseMetricsSummaryFile(nullptr)
, diffMetricsSummaryFile(nullptr)
, mclFilename(nullptr)
, diffMCLFilename(nullptr)
, targetArchitecture(nullptr)
, compileList(nullptr)
, offset(-1)
, increment(-1)
, forceJitOptions(nullptr)
, forceJit2Options(nullptr)
, jitOptions(nullptr)
, jit2Options(nullptr)
{
}
char* nameOfJit;
char* nameOfJit2;
char* nameOfInputMethodContextFile;
char* writeLogFile;
char* reproName;
bool breakOnError;
bool breakOnAssert;
bool breakOnException;
bool applyDiff;
bool parallel; // User specified to use /parallel mode.
bool useCoreDisTools; // Use CoreDisTools library instead of Msvcdis
bool skipCleanup; // In /parallel mode, do we skip cleanup of temporary files? Used for debugging /parallel.
int workerCount; // Number of workers to use for /parallel mode. -1 (or 1) means don't use parallel mode.
int indexCount; // If indexCount is -1 and hash points to nullptr it means compile all.
int failureLimit; // Number of failures after which bail out the replay/asmdiffs.
int* indexes;
char* hash;
char* methodStatsTypes;
char* baseMetricsSummaryFile;
char* diffMetricsSummaryFile;
char* mclFilename;
char* diffMCLFilename;
char* targetArchitecture;
char* compileList;
int offset;
int increment;
LightWeightMap<DWORD, DWORD>* forceJitOptions;
LightWeightMap<DWORD, DWORD>* forceJit2Options;
LightWeightMap<DWORD, DWORD>* jitOptions;
LightWeightMap<DWORD, DWORD>* jit2Options;
};
static bool Parse(int argc, char* argv[], /* OUT */ Options* o);
static bool AddJitOption(int& currArgument,
int argc,
char* argv[],
LightWeightMap<DWORD, DWORD>** pJitOptions,
LightWeightMap<DWORD, DWORD>** pForceJitOptions);
private:
static void DumpHelp(const char* program);
};
#endif
| -1 |
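CommandLine::Options above follows the usual superpmi shape: one plain struct holding every switch, fully defaulted in the constructor, then filled in by a static Parse pass over argv. As a small sketch of that parse-into-options shape in C (the flag names below are invented for the example, not superpmi's actual options):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

typedef struct {
    const char *jit_path; /* -jit <path>, required */
    bool parallel;        /* -parallel */
} Options;

static bool
parse_options (int argc, char **argv, Options *o)
{
    o->jit_path = NULL;
    o->parallel = false;

    for (int i = 1; i < argc; i++) {
        if (!strcmp (argv[i], "-jit") && i + 1 < argc) {
            o->jit_path = argv[++i];
        } else if (!strcmp (argv[i], "-parallel")) {
            o->parallel = true;
        } else {
            fprintf (stderr, "unknown option: %s\n", argv[i]);
            return false;
        }
    }
    return o->jit_path != NULL; /* a JIT path is required */
}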
dotnet/runtime | 66,006 | Revert "[mono][jit] Remove support for -O=-float32, i.e. treating r4 values a…" | Reverts this as x86 still uses it.
| vargaz | 2022-03-01T15:15:53Z | 2022-03-01T20:09:47Z | af71404d9a3b38e63408af47cc847ac1a1fcf4e1 | 2dd232a53c38ac874b15fe504df275b660988294 | Revert "[mono][jit] Remove support for -O=-float32, i.e. treating r4 values a…". Reverts this as x86 still uses it.
| ./src/mono/mono/eventpipe/test/ep-file-tests.c | #include <eventpipe/ep.h>
#include <eventpipe/ep-config.h>
#include <eventpipe/ep-event.h>
#include <eventpipe/ep-event-instance.h>
#include <eventpipe/ep-file.h>
#include <eglib/test/test.h>
#define TEST_PROVIDER_NAME "MyTestProvider"
#define TEST_FILE "./ep_test_create_file.txt"
static RESULT
test_create_file (EventPipeSerializationFormat format)
{
RESULT result = NULL;
uint32_t test_location = 0;
EventPipeFile *file = NULL;
FileStreamWriter *file_stream_writer = NULL;
file_stream_writer = ep_file_stream_writer_alloc (TEST_FILE);
ep_raise_error_if_nok (file_stream_writer != NULL);
test_location = 1;
file = ep_file_alloc ((StreamWriter *)file_stream_writer, format);
ep_raise_error_if_nok (file != NULL);
file_stream_writer = NULL;
if (!ep_file_initialize_file (file)) {
result = FAILED ("ep_file_initialize_file failed");
ep_raise_error ();
}
test_location = 2;
ep_file_flush (file, EP_FILE_FLUSH_FLAGS_ALL_BLOCKS);
test_location = 3;
ep_on_exit:
ep_file_free (file);
ep_file_stream_writer_free (file_stream_writer);
return result;
ep_on_error:
if (!result)
result = FAILED ("Failed at test location=%i", test_location);
ep_exit_error_handler ();
}
static RESULT
test_file_write_event (EventPipeSerializationFormat format, bool write_event, bool write_sequence_point)
{
RESULT result = NULL;
uint32_t test_location = 0;
EventPipeFile *file = NULL;
FileStreamWriter *file_stream_writer = NULL;
EventPipeProvider *provider = NULL;
EventPipeEvent *ep_event = NULL;
EventPipeEventInstance *ep_event_instance = NULL;
EventPipeEventMetadataEvent *metadata_event = NULL;
file_stream_writer = ep_file_stream_writer_alloc (TEST_FILE);
ep_raise_error_if_nok (file_stream_writer != NULL);
test_location = 1;
file = ep_file_alloc ((StreamWriter *)file_stream_writer, format);
ep_raise_error_if_nok (file != NULL);
file_stream_writer = NULL;
if (!ep_file_initialize_file (file)) {
result = FAILED ("ep_file_initialize_file failed");
ep_raise_error ();
}
test_location = 2;
if (write_event) {
provider = ep_create_provider (TEST_PROVIDER_NAME, NULL, NULL, NULL);
ep_raise_error_if_nok (provider != NULL);
test_location = 3;
ep_event = ep_event_alloc (provider, 1, 1, 1, EP_EVENT_LEVEL_VERBOSE, false, NULL, 0);
ep_raise_error_if_nok (ep_event != NULL);
test_location = 4;
ep_event_instance = ep_event_instance_alloc (ep_event, 0, 0, NULL, 0, NULL, NULL);
ep_raise_error_if_nok (ep_event_instance != NULL);
test_location = 5;
metadata_event = ep_build_event_metadata_event (ep_event_instance, 1);
ep_raise_error_if_nok (metadata_event != NULL);
ep_file_write_event (file, (EventPipeEventInstance *)metadata_event, 1, 1, true);
}
if (write_sequence_point) {
EventPipeSequencePoint sequence_point;
ep_sequence_point_init (&sequence_point);
ep_file_write_sequence_point (file, &sequence_point);
ep_sequence_point_fini (&sequence_point);
}
ep_file_flush (file, EP_FILE_FLUSH_FLAGS_ALL_BLOCKS);
test_location = 6;
ep_on_exit:
ep_delete_provider (provider);
ep_event_free (ep_event);
ep_event_instance_free (ep_event_instance);
ep_event_metdata_event_free (metadata_event);
ep_file_free (file);
ep_file_stream_writer_free (file_stream_writer);
return result;
ep_on_error:
if (!result)
result = FAILED ("Failed at test location=%i", test_location);
ep_exit_error_handler ();
}
static RESULT
test_create_file_netperf_v3 (void)
{
return test_create_file (EP_SERIALIZATION_FORMAT_NETPERF_V3);
}
static RESULT
test_create_file_nettrace_v4 (void)
{
return test_create_file (EP_SERIALIZATION_FORMAT_NETTRACE_V4);
}
static RESULT
test_file_write_event_netperf_v3 (void)
{
return test_file_write_event (EP_SERIALIZATION_FORMAT_NETPERF_V3, true, false);
}
static RESULT
test_file_write_event_nettrace_v4 (void)
{
return test_file_write_event (EP_SERIALIZATION_FORMAT_NETTRACE_V4, true, false);
}
static RESULT
test_file_write_sequence_point_netperf_v3 (void)
{
return test_file_write_event (EP_SERIALIZATION_FORMAT_NETPERF_V3, false, true);
}
static RESULT
test_file_write_sequence_point_nettrace_v4 (void)
{
return test_file_write_event (EP_SERIALIZATION_FORMAT_NETTRACE_V4, false, true);
}
static Test ep_file_tests [] = {
{"test_create_file_netperf_v3", test_create_file_netperf_v3},
{"test_create_file_nettrace_v4", test_create_file_nettrace_v4},
{"test_file_write_event_netperf_v3", test_file_write_event_netperf_v3},
{"test_file_write_event_nettrace_v4", test_file_write_event_nettrace_v4},
{"test_file_write_sequence_point_netperf_v3", test_file_write_sequence_point_netperf_v3},
{"test_file_write_sequence_point_nettrace_v4", test_file_write_sequence_point_nettrace_v4},
{NULL, NULL}
};
DEFINE_TEST_GROUP_INIT(ep_file_tests_init, ep_file_tests)
| #include <eventpipe/ep.h>
#include <eventpipe/ep-config.h>
#include <eventpipe/ep-event.h>
#include <eventpipe/ep-event-instance.h>
#include <eventpipe/ep-file.h>
#include <eglib/test/test.h>
#define TEST_PROVIDER_NAME "MyTestProvider"
#define TEST_FILE "./ep_test_create_file.txt"
static RESULT
test_create_file (EventPipeSerializationFormat format)
{
RESULT result = NULL;
uint32_t test_location = 0;
EventPipeFile *file = NULL;
FileStreamWriter *file_stream_writer = NULL;
file_stream_writer = ep_file_stream_writer_alloc (TEST_FILE);
ep_raise_error_if_nok (file_stream_writer != NULL);
test_location = 1;
file = ep_file_alloc ((StreamWriter *)file_stream_writer, format);
ep_raise_error_if_nok (file != NULL);
file_stream_writer = NULL;
if (!ep_file_initialize_file (file)) {
result = FAILED ("ep_file_initialize_file failed");
ep_raise_error ();
}
test_location = 2;
ep_file_flush (file, EP_FILE_FLUSH_FLAGS_ALL_BLOCKS);
test_location = 3;
ep_on_exit:
ep_file_free (file);
ep_file_stream_writer_free (file_stream_writer);
return result;
ep_on_error:
if (!result)
result = FAILED ("Failed at test location=%i", test_location);
ep_exit_error_handler ();
}
static RESULT
test_file_write_event (EventPipeSerializationFormat format, bool write_event, bool write_sequence_point)
{
RESULT result = NULL;
uint32_t test_location = 0;
EventPipeFile *file = NULL;
FileStreamWriter *file_stream_writer = NULL;
EventPipeProvider *provider = NULL;
EventPipeEvent *ep_event = NULL;
EventPipeEventInstance *ep_event_instance = NULL;
EventPipeEventMetadataEvent *metadata_event = NULL;
file_stream_writer = ep_file_stream_writer_alloc (TEST_FILE);
ep_raise_error_if_nok (file_stream_writer != NULL);
test_location = 1;
file = ep_file_alloc ((StreamWriter *)file_stream_writer, format);
ep_raise_error_if_nok (file != NULL);
file_stream_writer = NULL;
if (!ep_file_initialize_file (file)) {
result = FAILED ("ep_file_initialize_file failed");
ep_raise_error ();
}
test_location = 2;
if (write_event) {
provider = ep_create_provider (TEST_PROVIDER_NAME, NULL, NULL, NULL);
ep_raise_error_if_nok (provider != NULL);
test_location = 3;
ep_event = ep_event_alloc (provider, 1, 1, 1, EP_EVENT_LEVEL_VERBOSE, false, NULL, 0);
ep_raise_error_if_nok (ep_event != NULL);
test_location = 4;
ep_event_instance = ep_event_instance_alloc (ep_event, 0, 0, NULL, 0, NULL, NULL);
ep_raise_error_if_nok (ep_event_instance != NULL);
test_location = 5;
metadata_event = ep_build_event_metadata_event (ep_event_instance, 1);
ep_raise_error_if_nok (metadata_event != NULL);
ep_file_write_event (file, (EventPipeEventInstance *)metadata_event, 1, 1, true);
}
if (write_sequence_point) {
EventPipeSequencePoint sequence_point;
ep_sequence_point_init (&sequence_point);
ep_file_write_sequence_point (file, &sequence_point);
ep_sequence_point_fini (&sequence_point);
}
ep_file_flush (file, EP_FILE_FLUSH_FLAGS_ALL_BLOCKS);
test_location = 6;
ep_on_exit:
ep_delete_provider (provider);
ep_event_free (ep_event);
ep_event_instance_free (ep_event_instance);
ep_event_metdata_event_free (metadata_event);
ep_file_free (file);
ep_file_stream_writer_free (file_stream_writer);
return result;
ep_on_error:
if (!result)
result = FAILED ("Failed at test location=%i", test_location);
ep_exit_error_handler ();
}
static RESULT
test_create_file_netperf_v3 (void)
{
return test_create_file (EP_SERIALIZATION_FORMAT_NETPERF_V3);
}
static RESULT
test_create_file_nettrace_v4 (void)
{
return test_create_file (EP_SERIALIZATION_FORMAT_NETTRACE_V4);
}
static RESULT
test_file_write_event_netperf_v3 (void)
{
return test_file_write_event (EP_SERIALIZATION_FORMAT_NETPERF_V3, true, false);
}
static RESULT
test_file_write_event_nettrace_v4 (void)
{
return test_file_write_event (EP_SERIALIZATION_FORMAT_NETTRACE_V4, true, false);
}
static RESULT
test_file_write_sequence_point_netperf_v3 (void)
{
return test_file_write_event (EP_SERIALIZATION_FORMAT_NETPERF_V3, false, true);
}
static RESULT
test_file_write_sequence_point_nettrace_v4 (void)
{
return test_file_write_event (EP_SERIALIZATION_FORMAT_NETTRACE_V4, false, true);
}
static Test ep_file_tests [] = {
{"test_create_file_netperf_v3", test_create_file_netperf_v3},
{"test_create_file_nettrace_v4", test_create_file_nettrace_v4},
{"test_file_write_event_netperf_v3", test_file_write_event_netperf_v3},
{"test_file_write_event_nettrace_v4", test_file_write_event_nettrace_v4},
{"test_file_write_sequence_point_netperf_v3", test_file_write_sequence_point_netperf_v3},
{"test_file_write_sequence_point_nettrace_v4", test_file_write_sequence_point_nettrace_v4},
{NULL, NULL}
};
DEFINE_TEST_GROUP_INIT(ep_file_tests_init, ep_file_tests)
| -1 |
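The eventpipe tests above all use the same goto-based cleanup idiom: ep_raise_error_if_nok jumps to ep_on_error, test_location records how far the test got, and ep_on_exit releases every resource on both the success and failure paths. A minimal sketch of the idiom in plain C; the macro and label names echo the test above, but the implementation here is illustrative only:

#include <stdio.h>
#include <stdlib.h>

#define raise_error_if_nok(expr) do { if (!(expr)) goto on_error; } while (0)

/* Returns NULL on success, a message on failure (mirroring RESULT above). */
static const char *
run_test (void)
{
    const char *result = NULL;
    unsigned test_location = 0;
    char *buffer = NULL;

    buffer = (char *) malloc (64);
    raise_error_if_nok (buffer != NULL);
    test_location = 1;

    /* ... exercise the code under test, bumping test_location as it goes ... */

on_exit:
    free (buffer);
    return result;

on_error:
    printf ("Failed at test location=%u\n", test_location);
    result = "failed";
    goto on_exit;
}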